diff --git "a/1494.jsonl" "b/1494.jsonl" new file mode 100644--- /dev/null +++ "b/1494.jsonl" @@ -0,0 +1,699 @@ +{"seq_id":"21885371","text":"import re\nfrom functools import reduce\n\nfile = \"codes.txt\"\noutput = 'stocks_list.txt'\nwith open(file, 'r') as f:\n s = f.read()\nf = open(output, 'w')\nfor c in s:\n if c == '(':\n f.write(' ')\n elif c == ')':\n f.write('\\n')\n else:\n f.write(c)\nf.close()\n\n\ndef get_stock_list():\n with open(output, 'r') as out:\n content = out.read()\n li = re.findall(r'\\d+', content)\n li.sort()\n return li\n\n\n# print(len(get_stock_list()))\n# l = reduce(lambda x, y: x + y, list(map(lambda x: len(x) == 6 and 1, get_stock_list())))\n# print(l)\n\n# for i in range(0, 2964, 340):\n# print(get_stock_list()[i:i + 300])\n\nprint(get_stock_list())","sub_path":"app/get_stocks.py","file_name":"get_stocks.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"127948343","text":"import tkinter\n\nlen_words = 0\n\ndef read_file():\n global len_words\n with open('read.txt','r+',encoding = 'utf-8') as fp:\n file_text = fp.read()\n len_words = len(file_text)\n number.set(len_words)\n address.set('read.txt')\n text_t.insert('insert',file_text)\n\n#write file function\ndef write_file():\n global len_words\n\n file_txt = write_entry.get()\n len_words = len(file_txt)\n number.set(len_words)\n address.set('write.txt')\n\n with open('write.txt','w+',encoding = 'utf-8') as fp:\n fp.write(file_txt)\n\n#create the main window\nmain_window = tkinter.Tk()\n\n#create frames\ntext_frame = tkinter.Frame(main_window)\ntop_frame = tkinter.Frame(main_window)\nmiddle_frame = tkinter.Frame(main_window)\naddress_frame = tkinter.Frame(main_window)\nbottom_frame = tkinter.Frame(main_window)\n\n#create and write read widgets\ntext_t = tkinter.Text(top_frame,width=45, height=5)\ntext_t.pack()\n\n#create and pack write widgets\nwrite_label = tkinter.Label(top_frame,text = '写入文本:')\nwrite_entry = tkinter.Entry(top_frame,width = 35)\n\nwrite_label.pack(side = 'left')\nwrite_entry.pack(side = 'left')\n\n#create count and pack widgets\nwords_label = tkinter.Label(middle_frame,text = '文本字数:')\n\nnumber = tkinter.StringVar() #To update number\nnumber_label = tkinter.Label(middle_frame,textvariable = number)\n\nwords_label.pack(side = 'left')\nnumber_label.pack(side = 'left')\n\n#create and pack address widgets\naddress_label = tkinter.Label(address_frame,text = '文本地址:')\n\naddress = tkinter.StringVar() #To update address\nadd_label = tkinter.Label(address_frame,textvariable = address)\n\naddress_label.pack(side = 'left')\nadd_label.pack(side = 'left')\n\n#create buttons\nread_button = tkinter.Button(bottom_frame,text = '读文件',width=15, height=2, command = read_file)\nwrite_button = tkinter.Button(bottom_frame,text = '写文件', width=15, height=2, command = write_file)\n\nread_button.pack(side = 'left')\nwrite_button.pack(side = 'left')\n\n#pack the frames\ntext_frame.pack()\ntop_frame.pack()\nmiddle_frame.pack()\naddress_frame.pack()\nbottom_frame.pack()\n\n#start the main loop\ntkinter.mainloop()\n\n","sub_path":"homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"548105852","text":"# -*- coding: utf-8 -*-\n\"\"\"\nA game:\n how to Guess a Number\n\nCreated on Tue Dec 26 09:05:21 2017\n\n@author: ABSBIN\n\n\"\"\"\n \n \nimport sys\nimport numpy as np\n\ndef main():\n 
print(\"Hello World!\")\n print(\"Guess number between 1 to 100\")\n randomNumber=35\n randomNumber=np.random.randint(1,100)\n \n \n found=False\n while not found:\n userGuess= int(input(\"Your guess: \"))\n if userGuess == randomNumber:\n print(\"you got it!\")\n found= True\n elif userGuess >randomNumber:\n print(\"You guess is Higher\")\n else:\n print(\" You guess is Lower\")\n \n\nif __name__==\"__main__\":\n main()\n","sub_path":"davin_reddy/python_0.py","file_name":"python_0.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"391408437","text":"from django.core.urlresolvers import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom .models import RequestsLogger\n\n\ndef index(request):\n template_name = 'rlogger/index.html'\n\n sort_order = request.GET.get('sort', '0')\n reverse_mode = request.GET.get('mode', '0')\n if reverse_mode == '1':\n sort_order = '1' if sort_order == '0' else '0'\n if sort_order == '1':\n events = RequestsLogger.objects.order_by('priority', 'created_on')[:10]\n else:\n events = RequestsLogger.objects.order_by('-priority', 'created_on')[:10]\n context = {'events': events, 'sort_order': sort_order}\n return render(request, template_name, context)\n\n\n@login_required(login_url='/accounts/login/')\ndef event(request, event_id):\n event = get_object_or_404(RequestsLogger, pk=event_id)\n action = request.GET['action']\n sort_order = request.GET['sort']\n if action == 'up':\n event.priority += 1\n elif action == 'down':\n event.priority -= 1\n event.save()\n url = '%s?sort=%s' % (reverse('requests:index'), sort_order)\n return HttpResponseRedirect(url)\n","sub_path":"apps/rlogger/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"577740169","text":"# -*- coding: utf-8 -*-\n\"\"\"Tests/demo of ocfl-object.py client.\"\"\"\nimport json\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport unittest\n\n\nclass TestAll(unittest.TestCase):\n \"\"\"TestAll class to run tests.\"\"\"\n\n tmpdir = None\n n = 0\n m = 0\n demo = False\n keep_tmpdirs = False\n\n def setUp(self):\n \"\"\"Setup for each test.\"\"\"\n type(self).n += 1 # access class variable not copy\n self.m = 0\n self.tmpdir = tempfile.mkdtemp(prefix='test' + str(self.n) + '_')\n if self.demo:\n print(\"\\n## %d. 
%s\" % (self.n, self.shortDescription()))\n\n def tearDown(self):\n \"\"\"Teardown for each test.\"\"\"\n if self.tmpdir is not None and not self.keep_tmpdirs:\n shutil.rmtree(self.tmpdir)\n\n def run_ocfl_store(self, desc, options, text=None, treedir='object',\n include_objdir=True, include_dstdir=False):\n \"\"\"Run the ocfl-store.py script.\"\"\"\n self.m += 1\n if self.demo:\n print(\"\\n### %d.%d %s\\n\" % (self.n, self.m, desc))\n if text:\n print(text + '\\n')\n cmd = ['python', 'ocfl-object.py']\n if include_objdir:\n cmd += ['--objdir', os.path.join(self.tmpdir, treedir)]\n elif include_dstdir:\n cmd += ['--dstdir', self.tmpdir]\n cmd += options\n code = 0\n try:\n out = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('utf-8')\n except subprocess.CalledProcessError as e:\n out = e.output.decode('utf-8')\n code = e.returncode\n out = \"```\\n> \" + ' '.join(cmd) + \"\\n\" + out + \"```\\n\"\n if self.demo:\n out = re.sub(self.tmpdir, 'tmp', out)\n print(out)\n else:\n return out\n if code == 0 and include_objdir:\n tree = subprocess.check_output('cd %s; tree -a %s' % (self.tmpdir, treedir),\n stderr=subprocess.STDOUT,\n shell=True).decode('utf-8')\n print(\"```\\n\" + tree + \"```\\n\")\n elif code == 0 and include_dstdir:\n tree = subprocess.check_output('cd %s; tree -a .' % (self.tmpdir),\n stderr=subprocess.STDOUT,\n shell=True).decode('utf-8')\n print(\"```\\n\" + tree + \"```\\n\")\n else:\n print(\"Exited with code %d\" % (code))\n return out\n\n def test01_create_inventory_dryrun(self):\n \"\"\"Test object inventory creation with output to stdout.\"\"\"\n out = self.run_ocfl_store(\"Inventory for new object with just v1\",\n ['--create', '--id', 'http://example.org/obj1', '--src', 'fixtures/1.0/content/cf1/v1'],\n text=\"Without an `--objdir` argument the script just writes out the inventory for the object that would have been created.\",\n include_objdir=False)\n self.assertIn('\"id\": \"http://example.org/obj1\"', out)\n self.assertIn('### Inventory for v1', out)\n out = self.run_ocfl_store(\"Inventory for new object with three versions\",\n ['--build', '--id', 'http://example.org/obj2', '--src', 'fixtures/1.0/content/cf3'],\n text=\"Without an `--objdir` argument the script just writes out the inventory for each version in the object that would have been created.\",\n include_objdir=False)\n self.assertIn('\"id\": \"http://example.org/obj2\"', out)\n self.assertIn('### Inventory for v1', out)\n self.assertIn('### Inventory for v2', out)\n self.assertIn('### Inventory for v3', out)\n\n def test02_create_v1(self):\n \"\"\"Test object creation with just v1.\"\"\"\n out = self.run_ocfl_store(\"New object with just v1\",\n ['--create', '--id', 'http://example.org/obj1', '--src', 'fixtures/1.0/content/cf1/v1', '-v'])\n self.assertIn('Created object http://example.org/obj1', out)\n\n def test03_create_multi(self):\n \"\"\"Test object build with three versions.\"\"\"\n out = self.run_ocfl_store(\"New object with three versions\",\n ['--build', '--id', 'http://example.org/obj2', '--src', 'fixtures/1.0/content/cf3', '-v'])\n self.assertIn('Built object http://example.org/obj2 with 3 versions', out)\n\n def test04_extract(self):\n \"\"\"Test extract of version.\"\"\"\n out = self.run_ocfl_store(\"Extract v1\",\n ['--extract', 'v1', '--objdir', 'fixtures/1.0/objects/spec-ex-full', '-v'],\n include_objdir=False,\n include_dstdir=True)\n # Excpect:\n # v1\n # ├── [ 0] empty.txt\n # ├── [ 102] foo\n # │   └── [ 272] bar.xml\n # └── [ 2021] image.tiff\n 
self.assertEqual(os.path.getsize(os.path.join(self.tmpdir, 'v1/empty.txt')), 0)\n self.assertFalse(os.path.exists(os.path.join(self.tmpdir, 'v1/empty2.txt')))\n self.assertEqual(os.path.getsize(os.path.join(self.tmpdir, 'v1/foo/bar.xml')), 272)\n self.assertEqual(os.path.getsize(os.path.join(self.tmpdir, 'v1/image.tiff')), 2021)\n out = self.run_ocfl_store(\"Extract v2\",\n ['--extract', 'v2', '--objdir', 'fixtures/1.0/objects/spec-ex-full', '-v'],\n include_objdir=False,\n include_dstdir=True)\n # Expect:\n # v2\n # ├── [ 0] empty.txt\n # ├── [ 0] empty2.txt\n # └── [ 102] foo\n # └── [ 272] bar.xml\n self.assertEqual(os.path.getsize(os.path.join(self.tmpdir, 'v2/empty.txt')), 0)\n self.assertEqual(os.path.getsize(os.path.join(self.tmpdir, 'v2/empty2.txt')), 0)\n self.assertEqual(os.path.getsize(os.path.join(self.tmpdir, 'v2/foo/bar.xml')), 272)\n self.assertFalse(os.path.exists(os.path.join(self.tmpdir, 'v2/image.tiff')))\n\n def test20_errors(self):\n \"\"\"Test error conditions.\"\"\"\n out = self.run_ocfl_store(\"No valid command argument\",\n [],\n include_objdir=False)\n self.assertIn('one of the arguments ', out)\n out = self.run_ocfl_store(\"No identifier\",\n ['--create'],\n include_objdir=False)\n self.assertIn('Must specify --srcdir', out)\n out = self.run_ocfl_store(\"No identifier\",\n ['--create', '--srcdir', 'tmp'],\n include_objdir=False)\n self.assertIn('Identifier is not set!', out)\n\n\nif __name__ == '__main__':\n # Run in demo mode if run directly instead of through py.test\n TestAll.demo = True\n print(\"# Demo output from \" + __file__)\n unittest.main()\n","sub_path":"tests/test_ocfl_object_script.py","file_name":"test_ocfl_object_script.py","file_ext":"py","file_size_in_byte":7136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"579654520","text":"from django.urls import path\n\nfrom account.views import *\n\nurlpatterns = [\n path('logining', logining, name=\"login\"),\n path('logout/', logout_view, name=\"logout\"),\n path('register', register, name=\"register\"),\n path('code//', code, name='code'),\n path('admin-panel/', code, name='admin-panel'),\n path('user-update/', update_user, name='user_update_panel'),\n]\n","sub_path":"account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"415176102","text":"# -*- coding:utf8 -*-\n\n\nclass Node(object):\n def __init__(self, value):\n self.value = value\n self.left_tree = None\n self.right_tree = None\n\n\nclass BinaryTree(object):\n\n def __init__(self):\n self.root = None\n\n def add_node(self, item):\n node = Node(item)\n if self.root is None:\n self.root = node\n return\n queue = [self.root]\n while queue:\n current_node = queue.pop(0)\n if current_node.left_tree is None:\n current_node.left_tree = node\n return\n else:\n queue.append(current_node.left_tree)\n if current_node.right_tree is None:\n current_node.right_tree = node\n return\n else:\n queue.append(current_node.right_tree)\n\n def broad_travel(self):\n if self.root is None:\n return\n queue = [self.root]\n while queue:\n node = queue.pop(0)\n print(node.value, end=\" \")\n if node.left_tree:\n queue.append(node.left_tree)\n if node.right_tree:\n queue.append(node.right_tree)\n\n def pre_travel(self, node):\n if node is None:\n return\n print(node.value, end=\" \")\n self.pre_travel(node.left_tree)\n self.pre_travel(node.right_tree)\n\n def middle_travel(self, node):\n if 
node is None:\n return\n self.middle_travel(node.left_tree)\n print(node.value, end=\" \")\n self.middle_travel(node.right_tree)\n\n def post_travel(self, node):\n if node is None:\n return\n self.post_travel(node.left_tree)\n self.post_travel(node.right_tree)\n print(node.value, end=\" \")\n\n def pre_travel1(self, node):\n if node is None:\n return\n\n import queue\n stack = queue.LifoQueue()\n stack.put(node)\n res = []\n\n while not stack.empty():\n current_node = stack.get()\n res.append(current_node.value)\n\n if current_node.right_tree:\n stack.put(current_node.right_tree)\n if current_node.left_tree:\n stack.put(current_node.left_tree)\n return res\n\n def middle_travel1(self, node):\n if node is None:\n return\n\n import queue\n stack = queue.LifoQueue()\n cur = node\n res = []\n\n while not stack.empty() or cur:\n while cur:\n stack.put(cur)\n # print(cur.value, 111)\n cur = cur.left_tree\n node = stack.get()\n res.append(node.value)\n cur = node.right_tree\n return res\n\n def post_travel1(self, node):\n if node is None:\n return\n\n import queue\n stack = queue.LifoQueue()\n stack.put(node)\n res = []\n\n while not stack.empty():\n cur = stack.get()\n res.append(cur.value)\n if cur.left_tree:\n stack.put(cur.left_tree)\n if cur.right_tree:\n stack.put(cur.right_tree)\n\n return res\n\n\nif __name__ == '__main__':\n tree = BinaryTree()\n tree.add_node(0)\n tree.broad_travel()\n print()\n tree.pre_travel(tree.root)\n print()\n tree.middle_travel(tree.root)\n print()\n tree.post_travel(tree.root)\n","sub_path":"python/algorithm/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"561780294","text":"import hashlib\nimport urllib\n\nfrom math import ceil\nfrom flask import url_for\nfrom flask import request\nfrom pillarsdk import File\n\n\nclass Pagination(object):\n \"\"\"Pagination snippet coming from http://flask.pocoo.org/snippets/44/\n \"\"\"\n\n def __init__(self, page, per_page, total_count):\n self.page = page\n self.per_page = per_page\n self.total_count = total_count\n\n @property\n def pages(self):\n return int(ceil(self.total_count / float(self.per_page)))\n\n @property\n def has_prev(self):\n return self.page > 1\n\n @property\n def has_next(self):\n return self.page < self.pages\n\n def iter_pages(self, left_edge=2, left_current=2,\n right_current=5, right_edge=2):\n last = 0\n for num in xrange(1, self.pages + 1):\n if num <= left_edge or \\\n (num > self.page - left_current - 1 and \\\n num < self.page + right_current) or \\\n num > self.pages - right_edge:\n if last + 1 != num:\n yield None\n yield num\n last = num\n\n\ndef url_for_other_page(page):\n args = request.view_args.copy()\n args['page'] = page\n return url_for(request.endpoint, **args)\n\n\ndef percentage(items, total):\n if total == 0: return 0.0\n return float(items) * 100 / float(total)\n\n\ndef attach_project_pictures(project, api):\n \"\"\"Utility function that queries for file objects referenced in picture\n header and square. 
In eve we currently can't embed objects in nested\n properties, this is the reason why this exists.\n This function should be moved in the API, attached to a new Project object.\n \"\"\"\n if project.properties.picture_square:\n # Collect the picture square file object\n project.properties.picture_square = File.find(\n project.properties.picture_square, api=api)\n if project.properties.picture_header:\n # Collect the picture header file object\n project.properties.picture_header = File.find(\n project.properties.picture_header, api=api)\n\n\ndef gravatar(email, size=64, consider_settings=True):\n parameters = {'s':str(size), 'd':'mm'}\n return \"https://www.gravatar.com/avatar/\" + \\\n hashlib.md5(str(email)).hexdigest() + \\\n \"?\" + urllib.urlencode(parameters)\n\n","sub_path":"pillar-web/application/helpers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"264075532","text":"import connexion\nimport pika\nfrom json import dumps\n\nfrom prontogram.models.message import Message # noqa: E501\nfrom datetime import datetime, timezone\n\n\ndef send_message(message=None): # noqa: E501\n \"\"\"sendMessage\n\n Sends the message to ProntoGram for being dispatched to the actual user. API for: ACMESky # noqa: E501\n\n :param message: \n :type message: dict | bytes\n\n :rtype: None\n \"\"\"\n if connexion.request.is_json:\n message = Message.from_dict(connexion.request.get_json()) # noqa: E501\n connection = pika.BlockingConnection(pika.ConnectionParameters('prontogram_mq'))\n channel = connection.channel()\n\n \"\"\"\n Creates a queue with name message.receiver if it does not exist, otherwise does nothing.\n Flag durable set to True requires the queue to be persistent on restart.\n \"\"\"\n channel.queue_declare(queue=message.receiver, durable=True)\n\n # Updating the date format to ISO 8601.\n message.send_time = datetime.now(tz=timezone.utc).isoformat()\n\n # Publishing the message on the queue.\n channel.basic_publish(exchange='',\n routing_key=message.receiver,\n body=bytes(dumps(message.to_dict(), default=str), 'utf-8'),\n properties=pika.BasicProperties(delivery_mode=2)\n )\n\n connection.close()\n\n return \"\", 200\n","sub_path":"prontogram/controllers/default_controller.py","file_name":"default_controller.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"462033280","text":"import random\nimport numpy as np\nfrom numpy.lib.stride_tricks import as_strided\nimport matplotlib.pyplot as plt\nimport cv2\nimport trade_environment\nfrom trade_environment import TradeEnvironment\n\nenv = TradeEnvironment('test.dat', 240, (1, 100, 110))\nenv.reset(False)\n\nvalues = env.episode_values\nmaxs = values.max(axis=1)\nmins = values.min(axis=1)\nptps = maxs - mins\n\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\n\n\ndef make_kernel(ksize):\n\treturn np.ones(ksize) / ksize, ksize, ksize // 2\n\n\ndef convolve(values, kernel):\n\treturn np.convolve(values, kernel[0], mode='valid'), kernel[2]\n\n\ndef detect_feature_indices(values):\n\treturn np.nonzero(np.diff(np.sign(np.diff(values))))[0] + 2\n\n\ndef remove_small_gap(values, gap_size):\n\treturn np.nonzero(gap_size <= np.abs(np.diff(values)))[0] + 1\n\n\ndef signs_calc(values, kernel):\n\tkernel_size_half = kernel.shape[0] // 2\n\tma = np.convolve(values, kernel, mode='valid')\n\tindices1 = 
detect_feature_indices(ma)\n\tvalues1 = ma[indices1]\n\treturn ma, indices1 + kernel_size_half, values1, kernel_size_half\n\t# indices2 = detect_feature_indices(values1)\n\t# indices = indices1[indices2]\n\t# values = ma[indices]\n\t# return ma, indices + kernel_size_half, values, kernel_size_half\n\n\ndef signs_x_indices(signs):\n\treturn np.arange(signs[0].shape[0]) + signs[3]\n\n\ndef detect_turning_points(values, gap):\n\t\"\"\"指定数列の折返しポイントの地点を検出する.\n\n\tArgs:\n\t\tvalues: 数列.\n\t\tgap: 折返し判定閾値、この値を超えて反転したら折返しと判断する.\n\n\tReturns:\n\t\t(折返しインデックス, 検出途中に生成した一定値以上距離を保って付いてくる値の数列) のタプル.\n\t\"\"\"\n\tindices = []\n\tstalkers = np.empty((len(values),), dtype=np.int32)\n\tlast_value = values[0]\n\tstalker = last_value\n\tstalkers[0] = stalker\n\tfor i in range(1, len(values)):\n\t\tv = values[i]\n\t\td = v - stalker\n\t\tif last_value < stalker and stalker <= v or stalker < last_value and v <= stalker:\n\t\t\tindices.append(i)\n\t\tif d < -gap:\n\t\t\tstalker = v + gap\n\t\telif gap < d:\n\t\t\tstalker = v - gap\n\t\tstalkers[i] = stalker\n\t\tlast_value = v\n\treturn np.array(indices, dtype=np.int32) - 1, stalkers\n\n\ndef detect_order_indices(values, max_los, search_length):\n\tindices = []\n\n\tindex = 0\n\tindex_end = len(values)\n\tstart_value = values[index]\n\tindices.append(index)\n\n\twhile True:\n\t\tindex += 1\n\t\tif index_end <= index:\n\t\t\tbreak\n\t\tnext_values = values[index:min(index + search_length, index_end)]\n\t\tdeltas = next_values - start_value\n\t\tdeltas_abs = np.abs(deltas)\n\n\t\tend_index = search_length\n\t\tover_los_indices = np.nonzero(max_los <= deltas_abs)[0]\n\t\tif len(over_los_indices):\n\t\t\tover_los_signs_dif = np.diff(np.sign(deltas[over_los_indices]))\n\t\t\tif len(over_los_signs_dif):\n\t\t\t\tturn_indices = np.nonzero(over_los_signs_dif)[0]\n\t\t\t\tif len(turn_indices):\n\t\t\t\t\tend_index = turn_indices[0].item() + 1\n\n\t\tindex += np.argsort(deltas_abs[:end_index])[-1].item()\n\t\tstart_value = values[index]\n\t\tindices.append(index)\n\n\treturn indices\n\n\nkernel = make_kernel(10)\n\no = values[:, 0]\nh = values[:, 1]\nl = values[:, 2]\nc = values[:, 3]\nx = np.arange(c.shape[0])\n\nc_ma = convolve(c, kernel)\nx_ma = np.arange(c_ma[0].shape[0]) + c_ma[1]\n\nax.plot(x, c, label='close')\nax.plot(x_ma, c_ma[0], label='close ma')\n\n# cgap_indices = remove_small_gap(c, 10)\n# cgap_values = c[cgap_indices]\n# ax.plot(x[cgap_indices], cgap_values)\n\nod_indices = detect_order_indices(c, 10, 30)\nod_values = c[od_indices]\nax.plot(x[od_indices], od_values, label='order points', marker='o')\n\ntp_indices, stalkers = detect_turning_points(c, 5)\ntp_values = c[tp_indices]\n# ax.plot(x, stalkers, label='stalker')\nax.plot(x[tp_indices], tp_values, label='turning point', marker='o')\n\n# ksize = 30\n# ksize_half = ksize // 2\n# kernel = np.ones(ksize) / ksize\n# ma = np.convolve(c, kernel, mode='valid')\n# ma_x = np.arange(ma.shape[0]) + ksize_half\n# ax.plot(ma_x, ma)\n\n# feature_indices = detect_feature_indices(ma)\n# feature_x = ma_x[feature_indices]\n# feature_values = ma[feature_indices]\n# ax.plot(feature_x, feature_values)\n\n# gapremoved_indices = remove_small_gap(feature_values, 10)\n# gapremoved_x = feature_x[gapremoved_indices]\n# gapremoved_values = feature_values[gapremoved_indices]\n# ax.plot(gapremoved_x, gapremoved_values)\n\n# ksizes = [30]\n# ma_high = [signs_calc(h, np.ones(ksize) / ksize) for ksize in ksizes]\n# ma_low = [signs_calc(l, np.ones(ksize) / ksize) for ksize in ksizes]\n# ma_closes = [signs_calc(c, np.ones(ksize) / 
ksize) for ksize in ksizes]\n\n# # ax.plot(h)\n# xbase = np.arange(c.shape[0])\n# for sign in ma_high:\n# \tx = signs_x_indices(sign)\n# \tax.plot(x, sign[0])\n\n# \tidx = np.nonzero(5 < np.abs(np.diff(sign[2])))\n# \tax.plot(x, sign[0])\n\n# \tax.plot(xbase[sign[1]], sign[2])\n\n# x = np.arange(c.shape[0])\n# ma_x = signs_x_indices(ma_high)\n# ax.plot(x[ma_high[1]], ma_high[2])\n# ax.plot(ma_x, ma_high[0])\n# ax.plot(ma_x, ma_low[0])\n\n# maxwins = trade_environment.running_max_min_view(maxs + ptps, 10, 1).max(axis=1)\n# minwins = trade_environment.running_max_min_view(mins - ptps, 10, 1).min(axis=1)\n# # maxwins = trade_environment.running_max_min_view(maxwins, 10, 1).min(axis=1)\n# # minwins = trade_environment.running_max_min_view(minwins, 10, 1).max(axis=1)\n# # x = np.arange(15, 15 + maxwins.shape[0])\n# x = np.arange(maxwins.shape[0])\n# ax.plot(x, maxwins)\n# ax.plot(x, minwins)\n\n# n = maxwins.shape[0]\n# maxs = maxs[:n]\n# mins = mins[:n]\n# c = values[:, 3][:n]\n# down_indices = np.nonzero((c < minwins).astype('i4'))[0]\n# x = np.arange(c.shape[0])\n# # ax.plot(x, maxs)\n# # ax.plot(x, mins)\n# ax.plot(x, c)\n\n# x = x[down_indices]\n# c = c[down_indices]\n# ax.plot(x, c)\n\nplt.legend()\nplt.show()\n","sub_path":"cpf/python/training/auto_trade/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":5622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"300692028","text":"#coding=utf-8\n\nimport pytest\nimport allure\nfrom api.mission import missionApi\nfrom Common import Log\nimport datetime\n\nclass TestMission:\n \"\"\"\n 主题议程模块测试\n \"\"\"\n\n log = Log.MyLog()\n expected_msg = \"success\"\n\n\n @allure.severity('normal')\n @allure.story('Collections')\n @pytest.mark.parametrize(\"payload\",\n [\n {\"name\": \"自由讨论\",\n \"desc\": \"sdfgdsfgfdsg\",\n \"ext\": \"{\\\"tagId\\\":\\\"626549294551269829\\\",\\\"meetingType\\\":1,\\\"timeUse\\\":\\\"5分钟\\\"}\",\n \"tagId\": \"626549294551269829\"},\n {\"name\": \"主题演讲\",\n \"desc\": \"\\n \\n \\n \\n \\n Document\\n \\n \\n \\n
461616
\\n \\n \",\n \"ext\": \"{\\\"tagId\\\":\\\"626544899323331013\\\",\\\"meetingType\\\":0,\\\"speaker\\\":\\\"15166841990 Qiucw\\\",\\\"speakerId\\\":\\\"636786270315479359\\\",\\\"timeUse\\\":\\\"9分钟\\\",\\\"teachStyle\\\":\\\"直播演讲\\\",\\\"audio\\\":{\\\"result\\\":\\\"\\\",\\\"name\\\":\\\"\\\"},\\\"video\\\":{\\\"result\\\":\\\"\\\",\\\"name\\\":\\\"\\\"},\\\"content\\\":\\\"\\\\n \\\\n \\\\n \\\\n \\\\n Document\\\\n \\\\n \\\\n \\\\n
461616
\\\\n \\\\n \\\"}\",\n \"tagId\": \"626544899323331013\"}\n ])\n def test_createAgenda(self, payload):\n '''\n 添加议程\n 版本v1.0.0\n :param payload:\n :return:\n '''\n self.log.info(\"开始测试添加议程\")\n a = missionApi.createMeeting(payload)\n assert a[\"msg\"] == self.expected_msg\n\n\n @pytest.mark.parametrize(\"payload\", [{\n \"desc\": \"学习会议的文稿\",\n \"name\": \"新建的学习会\",\n \"content\":\"学习会议的文稿\"\n }])\n def test_createMeeting(self,payload):\n '''\n 用户创建学习会\n 版本v1.0.0\n :param payload: 参数\n :return:\n '''\n self.log.info(\"开始测试创建学习会\")\n a = missionApi.createMeeting(payload)\n assert a[\"msg\"] == self.expected_msg\n\n\n @pytest.mark.parametrize(\"payload\", [{\"agendaIds\":[\"666910263752000147\"],\n \"content\":\"反反复复个广告发个广告广告方法个 v 尺寸\",\n \"desc\": \"反反复复个广告发个广告广告方法个 v 尺寸\",\n \"ext\": \"{\\\"video\\\":{\\\"result\\\":\\\"\\\",\\\"name\\\":\\\"\\\"},\\\"audio\\\":{\\\"result\\\":\\\"\\\",\\\"name\\\":\\\"\\\"},\\\"content\\\":{\\\"contentHtml\\\":\\\"\\\",\\\"contentText\\\":\\\"\\\"}}\",\n \"name\": \"测试\",\n \"pid\":\"666892345685312147\",\n \"startTime\":datetime.datetime.strftime(datetime.datetime.now()+datetime.timedelta(minutes=5),'%Y-%m-%d %H:%M:%S')}])\n def test_createTheme(self,payload):\n '''\n 添加主题\n 版本v1.0.0\n :param payload:\n :return:\n '''\n self.log.info(\"开始测试添加主题\")\n a = missionApi.createTheme(payload)\n assert a[\"msg\"] == self.expected_msg\n\n\n @pytest.mark.parametrize(\"payload\", [{}])\n def test_delAgendaById(self, payload):\n '''\n 删除议程\n 版本v1.0.0\n :param payload: 参数\n :return:\n '''\n self.log.info(\"删除议程\")\n a = missionApi.delAgendaById(payload)\n assert a[\"msg\"] == self.expected_msg\n\n\n @pytest.mark.parametrize(\"payload\", [{}])\n def test_delMeetingById(self, payload):\n '''\n 删除我的学习会\n 版本v1.1.0\n :param payload: 参数\n :return:\n '''\n self.log.info(\"删除我的学习会\")\n a = missionApi.delMeetingById(payload)\n assert a[\"msg\"] == self.expected_msg\n\n\n @pytest.mark.parametrize(\"payload\", [{}])\n def test_delThemeById(self, payload):\n '''\n 删除主题会\n 版本v1.1.0\n :param payload: 参数\n :return:\n '''\n self.log.info(\"删除主题会\")\n a = missionApi.delThemeById(payload)\n assert a[\"msg\"] == self.expected_msg\n\n\n @pytest.mark.parametrize(\"payload\", [{\"desc\": \"再次编辑\",\n \"name\": \"测试编辑学习会\",\n \"content\":\"再次编辑\",\n \"id\":\"667115786527048181\"}])\n def test_editMeeting(self,payload):\n '''\n 编辑我的学习会\n 版本v1.1.0\n :param payload: 参数\n :return:\n '''\n self.log.info(\"编辑我的学习会\")\n a = missionApi.editMeeting(payload)\n assert a[\"msg\"] == self.expected_msg\n\n\n @pytest.mark.parametrize(\"payload\", [{\"agendaIds\":[\"667150077277504157\"],\n \"content\":\"再次修改主题会议内容\",\n \"desc\": \"再次修改主题会议内容\",\n \"ext\": \"{\\\"video\\\":{\\\"result\\\":\\\"\\\",\\\"name\\\":\\\"\\\"},\\\"audio\\\":{\\\"result\\\":\\\"\\\",\\\"name\\\":\\\"\\\"},\\\"content\\\":{\\\"contentHtml\\\":\\\"\\\",\\\"contentText\\\":\\\"\\\"}}\",\n \"id\": \"667150233641157277\",\n \"name\": \"测试编辑主题会\",\n \"pid\":\"666387367187186323\",\n \"startTime\":\"2020-04-25 23:00:00\"}])\n def test_editTheme(self,payload):\n '''\n 编辑主题内容\n 版本v1.1.0\n :param payload:\n :return:\n '''\n self.log.info(\"开始编辑主题内容\")\n a = missionApi.editTheme(payload)\n assert a[\"msg\"] == self.expected_msg\n\n\n @pytest.mark.parametrize(\"payload\", [{\n \"idx\": 0,\n \"size\": 20\n }])\n def test_getMeetingDetail(self, payload):\n '''\n 学习会详���\n 版本v1.1.0\n :param payload: 参数\n :return:\n '''\n self.log.info(\"学习会详情\")\n a = missionApi.getMeetingDetail(payload)\n assert a[\"msg\"] == self.expected_msg\n\n\n 
@pytest.mark.parametrize(\"payload\", [{}])\n def test_getMeetingList(self, payload):\n '''\n 用户查询所有学习会列表\n 版本v1.1.0\n :param payload: 参数\n :return:\n '''\n self.log.info(\"用户查询所有学习会列表\")\n a = missionApi.getMeetingList(payload)\n assert a[\"msg\"] == self.expected_msg\n\n\n @pytest.mark.parametrize(\"payload\", [{\n \"idx\": 0,\n \"size\": 20\n }])\n def test_getMyMeetingList(self, payload):\n '''\n 用户查看我的会议列表\n 版本v1.1.0\n :param payload: 参数\n :return:\n '''\n self.log.info(\"用户查看我的会议列表\")\n a = missionApi.getMyMeetingList(payload)\n assert a[\"msg\"] == self.expected_msg\n\n\n @pytest.mark.parametrize(\"payload\", [{\n \"idx\": 0,\n \"size\": 20,\n \"userId\": \"636804209588568383\"\n }])\n def test_missionThemeList(self, payload):\n '''\n 任务主题列表查询(首页列表)\n 版本v1.0.0\n :param payload: 参数\n :return:\n '''\n self.log.info(\"任务主题列表查询(首页列表)\")\n a = missionApi.missionThemeList(payload)\n assert a[\"msg\"] == self.expected_msg\n\n\n @pytest.mark.parametrize(\"payload\", [{}])\n def test_queryThemeInfo(self, payload):\n '''\n 主题详情查询\n 版本v1.0.0\n :param payload: 参数\n :return:\n '''\n self.log.info(\"主题详情查询\")\n a = missionApi.queryThemeInfo(payload)\n assert a[\"msg\"] == self.expected_msg\n\n\n @pytest.mark.parametrize(\"payload\", [{\n \"ext\": \"{\\\"tagId\\\":\\\"626549294551269829\\\",\\\"meetingType\\\":1,\\\"timeUse\\\":\\\"4分钟\\\"}\",\n \"id\": \"667150077277504157\",\n \"name\": \"自由讨论\",\n \"tagId\": \"626549294551269829\"\n }])\n def test_updateAgenda(self, payload):\n '''\n 修改议程\n 版本v1.0.0\n :param payload: 参数\n :return:\n '''\n self.log.info(\"修改议程\")\n a = missionApi.updateAgenda(payload)\n assert a[\"msg\"] == self.expected_msg\n\n\n @pytest.mark.parametrize(\"payload\", [{\n \"missionId\": \"666387367187186323\",\n \"userId\": \"636804209588568383\"\n }])\n def test_userAddMission(self, payload):\n '''\n 用户加入学习会\n 版本v1.0.0\n :param payload: 参数\n :return:\n '''\n self.log.info(\"用户加入学习会\")\n a = missionApi.userAddMission(payload)\n assert a[\"msg\"] == self.expected_msg\n\n\n @pytest.mark.parametrize(\"payload\", [{\n \"missionId\": \"666387367187186323\",\n \"userId\": \"636804209588568383\"\n }])\n def test_userExitMission(self, payload):\n '''\n 用户退出学习会\n 版本v1.1.0\n :param payload: 参数\n :return:\n '''\n self.log.info(\"用户退出学习会\")\n a = missionApi.userExitMission(payload)\n assert a[\"msg\"] == self.expected_msg","sub_path":"TestCase/test_mission.py","file_name":"test_mission.py","file_ext":"py","file_size_in_byte":9924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"581251147","text":"\"\"\"\nCopyright (c) 2018-2020 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport enum\nimport math\nimport re\nimport warnings\nfrom collections import OrderedDict\nfrom copy import copy\nfrom functools import partial\nfrom pathlib import Path\n\nfrom ..utils import get_path, cast_to_bool\n\n\nclass ConfigError(ValueError):\n pass\n\n\nclass BaseValidator:\n def __init__(self, on_error=None, additional_validator=None):\n 
self.on_error = on_error\n self.additional_validator = additional_validator\n\n self.field_uri = None\n\n def validate(self, entry, field_uri=None):\n field_uri = field_uri or self.field_uri\n if self.additional_validator and not self.additional_validator(entry, field_uri):\n self.raise_error(entry, field_uri)\n\n def raise_error(self, value, field_uri, reason=None):\n if self.on_error:\n self.on_error(value, field_uri, reason)\n\n error_message = 'Invalid value \"{value}\" for {field_uri}'.format(value=value, field_uri=field_uri)\n if reason:\n error_message = '{error_message}: {reason}'.format(error_message=error_message, reason=reason)\n\n raise ConfigError(error_message.format(value, field_uri))\n\n\nclass _ExtraArgumentBehaviour(enum.Enum):\n WARN = 'warn'\n IGNORE = 'ignore'\n ERROR = 'error'\n\n\ndef _is_dict_like(entry):\n return hasattr(entry, '__iter__') and hasattr(entry, '__getitem__')\n\n\nclass ConfigValidator(BaseValidator):\n WARN_ON_EXTRA_ARGUMENT = _ExtraArgumentBehaviour.WARN\n ERROR_ON_EXTRA_ARGUMENT = _ExtraArgumentBehaviour.ERROR\n IGNORE_ON_EXTRA_ARGUMENT = _ExtraArgumentBehaviour.IGNORE\n acceptable_unknown_options = ['connector']\n\n def __init__(self, config_uri, on_extra_argument=WARN_ON_EXTRA_ARGUMENT, fields=None, **kwargs):\n super().__init__(**kwargs)\n self.on_extra_argument = on_extra_argument\n\n self.fields = OrderedDict()\n self.field_uri = config_uri\n\n if fields:\n for name in fields.keys():\n self.fields[name] = fields[name]\n if fields[name].field_uri is None:\n fields[name].field_uri = \"{}.{}\".format(config_uri, name)\n else:\n for name in dir(self):\n value = getattr(self, name)\n if not isinstance(value, BaseField):\n continue\n\n field_copy = copy(value)\n field_copy.field_uri = \"{}.{}\".format(config_uri, name)\n self.fields[name] = field_copy\n\n def validate(self, entry, field_uri=None):\n super().validate(entry, field_uri)\n field_uri = field_uri or self.field_uri\n if not _is_dict_like(entry):\n raise ConfigError(\"{} is expected to be dict-like\".format(field_uri))\n\n extra_arguments = []\n for key in entry:\n if key not in self.fields and key not in self.acceptable_unknown_options:\n extra_arguments.append(key)\n continue\n\n if key in self.acceptable_unknown_options:\n continue\n\n self.fields[key].validate(entry[key])\n\n required_fields = set(name for name, value in self.fields.items() if value.required())\n missing_arguments = required_fields.difference(entry)\n\n if missing_arguments:\n arguments = ', '.join(map(str, missing_arguments))\n self.raise_error(\n entry, field_uri, \"Invalid config for {}: missing required fields: {}\".format(field_uri, arguments)\n )\n\n if extra_arguments:\n unknown_options_error = \"specifies unknown options: {}\".format(extra_arguments)\n message = \"{} {}\".format(field_uri, unknown_options_error)\n\n if self.on_extra_argument == _ExtraArgumentBehaviour.WARN:\n warnings.warn(message)\n if self.on_extra_argument == _ExtraArgumentBehaviour.ERROR:\n self.raise_error(entry, field_uri, message)\n\n @property\n def known_fields(self):\n return set(self.fields)\n\n def raise_error(self, value, field_uri, reason=None):\n if self.on_error:\n self.on_error(value, field_uri, reason)\n else:\n raise ConfigError(reason)\n\n\nclass BaseField(BaseValidator):\n def __init__(self, optional=False, description=None, default=None, **kwargs):\n super().__init__(**kwargs)\n self.optional = optional\n self.description = description\n self.default = default\n\n def validate(self, entry, field_uri=None):\n 
super().validate(entry, field_uri)\n field_uri = field_uri or self.field_uri\n if self.required() and entry is None:\n raise ConfigError(\"{} is not allowed to be None\".format(field_uri))\n\n @property\n def type(self):\n return None\n\n def required(self):\n return not self.optional and self.default is None\n\n def parameters(self):\n parameters_dict = {}\n for key, _ in self.__dict__.items():\n if not key.startswith('_') and hasattr(self, key) and not hasattr(BaseValidator(), key):\n if isinstance(self.__dict__[key], BaseField):\n parameters_dict[key] = self.__dict__[key].parameters()\n else:\n parameters_dict[key] = self.__dict__[key]\n parameters_dict['type'] = type((self.type or str)()).__name__\n\n return parameters_dict\n\n\nclass StringField(BaseField):\n def __init__(self, choices=None, regex=None, case_sensitive=False, allow_own_choice=False, **kwargs):\n super().__init__(**kwargs)\n self.choices = choices if case_sensitive or not choices else list(map(str.lower, choices))\n self.allow_own_choice = allow_own_choice\n self.case_sensitive = case_sensitive\n self.set_regex(regex)\n\n def set_regex(self, regex):\n if regex is None:\n self._regex = regex\n self._regex = re.compile(regex, flags=re.IGNORECASE if not self.case_sensitive else 0) if regex else None\n\n def validate(self, entry, field_uri=None):\n super().validate(entry, field_uri)\n if entry is None:\n return\n\n field_uri = field_uri or self.field_uri\n source_entry = entry\n\n if not isinstance(entry, str):\n raise ConfigError(\"{} is expected to be str\".format(source_entry))\n\n if not self.case_sensitive:\n entry = entry.lower()\n\n if self.choices and entry not in self.choices and not self.allow_own_choice:\n reason = \"unsupported option, expected one of: {}\".format(', '.join(map(str, self.choices)))\n self.raise_error(source_entry, field_uri, reason)\n\n if self._regex and not self._regex.match(entry):\n self.raise_error(source_entry, field_uri, reason=None)\n\n @property\n def type(self):\n return str\n\n\nclass DictField(BaseField):\n def __init__(self, key_type=None, value_type=None, validate_keys=True, validate_values=True, allow_empty=True,\n **kwargs):\n super().__init__(**kwargs)\n self.validate_keys = validate_keys if key_type else False\n self.validate_values = validate_values if value_type else False\n self.key_type = _get_field_type(key_type)\n self.value_type = _get_field_type(value_type)\n\n self.allow_empty = allow_empty\n\n def validate(self, entry, field_uri=None):\n super().validate(entry, field_uri)\n if entry is None:\n return\n\n field_uri = field_uri or self.field_uri\n if not isinstance(entry, dict):\n raise ConfigError(\"{} is expected to be dict\".format(field_uri))\n\n if not entry and not self.allow_empty:\n self.raise_error(entry, field_uri, \"value is empty\")\n\n for k, v in entry.items():\n if self.validate_keys:\n uri = \"{}.keys.{}\".format(field_uri, k)\n self.key_type.validate(k, uri)\n\n if self.validate_values:\n uri = \"{}.{}\".format(field_uri, k)\n\n self.value_type.validate(v, uri)\n\n @property\n def type(self):\n return dict\n\n\nclass ListField(BaseField):\n def __init__(self, value_type=None, validate_values=True, allow_empty=True, **kwargs):\n super().__init__(**kwargs)\n self.validate_values = validate_values if value_type else False\n self.value_type = _get_field_type(value_type)\n self.allow_empty = allow_empty\n\n def validate(self, entry, field_uri=None):\n super().validate(entry, field_uri)\n if entry is None:\n return\n\n if not isinstance(entry, list):\n raise 
ConfigError(\"{} is expected to be list\".format(field_uri))\n\n if not entry and not self.allow_empty:\n self.raise_error(entry, field_uri, \"value is empty\")\n\n if self.validate_values:\n for i, val in enumerate(entry):\n self.value_type.validate(val, \"{}[{}]\".format(val, i))\n\n @property\n def type(self):\n return list\n\n\nclass InputField(BaseField):\n INPUTS_TYPES = ('CONST_INPUT', 'INPUT', 'IMAGE_INFO', 'ORIG_IMAGE_INFO', 'LSTM_INPUT', 'IGNORE_INPUT')\n LAYOUT_TYPES = ('NCHW', 'NHWC', 'NCWH', 'NWHC')\n PRECISIONS = ('FP32', 'FP16', 'U8', 'U16', 'I8', 'I16', 'I32', 'I64')\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.name = StringField(description=\"Input name.\")\n self.input_type = StringField(choices=InputField.INPUTS_TYPES, description=\"Type name.\")\n self.value = BaseField(description=\"Input value.\")\n self.layout = StringField(optional=True, choices=InputField.LAYOUT_TYPES,\n description=\"Layout: \" + ', '.join(InputField.LAYOUT_TYPES))\n self.shape = BaseField(optional=True, description=\"Input shape.\")\n self.precision = StringField(optional=True, description='Input precision', choices=InputField.PRECISIONS)\n\n def validate(self, entry, field_uri=None):\n entry['optional'] = entry['type'] not in ['CONST_INPUT', 'LSTM_INPUT']\n super().validate(entry, field_uri)\n\n\nclass ListInputsField(ListField):\n def __init__(self, **kwargs):\n super().__init__(allow_empty=False, value_type=InputField(description=\"Input type.\"), **kwargs)\n\n def validate(self, entry, field_uri=None):\n super().validate(entry, field_uri)\n names_set = set()\n for input_layer in entry:\n input_name = input_layer['name']\n if input_name not in names_set:\n names_set.add(input_name)\n else:\n self.raise_error(entry, field_uri, '{} repeated name'.format(input_name))\n\n\nclass NumberField(BaseField):\n def __init__(self, value_type=float, min_value=None, max_value=None, allow_inf=False, allow_nan=False, **kwargs):\n super().__init__(**kwargs)\n self._value_type = value_type\n self.min = min_value\n self.max = max_value\n self._allow_inf = allow_inf\n self._allow_nan = allow_nan\n\n def validate(self, entry, field_uri=None):\n super().validate(entry, field_uri)\n if entry is None:\n return\n\n field_uri = field_uri or self.field_uri\n if self.type != float and isinstance(entry, float):\n raise ConfigError(\"{} is expected to be int\".format(field_uri))\n if not isinstance(entry, int) and not isinstance(entry, float):\n raise ConfigError(\"{} is expected to be number\".format(field_uri))\n\n if self.min is not None and entry < self.min:\n reason = \"value is less than minimal allowed - {}\".format(self.min)\n self.raise_error(entry, field_uri, reason)\n if self.max is not None and entry > self.max:\n reason = \"value is greater than maximal allowed - {}\".format(self.max)\n self.raise_error(entry, field_uri, reason)\n\n if math.isinf(entry) and not self._allow_inf:\n self.raise_error(entry, field_uri, \"value is infinity\")\n if math.isnan(entry) and not self._allow_nan:\n self.raise_error(entry, field_uri, \"value is NaN\")\n\n @property\n def type(self):\n return self._value_type\n\n\nclass PathField(BaseField):\n def __init__(self, is_directory=False, check_exists=True, file_or_directory=False, **kwargs):\n super().__init__(**kwargs)\n self.is_directory = is_directory\n self.check_exists = check_exists\n self.file_or_directory = file_or_directory\n\n def validate(self, entry, field_uri=None):\n super().validate(entry, field_uri)\n if entry is None:\n return\n\n 
field_uri = field_uri or self.field_uri\n try:\n get_path(entry, self.is_directory, self.check_exists, self.file_or_directory)\n except TypeError:\n self.raise_error(entry, field_uri, \"values is expected to be path-like\")\n except FileNotFoundError:\n self.raise_error(entry, field_uri, \"path does not exist\")\n except NotADirectoryError:\n self.raise_error(entry, field_uri, \"path is not a directory\")\n except IsADirectoryError:\n self.raise_error(entry, field_uri, \"path is a directory, regular file expected\")\n\n @property\n def type(self):\n return Path\n\n\nclass BoolField(BaseField):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def validate(self, entry, field_uri=None):\n super().validate(entry, field_uri)\n if entry is None:\n return\n\n field_uri = field_uri or self.field_uri\n if not isinstance(entry, bool):\n raise ConfigError(\"{} is expected to be bool\".format(field_uri))\n\n @property\n def type(self):\n return cast_to_bool\n\n def parameters(self):\n parameters_dict = {}\n for key, _ in self.__dict__.items():\n if not key.startswith('_') and hasattr(self, key) and not hasattr(BaseValidator(), key):\n if isinstance(self.__dict__[key], BaseField):\n parameters_dict[key] = self.__dict__[key].parameters()\n else:\n parameters_dict[key] = self.__dict__[key]\n parameters_dict['type'] = type(bool()).__name__\n return parameters_dict\n\n\ndef _get_field_type(key_type):\n if not isinstance(key_type, BaseField):\n type_ = _TYPE_TO_FIELD_CLASS.get(key_type)\n if callable(type_):\n return type_()\n\n return key_type\n\n\n_TYPE_TO_FIELD_CLASS = {\n int: partial(NumberField, value_type=int),\n float: partial(NumberField, value_type=float),\n dict: partial(DictField, validate_keys=False, validate_values=False),\n list: partial(ListField, validate_values=False),\n Path: PathField,\n str: StringField,\n bool: BoolField,\n}\n","sub_path":"tools/accuracy_checker/accuracy_checker/config/config_validator.py","file_name":"config_validator.py","file_ext":"py","file_size_in_byte":15265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"522120807","text":"class Queue:\n\tdef __init__(self):\n\t\tself.queue=[]\n\n\tdef isEmpty(self):\n\t\tif not self.queue:\n\t\t\t#print(\"Queue is empty\")\n\t\t\treturn True\n\t\telse:\n\t\t\tprint(\"Queue is not empty\")\n\t\t\t#return False\n\n\tdef push(self,data):\n\t\tif type(data) is list:\n\t\t\tfor i in range(0,len(data)):\n\t\t\t\tself.queue.append(data[i])\n\t\telse:\n\t\t\tself.queue.append(data)\n\n\tdef pop(self):\n\t\tif not self.queue:\n\t\t\tprint(\"cannot pop out of queue as queue is empty\")\n\t\t\treturn False\n\t\telse:\n\t\t\t return self.queue.pop(0)\n\n\tdef viewQueue(self):\n\t\ttemp=[]\n\t\ttemp=list(self.queue)\n\t\tif not temp:\n\t\t\tprint(\"Queue is empty\")\n\t\twhile temp:\n\t\t\tprint (temp[0])\n\t\t\ttemp.pop(0)\n\t\n\tdef top(self):\n\t\tif not self.queue:\n\t\t\treturn False \n\t\telse:\n\t\t\treturn self.queue[0]","sub_path":"easy_ds/Queue.py","file_name":"Queue.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"184690581","text":"from functools import partial\n\nimport pytest\n\nfrom ethereum import rlp\nfrom ethereum.frontier.fork import (\n calculate_intrinsic_cost,\n validate_transaction,\n)\nfrom ethereum.frontier.fork_types import Transaction\nfrom ethereum.utils.hexadecimal import hex_to_uint\nfrom tests.helpers import TEST_FIXTURES\n\nfrom 
..helpers.fork_types_helpers import load_test_transaction\n\nETHEREUM_TESTS_PATH = TEST_FIXTURES[\"ethereum_tests\"][\"fixture_path\"]\n\ntest_dir = f\"{ETHEREUM_TESTS_PATH}/TransactionTests\"\n\nload_frontier_transaction = partial(load_test_transaction, network=\"Frontier\")\n\n\n@pytest.mark.parametrize(\n \"test_file_high_nonce\",\n [\n \"ttNonce/TransactionWithHighNonce64Minus1.json\",\n \"ttNonce/TransactionWithHighNonce64.json\",\n \"ttNonce/TransactionWithHighNonce64Plus1.json\",\n ],\n)\ndef test_high_nonce(test_file_high_nonce: str) -> None:\n test = load_frontier_transaction(test_dir, test_file_high_nonce)\n\n tx = rlp.decode_to(Transaction, test[\"tx_rlp\"])\n\n assert validate_transaction(tx) == False\n\n\n@pytest.mark.parametrize(\n \"test_file_nonce\",\n [\n \"ttNonce/TransactionWithHighNonce32.json\",\n \"ttNonce/TransactionWithHighNonce64Minus2.json\",\n ],\n)\ndef test_nonce(test_file_nonce: str) -> None:\n test = load_frontier_transaction(test_dir, test_file_nonce)\n\n tx = rlp.decode_to(Transaction, test[\"tx_rlp\"])\n\n result_intrinsic_gas_cost = hex_to_uint(\n test[\"test_result\"][\"intrinsicGas\"]\n )\n\n assert validate_transaction(tx) == True\n assert calculate_intrinsic_cost(tx) == result_intrinsic_gas_cost\n","sub_path":"tests/frontier/test_transaction.py","file_name":"test_transaction.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"488327923","text":"import json\nimport copy\n\nwith open('ws') as f:\n ws = json.load(f)\n\nwith open('sublist') as f:\n sublist = json.load(f)\n\nwith open('evaluations') as f:\n evals = json.load(f)\n\n # Special case 6.871 evals.\n # evals['6.871'] = evals['HST.956']\n\nclasses = {}\n\ndef all_virtual(sections):\n for s in sections:\n if s[1] != 'Virtual':\n return False\n return True\n\nfor c in ws:\n classes[c] = {\n 'no': c,\n 'co': ws[c]['course'],\n 'cl': ws[c]['class'],\n 'tb': ws[c]['tba'],\n \n 's': ws[c]['sections'],\n 'l': ws[c]['l'],\n 'r': ws[c]['r'],\n 'b': ws[c]['b'],\n 'lr': ws[c]['l_raw'],\n 'rr': ws[c]['r_raw'],\n 'br': ws[c]['b_raw']}\n\n classes[c]['hh'] = ws[c]['HASS-H']\n classes[c]['ha'] = ws[c]['HASS-A']\n classes[c]['hs'] = ws[c]['HASS-S']\n classes[c]['he'] = ws[c]['HASS-E']\n classes[c]['ci'] = ws[c]['CI-H']\n classes[c]['cw'] = ws[c]['CI-HW']\n classes[c]['re'] = ws[c]['REST']\n classes[c]['la'] = ws[c]['LAB']\n classes[c]['pl'] = ws[c]['pLAB']\n classes[c]['u1'] = ws[c]['units1']\n classes[c]['u2'] = ws[c]['units2']\n classes[c]['u3'] = ws[c]['units3']\n classes[c]['le'] = ws[c]['level']\n classes[c]['sa'] = ws[c]['same_as']\n classes[c]['mw'] = ws[c]['meets_with']\n classes[c]['t'] = ws[c]['terms']\n classes[c]['pr'] = ws[c]['prereq']\n classes[c]['d'] = ws[c]['desc']\n classes[c]['n'] = ws[c]['name']\n classes[c]['i'] = ws[c]['in-charge']\n classes[c]['v'] = all_virtual(ws[c]['l'] + ws[c]['r'] + ws[c]['b'])\n\n if c in sublist:\n classes[c]['nx'] = sublist[c]['no_next']\n classes[c]['rp'] = sublist[c]['repeat']\n classes[c]['u'] = sublist[c]['url']\n try:\n classes[c]['f'] = sublist[c]['final']\n except:\n print('failed to get final for', c)\n classes[c]['f'] = False\n else:\n classes[c]['nx'] = False\n classes[c]['rp'] = False\n classes[c]['u'] = ''\n classes[c]['f'] = False\n\n if c in evals:\n total_rating = 0\n total_hours = 0\n total_size = 0\n terms = 0\n \n for t in evals[c]:\n if t['resp'] > 0:\n total_rating += t['rating']\n total_hours += t['oc_hours'] + t['ic_hours']\n total_size += 
t['eligible']\n terms += 1\n \n if terms == 0:\n terms = 1\n \n classes[c]['ra'] = round(total_rating / terms, 1)\n classes[c]['h'] = round(total_hours / terms, 1)\n classes[c]['si'] = round(total_size / terms, 1)\n else:\n classes[c]['ra'] = 0\n classes[c]['h'] = 0\n classes[c]['si'] = 0\n\n# Special case 2.008 schedule.\n# classes['2.008']['s'] = ['l', 'b']\n# classes['2.008']['r'] = []\n\n\"\"\" try:\n # Special case 14.01/14.02 rec-only sections.\n classes['14.01R'] = copy.deepcopy(classes['14.01'])\n classes['14.01']['r'] = classes['14.01']['r'][:2]\n classes['14.01']['rr'] = classes['14.01']['rr'][:2]\n classes['14.01R']['no'] = '14.01R'\n classes['14.01R']['s'] = ['r']\n classes['14.01R']['r'] = classes['14.01R']['r'][2:]\n classes['14.01R']['rr'] = classes['14.01R']['rr'][2:]\n classes['14.01R']['n'] += \" (recitation only)\"\n del classes['14.01R']['l']\n\n\n classes['14.02R'] = copy.deepcopy(classes['14.02'])\n classes['14.02']['r'] = classes['14.02']['r'][:2]\n classes['14.02']['rr'] = classes['14.02']['rr'][:2]\n classes['14.02R']['no'] = '14.02R'\n classes['14.02R']['s'] = ['r']\n classes['14.02R']['r'] = classes['14.02R']['r'][2:]\n classes['14.02R']['rr'] = classes['14.02R']['rr'][2:]\n classes['14.02R']['n'] += \" (recitation only)\"\n del classes['14.02R']['l'] \n\n\nexcept Exception as e:\n print(e) \"\"\"\n\nwith open('full.json', 'w') as f:\n f.write('var classes = ')\n json.dump(classes, f, separators=(',', ':'))\n f.write(';')\n\n \n \n \n","sub_path":"new_scripts/combiner_ws.py","file_name":"combiner_ws.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"182316219","text":"#Project Euler problem 34\n# find the sum of all numbers which are equal to the sum of the factorial of their digits\ntotalsum=0\nfor i in range(3,100000): #Select a wide enough range.\n b=str(i)\n sum=0\n for j in b:\n a=1\n for k in range(2,int(j)+1):\n a*=k\n sum+=a\n if sum==i:\n totalsum+=sum\nprint(totalsum)\n#Correct!\n","sub_path":"ProjectEulerP34.py","file_name":"ProjectEulerP34.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"257101342","text":"\r\nimport numpy as np\r\nimport json\r\ndef load_tester(path):\r\n with open(path) as f:\r\n data = json.load(f)\r\n print(data)\r\n return np.asarray(data)\r\n\r\nd = load_tester('/home/superadmin/Downloads/via_export_coco (3).json')\r\n\r\nprint(type(d))\r\n\r\nnp.save('mask.npy',d)","sub_path":"scripts/36_json_npy_file.py","file_name":"36_json_npy_file.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"20512048","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\ntry:\n import pyros_msgs.opt_as_array # This will duck punch the standard message type initialization code.\n from pyros_msgs.msg import test_opt_bool_as_array # a message type just for testing\n\nexcept ImportError:\n # Because we need to access Ros message types here (from ROS env or from virtualenv, or from somewhere else)\n import pyros_setup\n # We rely on default configuration to point us ot the proper distro\n pyros_setup.configurable_import().configure().activate()\n import pyros_msgs.opt_as_array # This will duck punch the standard message type initialization code.\n from pyros_msgs.msg import test_opt_bool_as_array # a message type just for 
testing\n\n# patching\npyros_msgs.opt_as_array.duck_punch(test_opt_bool_as_array, ['data'])\n\nimport nose\n\n\ndef test_init_rosdata():\n msg = test_opt_bool_as_array(data=[True])\n assert msg.data == [True]\n\n msg = test_opt_bool_as_array(data=[False])\n assert msg.data == [False]\n\n msg = test_opt_bool_as_array(data=[])\n assert msg.data == []\n\n\ndef test_init_data():\n msg = test_opt_bool_as_array(data=True)\n assert msg.data == [True]\n\n msg = test_opt_bool_as_array(data=False)\n assert msg.data == [False]\n\n\ndef test_init_raw():\n msg = test_opt_bool_as_array(True)\n assert msg.data == [True]\n\n msg = test_opt_bool_as_array(False)\n assert msg.data == [False]\n\n\ndef test_init_default():\n msg = test_opt_bool_as_array()\n assert msg.data == []\n\n\ndef test_init_except():\n with nose.tools.assert_raises(AttributeError) as cm:\n test_opt_bool_as_array(42)\n assert cm.exception.message == \"field data has value [42] which is not of type bool[]\"\n\n\n# Just in case we run this directly\nif __name__ == '__main__':\n nose.runmodule(__name__)\n","sub_path":"tests/opt_as_array/test_opt_bool.py","file_name":"test_opt_bool.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"435777174","text":"import sqlite3\r\n\r\ncon = sqlite3.connect('carros.db')\r\n\r\ncursor = con.cursor()\r\n\r\ncursor.execute('''\r\nCREATE TABLE carros(\r\n id INTEGER PRIMARY KEY NOT NULL,\r\n nome VARCHAR(100) NOT NULL,\r\n cor VARCHAR(20) NOT NULL\r\n);\r\n''')\r\n\r\ncon.commit()\r\n\r\ncon.close()","sub_path":"exercicio S1.py","file_name":"exercicio S1.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"264442839","text":"#!/usr/bin/python\nimport argparse\nimport sys\nfrom argparse import RawTextHelpFormatter\nfrom itertools import zip_longest as izip_longest\n\nimport numpy as np\nfrom termcolor import colored\n\nimport libs.fingerprint as fingerprint\nfrom libs.config import get_config\nfrom libs.db_sqlite import SqliteDatabase\nfrom libs.reader_microphone import MicrophoneReader\nfrom libs.visualiser_console import VisualiserConsole as visual_peak\nfrom libs.visualiser_plot import VisualiserPlot as visual_plot\nfrom contextlib import redirect_stdout\n\n\n# from libs.db_mongo import MongoDatabase\n\ndef writeTofile(data, filename):\n with open(filename, 'wb') as file:\n file.write(data)\n print(\"Stored blob data into: \", filename, \"\\n\")\n\ndef align_matches(matches):\n diff_counter = {}\n largest = 0\n largest_count = 0\n song_id = -1\n\n\n for tup in matches:\n sid, diff = tup\n\n if diff not in diff_counter:\n diff_counter[diff] = {}\n\n if sid not in diff_counter[diff]:\n diff_counter[diff][sid] = 0\n\n diff_counter[diff][sid] += 1\n\n if diff_counter[diff][sid] > largest_count:\n largest = diff\n largest_count = diff_counter[diff][sid]\n song_id = sid\n\n songM = db.get_song_by_id(song_id)\n #genreM= db.get_song_by_id(song_id)\n #artistM=db.get_song_by_id(song_id)\n\n\n\n\n nseconds = round(float(largest) / fingerprint.DEFAULT_FS *\n fingerprint.DEFAULT_WINDOW_SIZE *\n fingerprint.DEFAULT_OVERLAP_RATIO, 5)\n\n return {\n \"SONG_ID\": song_id,\n \"SONG_NAME\": songM[1],\n \"CONFIDENCE\": largest_count,\n \"OFFSET\": int(largest),\n \"OFFSET_SECS\": nseconds,\n \"GENRE\": songM[3],\n \"ARTIST\":songM[4],\n \"ART\":songM[5],\n \"ALBUM\": songM[6]\n\n }\n\n\ndef grouper(iterable, n, fillvalue=None):\n args 
= [iter(iterable)] * n\n    return (filter(None, values)\n            for values in izip_longest(fillvalue=fillvalue, *args))\n\n\ndef find_matches(samples, Fs=fingerprint.DEFAULT_FS):\n    hashes = fingerprint.fingerprint(1, samples, Fs=Fs)\n    return return_matches(hashes)\n\n\ndef return_matches(hashes):\n    mapper = {}\n    for hash, offset in hashes:\n        mapper[hash.upper()] = offset\n    values = mapper.keys()\n\n    # https://www.sqlite.org/limits.html\n    # To prevent excessive memory allocations,\n    # the maximum value of a host parameter number is SQLITE_MAX_VARIABLE_NUMBER, which defaults to 999 for SQLite\n    for split_values in map(list, grouper(values, 999)):\n        # @todo move to db related files\n        query = \"\"\"\n        SELECT upper(hash), song_fk, offset\n        FROM fingerprints\n        WHERE upper(hash) IN (%s)\n    \"\"\"\n        query = query % ', '.join('?' * len(split_values))\n\n        x = db.executeAll(query, split_values)\n        matches_found = len(x)\n\n        if matches_found > 0:\n            msg = '   ** found %d hash matches (step %d/%d)'\n            #print(colored(msg, 'green') % (\n                #matches_found,\n                #len(split_values),\n                #len(values)\n            #))\n            pass\n        else:\n            msg = '   ** no matches found (step %d/%d)'\n            #print(colored(msg, 'red') % (len(split_values), len(values)))\n\n        for hash_code, sid, offset in x:\n            # (sid, db_offset - song_sampled_offset)\n            if isinstance(offset, bytes):\n                # offset comes from fingerprint.py and numpy extraction/processing\n                offset = np.frombuffer(offset, dtype=int)[0]\n            yield sid, offset - mapper[hash_code]\n\n\nif __name__ == '__main__':\n    sys.stdout = open(\"out.txt\", \"w\")\n    config = get_config()\n\n    db = SqliteDatabase()\n\n    seconds = 6\n\n    chunksize = 2 ** 12  # 4096\n    channels = 1  # int(config['channels']) # 1=mono, 2=stereo\n\n    record_forever = False\n    visualise_console = bool(config['mic.visualise_console'])\n    visualise_plot = bool(config['mic.visualise_plot'])\n\n    reader = MicrophoneReader(None)\n\n    reader.start_recording(seconds=seconds,\n                           chunksize=chunksize,\n                           channels=channels)\n\n    msg = ' * started recording..'\n    #print(colored(msg, attrs=['dark']))\n\n    while True:\n        bufferSize = int(reader.rate / reader.chunksize * seconds)\n\n        for i in range(0, bufferSize):\n            nums = reader.process_recording()\n\n            if visualise_console:\n                msg = colored('   %05d', attrs=['dark']) + colored(' %s', 'green')\n                #print(msg % visual_peak.calc(nums))\n            else:\n                msg = '   processing %d of %d..'
% (i, bufferSize)\n #print(colored(msg, attrs=['dark']))\n\n if not record_forever:\n break\n\n if visualise_plot:\n data = reader.get_recorded_data()[0]\n visual_plot.show(data)\n\n reader.stop_recording()\n\n msg = ' * recording has been stopped'\n #print(colored(msg, attrs=['dark']))\n\n data = reader.get_recorded_data()\n\n msg = ' * recorded %d samples'\n #print(colored(msg, attrs=['dark']) % len(data[0]))\n\n # reader.save_recorded('test.wav')\n\n Fs = fingerprint.DEFAULT_FS\n channel_amount = len(data)\n\n result = set()\n matches = []\n\n for channeln, channel in enumerate(data):\n # TODO: Remove prints or change them into optional logging.\n msg = ' fingerprinting channel %d/%d'\n #print(colored(msg, attrs=['dark']) % (channeln + 1, channel_amount))\n\n matches.extend(find_matches(channel))\n\n msg = ' finished channel %d/%d, got %d hashes'\n #print(colored(msg, attrs=['dark']) % (channeln + 1,\n # channel_amount, len(matches)))\n\n total_matches_found = len(matches)\n\n #print('')\n\n if total_matches_found > 0:\n msg = ' ** totally found %d hash matches'\n #print(colored(msg, 'green') % total_matches_found)\n\n song = align_matches(matches)\n\n msg = ' \\n=> Song: %s \\n'\n #msg += ' offset: %d (%d secs)\\n'\n #msg += ' confidence: %d\\n'\n msg += ' Genre: %s\\n'\n msg += ' Artist: %s\\n'\n msg += ' Album:%s\\n'\n msg += '%s\\n'\n\n\n\n\n\n print(colored(msg, 'green') % (song['SONG_NAME'],\n #song['SONG_ID'],\n #song['OFFSET'], song['OFFSET_SECS'],\n #song['CONFIDENCE'],\n song['GENRE'],\n song['ARTIST'],\n song['ALBUM'],\n song['SONG_NAME'] + song['ARTIST']))\n photo=song['ART']\n photoPath = \"example\" + \".jpg\"\n\n writeTofile(photo, photoPath)\n\n else:\n msg = ' \\n\\nNo matches found\\n\\n\\n '\n print(colored(msg, 'red'))\n\n sys.stdout.close()\n\n\n\n\n\n","sub_path":"recognize-from-microphone.py","file_name":"recognize-from-microphone.py","file_ext":"py","file_size_in_byte":6910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"603092051","text":"\nclass atm:\n balance = 1000\n\n def __init__(self, pin):\n self.pin = pin\n \n \n \n def deposit(self):\n amount = eval(input('enter amount to deposit: '))\n self.balance = self.balance + amount\n print('your deposited amount is ', amount, '\\n Your new balance is ', d1.balance)\n print('Transaction successful.')\n \n def atmPin(self):\n pin = input('enter pin to proceed: ')\n if len(pin) == 5:\n return\n else:\n print('enter correct number')\n\n def getBalance(self):\n self.balance = self.balance\n select = eval(input('select account. 
\\n1: Savings \\n2: current \\nEnter: '))\n if select == 1:\n print(self.balance)\n elif select == 2:\n print(d1.balance)\n else:\n print('service not available')\n\n def transfer(self):\n amount = eval(input('enter amount to transfer: '))\n self.balance = self.balance - amount\n d1.balance = d1.balance + amount\n print(' verify amount to transfer:', amount)\n input(\"Press enter to Proceed.\")\n print('transaction successful')\n \n\n def withdrawal(self):\n amount = eval(input('enter amount to withdraw: '))\n self.balance = self.balance - amount\n print(' verify amount to withdraw:', amount)\n input(\"Press enter to Proceed.\")\n print('transaction successful')\n \n\n def atmProcess(self):\n print('Welcome to Python Bank \\nATM Services.')\n print('tips for the day \\ndo not disclose your pin to anyone to avoid theft.')\n d1.atmPin()\n choice = eval(input('Choose Service \\n1: Deposit \\n2: Withdrawal \\n3: Check balance \\n4: Transfer \\nEnter: '))\n if choice == 1:\n d1.deposit()\n elif choice == 2:\n d1.withdrawal()\n elif choice == 3:\n d1.getBalance()\n elif choice == 4:\n d1.transfer()\n\n else:\n print('you have entered a wrong number.')\n\n \n\n\n\n\n\n\nclass elecBill:\n #unitPrice = 0.7979\n vat = 0.1250\n natElecL = 0.0200\n striL = 0.0300\n nhilGet = 0.0500\n serCharg = 0.4075\n montBill = {}\n\n def __init__(self, unitPrice):\n self.unitPrice = 0.7979\n\n def meterRead(self):\n #self.prev = prev\n #self.cur = cur\n #prev = eval(input('Enter previous meter readings: '))\n #cur = eval(input('Enter current meter readings: '))\n if cur > prev:\n metread = cur - prev\n print(metread, 'kWh')\n\n else:\n return\n\n def monthBill(self):\n self.montBill = round((metread * self.unitPrice),3)\n #return monthBill \n print('Bill is: ', self.montBill)\n\n def vatR(self):\n vaR = ((cur - prev) * self.unitPrice)\n self.va = round((self.vat * ((32*self.serCharg) + vaR)),2)\n print('Vat on bill: ', self.va)\n def natElecLe(self):\n nat = ((cur - prev) * self.unitPrice)\n self.natElec = round((self.natElecL * nat),2)\n print('National Electr. 
Levy: ', self.natElec)\n    def striLi(self):\n        light = ((cur - prev) * self.unitPrice)\n        self.striLii = round((self.striL * light), 2)\n        print('Street Light: ', self.striLii)\n    def nhilGetF(self):\n        get = ((cur - prev) * self.unitPrice)\n        self.nhilGe = round((self.nhilGet * ((32 * self.serCharg) + get)), 2)\n        print('NHIS and GETFUND Levy: ', self.nhilGe)\n    def servChar(self):\n        servCha = round((32 * self.serCharg), 2)\n        print('Service charges for 32 days: ', servCha)\n    def totalThisMonth(self):\n        #current = ((cur - prev) * self.unitPrice)\n        #totalThisMont = round(((current) + (current * self.serCharg) + (current * self.nhilGet) +\n        #(current * self.striL) + (current * self.natElecL) + (current * self.vat)), 2)\n        totalThisMont = round(((self.montBill) + (32 * self.serCharg) + ((self.montBill + (32 * self.serCharg)) * self.nhilGet) + (self.montBill * self.striL) +\n        (self.montBill * self.natElecL) + (((self.montBill + (32 * self.serCharg)) * self.vat))), 3)\n        print('Total current bill: ', totalThisMont)\n\n    def elecCal(self):\n        choice = eval(input('choose 1: monthly calculation \\n2: other services \\nEnter: '))\n        if choice == 1:\n            d.meterRead()\n            d.monthBill()\n            d.vatR()\n            d.natElecLe()\n            d.striLi()\n            d.nhilGetF()\n            d.servChar()\n            d.totalThisMonth()\n        elif choice == 2:\n            choose = eval(input('1: NHIS and GETFUND \\n2: Service charges \\n3: Value Added Tax(VAT) \\n4: Meter Readings \\n5: Street Light \\n6: National Electrification Levy \\n7: This Month Bill \\n Enter: '))\n            if choose == 1:\n                d.nhilGetF()\n            elif choose == 2:\n                d.servChar()\n            elif choose == 3:\n                d.vatR()\n            elif choose == 4:\n                d.meterRead()\n            elif choose == 5:\n                d.striLi()\n            elif choose == 6:\n                d.natElecLe()\n            else:\n                print('Wrong selection.')\n        else:\n            print('start again.')\n\n#print('Electricity bill calculation.')\n#prev = eval(input('Enter previous meter readings: '))\n#cur = eval(input('Enter current meter readings: '))\n#metread = cur - prev\n#d = elecBill(0.7979)\n\n#d.elecCal()\n\nd1 = atm(1234)\nd2 = atm(5678)\n\n\nd1.atmProcess()\n\n","sub_path":"class atm and meter.py","file_name":"class atm and meter.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"635668288","text":"# Description: a Python GUI program for a chat bot\n\n# Libraries\nfrom tkinter import *\n\nstorage_adapter = \"chatterbot.storage.MongoDatabaseAdapter\"\nfrom chatterbot import ChatBot\n\n# Uncomment the following lines to enable verbose logging\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\n#Create the tkinter Tk object\nroot = Tk()\n\n# Set the window title\nroot.title('Chat Bot Hank')\n\n# Set the window size (geometry)\nroot.geometry('400x500')\n\n#Create a main menu bar\nmain_menu = Menu(root)\n\n#Create submenu\nfile_menu = Menu(root)\nfile_menu.add_command(label='Novo', font=('ubuntu'))\nfile_menu.add_command(label='Salvar como', font=('ubuntu'))\nfile_menu.add_command(label='Sair', font=('ubuntu'))\n\n\nmain_menu.add_cascade(label='File', menu=file_menu)\nmain_menu.add_command(label='Edit')\nmain_menu.add_command(label='Quit')\nroot.config(menu=main_menu)\n\n#Create the chat window\nchatWindow = Text(root, bd=1, bg='black', width=50, height=0)\nchatWindow.place(x=6, y=6, height=385, width=370)\n\n#Create tk message window\nmessageWindow = Text(root, bg='black', width=30, height=4)\nmessageWindow.place(x=128, y=400, height=88, width=260)\n\n#Create a button to send the message\nButton = 
Button(root, text='ENVIAR', bg='blue', activebackground='light blue', width=12, height=5, font=('ubuntu', 20))\nButton.place(x=6, y=400, height=88, width=120)\n\n#Add a scroll bar\nscrollbar = Scrollbar(root, command=chatWindow.yview())\nscrollbar.place(x=375, y=5, height=385)\n\nroot.mainloop()\n","sub_path":"pythonProject/Interfaces/RealPython/ChatBot_SimpleGUI.py","file_name":"ChatBot_SimpleGUI.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"445048930","text":"import random\nfrom threading import Thread\nimport unittest\nimport uuid\nimport logger\nimport time\n\nfrom membase.helper.spatial_helper import SpatialHelper\nfrom membase.helper.failover_helper import FailoverHelper\n\n\nclass SpatialViewTests(unittest.TestCase):\n def setUp(self):\n self.log = logger.Logger.get_logger()\n self.helper = SpatialHelper(self, \"default\")\n self.helper.setup_cluster()\n\n\n def tearDown(self):\n self.helper.cleanup_cluster()\n\n\n def test_create_multiple_development_spatial(self):\n self.log.info(\"description : create multiple spatial views without \"\n \"running any spatial view query\")\n rest = self.helper.rest\n bucket = self.helper.bucket\n prefix = str(uuid.uuid4())\n name = \"dev_test_spatial_multiple\"\n\n design_names = [\"{0}-{1}-{2}\".format(name, i, prefix) \\\n for i in range(0, 5)]\n for design_name in design_names:\n self.helper.create_index_fun(design_name)\n response = rest.get_spatial(bucket, design_name)\n self.assertTrue(response)\n self.assertEquals(response[\"_id\"],\n \"_design/{0}\".format(design_name))\n self.log.info(response)\n\n\n def test_insert_x_docs(self):\n num_docs = self.helper.input.param(\"num-docs\", 100)\n self.log.info(\"description : create a spatial view on {0} documents\"\\\n .format(num_docs))\n design_name = \"dev_test_insert_{0}_docs\".format(num_docs)\n prefix = str(uuid.uuid4())[:7]\n\n inserted_keys = self._setup_index(design_name, num_docs, prefix)\n self.assertEqual(len(inserted_keys), num_docs)\n\n\n # Does verify the full docs and not only the keys\n def test_insert_x_docs_full_verification(self):\n num_docs = self.helper.input.param(\"num-docs\", 100)\n self.log.info(\"description : create a spatial view with {0} docs\"\n \" and verify the full documents\".format(num_docs))\n design_name = \"dev_test_insert_{0}_docs_full_verification\"\\\n .format(num_docs)\n prefix = str(uuid.uuid4())[:7]\n\n self.helper.create_index_fun(design_name)\n inserted_docs = self.helper.insert_docs(num_docs, prefix,\n return_docs=True)\n self.helper.query_index_for_verification(design_name, inserted_docs,\n full_docs=True)\n\n\n def test_insert_x_delete_y_docs(self):\n num_docs = self.helper.input.param(\"num-docs\", 15000)\n num_deleted_docs = self.helper.input.param(\"num-deleted-docs\", 10000)\n self.log.info(\"description : create spatial view with {0} docs \"\n \" and delete {1} docs\".format(num_docs,\n num_deleted_docs))\n design_name = \"dev_test_insert_{0}_delete_{1}_docs\"\\\n .format(num_docs, num_deleted_docs)\n prefix = str(uuid.uuid4())[:7]\n\n inserted_keys = self._setup_index(design_name, num_docs, prefix)\n\n # Delete documents and very that the documents got deleted\n deleted_keys = self.helper.delete_docs(num_deleted_docs, prefix)\n results = self.helper.get_results(design_name, 2*num_docs)\n result_keys = self.helper.get_keys(results)\n self.assertEqual(len(result_keys), num_docs-len(deleted_keys))\n self.helper.verify_result(inserted_keys, 
deleted_keys + result_keys)\n\n\n def test_insert_x_update_y_docs(self):\n num_docs = self.helper.input.param(\"num-docs\", 15000)\n num_updated_docs = self.helper.input.param(\"num-updated-docs\", 100)\n self.log.info(\"description : create spatial view with {0} docs \"\n \" and update {1} docs\".format(num_docs,\n num_updated_docs))\n design_name = \"dev_test_insert_{0}_delete_{1}_docs\"\\\n .format(num_docs, num_updated_docs)\n prefix = str(uuid.uuid4())[:7]\n\n self._setup_index(design_name, num_docs, prefix)\n\n # Update documents and verify that the documents got updated\n updated_keys = self.helper.insert_docs(num_updated_docs, prefix,\n dict(updated=True))\n results = self.helper.get_results(design_name, 2*num_docs)\n result_updated_keys = self._get_updated_docs_keys(results)\n self.assertEqual(len(updated_keys), len(result_updated_keys))\n self.helper.verify_result(updated_keys, result_updated_keys)\n\n\n def test_get_spatial_during_x_min_load_y_working_set(self):\n num_docs = self.helper.input.param(\"num-docs\", 10000)\n duration = self.helper.input.param(\"load-time\", 1)\n self.log.info(\"description : this test will continuously insert data \"\n \"and get the spatial view results for {0} minutes\")\n design_name = \"dev_test_insert_and_get_spatial_{0}_mins\"\\\n .format(duration)\n prefix = str(uuid.uuid4())[:7]\n\n self.helper.create_index_fun(design_name)\n\n self.docs_inserted = []\n self.shutdown_load_data = False\n load_thread = Thread(\n target=self._insert_data_till_stopped,\n args=(num_docs, prefix))\n load_thread.start()\n\n self._get_results_for_x_minutes(design_name, duration)\n\n self.shutdown_load_data = True\n load_thread.join()\n\n # self.docs_inserted was set by the insertion thread\n # (_insert_data_till_stopped)\n self.helper.query_index_for_verification(design_name,\n self.docs_inserted)\n\n\n # Create the index and insert documents including verififaction that\n # the index contains them\n # Returns the keys of the inserted documents\n def _setup_index(self, design_name, num_docs, prefix):\n self.helper.create_index_fun(design_name)\n inserted_keys = self.helper.insert_docs(num_docs, prefix)\n self.helper.query_index_for_verification(design_name, inserted_keys)\n\n return inserted_keys\n\n\n # Return the keys for all docs that contain a key called \"updated\"\n # in the value\n def _get_updated_docs_keys(self, results):\n keys = []\n if results:\n rows = results[\"rows\"]\n for row in rows:\n if \"updated\" in row[\"value\"]:\n keys.append(row[\"id\"].encode(\"ascii\", \"ignore\"))\n self.log.info(\"{0} documents to updated\".format(len(keys)))\n return keys\n\n\n def _get_results_for_x_minutes(self, design_name, duration, delay=5):\n random.seed(0)\n start = time.time()\n while (time.time() - start) < duration * 60:\n limit = random.randint(1, 1000)\n self.log.info(\"{0} seconds has passed ....\".format(\n (time.time() - start)))\n results = self.helper.get_results(design_name, limit)\n keys = self.helper.get_keys(results)\n self.log.info(\"spatial view returned {0} rows\".format(len(keys)))\n time.sleep(delay)\n\n def _insert_data_till_stopped(self, num_docs, prefix):\n while not self.shutdown_load_data:\n # Will be read after the function is terminated\n self.docs_inserted = self.helper.insert_docs(\n num_docs, prefix, wait_for_persistence=False)\n\n\n def test_x_docs_failover(self):\n num_docs = self.helper.input.param(\"num-docs\", 10000)\n self.log.info(\"description : test failover with {0} documents\"\\\n .format(num_docs))\n design_name = 
\"dev_test_failover_{0}\".format(num_docs)\n prefix = str(uuid.uuid4())[:7]\n\n fh = FailoverHelper(self.helper.servers, self)\n\n inserted_keys = self._setup_index(design_name, num_docs, prefix)\n failover_nodes = fh.failover(1)\n self.helper.query_index_for_verification(design_name, inserted_keys)\n\n # The test cleanup expects all nodes running, hence spin the\n # full cluster up again\n fh.undo_failover(failover_nodes)\n","sub_path":"pytests/spatialviewtests.py","file_name":"spatialviewtests.py","file_ext":"py","file_size_in_byte":8139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"90215164","text":"# config.py\n\nimport os\nfrom flask import Flask\n\nWEB_ADDRESS = '0.0.0.0'\nWEB_PORT = 5000\n\n# __file__ : 현재 수행중인 코드를 담고 있는 파일의 위치\n# os.path.abspath(파일명) : 해당 파일의 절대 경로 확인\n# os.path.dirname(경로+파일) : 파일에서 디렉토리 명만 알아냄\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\n\n# 파일의 경로 지정하기\nTEMPLATES = os.path.join(PROJECT_ROOT, 'droneapp/templates')\nSTATIC_FOLDER = os.path.join(PROJECT_ROOT, 'droneapp/static')\n\n# 디버그 모드 설정\nDEBUG = False\n\n# 로그 파일\nLOG_FILE = 'tello.log'\n\n# Flask 인스턴스를 생성\napp = Flask(__name__, template_folder=TEMPLATES, static_folder=STATIC_FOLDER)\n\nif DEBUG: # 만약 디버그 모드라면\n app.debug = DEBUG # flask의 debug모드 활성화","sub_path":"drone-control-with-flask/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"450329844","text":"#!/usr/bin/env python\n\n\"\"\" CHALLENGE, PART 2:\n\nFuel itself requires fuel just like a module - take its mass, divide by three, round down, and subtract 2. However, that fuel also requires fuel, and that fuel requires fuel, and so on. \nAny mass that would require negative fuel should instead be treated as if it requires zero fuel; the remaining mass, if any, is instead handled by wishing really hard, which has no mass and is outside the scope of this calculation.\n\nSo, for each module mass, calculate its fuel and add it to the total. Then, treat the fuel amount you just calculated as the input mass and repeat the process, continuing until a fuel requirement is zero or negative. \n\nWhat is the sum of the fuel requirements for all of the modules on your spacecraft when also taking into account the mass of the added fuel? 
\n(Calculate the fuel requirements for each module separately, then add them all up at the end.)\n\n\"\"\"\n\nimport math\nimport sys\n\ndef total_fuel(fuel): # recursion!\n fuels_fuel = math.floor(fuel / 3) - 2 # the fuel required for each amount of fuel\n if fuels_fuel <= 0:\n return 0 # return zero if less than zero to avoid negatives\n else:\n return fuels_fuel + total_fuel(fuels_fuel) # total together the total amount of fuel needed\n\nwith open(sys.argv[1]) as file: # same as part 1...\n lines = file.readlines()\n total = 0\n for line in lines:\n total += total_fuel(int(line)) # recursion to determine the fuel for each mass of fuel\n print(total)","sub_path":"day-01/python/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"571836070","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#########################################################################\n# File Name: test_getparas.py\n# Author: jyxu\n# mail: ahcigar@foxmail.com\n# Created Time: 四 9/27 15:44:33 2018\n#########################################################################\nimport unittest\n###\nimport numpy as np\nfrom get_paras import *\nclass TestGetParas(unittest.TestCase):\n '''Test get_paras.py'''\n '''def calc_distance(abc, XYZ1, XYZ2):\n XYZ must be absolute coordinates [X, Y, Z]\n # 1----2\n ###\n XYZ1 = (abc.T*XYZ1).T.sum(0)\n XYZ2 = (abc.T*XYZ2).T.sum(0)\n return round(np.linalg.norm(XYZ1-XYZ2), 8)'''\n def test_cal_distance(self):\n \"\"\"Test calculate distance\"\"\"\n abc = np.array([[1,0,0],[0,1,0],[0,0,1]])\n XYZ1 = np.array([1,0,0])\n XYZ2 = np.array([1,0,0])\n XYZ3 = np.array([2,0,0])\n self.assertEqual(0, calc_distance(abc, XYZ1, XYZ2))\n self.assertNotEqual(0, calc_distance(abc, XYZ1, XYZ3))\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"LasAndClf-dev/get_data_packages/GetData3_DAH10/test_getparas.py","file_name":"test_getparas.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"207900704","text":"# -*- coding: utf-8 -*-\n#Python Imports\nfrom datetime import date, datetime, time\nimport xlsxwriter\nimport base64\n\n#Odoo Imports\nfrom odoo import models, fields, api\nfrom odoo.exceptions import UserError, ValidationError\n\nclass ExcelExportMixin(models.AbstractModel):\n _name = 'export.excel.mixin'\n _description = 'This model represents an abstract parent class used to manage excel reports'\n \n name = fields.Char()\n active = fields.Boolean(default=True)\n \n start_date = fields.Date(\n string = 'Date from',\n )\n \n end_date = fields.Date(\n string = 'Date to',\n )\n \n #attachment\n attachment_id = fields.Many2one(\n 'ir.attachment',\n string = 'Excel File',\n readonly = True,\n )\n \n is_generated = fields.Boolean(\n readonly = True,\n default = False,\n )\n \n notes = fields.Char()\n \n #internal/external partner to share the info with\n partner_ids = fields.Many2many(\n 'res.partner',\n string = 'Audience',\n )\n \n ################\n # TOOL METHODS #\n ################\n\n def get_excel(self):\n return {\n 'type': 'ir.actions.act_url',\n 'name': 'get_export_file',\n 'url': '/web/content/%s/%s?download=true' % (self.attachment_id.id, self.attachment_id.name),\n }\n \n def generate_excel(self,data=[{'col1_name':'','col2name':''}]):\n self.ensure_one()\n #build book and sheet\n filename = self.name + '.xlsx'\n workbook = 
xlsxwriter.Workbook(filename)\n worksheet = workbook.add_worksheet() \n \n #write Header row\n j = 0\n for key,value in data[0].items():\n worksheet.write(0, j, key)\n j += 1\n \n #write the data rows\n i = 1\n for row in data:\n j = 0\n for key,value in row.items():\n worksheet.write(i, j, value)\n j += 1\n i += 1\n \n #close & encode\n workbook.close() \n \n try:\n company = self.company_id.id\n except:\n company = False\n \n #Encode and save as attachment\n with open(filename, \"rb\") as f:\n data = f.read()\n encoded = base64.b64encode(data)\n attachment_data = {\n 'res_id': self.id,\n 'res_model': self._name,\n 'company_id': company,\n 'name': filename,\n 'type': 'binary',\n 'datas_fname': filename,\n 'datas': encoded,\n }\n self.attachment_id = self.env['ir.attachment'].create(attachment_data)\n self.is_generated = True\n \n ######################\n # VALIDATION METHODS #\n ######################\n @api.constrains('start_date', 'end_date')\n def _check_dates(self):\n for export in self:\n if export.start_date and export.end_date:\n if export.start_date > export.end_date:\n raise ValidationError(\"Date configuration error: Date to: {} is before Date from: {}\".format(export.start_date,export.end_date))\n ","sub_path":"vcls-interfaces/models/excel_export_mixin.py","file_name":"excel_export_mixin.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"182493417","text":"#!/usr/bin/env python3\n\nfrom QUBEKit.utils.constants import BOHR_TO_ANGS, ELECTRON_CHARGE, J_TO_KCAL_P_MOL, M_TO_ANGS, PI, VACUUM_PERMITTIVITY\nfrom QUBEKit.utils.decorators import for_all_methods, timer_logger\nfrom QUBEKit.utils.file_handling import extract_charge_data\n\nfrom functools import lru_cache\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.cm import ScalarMappable\n\n# DO NOT REMOVE THIS IMPORT. 
ALTHOUGH IT IS NOT EXPLICITLY CALLED, IT IS NEEDED FOR 3D PLOTTING.\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport numpy as np\nfrom scipy.optimize import minimize\n\n\n@for_all_methods(timer_logger)\nclass Charges:\n \"\"\"\n * Identify atoms which need a v-site.\n * Generate sample points in shells around that atom (shells are 1.4-2.0x the vdW radius).\n * Calculate the multipole expansion esp at all of those sample points.\n * Identify the vectors along which a single virtual site would sit, and two virtual sites would sit.\n * Move the virtual sites along this vector and vary the charges.\n * Calculate the monopole esp at all the sample points with each move.\n * Fit the positions and charges of the virtual sites, minimising the difference between the\n full multipole esp and the monopole esp with a virtual site.\n * Store the final locations and charges of the virtual sites, as well as the errors.\n * Plot the results\n\n Numpy arrays are used throughout for faster calculation of esp values.\n \"\"\"\n\n # van der Waal's radii of atoms common in organic compounds; units: Angstroms\n vdw_radii = {\n 'H': 1.44,\n 'B': 2.04,\n 'C': 1.93,\n 'N': 1.83,\n 'O': 1.75,\n 'F': 1.68,\n 'P': 2.07,\n 'S': 2.02,\n 'Cl': 1.97,\n 'I': 2.25,\n }\n\n def __init__(self, molecule):\n\n self.molecule = molecule\n self.coords = self.molecule.coords['qm'] if self.molecule.coords['qm'] is not [] else self.molecule.coords[\n 'input']\n\n self.ddec_data, self.dipole_moment_data, self.quadrupole_moment_data = extract_charge_data(\n self.molecule.ddec_version)\n\n # List of tuples where each tuple is the xyz atom coords, followed by their partial charge\n self.atom_points = [(coord, atom.partial_charge) # [((x, y, z), q), ... ]\n for coord, atom in zip(self.coords, self.molecule.atoms)]\n\n # List of tuples where each tuple is the xyz coords of the v-site(s),\n # followed by their charge and index of the parent atom\n self.v_sites_coords = [] # [((x, y, z), q, atom_index), ... ]\n\n # Kept separate for graphing comparisons\n self.one_site_coords = None # [((x, y, z), q, atom_index), ... ]\n self.two_site_coords = None # [((x, y, z), q, atom_index), ... 
]\n\n self.site_errors = {\n 0: None,\n 1: None,\n 2: None,\n }\n\n for atom_index, atom in enumerate(self.molecule.atoms):\n if atom.atomic_symbol in ['N', 'O', 'F', 'P', 'S', 'Cl', 'Br', 'I']:\n self.sample_points = self.generate_sample_points_atom(atom_index)\n self.no_site_esps = self.generate_esp_atom(atom_index)\n self.fit(atom_index)\n self.plot()\n self.write_xyz()\n\n @staticmethod\n def spherical_to_cartesian(spherical_coords):\n \"\"\"\n :return: Cartesian (x, y, z) coords from the spherical (r, theta, phi) coords.\n \"\"\"\n r, theta, phi = spherical_coords\n return np.array([r * np.sin(theta) * np.cos(phi), r * np.sin(theta) * np.sin(phi), r * np.cos(theta)])\n\n @staticmethod\n def xyz_distance(point1, point2):\n \"\"\"\n :param point1: coordinates of a point\n :param point2: coordinates of another point\n :return: distance between the two points\n \"\"\"\n return np.linalg.norm(point1 - point2)\n\n @staticmethod\n def monopole_esp_one_charge(charge, dist):\n \"\"\"\n Calculate the esp from a monopole at a given distance\n :param charge: charge at atom centre\n :param dist: distance from sample_coords to atom_coords\n (provided as argument to prevent repeated calculation)\n :return: monopole esp value\n \"\"\"\n return (charge * ELECTRON_CHARGE * ELECTRON_CHARGE) / (\n 4 * PI * VACUUM_PERMITTIVITY * dist)\n\n @staticmethod\n def monopole_esp_two_charges(charge1, charge2, dist1, dist2):\n \"\"\"\n Calculate the esp from a monopole with two charges, each a different distance from the point of measurement\n :return: monopole esp value\n \"\"\"\n return ((ELECTRON_CHARGE * ELECTRON_CHARGE) / (4 * PI * VACUUM_PERMITTIVITY)) * (\n charge1 / dist1 + charge2 / dist2)\n\n @staticmethod\n def monopole_esp_three_charges(charge1, charge2, charge3, dist1, dist2, dist3):\n \"\"\"\n Calculate the esp from a monopole with three charges, each a different distance from the point of measurement\n :return: monopole esp value\n \"\"\"\n return ((ELECTRON_CHARGE * ELECTRON_CHARGE) / (4 * PI * VACUUM_PERMITTIVITY)) * (\n charge1 / dist1 + charge2 / dist2 + charge3 / dist3)\n\n @staticmethod\n def dipole_esp(dist_vector, dipole_moment, dist):\n \"\"\"\n Calculate the esp from a dipole at a given sample point.\n :param dist_vector: atom_coords - sample_coords\n :param dipole_moment: dipole moment xyz components from Chargemol output\n :param dist: distance from sample_coords to atom_coords\n (provided as argument to prevent repeated calculation)\n :return: dipole esp value\n \"\"\"\n return (dipole_moment * ELECTRON_CHARGE * ELECTRON_CHARGE).dot(dist_vector) / (\n 4 * PI * VACUUM_PERMITTIVITY * dist ** 3)\n\n @staticmethod\n def quadrupole_moment_tensor(q_xy, q_xz, q_yz, q_x2_y2, q_3z2_r2):\n \"\"\"\n :params: quadrupole moment components from Chargemol output\n :return: quadrupole moment tensor, M\n \"\"\"\n return np.array([\n [q_x2_y2 / 2 - q_3z2_r2 / 6, q_xy, q_xz],\n [q_xy, -q_x2_y2 / 2 - q_3z2_r2 / 6, q_yz],\n [q_xz, q_yz, q_3z2_r2 / 3]\n ])\n\n @staticmethod\n def quadrupole_esp(dist_vector, m_tensor, dist):\n \"\"\"\n Calculate the esp from a quadrupole at a given distance\n :param dist_vector: atom_coords - sample_coords\n :param m_tensor: quadrupole moment tensor calculated from Chargemol output\n :param dist: distance from sample_coords to atom_coords\n (provided as argument to prevent repeated calculation)\n :return: quadrupole esp value\n \"\"\"\n return (3 * ELECTRON_CHARGE * ELECTRON_CHARGE * dist_vector.dot(m_tensor * (BOHR_TO_ANGS ** 2)).dot(\n dist_vector)) / (8 * PI * 
VACUUM_PERMITTIVITY * dist ** 5)\n\n @lru_cache(maxsize=None)\n def generate_sample_points_relative(self, vdw_radius):\n \"\"\"\n Generate evenly distributed points in a series of shells around the point (0, 0, 0)\n This uses fibonacci spirals to produce an even spacing of points on a sphere.\n\n radius of points are between 1.4-2.0x the vdW radius\n :return: list of numpy arrays where each array is the xyz coordinates of a sample point.\n \"\"\"\n\n min_points_per_shell = 32\n shells = 5\n phi = PI * (3.0 - np.sqrt(5.0))\n\n relative_sample_points = []\n for shell in range(shells):\n shell += 1\n points_in_shell = min_points_per_shell * shell * shell\n # 1.4-2.0x the vdw_radius\n shell_radius = (1.4 + ((2.0 - 1.4) / shells) * shell) * vdw_radius\n\n for i in range(points_in_shell):\n y = 1 - (i / (points_in_shell - 1)) * 2\n y_rad = np.sqrt(1 - y * y) * shell_radius\n y *= shell_radius\n\n theta = i * phi\n\n x = np.cos(theta) * y_rad\n z = np.sin(theta) * y_rad\n\n relative_sample_points.append(np.array([x, y, z]))\n\n return relative_sample_points\n\n def generate_sample_points_atom(self, atom_index):\n \"\"\"\n * Get the vdw radius of the atom which is being analysed\n * Using the relative sample points generated from generate_sample_points_relative():\n * Offset all of the points by the position of the atom coords\n :param atom_index: index of the atom around which a v-site will be fit\n :return: list of numpy arrays where each array is the xyz coordinates of a sample point.\n \"\"\"\n\n atom = self.molecule.atoms[atom_index]\n atom_coords = self.coords[atom_index]\n vdw_radius = self.vdw_radii[atom.atomic_symbol]\n\n sample_points = self.generate_sample_points_relative(vdw_radius)\n for point in sample_points:\n point += atom_coords\n\n return sample_points\n\n def generate_esp_atom(self, atom_index):\n \"\"\"\n Using the multipole expansion, calculate the esp at each sample point around an atom.\n :param atom_index: The index of the atom being analysed.\n :return: Ordered list of esp values at each sample point around the atom.\n \"\"\"\n\n atom_coords = self.coords[atom_index]\n\n charge = self.ddec_data[atom_index].charge\n dip_data = self.dipole_moment_data[atom_index]\n dipole_moment = np.array([*dip_data.values()]) * BOHR_TO_ANGS\n\n quad_data = self.quadrupole_moment_data[atom_index]\n\n no_site_esps = []\n for point in self.sample_points:\n dist = Charges.xyz_distance(point, atom_coords)\n dist_vector = point - atom_coords\n\n mono_esp = Charges.monopole_esp_one_charge(charge, dist)\n dipo_esp = Charges.dipole_esp(dist_vector, dipole_moment, dist)\n\n m_tensor = Charges.quadrupole_moment_tensor(*quad_data.values())\n quad_esp = Charges.quadrupole_esp(dist_vector, m_tensor, dist)\n\n v_total = (mono_esp + dipo_esp + quad_esp) * M_TO_ANGS * J_TO_KCAL_P_MOL\n no_site_esps.append(v_total)\n\n return no_site_esps\n\n def generate_atom_mono_esp_two_charges(self, atom_index, site_charge, site_coords):\n \"\"\"\n With a virtual site, calculate the monopole esp at each sample point around an atom.\n :param atom_index: The index of the atom being analysed.\n :param site_charge: The charge of the virtual site.\n :param site_coords: numpy array of the xyz position of the virtual site.\n :return: Ordered list of esp values at each sample point around the atom.\n \"\"\"\n\n atom_coords = self.coords[atom_index]\n # New charge of the atom, having removed the v-site's charge.\n atom_charge = self.ddec_data[atom_index].charge - site_charge\n\n v_site_esps = []\n for point in 
self.sample_points:\n dist = Charges.xyz_distance(point, atom_coords)\n site_dist = Charges.xyz_distance(point, site_coords)\n\n mono_esp = Charges.monopole_esp_two_charges(atom_charge, site_charge, dist, site_dist)\n v_site_esps.append(mono_esp * M_TO_ANGS * J_TO_KCAL_P_MOL)\n\n return v_site_esps\n\n def generate_atom_mono_esp_three_charges(self, atom_index, q_a, q_b, site_a_coords, site_b_coords):\n \"\"\"\n Calculate the esp at each sample point when two virtual sites are placed around an atom.\n :param atom_index: The index of the atom being analysed.\n :param q_a: charge of v-site a\n :param q_b: charge of v-site b\n :param site_a_coords: coords of v-site a\n :param site_b_coords: coords of v-site b\n :return: ordered list of esp values at each sample point\n \"\"\"\n\n atom_coords = self.coords[atom_index]\n atom_charge = self.ddec_data[atom_index].charge - (q_a + q_b)\n\n v_site_esps = []\n for point in self.sample_points:\n dist = Charges.xyz_distance(point, atom_coords)\n site_a_dist = Charges.xyz_distance(point, site_a_coords)\n site_b_dist = Charges.xyz_distance(point, site_b_coords)\n\n mono_esp = Charges.monopole_esp_three_charges(atom_charge, q_a, q_b, dist, site_a_dist, site_b_dist)\n v_site_esps.append(mono_esp * M_TO_ANGS * J_TO_KCAL_P_MOL)\n\n return v_site_esps\n\n def get_vector_from_coords(self, atom_index, n_sites=1, alt=False):\n \"\"\"\n Given the coords of the atom which will have a v-site and its neighbouring atom(s) coords,\n calculate the vector along which the virtual site will sit.\n :param atom_index: The index of the atom being analysed.\n :param n_sites: The number of virtual sites being placed around the atom.\n :param alt: When placing two sites on an atom with two bonds, there are two placements.\n Is this the usual placement, or the alternative (rotated 90 degrees around the bisecting vector).\n :return Vector(s) along which the v-site will sit. (np array)\n \"\"\"\n\n atom = self.molecule.atoms[atom_index]\n atom_coords = self.coords[atom_index]\n\n # e.g. halogens\n if len(atom.bonds) == 1:\n bonded_index = atom.bonds[0] # [0] is used since bonds is a one item list\n bonded_coords = self.coords[bonded_index]\n r_ab = atom_coords - bonded_coords\n if n_sites == 1:\n return r_ab\n return r_ab, r_ab\n\n # e.g. oxygen\n if len(atom.bonds) == 2:\n bonded_index_b, bonded_index_c = atom.bonds\n bonded_coords_b = self.coords[bonded_index_b]\n bonded_coords_c = self.coords[bonded_index_c]\n r_ab = atom_coords - bonded_coords_b\n r_ac = atom_coords - bonded_coords_c\n if n_sites == 1:\n return r_ab + r_ac\n if alt:\n return (r_ab + r_ac), np.cross(r_ab, r_ac)\n return (r_ab + r_ac), np.cross((r_ab + r_ac), np.cross(r_ab, r_ac))\n\n # e.g. 
nitrogen\n if len(atom.bonds) == 3:\n bonded_index_b, bonded_index_c, bonded_index_d = atom.bonds\n bonded_coords_b = self.coords[bonded_index_b]\n bonded_coords_c = self.coords[bonded_index_c]\n bonded_coords_d = self.coords[bonded_index_d]\n r_vec = np.cross((bonded_coords_b - bonded_coords_c), (bonded_coords_d - bonded_coords_c))\n if n_sites == 1:\n return r_vec\n else:\n if atom.atomic_symbol == 'N':\n h_s = []\n for atom_index in atom.bonds:\n if self.molecule.atoms[atom_index].atomic_symbol == 'H':\n h_s.append(atom_index)\n # Special case (amine group); position is slightly different\n if len(h_s) == 2:\n h_a_coords = self.coords[h_s[0]]\n h_b_coords = self.coords[h_s[1]]\n r_ha = atom_coords - h_a_coords\n r_hb = atom_coords - h_b_coords\n\n return r_vec, r_ha + r_hb\n return r_vec, r_vec\n\n def esp_from_lambda_and_charge(self, atom_index, q, lam, vec):\n \"\"\"\n Place a v-site at the correct position along the vector by scaling according to the lambda\n calculate the esp from the atom and the v-site.\n :param atom_index: index of the atom with a virtual site to be fit to\n :param q: charge of the virtual site\n :param lam: scaling of the vector along which the v-site sits\n :param vec: the vector along which the v-site sits\n :return: Ordered list of esp values at each sample point\n \"\"\"\n\n # This is the current position of the v-site (moved by the fit() method)\n site_coords = (vec * lam) + self.coords[atom_index]\n return self.generate_atom_mono_esp_two_charges(atom_index, q, site_coords)\n\n def sites_coords_from_vecs_and_lams(self, atom_index, lam_a, lam_b, vec_a, vec_b):\n \"\"\"\n Get the two virtual site coordinates from the vectors they sit along and the atom they are attached to.\n :param atom_index: The index of the atom being analysed.\n :param lam_a: scale factor for vec_a\n :param lam_b: scale factor for vec_b\n :param vec_a: vector deciding virtual site position\n :param vec_b: vector deciding virtual site position\n :return: tuple of np arrays which are the xyz coordinates of the v-sites\n \"\"\"\n\n if len(self.molecule.atoms[atom_index].bonds) == 2:\n site_a_coords = (vec_a * lam_a) + (vec_b * lam_b) + self.coords[atom_index]\n site_b_coords = (vec_a * lam_a) - (vec_b * lam_b) + self.coords[atom_index]\n else:\n site_a_coords = (vec_a * lam_a) + self.coords[atom_index]\n site_b_coords = (vec_b * lam_b) + self.coords[atom_index]\n\n return site_a_coords, site_b_coords\n\n def esp_from_lambdas_and_charges(self, atom_index, q_a, q_b, lam_a, lam_b, vec_a, vec_b):\n \"\"\"\n Place v-sites at the correct positions along the vectors by scaling according to the lambdas\n calculate the esp from the atom and the v-sites.\n :param atom_index: The index of the atom being analysed.\n :param q_a: charge of v-site a\n :param q_b: charge of v-site b\n :param lam_a: scale factor for vec_a\n :param lam_b: scale factor for vec_b\n :param vec_a: vector deciding virtual site position\n :param vec_b: vector deciding virtual site position\n :return: Ordered list of esp values at each sample point\n \"\"\"\n\n site_a_coords, site_b_coords = self.sites_coords_from_vecs_and_lams(atom_index, lam_a, lam_b, vec_a, vec_b)\n\n return self.generate_atom_mono_esp_three_charges(atom_index, q_a, q_b, site_a_coords, site_b_coords)\n\n def fit(self, atom_index, max_err=1.005):\n \"\"\"\n * Take the atom which will have a v-site fit around it\n * Calculate all possible vectors depending on 1 site, 2 site, rot by 90 deg etc\n * Fit\n * Store all v-site coords (one site, two sites)\n * Which fit 
had lowest error?\n :param max_err: If the addition of a v-site only reduces the error by a factor of max_err, ignore it.\n :param atom_index: The index of the atom being analysed.\n \"\"\"\n\n def one_site_objective_function(q_lam, vec):\n site_esps = self.esp_from_lambda_and_charge(atom_index, *q_lam, vec)\n error = sum(abs(no_site_esp - site_esp)\n for no_site_esp, site_esp in zip(self.no_site_esps, site_esps))\n return error\n\n def two_sites_objective_function(q_q_lam_lam, vec_a, vec_b):\n site_esps = self.esp_from_lambdas_and_charges(atom_index, *q_q_lam_lam, vec_a, vec_b)\n error = sum(abs(no_site_esp - site_esp)\n for no_site_esp, site_esp in zip(self.no_site_esps, site_esps))\n return error\n\n bounds = ((-1.0, 1.0), (-1.0, 1.0), (0.01, 0.5), (0.01, 0.5))\n n_sample_points = len(self.no_site_esps)\n\n # No site\n vec = self.get_vector_from_coords(atom_index, n_sites=1)\n no_site_error = one_site_objective_function((0, 1), vec)\n self.site_errors[0] = no_site_error / n_sample_points\n\n # One site\n one_site_fit = minimize(one_site_objective_function, np.array([0, 1]), args=vec, bounds=bounds[1:3])\n self.site_errors[1] = one_site_fit.fun / n_sample_points\n q, lam = one_site_fit.x\n one_site_coords = [((vec * lam) + self.coords[atom_index], q, atom_index)]\n self.one_site_coords = one_site_coords\n\n # Two sites (first orientation)\n vec_a, vec_b = self.get_vector_from_coords(atom_index, n_sites=2)\n two_site_fit = minimize(two_sites_objective_function, np.array([0, 0, 1, 1]), args=(vec_a, vec_b),\n bounds=bounds)\n self.site_errors[2] = two_site_fit.fun / n_sample_points\n q_a, q_b, lam_a, lam_b = two_site_fit.x\n site_a_coords, site_b_coords = self.sites_coords_from_vecs_and_lams(atom_index, lam_a, lam_b, vec_a, vec_b)\n two_site_coords = [(site_a_coords, q_a, atom_index), (site_b_coords, q_b, atom_index)]\n self.two_site_coords = two_site_coords\n\n # Two sites (alternative orientation)\n if len(self.molecule.atoms[atom_index].bonds) == 2:\n vec_a, vec_b = self.get_vector_from_coords(atom_index, n_sites=2, alt=True)\n alt_two_site_fit = minimize(two_sites_objective_function, np.array([0, 0, 1, 1]), args=(vec_a, vec_b),\n bounds=bounds)\n self.site_errors[2] = alt_two_site_fit.fun / n_sample_points\n q_a, q_b, lam_a, lam_b = alt_two_site_fit.x\n site_a_coords, site_b_coords = self.sites_coords_from_vecs_and_lams(atom_index, lam_a, lam_b, vec_a, vec_b)\n alt_two_site_coords = [(site_a_coords, q_a, atom_index), (site_b_coords, q_b, atom_index)]\n self.two_site_coords = alt_two_site_coords\n\n if self.site_errors[0] < min(self.site_errors[1] * max_err, self.site_errors[2] * max_err):\n print('No virtual site placement has reduced the error significantly.')\n elif self.site_errors[1] < self.site_errors[2] * max_err:\n print('The addition of one virtual site was found to be best.')\n self.v_sites_coords.extend(self.one_site_coords)\n else:\n print('The addition of two virtual sites was found to be best.')\n self.v_sites_coords.extend(self.two_site_coords)\n\n print(\n f'Errors (kcal/mol):\\n'\n f'No Site One Site Two Sites\\n'\n f'{self.site_errors[0]:.4f} {self.site_errors[1]:.4f} {self.site_errors[2]:.4f}'\n )\n\n def plot(self):\n \"\"\"\n Figure with three subplots.\n All plots show the atoms and bonds as balls and sticks; virtual sites are x's; sample points are dots.\n * Plot showing the positions of the sample points.\n * Plot showing the position of a single virtual site.\n * Plot showing the positions of two virtual sites.\n Errors are included to show the impact of virtual 
site placements.\n \"\"\"\n\n fig = plt.figure(figsize=plt.figaspect(0.33), tight_layout=True)\n # fig.suptitle('Virtual Site Placements', fontsize=20)\n\n norm = plt.Normalize(vmin=-1.0, vmax=1.0)\n cmap = 'cool'\n\n samp_plt = fig.add_subplot(1, 3, 1, projection='3d')\n one_plt = fig.add_subplot(1, 3, 2, projection='3d')\n two_plt = fig.add_subplot(1, 3, 3, projection='3d')\n\n plots = [samp_plt, one_plt, two_plt]\n # Add atom positions to all subplots\n for plot in plots:\n plot.scatter(\n xs=[i[0][0] for i in self.atom_points],\n ys=[i[0][1] for i in self.atom_points],\n zs=[i[0][2] for i in self.atom_points],\n c=[i[1] for i in self.atom_points],\n marker='o',\n s=200,\n cmap=cmap,\n norm=norm,\n )\n\n # Plot the bonds as connecting lines\n for bond in self.molecule.topology.edges:\n plot.plot(\n xs=[self.coords[bond[0]][0], self.coords[bond[1]][0]],\n ys=[self.coords[bond[0]][1], self.coords[bond[1]][1]],\n zs=[self.coords[bond[0]][2], self.coords[bond[1]][2]],\n c='darkslategrey',\n alpha=0.5\n )\n\n # Left subplot contains the sample point positions\n samp_plt.scatter(\n xs=[i[0] for i in self.sample_points],\n ys=[i[1] for i in self.sample_points],\n zs=[i[2] for i in self.sample_points],\n c='darkslategrey',\n marker='o',\n s=5\n )\n samp_plt.title.set_text(f'Sample Points Positions\\nError: {self.site_errors[0]: .5}')\n\n # Centre subplot contains the single v-site\n one_plt.scatter(\n xs=[i[0][0] for i in self.one_site_coords],\n ys=[i[0][1] for i in self.one_site_coords],\n zs=[i[0][2] for i in self.one_site_coords],\n c=[i[1] for i in self.one_site_coords],\n marker='x',\n s=200,\n cmap=cmap,\n norm=norm,\n )\n one_plt.title.set_text(f'One Site Position\\nError: {self.site_errors[1]: .5}')\n\n # Right subplot contains the two v-sites\n two_plt.scatter(\n xs=[i[0][0] for i in self.two_site_coords],\n ys=[i[0][1] for i in self.two_site_coords],\n zs=[i[0][2] for i in self.two_site_coords],\n c=[i[1] for i in self.two_site_coords],\n marker='x',\n s=200,\n cmap=cmap,\n norm=norm,\n )\n error = self.site_errors[2]\n two_plt.title.set_text(f'Two Sites Positions\\nError: {error: .5}')\n\n sm = ScalarMappable(norm=norm, cmap=cmap)\n sm.set_array([])\n cbar = fig.colorbar(sm)\n cbar.ax.set_title('charge')\n\n plt.tight_layout()\n plt.savefig(f'{self.molecule.name}_virtual_sites.png')\n\n def write_xyz(self):\n \"\"\"\n Write an xyz file containing the atom and virtual site coordinates.\n \"\"\"\n\n with open(f'{self.molecule.name}.xyz', 'w+') as xyz_file:\n xyz_file.write(\n f'{len(self.molecule.atoms) + len(self.v_sites_coords)}\\n'\n f'xyz file generated with QUBEKit. 
'\n f'Error with v-site: {min(self.site_errors.values()): .5f} kcal/mol\\n'\n )\n for i, atom in enumerate(self.coords):\n xyz_file.write(\n f'{self.molecule.atoms[i].atomic_symbol} {atom[0]: .10f} {atom[1]: .10f} {atom[2]: .10f}'\n f' {self.molecule.atoms[i].partial_charge}\\n')\n\n for site in self.v_sites_coords:\n if site[2] == i:\n xyz_file.write(\n f'X {site[0][0]: .10f} {site[0][1]: .10f} {site[0][2]: .10f} {site[1]: .10f}\\n')\n","sub_path":"QUBEKit/virtual_sites.py","file_name":"virtual_sites.py","file_ext":"py","file_size_in_byte":26319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"107784781","text":"# argp@census-labs.com\r\n\r\nimport idautils\r\nimport idaapi\r\nimport ida_idaapi\r\nimport ida_search\r\nimport ida_funcs\r\nimport ida_segment\r\nimport ida_bytes\r\nimport ida_idp\r\nimport idc\r\nimport struct\r\n\r\ntrue = True\r\nfalse = False\r\nnone = None\r\n\r\nkp_flag = false\r\n\r\ntry:\r\n import keypatch\r\n kp_flag = true\r\nexcept:\r\n pass\r\n\r\nprologues = [\"7F 23 03 D5\", \"BD A9\", \"BF A9\"]\r\n\r\ndef find_panic(base_ea):\r\n pk_ea = ida_search.find_text(base_ea, 1, 1, \"double panic in \", ida_search.SEARCH_DOWN)\r\n\r\n if pk_ea != ida_idaapi.BADADDR:\r\n for xref in idautils.XrefsTo(pk_ea):\r\n func = idaapi.get_func(xref.frm)\r\n print(\"\\t[+] _panic = 0x%x\" % (func.start_ea))\r\n idc.set_name(func.start_ea, \"_panic\", idc.SN_CHECK)\r\n return func.start_ea\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_image4_load(base_ea):\r\n ea_list = ida_search.find_imm(base_ea, ida_search.SEARCH_DOWN, 0x4D650000)\r\n\r\n if ea_list[0] != ida_idaapi.BADADDR:\r\n func_ea = ida_funcs.get_func(ea_list[0]).start_ea\r\n print(\"\\t[+] _image4_load = 0x%x\" % (func_ea))\r\n idc.set_name(func_ea, \"_image4_load\", idc.SN_CHECK)\r\n return func_ea\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_img4decodeinit(base_ea):\r\n ea_list = ida_search.find_imm(base_ea, ida_search.SEARCH_DOWN, 0x494D0000)\r\n\r\n if ea_list[0] != ida_idaapi.BADADDR:\r\n func_ea = ida_funcs.get_func(ea_list[0]).start_ea\r\n ea_func_list = list(idautils.XrefsTo(func_ea))\r\n\r\n if ea_func_list[0].frm != ida_idaapi.BADADDR:\r\n i4d_ea = ida_funcs.get_func(ea_func_list[0].frm).start_ea\r\n print(\"\\t[+] _Img4DecodeInit = 0x%x\" % (i4d_ea))\r\n idc.set_name(i4d_ea, \"_Img4DecodeInit\", idc.SN_CHECK)\r\n return i4d_ea\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_aes_crypto_cmd(base_ea):\r\n aes_ea = ida_search.find_text(base_ea, 1, 1, \"aes_crypto_cmd\", ida_search.SEARCH_DOWN)\r\n\r\n if aes_ea != ida_idaapi.BADADDR:\r\n for xref in idautils.XrefsTo(aes_ea):\r\n func = idaapi.get_func(xref.frm)\r\n print(\"\\t[+] _aes_crypto_cmd = 0x%x\" % (func.start_ea))\r\n idc.set_name(func.start_ea, \"_aes_crypto_cmd\", idc.SN_CHECK)\r\n return func.start_ea\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_main_task(base_ea):\r\n du_ea = ida_search.find_text(base_ea, 1, 1, \"debug-uarts\", ida_search.SEARCH_DOWN)\r\n\r\n if du_ea != ida_idaapi.BADADDR:\r\n for xref in idautils.XrefsTo(du_ea):\r\n func = idaapi.get_func(xref.frm)\r\n print(\"\\t[+] _main_task = 0x%x\" % (func.start_ea))\r\n idc.set_name(func.start_ea, \"_main_task\", idc.SN_CHECK)\r\n return func.start_ea\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_boot_check_panic(base_ea, base_end_ea):\r\n seq_ea = ida_search.find_binary(base_ea, base_end_ea, \"1F ?? 
03 71\", 16, ida_search.SEARCH_DOWN)\r\n\r\n if seq_ea != ida_idaapi.BADADDR:\r\n func = idaapi.get_func(seq_ea)\r\n print(\"\\t[+] _boot_check_panic = 0x%x\" % (func.start_ea))\r\n idc.set_name(func.start_ea, \"_boot_check_panic\", idc.SN_CHECK)\r\n return func.start_ea\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_update_device_tree(base_ea):\r\n udt_ea = ida_search.find_text(base_ea, 1, 1, \"development-cert\", ida_search.SEARCH_DOWN)\r\n\r\n if udt_ea != ida_idaapi.BADADDR:\r\n for xref in idautils.XrefsTo(udt_ea):\r\n func = idaapi.get_func(xref.frm)\r\n print(\"\\t[+] _UpdateDeviceTree = 0x%x\" % (func.start_ea))\r\n idc.set_name(func.start_ea, \"_UpdateDeviceTree\", idc.SN_CHECK)\r\n return func.start_ea\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_macho_valid(base_ea):\r\n ea_list = ida_search.find_imm(base_ea, ida_search.SEARCH_DOWN, 0xFACF)\r\n\r\n if ea_list[0] == ida_idaapi.BADADDR:\r\n ea_list = ida_search.find_imm(base_ea, ida_search.SEARCH_DOWN, 0xFEEDFACF)\r\n \r\n if ea_list[0] != ida_idaapi.BADADDR:\r\n func_ea = ida_funcs.get_func(ea_list[0]).start_ea\r\n print(\"\\t[+] _macho_valid = 0x%x\" % (func_ea))\r\n idc.set_name(func_ea, \"_macho_valid\", idc.SN_CHECK)\r\n return func_ea\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_loaded_kernelcache(ea):\r\n ea_list = list(idautils.XrefsTo(ea))\r\n\r\n if ea_list[0].frm != ida_idaapi.BADADDR:\r\n func_ea = ida_funcs.get_func(ea_list[0].frm).start_ea\r\n print(\"\\t[+] _loaded_kernelcache = 0x%x\" % (func_ea))\r\n idc.set_name(func_ea, \"_loaded_kernelcache\", idc.SN_CHECK)\r\n return func_ea\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_load_kernelcache(ea):\r\n ea_list = list(idautils.XrefsTo(ea))\r\n\r\n if ea_list[0].frm != ida_idaapi.BADADDR:\r\n func_ea = ida_funcs.get_func(ea_list[0].frm).start_ea\r\n print(\"\\t[+] _load_kernelcache = 0x%x\" % (func_ea))\r\n idc.set_name(func_ea, \"_load_kernelcache\", idc.SN_CHECK)\r\n return func_ea\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_do_go(base_ea):\r\n str_ea = ida_search.find_text(base_ea, 1, 1, \"Memory image not valid\", ida_search.SEARCH_DOWN)\r\n\r\n if str_ea != ida_idaapi.BADADDR:\r\n for xref in idautils.XrefsTo(str_ea):\r\n func = idaapi.get_func(xref.frm)\r\n print(\"\\t[+] _do_go = 0x%x\" % (func.start_ea))\r\n idc.set_name(func.start_ea, \"_do_go\", idc.SN_CHECK)\r\n return func.start_ea\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_pmgr_binning_mode_get_value(base_ea):\r\n str_ea = ida_search.find_text(base_ea, 1, 1, \"Invalid low\", ida_search.SEARCH_DOWN)\r\n\r\n if str_ea != ida_idaapi.BADADDR:\r\n for xref in idautils.XrefsTo(str_ea):\r\n func = idaapi.get_func(xref.frm)\r\n print(\"\\t[+] _pmgr_binning_mode_get_value = 0x%x\" % (func.start_ea))\r\n idc.set_name(func.start_ea, \"_pmgr_binning_mode_get_value\", idc.SN_CHECK)\r\n return func.start_ea\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_do_printf(base_ea):\r\n str_ea = ida_search.find_text(base_ea, 1, 1, \"\", ida_search.SEARCH_DOWN)\r\n\r\n if str_ea != ida_idaapi.BADADDR:\r\n for xref in idautils.XrefsTo(str_ea):\r\n func = idaapi.get_func(xref.frm)\r\n print(\"\\t[+] _do_printf = 0x%x\" % (func.start_ea))\r\n idc.set_name(func.start_ea, \"_do_printf\", idc.SN_CHECK)\r\n return func.start_ea\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_image4_get_partial(base_ea):\r\n str_ea = idc.get_name_ea_simple(\"aImg4\")\r\n\r\n if str_ea != ida_idaapi.BADADDR:\r\n aimg4_ea = list(idautils.XrefsTo(str_ea))[0].frm\r\n\r\n if aimg4_ea == ida_idaapi.BADADDR:\r\n return 
ida_idaapi.BADADDR\r\n\r\n func = idaapi.get_func(aimg4_ea)\r\n print(\"\\t[+] _image4_get_partial = 0x%x\" % (func.start_ea))\r\n idc.set_name(func.start_ea, \"_image4_get_partial\", idc.SN_CHECK)\r\n return func.start_ea\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_putchar(base_ea):\r\n str_ea = idc.get_name_ea_simple(\"aPanic\")\r\n\r\n if str_ea != ida_idaapi.BADADDR:\r\n apanic_ea = list(idautils.XrefsTo(str_ea))[0].frm\r\n\r\n if apanic_ea == ida_idaapi.BADADDR:\r\n return ida_idaapi.BADADDR\r\n\r\n opnd0 = idc.print_operand(apanic_ea + 8, 0)\r\n ins_str = idc.print_insn_mnem(apanic_ea + 8)\r\n\r\n if ins_str == \"BL\":\r\n func_ea = idc.get_name_ea_simple(opnd0)\r\n ea = func_ea\r\n\r\n while ea != ida_idaapi.BADADDR:\r\n ins_str = idc.print_insn_mnem(ea)\r\n \r\n if ins_str == \"ADD\":\r\n opnd2 = idc.print_operand(ea, 2)\r\n \r\n if opnd2 == \"#1\":\r\n ins_ea = ea - 4\r\n opnd0 = idc.print_operand(ins_ea, 0)\r\n ins_str = idc.print_insn_mnem(ins_ea)\r\n\r\n if ins_str == \"BL\":\r\n pc_ea = idc.get_name_ea_simple(opnd0)\r\n print(\"\\t[+] _putchar = 0x%x\" % (pc_ea))\r\n idc.set_name(pc_ea, \"_putchar\", idc.SN_CHECK)\r\n return pc_ea\r\n\r\n ea = ea + 4\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_macho_load(base_ea):\r\n pz_ea = idc.get_name_ea_simple(\"aPagezero\")\r\n\r\n if pz_ea != ida_idaapi.BADADDR:\r\n if len(list(idautils.XrefsTo(pz_ea))) != 3:\r\n return ida_idaapi.BADADDR\r\n\r\n func1_ea = idaapi.get_func(list(idautils.XrefsTo(pz_ea))[0].frm).start_ea\r\n func2_ea = idaapi.get_func(list(idautils.XrefsTo(pz_ea))[1].frm).start_ea\r\n func3_ea = idaapi.get_func(list(idautils.XrefsTo(pz_ea))[2].frm).start_ea\r\n\r\n if func2_ea != func3_ea:\r\n return ida_idaapi.BADADDR\r\n\r\n if func1_ea != func2_ea:\r\n print(\"\\t[+] _macho_load = 0x%x\" % (func2_ea))\r\n idc.set_name(func2_ea, \"_macho_load\", idc.SN_CHECK)\r\n return func2_ea\r\n\r\n return ida_idaapi.BADADDR\r\n\r\ndef find_interesting(base_ea, base_end):\r\n mv_ea = find_macho_valid(base_ea)\r\n\r\n if mv_ea != ida_idaapi.BADADDR:\r\n ldk_ea = find_loaded_kernelcache(mv_ea)\r\n lk_ea = find_load_kernelcache(ldk_ea)\r\n \r\n pk_ea = find_panic(base_ea)\r\n go_ea = find_do_go(base_ea)\r\n pr_ea = find_do_printf(base_ea)\r\n i4l_ea = find_image4_load(base_ea)\r\n i4d_ea = find_img4decodeinit(base_ea)\r\n aes_ea = find_aes_crypto_cmd(base_ea)\r\n udt_ea = find_update_device_tree(base_ea)\r\n ml_ea = find_macho_load(base_ea)\r\n pgv_ea = find_pmgr_binning_mode_get_value(base_ea)\r\n i4p_ea = find_image4_get_partial(base_ea)\r\n mt_ea = find_main_task(base_ea)\r\n bc_ea = find_boot_check_panic(base_ea, base_end)\r\n\r\n pc_ea = find_putchar(base_ea)\r\n\r\n if pc_ea != ida_idaapi.BADADDR and mv_ea == ida_idaapi.BADADDR:\r\n # this is a SecureROM image\r\n segm = ida_segment.getseg(base_ea)\r\n\r\n if segm:\r\n idaapi.set_segm_name(segm, \"SecureROM\", 0)\r\n print(\"[+] Identified as a SecureROM image\")\r\n\r\ndef accept_file(fd, fname):\r\n version = 0\r\n ret = 0\r\n\r\n if type(fname) == str:\r\n fd.seek(0x280)\r\n ver_str = fd.read(0x20)\r\n\r\n try:\r\n # Python 3.x.\r\n label = \"\".join(map(chr, ver_str[:5]))\r\n except TypeError:\r\n # Python 2.x.\r\n label = ver_str[:5]\r\n\r\n if \"iBoot\" == label:\r\n version = ver_str[6:] # for later\r\n ret = {\"format\" : \"iBoot (AArch64)\", \"processor\" : \"arm\"}\r\n\r\n return ret\r\n\r\ndef load_file(fd, neflags, format):\r\n global prologues\r\n size = 0\r\n base_addr = 0\r\n ea = 0\r\n nfunc = 0\r\n\r\n idaapi.set_processor_type(\"arm\", 
ida_idp.SETPROC_LOADER_NON_FATAL)\r\n    idaapi.get_inf_structure().lflags |= idaapi.LFLG_64BIT\r\n    \r\n    if (neflags & idaapi.NEF_RELOAD) != 0:\r\n        return 1\r\n\r\n    fd.seek(0, idaapi.SEEK_END)\r\n    size = fd.tell()\r\n\r\n    segm = idaapi.segment_t()\r\n    segm.bitness = 2 # 64-bit\r\n    segm.start_ea = 0\r\n    segm.end_ea = size\r\n    idaapi.add_segm_ex(segm, \"iBoot\", \"CODE\", idaapi.ADDSEG_OR_DIE)\r\n\r\n    fd.seek(0)\r\n    fd.file2base(0, 0, size, False)\r\n\r\n    idaapi.add_entry(0, 0, \"start\", 1)\r\n    ida_funcs.add_func(ea)\r\n\r\n    print(\"[+] Marked as code\")\r\n\r\n    # heuristic\r\n    while(True):\r\n        mnemonic = idc.print_insn_mnem(ea)\r\n        \r\n        if \"LDR\" in mnemonic:\r\n            base_str = idc.print_operand(ea, 1)\r\n            base_addr = int(base_str.split(\"=\")[1], 16)\r\n            \r\n            break\r\n\r\n        ea += 4\r\n\r\n    print(\"[+] Rebasing to address 0x%x\" % (base_addr))\r\n    idaapi.rebase_program(base_addr, idc.MSF_NOFIX)\r\n\r\n    segment_start = base_addr\r\n    segment_end = idc.get_segm_attr(segment_start, idc.SEGATTR_END)\r\n\r\n    ea = segment_start\r\n\r\n    print(\"[+] Searching and defining functions\")\r\n\r\n    for prologue in prologues:\r\n        while ea != ida_idaapi.BADADDR:\r\n            ea = ida_search.find_binary(ea, segment_end, prologue, 16, ida_search.SEARCH_DOWN)\r\n            \r\n            if ea != ida_idaapi.BADADDR:\r\n                if len(prologue) < 8:\r\n                    ea = ea - 2\r\n\r\n                if (ea % 4) == 0 and ida_bytes.get_full_flags(ea) < 0x200:\r\n                    # print(\"[+] Defining a function at 0x%x\" % (ea))\r\n                    ida_funcs.add_func(ea)\r\n                    nfunc = nfunc + 1\r\n\r\n            ea = ea + 4\r\n        \r\n    idc.plan_and_wait(segment_start, segment_end)\r\n\r\n    print(\"[+] Identified %d new functions\" % (nfunc))\r\n\r\n    print(\"[+] Looking for interesting functions\")\r\n    find_interesting(segment_start, segment_end)\r\n\r\n    return 1\r\n\r\n# EOF\r\n","sub_path":"iBoot64helper.py","file_name":"iBoot64helper.py","file_ext":"py","file_size_in_byte":12534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"218536659","text":"\"\"\"\r\n--- Day 1: Chronal Calibration ---\r\n\r\n\"\"\"\r\nfrom utils import load_input\r\n\r\n\r\ndef main():\r\n    lines = load_input('https://pastebin.com/raw/ebDxSugK')\r\n    print(part_one(lines))\r\n    print(part_two(lines))\r\n\r\n\r\ndef part_one(iterable):\r\n    return sum([int(change) for change in iterable])\r\n\r\n\r\ndef part_two(iterable):\r\n    changes = [int(i) for i in iterable]\r\n\r\n    frequencies = set()\r\n    current_frequency = int()\r\n\r\n    while True:\r\n        for change in changes:\r\n            current_frequency += change\r\n            if current_frequency in frequencies:\r\n                return current_frequency\r\n            frequencies.add(current_frequency)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()","sub_path":"Advent of Code/2018/day01.py","file_name":"day01.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"172379740","text":"import matplotlib.pyplot as plt\nfrom nltk.corpus import webtext, stopwords\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.probability import FreqDist\nfrom wordcloud import WordCloud\n\n\ndef main():\n    print('*' * 80)\n\n    # text of the firefox forum\n    firefox_words = webtext.words('firefox.txt')\n\n    tokenizer = RegexpTokenizer(r'\\w+')\n\n    # lowercase tokens, without English stop words\n    firefox_tokens = [\n        token.lower() for token in tokenizer.tokenize(\n            webtext.raw('firefox.txt')\n        ) if token not in stopwords.words('english')\n    ]\n\n    print('-' * 80)\n    print(str.format(\n        '[*] words count in firefox forum: {}', 
len(firefox_words)\n    ))\n    print(str.format('[*] firefox tokens count: {}', len(firefox_tokens)))\n\n    print('-' * 80)\n\n    # frequency distribution over the Firefox forum\n    freq_dist = FreqDist(token for token in firefox_tokens)\n\n    print(str.format('[*] the most frequency term: \"{}\"', freq_dist.max()))\n    # print(str.format('[*] the most frequency term: \"{}\"', freq_dist.min()))\n\n    print(str.format(\n        '[*] frequency distribution for \"firefox\" word: {}',\n        freq_dist.freq('firefox')\n    ))\n\n    # visualize the 10 most frequent tokens\n    freq_dist.plot(10, title='Firefox top 10 tokens')\n\n    print('-' * 80)\n    # create a word cloud instance\n    world_cloud = WordCloud()\n    world_cloud_firefox = world_cloud.generate_from_frequencies(freq_dist)\n    plt.imshow(world_cloud_firefox, interpolation='bilinear')\n    plt.axis('off')\n\n    # show the plot window\n    plt.show()\n\n    print('*' * 80)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"nltk_package/probability_module/app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"267228971","text":"from flexget import plugin\n\nfrom .site_base import SiteBase\n\n\nclass NexusPHP(SiteBase):\n    @staticmethod\n    def build_sign_in_entry(entry, site_name, config, url, succeed_regex, base_url=None,\n                            wrong_regex=None):\n        site_config = entry['site_config']\n        if not isinstance(site_config, str):\n            raise plugin.PluginError('{} site_config is not a String'.format(site_name))\n        entry['url'] = url\n        entry['succeed_regex'] = succeed_regex\n        if base_url:\n            entry['base_url'] = base_url\n        if wrong_regex:\n            entry['wrong_regex'] = wrong_regex\n        headers = {\n            'cookie': site_config,\n            'user-agent': config.get('user-agent'),\n            'referer': base_url if base_url else url\n        }\n        entry['headers'] = headers\n\n    def sign_in(self, entry, config):\n        self.sign_in_by_get(entry, config)\n\n    def get_message(self, entry, config):\n        self.get_nexusphp_message(entry, config)\n","sub_path":"ptsites/nexusphp.py","file_name":"nexusphp.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"468148343","text":"import PIL.Image as img\nimport os\nimport shutil\nfrom tkinter import *\nimport tkinter.filedialog\n\n\ndef makeImg(oPath,fname):\n    fname = fname[:-4]\n    npcdat=\"E:/cqres-new/cqres/web_test/_out/assets/icon/neiguan/android/\"\n    npcPng = \"E:/cqres-new/cqres/web_test/_out/assets/icon/neiguan/__out/android/\"\n    if (os.path.exists(npcdat + fname)):\n        print(\"\")\n    else:\n        os.makedirs(npcdat + fname)\n\n    if (os.path.exists(npcPng + fname)):\n        print(\"\")\n    else:\n        os.makedirs(npcPng + fname)\n\n    if(os.path.exists(npcdat+\"idle.dat\")):\n        os.remove(npcdat+\"idle.dat\")\n    shutil.copy(oPath+\"/\"+fname+\".dat\", npcdat+fname+\"/idle.dat\")\n\n    if (os.path.exists(npcPng + \"idle.png\")):\n        os.remove(npcPng + \"idle.png\")\n    shutil.copy(oPath + \"/\" + fname + \".png\", npcPng + fname + \"/idle.png\")\n\n\n\n\ndef xz():\n    filename = tkinter.filedialog.askdirectory()\n    sPath = filename+\"/\"\n    lb.config(text=\"您选择的文件是:\" + sPath)\n    dirList = os.listdir(sPath)\n    for dirName in dirList:\n        makeImg(sPath,dirName)\n    lb.config(text=\"图片处理完毕\")\n\n\n\nroot = Tk()\nroot.title(\"拷贝资源\")\nroot.geometry('500x300')\n\nlb = Label(root,text = '点击按钮选择文件夹')\n# lb.place(x=20,y=100)\nlb.pack()\nbtn = 
Button(root,text=\"选择文件夹开始\",command=xz)\nbtn.place(x=200,y=200)\nroot.mainloop()","sub_path":"tool/CopyOutFile.py","file_name":"CopyOutFile.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"367236681","text":"import glob\nimport os\nimport re\nfrom multiprocessing import Process\nimport DiscoverApi\nfrom DiscoverDb import DiscoverDb\n\ndbPath = 'c:\\\\bin\\\\Discover\\\\database\\\\'\nfiles = [f for f in glob.glob(dbPath + \"**/*\", recursive=True)]\nwordRgx = r\"([A-Za-z0-9']+(\\b[\\,\\.?!])?)\"\ninfoRgx = r\"_{2,}\"\n\ndef processFile(filepath, lyricObjIndex):\n db = DiscoverDb()\n (_, headers) = DiscoverApi.requestAccessToken()\n with open(filepath, 'r') as f:\n song = { 'artist': '', 'name': '', 'lyrics': [] }\n for line in f:\n if re.match(infoRgx, line): # finished processing lyrics, now process song metadata\n _, name = [word.strip() for word in f.readline().split(' ')]\n _, artist = [word.strip() for word in f.readline().split(' ')]\n while (_ != \"Artist\"):\n _, artist = [word.strip() for word in f.readline().split(' ')]\n song['name'] = name\n song['artist'] = artist\n break\n else:\n for word in re.findall(wordRgx, line):\n song['lyrics'].append(word[0])\n f.close()\n name = song['name']\n artist = song['artist']\n lyrics = song['lyrics']\n spotifyId = DiscoverApi.searchSpotifyForSongId(headers, name, artist)\n if (spotifyId is not None):\n trackAttributes = DiscoverApi.getTrackAttributes(headers, spotifyId)\n db.storeLyricData(spotifyId, lyrics)\n db.storeSongData(spotifyId, name, artist, lyricObjIndex, 'lyrics-master')\n db.storeTrackData(trackAttributes)\n print(name, 'by', artist, 'stored')\n else:\n print(name, 'by', artist, 'could not be found on spotify.')\n db.shutdown()\n\nif __name__ == '__main__':\n lyricObjIndex = 0\n for file in files:\n if not os.path.isdir(file):\n lyricObjIndex += 1\n p = Process(target=processFile, args=(file, lyricObjIndex))\n p.start()\n p.join()","sub_path":"lyrics.py","file_name":"lyrics.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"636482020","text":"import matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\nimport scipy.io as sio\r\nimport sys\r\n\r\n\r\nimport numpy as np\r\nfrom PyQt5.QtWidgets import QCalendarWidget, QFontDialog, QColorDialog, QTextEdit, QFileDialog, \\\r\n QCheckBox, QLabel,QComboBox,QPushButton, QGridLayout, QMainWindow, QWidget, QLineEdit, QMessageBox, QVBoxLayout,\\\r\n QHBoxLayout, QAction\r\nfrom PyQt5.QtCore import QCoreApplication, Qt\r\n\r\n\r\nfrom matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5\r\nif is_pyqt5():\r\n from matplotlib.backends.backend_qt5agg import (\r\n FigureCanvas, NavigationToolbar2QT as NavigationToolbar)\r\nelse:\r\n from matplotlib.backends.backend_qt4agg import (\r\n FigureCanvas, NavigationToolbar2QT as NavigationToolbar)\r\nfrom matplotlib.figure import Figure\r\n\r\n\r\n\r\n\r\n#################GAGE 12/26/18#####################\r\n\r\nclass File:\r\n def __init__(self):\r\n #Open dialog window to select file path\r\n self.file_path = QFileDialog.getOpenFileName(None, \"Open File\", \"/home\", \"Matlab Files (*.mat)\")[0]\r\n if self.file_path != \"\":\r\n #Load .mat file from file path and store contents\r\n self.mat_contents = sio.loadmat(self.file_path)\r\n #Create a list of channel sets from .mat file contents\r\n self.ecog_SETS = 
list(self.mat_contents.keys())\r\n\r\nclass channel:\r\n def __init__(self, ch_select, set_select, File):\r\n #Create an array of y values based on the chosen channel collection and individual channel\r\n self.y = File.mat_contents.get(File.ecog_SETS[set_select])[:,ch_select]\r\n #Create an array of x values based on the length of y values\r\n self.x = range(0,len(self.y))\r\n def plot(self):\r\n plt.plot (self.x,self.y)\r\n def gap(self,y_gap):\r\n self.y = self.y + y_gap\r\n\r\nclass ApplicationWindow(QMainWindow):\r\n def __init__(self):\r\n QMainWindow.__init__(self)\r\n self.initUI()\r\n self.setWindowTitle(\"Graphing Application\")\r\n\r\n def initUI(self):\r\n centralwidget = QWidget()\r\n\r\n # create submenu option for later reference\r\n fileDiaglogue = QAction('&Open...', self)\r\n\r\n # create instance of menu bar\r\n mainMenu = self.menuBar()\r\n # create menu option for opening file\r\n fileMenu = mainMenu.addMenu('&File')\r\n # add submenu option to tool bar\r\n fileMenu.addAction(fileDiaglogue)\r\n # connect openFile method to submenu selection\r\n fileDiaglogue.triggered.connect(self.openFile)\r\n\r\n # create dropdown menu gui\r\n self.dropLabel = QLabel('Channel Set', self)\r\n self.comboBox = QComboBox(self)\r\n self.comboBox.currentIndexChanged.connect(self.checkSet)\r\n self.comboBox.currentIndexChanged.connect(self.updateBtns)\r\n\r\n # create y-gap textbox\r\n self.yLabel = QLabel('Y Axis Gap', self)\r\n self.textbox = QLineEdit(self)\r\n\r\n # create update button\r\n update_btn = QPushButton('Update', self)\r\n update_btn.clicked.connect(self.update)\r\n\r\n # instantiate main plot canvas\r\n plot_canvas = FigureCanvas(Figure(figsize=(5, 5)))\r\n\r\n # add toolbar to layout\r\n self.addToolBar(QtCore.Qt.BottomToolBarArea, NavigationToolbar(plot_canvas, self))\r\n\r\n self._static_ax = plot_canvas.figure.subplots()\r\n # label graph axes\r\n xtext = self._static_ax.set_xlabel('my xdata') # returns a Text instance\r\n ytext = self._static_ax.set_ylabel('my ydata')\r\n\r\n #create grid for button layout\r\n self.grid = QGridLayout()\r\n # ensures no stretching occurs when maximizing/minimizing windows\r\n #self.grid.setSpacing(1)\r\n\r\n # assign grid position to each widget\r\n self.grid.addWidget(update_btn, 0, 1, 5, 10)\r\n self.grid.addWidget(self.yLabel, 1, 1)\r\n self.grid.addWidget(self.textbox, 1, 2)\r\n self.grid.addWidget(self.comboBox, 2, 2)\r\n self.grid.addWidget(self.dropLabel, 2, 1)\r\n\r\n # create grid for channel button layout\r\n self.gridButtons = QGridLayout()\r\n\r\n # create layout for the graph canvas\r\n canvasBox = QHBoxLayout()\r\n canvasBox.addWidget(plot_canvas)\r\n\r\n # create top layout\r\n topBox = QHBoxLayout()\r\n topBox.addLayout(canvasBox)\r\n topBox.addLayout(self.grid)\r\n\r\n # create main layout\r\n mainBox = QVBoxLayout()\r\n mainBox.addLayout(topBox)\r\n mainBox.addLayout(self.gridButtons)\r\n\r\n\r\n\r\n centralwidget.setLayout(mainBox)\r\n\r\n self.setCentralWidget(centralwidget)\r\n\r\n self.selected_SET = 0\r\n\r\n # method creates an instance of the File object and fills the dropdown menu with associated channel sets\r\n def openFile(self):\r\n self.file1 = File()\r\n # clear any pre-existing channel sets\r\n self.comboBox.clear()\r\n # check that a file has been chosen by the user\r\n if self.file1.file_path != \"\":\r\n #iterate through all sets and fill the dropdown with the name of each channel set\r\n for s in range(3, len(self.file1.ecog_SETS)):\r\n self.comboBox.addItem(self.file1.ecog_SETS[s])\r\n\r\n # method 
checks what channel set is currently selected and gets its index\r\n\r\n\r\n    def checkSet(self):\r\n        # iterate through all channel sets until it matches the currently selected channel set\r\n        for s in range(3, len(self.file1.ecog_SETS)):\r\n            if self.comboBox.currentText() == self.file1.ecog_SETS[s]:\r\n                self.selected_SET = s\r\n\r\n    # method creates buttons based on the number of channels in the selected set\r\n    def updateBtns(self):\r\n        # determine the number of channels based on the number of y values in the selected set\r\n        self.num_channels = len(self.file1.mat_contents.get(self.file1.ecog_SETS[self.selected_SET])[0, :])\r\n        # create an array of checkboxes for later reference\r\n        self.box_array = list()\r\n        self.list_array = list()\r\n        # determine the maximum number of rows based on the number of channels\r\n        max_rows = np.ceil(self.num_channels / 10)\r\n        numB = 0\r\n        # for each row, determine if the row will be complete\r\n        for i in range(1, max_rows.astype(int) + 1):\r\n            if self.num_channels - i * 10 > 0:\r\n                columns = 10\r\n            else:\r\n                columns = self.num_channels % 10\r\n\r\n            # create a label for each row indicating the number of each button\r\n            self.list_array.append(QLabel())\r\n            self.list_array[i - 1].setText(str((i - 1) * 10) + '-' + str(((i - 1) * 10) + columns))\r\n            self.gridButtons.addWidget(self.list_array[i - 1], i + 2, 1)\r\n\r\n            for j in range(1, columns + 1):\r\n                self.box_array.append(QCheckBox(self))\r\n                self.gridButtons.addWidget(self.box_array[numB], i+2, j+1)\r\n                numB += 1\r\n        self.channels_array = list()\r\n        for i in range(0, self.num_channels):\r\n            self.channels_array.append(channel(i, self.selected_SET, self.file1))\r\n    def checkBtns(self):\r\n        # reinstantiate selected channels\r\n        self.channels_selected = []\r\n        # check which buttons are selected and add the respective channel to an array\r\n        for b in range(0, len(self.box_array)):\r\n            if self.box_array[b].checkState() == Qt.Checked:\r\n                self.channels_selected.append(b)\r\n    def updatePlot(self):\r\n        # clear the axes before graphing selected channels\r\n        self._static_ax.clear()\r\n        # instantiate y_gap value for later use using the current textbox value\r\n        y_gap = self.textbox.text()\r\n\r\n        # check that user has entered a y gap value\r\n        if y_gap == \"\":\r\n            QMessageBox.about(self,\"Error\", \"Please enter a y-gap value.\")\r\n        else:\r\n\r\n            for j in range(0, len(self.channels_selected)):\r\n                self.channels_array[self.channels_selected[j]].gap(float(y_gap) * j)\r\n                self._static_ax.plot(self.channels_array[self.channels_selected[j]].x, self.channels_array[self.channels_selected[j]].y)\r\n\r\n            self._static_ax.figure.canvas.draw()\r\n\r\n    def update(self):\r\n        # check if an instance of the File() object has been created\r\n        try:\r\n            self.test = self.file1\r\n        # if no File() object exists instruct the user to select a file\r\n        except:\r\n            QMessageBox.about(self, \"Error\", \"Please load a .mat file.\")\r\n        # call checkBtns() and updatePlot() methods if the file exists.\r\n        else:\r\n            self.checkBtns()\r\n            self.updatePlot()\r\n\r\nif __name__ == \"__main__\":\r\n    qapp = QtWidgets.QApplication(sys.argv)\r\n    app = ApplicationWindow()\r\n    app.show()\r\n    qapp.exec_()\r\n","sub_path":"Graphing Application/Python Files/GraphingAppLayout_1-9-19.py","file_name":"GraphingAppLayout_1-9-19.py","file_ext":"py","file_size_in_byte":8792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"289950349","text":"#!/usr/bin/env python\n\nimport argparse\nimport json\n\nfrom video_utils import 
prepare_instagram_video\n\n\ndef main():\n    parser = argparse.ArgumentParser(\n        description=\"Prepare arguments for an instagram video\")\n    parser.add_argument(\n        dest=\"path\",\n        type=str,\n        help=\"the path to the video\",\n    )\n\n    args = parser.parse_args()\n\n    res = prepare_instagram_video(args.path)\n    print(json.dumps(res))\n    \nif __name__ == \"__main__\":\n    main()\n","sub_path":"scripts/prepare_instagram_video.py","file_name":"prepare_instagram_video.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"130665181","text":"'''Passing a list to a function'''\n\ndef greet_users(names):\n\t'''Displays a simple greeting to each user in the list.'''\n\tfor name in names:\n\t\tmsg = 'Hello, ' + name.title() + '!'\n\t\tprint(msg)\n\nusernames = ['hannah', 'ty', 'margot']\n\ngreet_users(usernames)\n","sub_path":"greet_users.py","file_name":"greet_users.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"413345851","text":"'''\nModule created on 26/11/2014\n\n@author: Regina Zhang\n\n'''\n\nimport unittest\nfrom GStestcases import GSTestCases\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom constants import common\nfrom selenium.common.exceptions import *\nimport registration_login as rl\nimport pickle\nimport sys\nfrom genome_space_test import GenomeSpaceTest as GST\n\n\nclass GSFirefox(unittest.TestCase, GSTestCases):\n\n    @classmethod\n    def setUpClass(cls):\n        '''\n        a class method overriding Unittest setUpClass method\n        preparation work before the testing starts\n        '''\n        cls.parse_config()\n        cls.driver_name = \"firefox\"\n        cls.driver = webdriver.Firefox()\n        driver = cls.driver\n        driver.implicitly_wait(10)\n        cls.wait = WebDriverWait(driver,20)\n        driver.maximize_window()\n        home_page = common[\"base_url\"] + common[\"home_suffix\"]\n        try:\n            driver.get(home_page)\n            driver.implicitly_wait(20)\n            assert \"No results found.\" not in driver.page_source\n        except UnexpectedAlertPresentException:\n            alert = driver.switch_to_alert()\n            text = alert.text\n            alert.accept()\n            print >>sys.stderr, (\"Unexpected alert present: \" + text)\n        except AssertionError:\n            driver.close()\n            raise Exception(\"Page not found: \" + home_page)\n        if GST.developing:\n            # load the cookie stored last time.\n            # if cookie expired, manual deletion needed\n            try:\n                cookie_file_name = \"cookies_\" + cls.driver_name + \".pkl\"\n                cookies = pickle.load(open(cookie_file_name,\"rb\"))\n                for cookie in cookies:\n                    driver.add_cookie(cookie)\n                GST.logged_in = True\n            except IOError:\n                GST.logged_in = False\n            try:\n                driver.get(home_page)\n                assert \"No results found.\" not in driver.page_source\n            except AssertionError:\n                driver.close()\n                raise Exception(\"Page not found: \" + home_page)\n            except UnexpectedAlertPresentException:\n                alert = driver.switch_to_alert()\n                text = alert.text\n                alert.dismiss()\n                print >>sys.stderr, \"Unexpected alert present: \" + text\n\n    @classmethod\n    def tearDownClass(cls):\n        '''\n        a class method overriding the tearDownClass method in Unittest\n        close browser and quit driver when the test is done\n        '''\n        cls.driver.close()\n        cls.driver.quit()\n\nif __name__ == \"__main__\":\n    
unittest.main(verbosity=2)\n","sub_path":"source/GSfirefox.py","file_name":"GSfirefox.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"348574148","text":"import socket\nimport os\nfrom threading import *\n\nServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nhost = \"localhost\"\nport = 12345\n\nseprator_token = \"\"\ndisconectMessage =\"!DISCONNECT\"\nthreadCount = 0\nclientSockets = set()\nnameaccess={}\n\nServer.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\ntry:\n\tServer.bind((host, port))\n\nexcept socket.error as e:\n\tprint(str(e))\n\nprint(\"Server Listening on port \" + str(port))\n\nServer.listen(5)\n\ndef ServerListner(cs):\n\twhile True:\n\t\ttry:\n\t\t\tmsg = cs.recv(1024).decode()\n\t\texcept Exception as e:\n\t\t\tprint(f\"Error: {e}\")\n\t\t\tcs.close()\n\t\t\tbreak\n\t\t\t# clientSockets.remove(cs)\n\t\telse:\n\t\t\tif msg == disconectMessage:\n\t\t\t\tcs.close()\n\t\t\t\tbreak\n\n\t\t\tmsg = msg.replace(seprator_token, \": \")\n\t\t\tprint(msg)\n\t\t\tcounter=0\n\t\t\tfinname=''\n\t\t\twhile msg[counter]!=':':\n\t\t\t\tfinname+=msg[counter]\n\t\t\t\tcounter+=1\n\t\t\tcounter+=2\n\t\t\tnamestr=''\n\t\t\tcounter2=0\n\t\t\tunderscorePresent=0\n\t\t\twhile counter2 best_val_score:\n best_val_score = f1\n copyfile(\n model_file,\n os.path.join(model_dir, 'best_model.pt'))\n log.info('[new best model saved.]')\n\n\nif __name__ == '__main__':\n #test_loader()\n main()\n","sub_path":"src/.ipynb_checkpoints/maintrain-checkpoint.py","file_name":"maintrain-checkpoint.py","file_ext":"py","file_size_in_byte":11041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"424856118","text":"import torch\r\nfrom dataset import trainDataLoader,validDataLoader,testDataLoader\r\nfrom model import Net\r\nfrom train import train\r\n\r\n\r\nepochs = 500\r\n# lr = 0.1\r\nlr = 0.01\r\n\r\nnet = Net()\r\n\r\nif torch.cuda.is_available():\r\n print('CUDA is available! 
Training on GPU ...\\n')\r\n    net.cuda()\r\n\r\n#optimizer = torch.optim.SGD(net.parameters(),lr=0.01) #optimize the network with the SGD optimizer, passing in the parameters and the learning rate\r\noptimizer = torch.optim.Adam(net.parameters(),lr=lr) #optimize the network with the Adam optimizer, passing in the parameters and the learning rate\r\nloss_func = torch.nn.MSELoss() #use mean squared error for the regression\r\n\r\n\r\nfor epoch in range(epochs):\r\n    train(epoch, net, trainDataLoader, optimizer, loss_func, validDataLoader,testDataLoader)\r\n\r\nprint('trainDataLoader')\r\n\r\n\r\n\r\n\r\n","sub_path":"AlexNet_Allsites_ROI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"274653793","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\nsys.path.append(os.path.join(os.path.dirname(__file__), 'test'))\nimport face_model\n\nimport argparse\nimport cv2\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport shutil\n\nimport time\nfrom PIL import Image\n\nclass Args():\n    def __init__(self):\n        self.image_size = '112,112'\n        self.gpu = 0\n        self.model = './models/model-r50-am-lfw/model,0000'\n        self.ga_model = './models/gamodel-r50/model,0000'\n        self.threshold = 1.24\n        self.flip = 0\n        self.det = 0\nIMAGE_SIZE = 112 \ndef resize_image(image, height = IMAGE_SIZE, width = IMAGE_SIZE):\n    top, bottom, left, right = (0, 0, 0, 0)\n    #get the image dimensions\n    h, w, _ = image.shape\n\n    #for images whose height and width differ, find the longest edge\n    longest_edge = max(h, w) \n    #compute how many pixels of padding the short edge needs to match the long edge\n    if h < longest_edge:\n        dh = longest_edge - h\n        top = dh // 2\n        bottom = dh - top\n    elif w < longest_edge:\n        dw = longest_edge - w\n        left = dw // 2\n        right = dw - left\n    else:\n        pass \n    BLACK = [0, 0, 0]\n    #pad the image so that its height and width are equal; with cv2.BORDER_CONSTANT the border color is given by value\n    constant = cv2.copyMakeBorder(image, top , bottom, left, right, cv2.BORDER_CONSTANT, value = BLACK)\n    #resize the image and return it\n    return cv2.resize(constant, (height, width))\n\nargs = Args()\nmodel = face_model.FaceModel(args)\n\npath = r'../test_data/'\ndst_path = '../test_mtcnn_data/'\n# imgs = os.listdir(path)\ncnt = 0\n\nprint('finished load model')\n\nimages_list = os.listdir(path)\nfor image in images_list:\n    print(os.path.join(path, image))\n    img = cv2.imread(os.path.join(path, image))\n    out = model.get_input(img) # 3x112x112\n    try:\n        print(f'{out.shape}')\n        new_image = np.transpose(out, (1, 2, 0))[:, :, ::-1]\n    except:\n        new_image = np.transpose(img, (1, 2, 0))[:, :, ::-1]\n        new_image = resize_image(img , 112 , 112)\n    print(f'{new_image.shape}')\n    out = Image.fromarray(new_image)\n    out = out.resize((112, 112))\n    out = np.asarray(out)\n\n#     for point in points:\n#         cv2.circle(out, (point[0], point[1]), 2, (0, 0, 255), -1)\n    # cv2.putText(image, str(num), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1, cv2.LINE_AA)\n    cv2.imwrite(os.path.join(dst_path, image[:-4] + '_mtcnn.jpg'), out)\n    cnt += 1\n    ","sub_path":"get_finetune_data/get_test_face.py","file_name":"get_test_face.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"119784437","text":"# encoding: utf-8\n\nimport logging\n\nfrom django.core.paginator import Paginator\nfrom rest_framework.views import APIView\n\nfrom common.models import BackstageHTTPResponse\nfrom common.utils import log_exception\nfrom tables.filters import TablesFilter\nfrom tables.models import Tables\nfrom tables.serializers import TableSerializer\n\nlogger = logging.getLogger(__name__)\n\n\nclass TableListAPI(APIView):\n\n    @log_exception\n    def get(self, request, *args, **kwargs):\n        \"\"\"\n        Table list\n        
---\n        parameters:\n            - name: index\n              description: page number\n              type: integer\n              paramType: query\n              required: false\n            - name: number\n              description: items per page\n              type: integer\n              paramType: query\n              required: false\n            - name: name\n              description: name\n              type: string\n              paramType: query\n              required: false\n            - name: type\n              description: type\n              type: integer\n              paramType: query\n              required: false\n        \"\"\"\n        tables = Tables.objects.all()\n        tables = TablesFilter(request.GET, queryset=tables).qs\n        paginator = Paginator(tables, request.GET.get('number', 100))\n        page = paginator.page(request.GET.get('index', 1))\n        serializer = TableSerializer(page, many=True)\n        return BackstageHTTPResponse(\n            code=BackstageHTTPResponse.API_HTTP_CODE_NORMAL,\n            data=serializer.data,\n            pageinfo=page\n        ).to_response()\n\n\nclass TableDetailAPI(APIView):\n\n    @log_exception\n    def get(self, request, id, *args, **kwargs):\n        \"\"\"\n        Table detail\n        ---\n        parameters:\n            - name: id\n              description: id\n              type: integer\n              paramType: path\n              required: true\n        \"\"\"\n        table = Tables.objects.filter(id=id).first()\n        if id and not table:\n            return BackstageHTTPResponse(code=BackstageHTTPResponse.API_HTTP_CODE_INVILID_PARAMS, message='没有该表').to_response()\n\n        serializer = TableSerializer(table)\n        return BackstageHTTPResponse(\n            code=BackstageHTTPResponse.API_HTTP_CODE_NORMAL,\n            data=serializer.data,\n        ).to_response()\n","sub_path":"src/api/tables/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"233103994","text":"import argparse\nimport json\nimport numpy as np\nimport os\nimport tensorflow.keras as keras\n\n\ndef load_data(input_path):\n    path = os.path.join(input_path, 'mnist/mnist.npz')\n    with np.load(path, allow_pickle=True) as f:\n        x_train, y_train = f['x_train'], f['y_train']\n        x_test, y_test = f['x_test'], f['y_test']\n    x_train = x_train / 255.0\n    x_test = x_test / 255.0\n    return {\n        'x_train': x_train,\n        'y_train': y_train, \n        'x_test': x_test,\n        'y_test': y_test,\n    }\n\n\ndef create_model(params):\n    model = keras.models.Sequential([\n        keras.layers.Flatten(input_shape=(28, 28)),\n        keras.layers.Dense(128, activation='relu'),\n        keras.layers.Dropout(0.2),\n        keras.layers.Dense(10, activation='softmax'), # why ,?\n    ])\n\n    model.compile(\n        optimizer=keras.optimizers.Adam(learning_rate=params.learning_rate),\n        loss='sparse_categorical_crossentropy',\n        metrics=['accuracy'],\n    )\n    return model\n\n\ndef train(model, data, params):\n    def print_metrics(epoch, logs):\n        print()\n        logs = {k: str(v) for (k, v) in logs.items()}\n        print(json.dumps({'epoch': epoch, **logs}))\n\n    print_metrics_callback = keras.callbacks.LambdaCallback(on_epoch_end=print_metrics)\n\n    model.fit(data['x_train'], data['y_train'], epochs=params.epochs, callbacks=[print_metrics_callback])\n\n\ndef evaluate(model, data):\n    test_loss, test_acc = model.evaluate(data['x_test'], data['y_test'])\n    print(json.dumps({\n        'test_loss': str(test_loss),\n        'test_acc': str(test_acc),\n    }))\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--epochs', type=int, default=5) \n    parser.add_argument('--learning_rate', type=float, default=0.001)\n    params = parser.parse_args()\n\n    input_path = os.getenv('VH_INPUTS_DIR', '/valohai/inputs')\n    output_path = os.getenv('VH_OUTPUTS_DIR', '/valohai/outputs')\n\n    data = load_data(input_path)\n    model = create_model(params)\n    train(model, data, params)\n    model.save(os.path.join(output_path, 'mnist.h5'))\n    evaluate(model, 
data)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"266890737","text":"#!/usr/bin/python3\n\ndef fact(n):\n    if(n==1 or n==0):\n        return 1\n    else:\n        return n*fact(n-1)\n\ntemp = fact(100)\nprint(temp)\n\nsum = 0\nwhile(temp):\n    sum = sum + temp%10\n    temp = temp//10\n\nprint(sum)\n","sub_path":"Problem_20.py","file_name":"Problem_20.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"392908237","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\ndriver=webdriver.Firefox(executable_path=\"C:\\Drivers\\geckodriver-v0.27.0-win64\\geckodriver.exe\")\r\n\r\ndriver.get(\"http://demo.automationtesting.in/Windows.html\") #opening the website\r\n\r\ndriver.find_element(By.XPATH,\"//*[@id='Tabbed']/a/button\").click() #clicking on the button\r\n\r\nprint(driver.current_window_handle) #returns the current window's handle value and prints it off\r\n\r\nhandles=driver.window_handles #stores handle values of all the windows that are opened\r\n\r\nfor handle in handles:\r\n    driver.switch_to_window(handle)\r\n    print(driver.title) #prints the titles of the windows\r\n\r\n    if driver.title==\"Frames & windows\":\r\n        driver.close() #this will just close the parent window\r\n\r\ndriver.quit()\r\n\r\n","sub_path":"windows.py","file_name":"windows.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"128969883","text":"# -*- coding:utf-8 -*-\nfrom __future__ import unicode_literals\nfrom baidu import exceptions\nfrom baidu.models import empty\nfrom baidu.models.lbscloud import Column, GeoTable\nfrom baidu.api.base import BaseAPI\n\n\nclass GeoDataAPI(BaseAPI):\n    \"\"\"\n    \"\"\"\n    scope = 'geodata'\n    version = 'v3'\n\n    def create_geotable(self, name, is_published, geotype=1):\n        \"\"\"\n        Create table (create geotable) API.\n\n        :param name: Chinese display name of the geotable\n        :param is_published: whether to publish the table to search\n        :param geotype: type of data held by the geotable. 1: point; 2: line; 3: polygon. Defaults to 1 (only points are currently supported)\n        \"\"\"\n        # only points are currently supported\n        if geotype != 1:\n            geotype = 1\n        data = {\n            'name': name,\n            'geotype': geotype,\n            'is_published': is_published\n        }\n        result = self.post('/geotable/create', data=data)\n        return result['id'] > 0\n\n    def get_geotables(self, name=empty):\n        \"\"\"\n        List tables (list geotable) API.\n\n        :param name: name of the geotable\n        \"\"\"\n        if name is empty or not name:\n            params = {}\n        else:\n            params = {'name': name}\n        result = self.get('/geotable/list', params=params)\n        geotables = []\n        if result['size'] > 0:\n            for geotable_info in result['geotables']:\n                geotables.append(GeoTable(**geotable_info))\n\n        return geotables\n\n    def get_geotable_by_name(self, geotable_name):\n        \"\"\"\n        Get the table object by the name of the location data table.\n\n        .. 
tips:\n            This method is not part of the Baidu LBS API; it is derived from `get_geotables`.\n        \"\"\"\n        geotables = self.get_geotables(name=geotable_name)\n        if len(geotables) == 0:\n            raise exceptions.GeotableDoesNotExistException()\n        assert len(geotables) == 1, \"位置数据表的名称不唯一\"\n        return geotables[0]\n\n    def get_geotable(self, geotable_id):\n        \"\"\"\n        Get the table with the given id (detail geotable) API.\n\n        :param geotable_id: id of the geotable\n        \"\"\"\n        result = self.get('/geotable/detail', params={'id': geotable_id})\n        assert 'geotable' in result\n        return GeoTable(**result['geotable'])\n\n    def update_geotable(self, geotable_id, name=empty, is_published=empty):\n        \"\"\"\n        Update table (update geotable) API.\n\n        :param geotable_id: primary key of the geotable\n        :param name: Chinese display name of the geotable\n        :param is_published: whether to publish the table to search (triggers a batch operation)\n        \"\"\"\n        data = {'id': geotable_id}\n        if name not in (empty, None):\n            data['name'] = name\n        if is_published not in (empty, None):\n            data['is_published'] = is_published\n\n        return self.post('/geotable/update', data=data)\n\n    def delete_geotable(self, geotable_id):\n        \"\"\"\n        Delete table (geotable) API.\n\n        :param geotable_id: id of the geotable\n\n        Note: a geotable can only be deleted when it contains no valid data.\n        \"\"\"\n        return self.post('/geotable/delete', data={'id': geotable_id})\n\n    def create_column(self, geotable_id, column):\n        \"\"\"\n        Create column (create column) API.\n\n        :param geotable_id: id of the geotable the column belongs to\n        :param column: column definition\n        \"\"\"\n        assert isinstance(column, Column)\n        data = {'geotable_id': geotable_id}\n        data.update(column.data)\n        if 'id' in data:\n            data.pop('id')\n        result = self.post('/column/create', data=data)\n        return result['id'] > 0\n\n    def get_columns(self, geotable_id, name=None, key=None):\n        \"\"\"\n        List columns (list column) API.\n\n        :param geotable_id: id of the geotable the columns belong to\n        :param name: Chinese display name of the geotable meta attribute\n        :param key: key of the attribute stored in the geotable meta\n        \"\"\"\n        params = {'geotable_id': geotable_id}\n        if name is not empty and name:\n            params['name'] = name\n        if key is not empty and key:\n            params['key'] = key\n        result = self.get('/column/list', params=params)\n        column_list = []\n        if result['size'] > 0:\n            for column_info in result['columns']:\n                column_list.append(Column(**column_info))\n        return column_list\n\n    def get_column(self, geotable_id, column_id):\n        \"\"\"\n        Get the column with the given id (detail column) API.\n\n        :param geotable_id: id of the geotable the column belongs to\n        :param column_id: id of the column\n\n        \"\"\"\n        params = {'geotable_id': geotable_id, 'id': column_id}\n        result = self.get('/column/detail', params=params)\n        return Column(**result['column'])\n\n    def update_column(self, geotable_id, column_id, **kwargs):\n        \"\"\"\n        Update column (column) API.\n\n        :param geotable_id: id of the geotable the column belongs to\n        :param column_id: id of the column\n\n        :param name: Chinese display name of the attribute, optional\n        :param default_value: default value, optional\n        :param max_length: maximum text length, optional\n        :param is_sortfilter_field: whether it is a numeric sort/filter field for the search engine, optional\n        :param is_search_field: whether it is a full-text search field for the search engine, optional\n        :param is_index_field: whether it is an index field for the storage engine, optional\n        :param is_unique_field: whether it is a unique index field for the storage engine, optional\n        \"\"\"\n        optionals = (\n            'name', 'default_value', 'max_length', 'is_sortfilter_field',\n            'is_search_field', 'is_index_field', 'is_unique_field'\n        )\n        data = {'geotable_id': geotable_id, 'id': column_id}\n        for key in list(kwargs):\n            if key not in optionals:\n                kwargs.pop(key)\n        if 'key' in kwargs and kwargs['key'] is empty:\n            kwargs.pop('key')\n        data.update(kwargs)\n        return self.post('/column/update', data=data)\n\n    def delete_column(self, geotable_id, column_id):\n        \"\"\"\n        Delete column (column) API.\n\n        :param geotable_id: id of the geotable the column belongs to\n        :param column_id: id of the column\n        \"\"\"\n        data = {'geotable_id': geotable_id, 'id': column_id}\n        return self.post('/column/delete', data=data)\n\n    def create_poi(\n            self,\n            geotable_id,\n            
longitude,\n            latitude,\n            coord_type,\n            **kwargs):\n        \"\"\"\n        Create data (create poi) API.\n\n        TODO: key/value pairs defined by the user in the columns?\n\n        :param geotable_id: identifier of the geotable the record belongs to\n        :param longitude: longitude uploaded by the user\n        :param latitude: latitude uploaded by the user\n        :param coord_type: type of the coordinates uploaded by the user\n        :param title: poi name, optional\n        :param address: address, optional\n        :param tags: tags, optional\n        \"\"\"\n        data = {\n            'geotable_id': geotable_id,\n            'latitude': latitude,\n            'longitude': longitude,\n            'coord_type': coord_type,\n        }\n        data.update(kwargs)\n        result = self.post('/poi/create', data=data)\n        return result['id']\n\n    def get_pois(self, geotable_id, page_index=0, page_size=10, **kwargs):\n        \"\"\"\n        List data (poi) matching the given conditions API.\n\n        The column must have is_index_field=1 set. For string columns the match is exact on both ends. For int or double columns it is a range lookup, passed in the format min,max; when there is no min or max, use - instead. This field may be at most 50 characters long, and min and max must both be integers.\n        Example: if a column named color with data type string is added, a query of the form \"color=red\" can be used to retrieve POIs whose color field is red.\n\n        :param geotable_id: identifier of the geotable the record belongs to\n        :param title: name of the record (data)\n        :param tags: tags of the record (used for search filtering)\n        :param bounds: rectangular query region\n        :param page_index: page index\n        :param page_size: page size\n\n        \"\"\"\n\n        params = {'geotable_id': geotable_id}\n        params.update(kwargs)\n        return self.get('/poi/list', params=params)\n\n    def get_poi(self, geotable_id, poi_id):\n        \"\"\"\n        Get the data (poi) with the given id (detail) API.\n\n        :param geotable_id: primary key of the geotable\n        :param poi_id: primary key of the poi\n\n        \"\"\"\n        params = {'geotable_id': geotable_id, 'id': poi_id}\n        return self.get('/poi/detail', params=params)\n\n    def update_poi(self, geotable_id, poi_id, coord_type=3, **kwargs):\n        \"\"\"\n        Update data (poi) API.\n\n        :param geotable_id: identifier of the geotable the record belongs to\n        :param poi_id: id of the poi\n        :param coord_type: type of the coordinates uploaded by the user\n        \"\"\"\n\n        data = {\n            'geotable_id': geotable_id,\n            'id': poi_id,\n            'coord_type': coord_type\n        }\n        data.update(kwargs)\n        result = self.post('/poi/update', data=data)\n        return result['id'] == poi_id\n\n    def delete_poi(\n            self,\n            geotable_id,\n            poi_id=empty,\n            poi_ids=empty,\n            is_total_del=empty,\n            **kwargs\n    ):\n        \"\"\"\n        Delete data (poi) API (batch supported).\n\n        :param geotable_id: geotable_id\n        :param poi_id: if this parameter is passed, all other delete conditions are ignored and the operation is not a batch request; at most one poi is deleted\n        :param poi_ids: at most 1000 ids; if present, the other conditions are ignored.\n        :param title: name\n        :param tags: tags\n        :param bounds: string, rectangular query region in the format x1,y1;x2,y2, giving the top-left and bottom-right corners of the rectangle\n\n        \"\"\"\n        data = {\n            'geotable_id': geotable_id,\n        }\n        if poi_id is not empty:\n            data['id'] = poi_id\n        elif poi_ids is not empty and isinstance(poi_ids, list):\n            data['ids'] = ','.join(poi_ids)\n        else:\n            data.update(kwargs)\n\n        # for a batch delete this parameter must be passed with value 1; for a non-batch delete it should be omitted\n        if is_total_del == 1:\n            data['is_total_del'] = 1\n        self.post('/poi/delete', data=data)\n        return True\n\n    # def upload_poi(self,):\n    #     \"\"\"\n    #\n    #     \"\"\"\n","sub_path":"baidu/api/map/lbscloud.py","file_name":"lbscloud.py","file_ext":"py","file_size_in_byte":10340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"460031834","text":"import numpy as np\nfrom itertools import cycle\n\ndef DataIterator(x, y, batch_size=16, shuffle_on_epoch=True):\n\t\"\"\"\n\tgiven x, y and batch size returns a cyclical data iterator.\n\t\"\"\"\n\n\tassert x.shape[0] == y.shape[0], \"x and y must have same length.\"\n\tdata_size = x.shape[0]\n\n\tshuffle_in_unison(x, y)\n\n\ti = 0\n\twhile 1:\n\t\tif i + batch_size >= data_size:\n\t\t\twrap_around = batch_size + i - data_size\n\t\t\tx_batch = np.concatenate((x[i:,], x[:wrap_around,]), axis=0)\n\t\t\ty_batch = np.concatenate((y[i:,], y[:wrap_around,]), axis=0)\n\t\t\tif shuffle_on_epoch:\n\t\t\t\tshuffle_in_unison(x, y) # re-shuffle the data after every epoch\n\n\t\telse:\n\t\t\tx_batch = x[i:i + batch_size]\n\t\t\ty_batch = 
y[i:i + batch_size]\n\n\t\ti = (i + batch_size) % data_size\n\n\t\tyield x_batch, y_batch\n\ndef DataIteratorSorted(x, y, groupings, batch_size=16, shuffle_on_epoch=True):\n\t\"\"\"\n\tgiven x, y, groupings and batch size returns a cyclical data iterator.\n\tEach batch contains only elements from the same group, and each group gets equal representation.\n\t\"\"\"\n\tassert x.shape[0] == y.shape[0], \"x and y must have same length.\"\n\tdata_size = x.shape[0]\n\n\tgroup_idxs = {g:np.where(groupings==g) for g in set(groupings)}\n\tgroup_xs = {g:x[group_idxs[g]] for g in group_idxs.keys()}\n\tgroup_ys = {g:y[group_idxs[g]] for g in group_idxs.keys()}\n\tgroup_iterators = [DataIterator(x=group_xs[g], y=group_ys[g], batch_size=batch_size, shuffle_on_epoch=shuffle_on_epoch)\n\t\t\t\t\t\tfor g in group_idxs.keys()]\n\n\titerator_getter = cycle(group_iterators)\n\twhile 1:\n\t\tx, y = next(next(iterator_getter))\n\t\tyield x, y\ndef zipper_sort(x, y):\n\tzipper = zip(x, y)\n\tzipper = sorted(zipper, key=lambda elem:tuple(elem[1]))\n\tx, y = (np.array(data) for data in zip(*zipper))\n\treturn x, y\n\ndef shuffle_in_unison(a, b):\n\tassert(a.shape[0] == b.shape[0])\n\trng_state = np.random.get_state()\n\tnp.random.shuffle(a)\n\tnp.random.set_state(rng_state)\n\tnp.random.shuffle(b)\n\nif __name__ == \"__main__\":\n\tx = np.array(list(range(20)))\n\ty = np.array([[i]*5 for i in range(4)]).flatten()\n\tgroupings = np.array([[i]*10 for i in range(2)]).flatten()\n\tdIter = DataIteratorSorted(x=x, y=y, groupings=groupings, batch_size=3)\n\tfor i in range(10):\n\t\tprint(next(dIter))","sub_path":"utilities/data_iterator.py","file_name":"data_iterator.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"622673218","text":"\"\"\"Confirmation report generator for CLN and FRN-CLN basket trades.\n\nThis script is invoked by calling the homonymous ASQL query.\n\"\"\"\n# Desk Requester Developer CR Number\n# What\n# =============================================================================\n\n# OPS Sipho Ndlalane Lukas Paluzga ABITFA-1269\n# Refactored to use common base; Created CLN report.\n\n# OPS Letitia Carboni Lukas Paluzga ABITFA-1795\n# New letterhead\n\n# OPS Sipho Ndlalane\tSanele Macanda\t\tCHNG0001662676 - ABITFA -No Jira (23/01/2014)\n# Replaced os.startfile() with startFile() see SAGEN_IT_Functions\n\n\nimport os, time\n\nimport acm\n\nimport at, SACM_Trade_Confirmation_PDF as stc\nfrom XMLReport import mkinfo, mkvalues, mkcaption\nfrom zak_funcs import formcurr\nfrom SAGEN_IT_Functions import startFile\n\nclass ReturnConfirmationReport(stc.FRNReportBase):\n def validate_trade(self):\n if self.instrument.InsType() == at.INST_FRN:\n if at.addInfo.get_value(self.trade, at.addInfoSpecEnum.MM_INSTYPE) != 'CLN':\n acm.Log('Instrument type FRN is not an allowed unless CLN is specified in {0}. '.format(at.addInfoSpecEnum.MM_INSTYPE))\n return False\n\n return super(stc.FRNReportBase, self).validate_trade()\n\n def statement_detail(self):\n yield mkinfo(\"\"\"The Note described in this document is subject to the terms and conditions set out in the Applicable Pricing \\\nSupplement and the General Terms and Conditions of the Notes set out in the Programme Memorandum dated 19 July 2007 relating \\\nto the Issuer's Credit-linked Note Programme (the \"Programme Memorandum\"). This document must be read in conjunction with \\\nthe Applicable Pricing Supplement and the Programme Memorandum. 
In the event of any inconsistency between this document and \\\nthe Applicable Pricing Supplement, the Applicable Pricing Supplement will prevail. In the event of any inconsistency between \\\nthe Applicable Pricing Supplement and the Programme Memorandum, the Applicable Pricing Supplement will prevail.\"\"\")\r\n        \r\n        yield mkcaption('CLN ADVICE NOTE') \r\n        \r\n        if self.trade.Bought():\r\n            yield mkinfo(\"We confirm having BOUGHT the following credit-linked note FROM you.\")\r\n        else:\r\n            yield mkinfo(\"We confirm having SOLD the following credit-linked note TO you.\")\r\n\r\n        values = [[\"DEAL REFERENCE:\", self.trade.Name()],\r\n                  [\"COUNTERPARTY CODE:\", self.trade.Counterparty().Name()],\r\n                  [\"NUTRON CODE:\", self.trade.Counterparty().HostId()],\r\n                  [\"UNEXCOR CODE:\", self.unexCor()],\r\n                  [\"CLN DESCRIPTION\", self.instrument_externalid1()],\r\n                  [\"DEAL DATE:\", time.strftime('%d/%m/%Y %H:%M:%S ', time.localtime(self.trade.CreateTime()))],\r\n                  [\"SETTLEMENT DATE:\", self.trade.ValueDay()],\r\n                  [\"NOMINAL VALUE:\", formcurr(abs(self.trade.Nominal()))],\r\n                  [\"CONSIDERATION:\", formcurr(self.trade_ael.premium)]]\r\n        \r\n        try:\r\n            values.append([\"ISSUED BY:\", self.instrument.Issuer().Name()])\r\n        except AttributeError:\r\n            pass\r\n\r\n        # Do not show maturity date for combination/basket CLNs\r\n        if not self.is_combination_or_basket():\r\n            values.append([\"MATURITY DATE:\", self.instrument.ExpiryDateOnly()])\r\n\r\n        values.append([\"ALL-IN-PRICE:\", self.trade.Price()])\r\n        values.append([\"ACCRUED INTEREST:\", formcurr(self.traded_interest())])\r\n        \r\n        yield mkvalues(*values)\r\n\r\nael_gui_parameters = {'windowCaption':'CLN Confirmation'}\r\nael_variables = stc.get_ael_variables('Y:/Jhb/Ops CM/Capital Markets Confirmations/CLN Confirmations/')\r\n\r\ndef ael_main(parameters):\r\n    trade_numbers = stc.parse_trade_numbers(parameters['TradeNumber'])\r\n\r\n    if trade_numbers:\r\n        allowed_instypes = [at.INST_FRN, at.INST_COMBINATION]\r\n        reporter = stc.Reporter(stc.prep_reporter_args('CLN', ReturnConfirmationReport, allowed_instypes, parameters))\r\n\r\n        output_file_name = reporter.create_reports(trade_numbers)\r\n        startFile(output_file_name)\r\n\r\ndef ASQL(*rest):\r\n    acm.RunModuleWithParameters('SACM_CLN_Confirmation_PDF', 'Standard') #@UndefinedVariable\r\n    return 'SUCCESS'\r\n\r\n","sub_path":"Python modules/SACM_CLN_Confirmation_PDF.py","file_name":"SACM_CLN_Confirmation_PDF.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"80286649","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# @Time : 2019/7/18 10:38 PM\n# @Author : Aries\n# @Site :\n# @File : Dnn-master.py\n# @Software: PyCharm\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.contrib import layers\n\n'''\nDefine the number of training steps\n'''\ntraing_steps = 30000\n\n'''\nDefine the input data and the corresponding labels, filled in inside the for loop\nbatch data input\n'''\ndata = []\nlabel = []\nfor i in range(200):\n\tx1 = np.random.uniform(-1, 1)\n\tx2 = np.random.uniform(0, 2)\n\t'''\n\tStrategy: check x1 and x2; if the point falls inside the circle of radius 1 centered at the origin, label = 0,\n\t\totherwise 1\n\t'''\n\tif x1**2 + x2**2 <= 1:\n\t\tdata.append([np.random.normal(x1, 0.1), np.random.normal(x2, 0.1)])\n\t\tlabel.append(0)\n\telse:\n\t\tdata.append([np.random.normal(x1, 0.1), np.random.normal(x2, 0.1)])\n\t\tlabel.append(1)\n\n'''\nnumpy's hstack() function stacks elements horizontally\nprototype: numpy.hstack(tup), where tup can be a tuple, a list or a numpy array\nreshape is used to reshape the result\n'''\ndata = np.hstack(data).reshape(-1, 2)\nlabel = np.hstack(label).reshape(-1, 1)\n\n'''\nDefine the hidden layers that perform the forward pass\n'''\n\n\ndef hidden_layer(input, w1, b1, w2, b2, w3, b3):\n\tlayer1 = tf.nn.relu(tf.matmul(input, w1) + b1)\n\tlayer2 = 
tf.nn.relu(tf.matmul(layer1, w2) + b2)\n\treturn tf.matmul(layer2, w3) + b3\n\n\nx = tf.placeholder(tf.float32, shape=(None, 2), name='x-input')\ny_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-output')\n\n'''\nDefine the weight and bias parameters\n'''\nw1 = tf.Variable(tf.truncated_normal([2, 10], stddev=0.1))\nb1 = tf.Variable(tf.constant(0.1, shape=[10]))\nw2 = tf.Variable(tf.truncated_normal([10, 10], stddev=0.1))\nb2 = tf.Variable(tf.constant(0.1, shape=[10]))\nw3 = tf.Variable(tf.truncated_normal([10, 1], stddev=0.1))\nb3 = tf.Variable(tf.constant(0.1, shape=[1]))\n\n'''\nRecord the length of data with len\n'''\nsample_size = len(data)\n\n'''\nGet the forward-propagation result of the hidden layers\n'''\ny = hidden_layer(x, w1, b1, w2, b2, w3, b3)\n\n'''\nCustom loss function\n'''\nerror_loss = tf.reduce_sum(tf.pow(y_ - y, 2)) / sample_size\ntf.add_to_collection('loss', error_loss)\n\nregularizer = layers.l2_regularizer(0.01)\nregularization = regularizer(w1) + regularizer(w2) + regularizer(w3)\ntf.add_to_collection('loss', regularization)\n\n'''\nget_collection() fetches all the loss values by name and sums them\n'''\nloss = tf.add_n(tf.get_collection('loss'))\n\n'''\nDefine an optimizer for the gradient updates\nlearning rate fixed at 0.01\n'''\ntraing_op = tf.train.AdamOptimizer(0.01).minimize(loss)\n\n'''\nexecute\n'''\nwith tf.Session() as sess:\n\t'''\n\tInitialize the tf variables\n\t'''\n\ttf.global_variables_initializer().run()\n\t\n\t'''\n\tRun 30000 iterations\n\t'''\n\tfor i in range(traing_steps):\n\t\tsess.run(traing_op, feed_dict={x: data, y_: label})\n\t\t\n\t\t'''\n\t\tPrint the loss value every 2000 steps\n\t\t'''\n\t\tif i % 2000 == 0:\n\t\t\tloss_value = sess.run(loss, feed_dict={x: data, y_: label})\n\t\t\tprint('after %d steps, loss value is %f' % (i, loss_value))\n","sub_path":"method_of_optimizing_network/DNN/Dnn-master.py","file_name":"Dnn-master.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"99494932","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nimport json\nimport pandas as pd\nimport numpy as np\nfrom UtilityLib import UtilityLib\n\n\n# In[2]:\n\n\nclass LuisAppProcessor:\n    \n    def __read_file(self, file_path):\n        with open(file_path) as file:\n            return json.load(file)\n\n    def __extract_utterances_data(self, json_data):\n\n        utterance_data = json_data['utterances']\n\n        text_col_meta = ('text','object')\n        intent_col_meta = ('intent','object')\n        file_data_types = [text_col_meta, intent_col_meta]\n\n        intent_values = []\n        for utt in utterance_data:\n            # fetching utt['text'] and utt['intent']\n            value = (utt[text_col_meta[0]], utt[intent_col_meta[0]])\n            intent_values.append(value)\n\n        values = np.array(intent_values, dtype=file_data_types)\n        return values\n\n    def generate_intent_file(self, file_path):\n        file_path = os.path.abspath(file_path)\n        bot_file_data = self.__read_file(file_path)\n        utts = self.__extract_utterances_data(bot_file_data)\n        df = pd.DataFrame(data=utts, index=[i for i in range(len(utts))])\n\n        ul = UtilityLib()\n        dir_name = ul.get_directory_path(file_path)\n        file_name_wo_ext = ul.get_file_name(file_path)\n        new_file_name = file_name_wo_ext + '_intent.csv'\n\n        new_file_path = ul.path_join(dir_name, new_file_name)\n        df.to_csv(new_file_path, index=False)\n        return new_file_path\n\n    def get_entities(self, file_path):\n        json_data = self.__read_file(file_path)\n        entities = [ent['name'] for ent in json_data['entities']]\n        return entities\n\n    def get_entity_training_data(self, file_path):\n\n        json_data = self.__read_file(file_path)\n\n        utterance_data = json_data['utterances']\n        entity_training_data = []\n        for utt in utterance_data:\n            entities = []\n            for entity in 
utt['entities']:\n # In endPos, add 1 as Python uses these indexes slightly differently than Microsoft.\n entities.append((entity['startPos'], entity['endPos'] + 1, entity['entity']))\n\n ent_train_data = (utt['text'], {'entities': entities})\n entity_training_data.append(ent_train_data)\n return entity_training_data\n\n","sub_path":"Natural Language/Final Scripts/luis_app_processor.py","file_name":"luis_app_processor.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"170782716","text":"\"\"\"\nCore scaffolding for divide and conquer extraction algorithm\n\"\"\"\n\nimport sys\n\nimport numpy as np\n\ntry:\n import cupy as cp\nexcept ImportError:\n pass\n\nfrom gpu_specter.util import get_logger\nfrom gpu_specter.util import get_array_module\nfrom gpu_specter.util import Timer\nfrom gpu_specter.util import gather_ndarray\n\nclass Patch(object):\n def __init__(self, ispec, iwave, bspecmin, nspectra_per_patch, nwavestep, wavepad, nwave,\n bundlesize, ndiag):\n \"\"\"Convenience data wrapper for divide and conquer extraction patches\n\n Args:\n ispec: starting spectrum index\n iwave: starting wavelength index\n bspecmin: starting spectrum index of the bundle that this patch belongs to\n nspectra_per_patch: number of spectra to extract (not including padding)\n nwavestep: number of wavelengths to extract (not including padding)\n wavepad: number of extra wave bins to extract (and discard) on each end\n nwave: number of wavelength bins in for entire bundle\n bundlesize: size of fiber bundles\n ndiag: number of diagonal elements to keep in the resolution matrix\n\n All args become attributes.\n\n Additional attributes created:\n specslice: where this patch goes in the bundle result array\n waveslice: where this patch goes in the bundle result array\n keepslice: wavelength slice to keep from padded patch (the final patch in the bundle\n will be narrower when (nwave % nwavestep) != 0)\n \"\"\"\n\n self.ispec = ispec\n self.iwave = iwave\n\n self.nspectra_per_patch = nspectra_per_patch\n self.nwavestep = nwavestep\n\n #- padding to apply to patch\n self.wavepad = wavepad\n\n #- where this patch should go\n #- note: spec indexing is relative to subbundle\n self.bspecmin = bspecmin\n self.specslice = np.s_[ispec-bspecmin:ispec-bspecmin+nspectra_per_patch]\n self.waveslice = np.s_[iwave-wavepad:iwave-wavepad+nwavestep]\n\n #- how much of the patch to keep\n nwavekeep = min(nwavestep, nwave - (iwave-wavepad))\n self.keepslice = np.s_[0:nwavekeep]\n\n #- to help with reassembly\n self.nwave = nwave\n self.bundlesize = bundlesize\n self.ndiag = ndiag\n\n\ndef assemble_bundle_patches(rankresults):\n \"\"\"\n Assembles bundle patches into output arrays\n\n Args:\n rankresults: list of lists containing individual patch extraction results\n\n Returns:\n (spexflux, specivar, Rdiags) tuple\n \"\"\"\n\n #- flatten list of lists into single list\n allresults = list()\n for rr in rankresults:\n allresults.extend(rr)\n\n #- peak at result to get bundle params\n patch = allresults[0][0]\n nwave = patch.nwave\n bundlesize = patch.bundlesize\n ndiag = patch.ndiag\n\n xp = get_array_module(allresults[0][1]['flux'])\n\n #- Allocate output arrays to fill\n specflux = xp.zeros((bundlesize, nwave))\n specivar = xp.zeros((bundlesize, nwave))\n Rdiags = xp.zeros((bundlesize, 2*ndiag+1, nwave))\n\n #- Now put these into the final arrays\n for patch, result in allresults:\n fx = result['flux']\n fxivar = result['ivar']\n xRdiags 
= result['Rdiags']\n\n #- put the extracted patch into the output arrays\n specflux[patch.specslice, patch.waveslice] = fx[:, patch.keepslice]\n specivar[patch.specslice, patch.waveslice] = fxivar[:, patch.keepslice]\n Rdiags[patch.specslice, :, patch.waveslice] = xRdiags[:, :, patch.keepslice]\n\n return specflux, specivar, Rdiags\n\n\ndef extract_bundle(image, imageivar, psf, wave, fullwave, bspecmin, bundlesize=25, nsubbundles=1,\n nwavestep=50, wavepad=10, comm=None, gpu=None, loglevel=None):\n \"\"\"\n Extract 1D spectra from a single bundle of a 2D image.\n\n Args:\n image: full 2D array of image pixels\n imageivar: full 2D array of inverse variance for the image\n psf: dictionary psf object (see gpu_specter.io.read_psf)\n wave: 1D array of wavelengths to extract\n fullwave: Padded 1D array of wavelengths to extract\n bspecmin: index of the first spectrum in the bundle\n\n Options:\n bundlesize: fixed number of spectra per bundle (25 for DESI)\n nsubbundles: number of spectra per patch\n nwavestep: number of wavelength bins per patch\n wavepad: number of wavelengths bins to add on each end of patch for extraction\n comm: mpi communicator (no mpi: None)\n rank: integer process identifier (no mpi: 0)\n size: number of mpi processes (no mpi: 1)\n gpu: use GPU for extraction (not yet implemented)\n loglevel: log print level\n\n Returns:\n bundle: (flux, ivar, R) tuple\n\n \"\"\"\n timer = Timer()\n\n if comm is None:\n rank = 0\n size = 1\n else:\n rank = comm.rank\n size = comm.size\n\n log = get_logger(loglevel)\n\n #- Extracting on CPU or GPU?\n if gpu:\n from gpu_specter.extract.gpu import \\\n get_spots, ex2d_padded\n else:\n from gpu_specter.extract.cpu import \\\n get_spots, ex2d_padded\n\n nwave = len(wave)\n ndiag = psf['PSF'].meta['HSIZEY']\n\n timer.split('init')\n\n #- Cache PSF spots for all wavelengths for spectra in this bundle\n if gpu:\n cp.cuda.nvtx.RangePush('get_spots')\n spots, corners = get_spots(bspecmin, bundlesize, fullwave, psf)\n if gpu:\n cp.cuda.nvtx.RangePop()\n\n timer.split('spots/corners')\n\n #- Size of the individual spots\n spot_nx, spot_ny = spots.shape[2:4]\n\n #- Organize what sub-bundle patches to extract\n patches = list()\n nspectra_per_patch = bundlesize // nsubbundles\n for ispec in range(bspecmin, bspecmin+bundlesize, nspectra_per_patch):\n for iwave in range(wavepad, wavepad+nwave, nwavestep):\n patch = Patch(ispec, iwave, bspecmin,\n nspectra_per_patch, nwavestep, wavepad,\n nwave, bundlesize, ndiag)\n patches.append(patch)\n\n if rank == 0:\n log.info(f'Dividing {len(patches)} patches between {size} ranks')\n\n timer.split('organize patches')\n\n #- place to keep extraction patch results before assembling in rank 0\n results = list()\n for patch in patches[rank::size]:\n\n log.debug(f'rank={rank}, ispec={patch.ispec}, iwave={patch.iwave}')\n\n #- Always extract the same patch size (more efficient for GPU\n #- memory transfer) then decide post-facto whether to keep it all\n\n if gpu:\n cp.cuda.nvtx.RangePush('ex2d_padded')\n\n result = ex2d_padded(image, imageivar,\n patch.ispec-bspecmin, patch.nspectra_per_patch,\n patch.iwave, patch.nwavestep,\n spots, corners,\n wavepad=patch.wavepad,\n bundlesize=bundlesize)\n if gpu:\n cp.cuda.nvtx.RangePop()\n\n results.append( (patch, result) )\n\n timer.split('extracted patches')\n\n if comm is not None:\n if gpu:\n # If we have gpu and an MPI comm for this bundle, transfer data\n # back to host before assembling the patches\n patches = []\n flux = []\n fluxivar = []\n resolution = []\n for patch, 
results in results:\n patches.append(patch)\n flux.append(results['flux'])\n fluxivar.append(results['ivar'])\n resolution.append(results['Rdiags'])\n\n # transfer to host in 3 chunks\n cp.cuda.nvtx.RangePush('copy bundle results to host')\n device_id = cp.cuda.runtime.getDevice()\n log.info(f'Rank {rank}: Moving bundle {bspecmin} patches to host from device {device_id}')\n flux = cp.asnumpy(cp.array(flux, dtype=cp.float64))\n fluxivar = cp.asnumpy(cp.array(fluxivar, dtype=cp.float64))\n resolution = cp.asnumpy(cp.array(resolution, dtype=cp.float64))\n cp.cuda.nvtx.RangePop()\n\n # gather to root MPI rank\n patches = comm.gather(patches, root=0)\n flux = gather_ndarray(flux, comm, root=0)\n fluxivar = gather_ndarray(fluxivar, comm, root=0)\n resolution = gather_ndarray(resolution, comm, root=0)\n\n if rank == 0:\n # unpack patches\n patches = [patch for rankpatches in patches for patch in rankpatches]\n # repack everything\n rankresults = [\n zip(patches, \n map(lambda x: dict(flux=x[0], ivar=x[1], Rdiags=x[2]), \n zip(flux, fluxivar, resolution)\n )\n )\n ]\n else:\n rankresults = comm.gather(results, root=0)\n else:\n # this is fine for GPU w/out MPI comm\n rankresults = [results,]\n\n timer.split('gathered patches')\n\n bundle = None\n if rank == 0:\n if gpu:\n cp.cuda.nvtx.RangePush('assemble patches on device')\n device_id = cp.cuda.runtime.getDevice()\n log.info(f'Rank {rank}: Assembling bundle {bspecmin} patches on device {device_id}')\n bundle = assemble_bundle_patches(rankresults)\n if gpu:\n cp.cuda.nvtx.RangePop()\n if comm is None:\n cp.cuda.nvtx.RangePush('copy bundle results to host')\n device_id = cp.cuda.runtime.getDevice()\n log.info(f'Rank {rank}: Moving bundle {bspecmin} to host from device {device_id}')\n bundle = tuple(cp.asnumpy(x) for x in bundle)\n cp.cuda.nvtx.RangePop()\n timer.split('assembled patches')\n timer.log_splits(log)\n return bundle\n\n\ndef extract_frame(img, psf, bundlesize, specmin, nspec, wavelength=None, nwavestep=50, nsubbundles=1,\n comm=None, rank=0, size=1, gpu=None, loglevel=None):\n \"\"\"\n Extract 1D spectra from 2D image.\n\n Args:\n img: dictionary image object (see gpu_specter.io.read_img)\n psf: dictionary psf object (see gpu_specter.io.read_psf)\n bundlesize: fixed number of spectra per bundle (25 for DESI)\n specmin: index of first spectrum to extract\n nspec: number of spectra to extract\n\n Options:\n wavelength: wavelength range to extract, formatted as 'wmin,wmax,dw'\n nwavestep: number of wavelength bins per patch\n nsubbundles: number of spectra per patch\n comm: mpi communicator (no mpi: None)\n rank: integer process identifier (no mpi: 0)\n size: number of mpi processes (no mpi: 1)\n gpu: use GPU for extraction (not yet implemented)\n loglevel: log print level\n\n Returns:\n frame: dictionary frame object (see gpu_specter.io.write_frame)\n \"\"\"\n\n timer = Timer()\n\n log = get_logger(loglevel)\n\n #- Determine MPI communication strategy based on number of gpu devices and MPI ranks\n if gpu:\n import cupy as cp\n #- TODO: specify number of gpus to use?\n device_count = cp.cuda.runtime.getDeviceCount()\n assert size % device_count == 0, 'Number of MPI ranks must be divisible by number of GPUs'\n device_id = rank % device_count\n cp.cuda.Device(device_id).use()\n\n #- Divide mpi ranks evenly among gpus\n device_size = size // device_count\n bundle_rank = rank // device_count\n\n if device_count > 1:\n #- Multi gpu, MPI communication needs to happen at frame level\n frame_comm = comm.Split(color=bundle_rank, key=device_id)\n if 
device_size > 1:\n #- If multiple ranks per gpu, also need to communicate at bundle level\n bundle_comm = comm.Split(color=device_id, key=bundle_rank)\n else:\n #- If only one rank per gpu, don't need bundle level communication\n bundle_comm = None\n else:\n #- Single gpu, only do MPI communication at bundle level\n frame_comm = None\n bundle_comm = comm\n else:\n #- No gpu, do MPI communication at bundle level\n frame_comm = None\n bundle_comm = comm\n\n timer.split('init')\n\n imgpixels = imgivar = None\n if rank == 0:\n imgpixels = img['image']\n imgivar = img['ivar']\n\n #- If using MPI, broadcast image, ivar, and psf to all ranks\n if comm is not None:\n if rank == 0:\n log.info('Broadcasting inputs to other MPI ranks')\n imgpixels = comm.bcast(imgpixels, root=0)\n imgivar = comm.bcast(imgivar, root=0)\n psf = comm.bcast(psf, root=0)\n\n #- If using GPU, move image and ivar to device\n #- TODO: is there a way for ranks to share a pointer to device memory?\n if gpu:\n cp.cuda.nvtx.RangePush('copy imgpixels, imgivar to device')\n device_id = cp.cuda.runtime.getDevice()\n log.info(f'Rank {rank}: Moving image data to device {device_id}')\n imgpixels = cp.asarray(imgpixels)\n imgivar = cp.asarray(imgivar)\n cp.cuda.nvtx.RangePop()\n\n timer.split('distributed data')\n\n if wavelength is not None:\n wmin, wmax, dw = map(float, wavelength.split(','))\n else:\n wmin, wmax = psf['PSF'].meta['WAVEMIN'], psf['PSF'].meta['WAVEMAX']\n dw = 0.8\n\n if rank == 0:\n log.info(f'Extracting wavelengths {wmin},{wmax},{dw}')\n \n #- TODO: calculate this instead of hardcoding it\n wavepad = 10\n\n #- Wavelength range that we want to extract\n wave = np.arange(wmin, wmax + 0.5*dw, dw)\n nwave = len(wave)\n \n #- Pad that with buffer wavelengths to extract and discard, including an\n #- extra args.nwavestep bins to allow coverage for a final partial bin\n wavelo = np.arange(wavepad)*dw\n wavelo -= (np.max(wavelo)+dw)\n wavelo += wmin\n wavehi = wave[-1] + (1.0+np.arange(wavepad+nwavestep))*dw\n \n fullwave = np.concatenate((wavelo, wave, wavehi))\n assert np.allclose(np.diff(fullwave), dw)\n \n #- TODO: barycentric wavelength corrections\n\n #- Work bundle by bundle\n if frame_comm is None:\n bundle_start = 0\n bundle_step = 1\n else:\n bundle_start = device_id\n bundle_step = device_count\n bspecmins = list(range(specmin, specmin+nspec, bundlesize))\n bundles = list()\n for bspecmin in bspecmins[bundle_start::bundle_step]:\n log.info(f'Rank {rank}: Extracting spectra [{bspecmin}:{bspecmin+bundlesize}]')\n sys.stdout.flush()\n if gpu:\n cp.cuda.nvtx.RangePush('extract_bundle')\n bundle = extract_bundle(\n imgpixels, imgivar, psf,\n wave, fullwave, bspecmin,\n bundlesize=bundlesize, nsubbundles=nsubbundles,\n nwavestep=nwavestep, wavepad=wavepad,\n comm=bundle_comm,\n gpu=gpu\n )\n if gpu:\n cp.cuda.nvtx.RangePop()\n bundles.append((bspecmin, bundle))\n\n #- for good measure, have other ranks wait for rank 0\n if bundle_comm is not None:\n bundle_comm.barrier()\n\n timer.split('extracted bundles')\n\n if frame_comm is not None:\n # gather results from multiple mpi groups\n if bundle_rank == 0:\n bspecmins, bundles = zip(*bundles)\n flux, ivar, resolution = zip(*bundles)\n bspecmins = frame_comm.gather(bspecmins, root=0)\n flux = gather_ndarray(flux, frame_comm)\n ivar = gather_ndarray(ivar, frame_comm)\n resolution = gather_ndarray(resolution, frame_comm)\n if rank == 0:\n bspecmin = [bspecmin for rankbspecmins in bspecmins for bspecmin in rankbspecmins]\n rankbundles = [list(zip(bspecmin, zip(flux, ivar, 
resolution))), ]\n    else:\n        # no mpi or single group with all ranks\n        rankbundles = [bundles,]\n\n    timer.split('collected data')\n\n    #- Finalize and write output\n    frame = None\n    if rank == 0:\n\n        #- flatten list of lists into single list\n        allbundles = list()\n        for rb in rankbundles:\n            allbundles.extend(rb)\n\n        allbundles.sort(key=lambda x: x[0])\n\n        specflux = np.vstack([b[1][0] for b in allbundles])\n        specivar = np.vstack([b[1][1] for b in allbundles])\n        Rdiags = np.vstack([b[1][2] for b in allbundles])\n\n        timer.split(f'combined data')\n\n        #- Convert flux to photons/A instead of photons/bin\n        dwave = np.gradient(wave)\n        specflux /= dwave\n        specivar *= dwave**2\n\n        #- TODO: specmask and chi2pix\n        specmask = (specivar == 0).astype(np.int)\n        chi2pix = np.ones(specflux.shape)\n\n        frame = dict(\n            specflux = specflux,\n            specivar = specivar,\n            specmask = specmask,\n            wave = wave,\n            Rdiags = Rdiags,\n            chi2pix = np.ones(specflux.shape),\n            imagehdr = img['imagehdr'],\n            fibermap = img['fibermap'],\n            fibermaphdr = img['fibermaphdr'],\n        )\n\n        timer.split(f'finished frame')\n        timer.log_splits(log)\n\n    return frame\n","sub_path":"py/gpu_specter/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":17165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"387396018","text":"import os\nfrom setuptools import setup\n\n# pyInteract, an interface to Responsys Interact Web Services\n# Responsys offers Interact Web Services to offer automation of offered\n# services available through the web UI.\n\ndef read(fname):\n    return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n    name = \"pyinteract\",\n    version = \"0.7.8\",\n    author = \"Johan Nestaas, Mason Dixon\",\n    author_email = \"johan.nestaas@oracle.com, mason.dixon@oracle.com\",\n    description = (\"A Python API for the SOAP Web Services offered by \"\n        \"responsys.\"),\n    license = \"BSD\",\n    keywords = \"responsys interact marketing oracle marketing cloud\",\n    url = \"https://bitbucket.org/johannestaas/responsys_pyinteract\",\n    packages=['interact'],\n    long_description=read('README'),\n    classifiers=[\n        'Development Status :: 4 - Beta',\n        'Topic :: Utilities',\n        'License :: OSI Approved :: BSD License',\n        'Environment :: Web Environment',\n        'Intended Audience :: Developers',\n        'Operating System :: MacOS :: MacOS X',\n        'Operating System :: Microsoft :: Windows',\n        'Operating System :: POSIX',\n        'Programming Language :: Python',\n        'Topic :: Communications :: Email',\n        'Topic :: Office/Business',\n    ],\n    install_requires=[\n        'suds',\n    ],\n)\n","sub_path":"pypi_install_script/pyinteract-0.7.8.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"256775943","text":"from data import *\nfrom tkinter import *\nfrom datetime import date\nimport requests\nimport json\nimport locale\n\nlocale.setlocale(locale.LC_TIME, \"fi_FI\")\n\nnow = date.today()\nrest_nro = 164 #Sodexo ravintolan nro\nurl = \"https://www.sodexo.fi/ruokalistat/output/daily_json/\"+str(rest_nro) + \"/\" + str(now)\nformtd_day = now.strftime(\"%A %d. \").capitalize() + now.strftime(\"%B\").capitalize()\nbg_color = \"#EAFFE5\"#\"lightgrey\"\n\ndef menu(data, nro):\n    try:\n        theJSON = json.loads(data)\n        if \"dietcodes\" in theJSON[\"courses\"][str(nro)]:\n            return theJSON[\"courses\"][str(nro)][\"title_fi\"]+\"\\n\"+theJSON[\"courses\"][str(nro)][\"dietcodes\"]+\"\\n\"+theJSON[\"courses\"][str(nro)][\"price\"]\n        return theJSON[\"courses\"][str(nro)][\"title_fi\"]+\"\\n\"+theJSON[\"courses\"][str(nro)][\"price\"]\n    except:\n        return 1\n    \ndef get_content():\n    try:\n        r = requests.get(url)\n        return r.content\n    except:\n        return \"Error on request\"\n\ndef main():\n    lunch = []\n    root = Tk()\n    root.title(\"Ruokalista\")\n    root.geometry(\"480x360+680+240\")\n    root.configure(bg=bg_color)\n\n    day = Label(root, text= formtd_day + \"\\n\", font=\"Arial 18 bold\", bg=bg_color)\n    day.pack()\n\n    if menu(get_content(), 1) != 1:\n        for i in range(4):\n            lunch.append(Label(root, text=str(menu(get_content(), i+1))+ \"\\n\", font=\"Arial 12\",bg=bg_color))\n            lunch[i].pack()\n    else: \n        err_msg = Label(root, text=\"Tänään ei lounasta\", font=\"Arial 12\",bg=bg_color)\n        err_msg.pack()\n\n    root.mainloop()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"529647527","text":"import random\n\nm_rows, m_cols, n_cols = 2, 3, 2\n\nfile = open(\"input/in.data\", 'w+')\n\nfor r in range(1, m_rows+1):\n    for c in range(1, m_cols+1):\n        file.write('M#' + str(r) + '#' + str(c) + '#' + str(random.randint(0, 9)) + '\\n')\n\nfor r in range(1, m_cols+1):\n    for c in range(1, n_cols+1):\n        file.write('N#' + str(r) + '#' + str(c) + '#' + str(random.randint(0, 9)) + '\\n')\n\nfile.close()\n","sub_path":"02_MatrixMultiplication/gen-data.py","file_name":"gen-data.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"99305444","text":"#!/usr/bin/env python\nimport os\nimport feedparser\nimport psycopg2\nimport urlparse\n\nurlparse.uses_netloc.append(\"postgres\")\nurl = urlparse.urlparse(os.environ[\"DATABASE_URL\"])\nwith psycopg2.connect(database=url.path[1:],\n                        user=url.username,\n                        password=url.password,\n                        host=url.hostname,\n                        port=url.port) as dbconnect:\n\tcur = dbconnect.cursor()\n\n\turl = (\n\t\t\t'http://kenvtv.com/news/local.rss',\n\t\t\t'http://news3lv.com/news/local.rss',\n\t\t\t'http://www.fox5vegas.com/category/210851/app-news?clienttype=rss',\n\t\t\t'http://www.telemundolasvegas.com/noticias/local/?rss=y&embedThumb=y&summary=y',\n\t\t\t'http://mynews4.com/news/local.rss',\n\t\t\t'http://www.kolotv.com/feeds/rss',\n\t\t\t'http://foxreno.com/news/local.rss',\n\t\t\t'http://www.ktvn.com/Global/category.asp?C=90455&clienttype=rss',\n\t\t\t'http://kdwn.com/tag/local/feed/',\n\t\t\t'http://lasvegas.cbslocal.com/category/news/feed/',\n\t\t\t'http://www.reviewjournal.com/news/las-vegas/feed',\n\t\t\t'http://elkodaily.com/search/?q=&t=article&l=25&d=&d1=&d2=&s=start_time&sd=desc&c[]=news/local*&f=rss',\n\t\t\t'http://lasvegassun.com/feeds/headlines/all/',\n\t\t\t'http://www.nevadaappeal.com/csp/mediapool/sites/SwiftShared/assets/csp/rssCategoryFeed.csp?pub=NevadaAppeal&sectionId=656&sectionLabel=Local',\n\t\t\t'http://rssfeeds.rgj.com/reno/news&x=1',\n\t\t\t'http://www.gvnews.com/search/?q=&t=article&l=25&d=&d1=&d2=&s=start_time&sd=desc&c[]=news/local*&f=rss',\n\t\t\t'http://pvtimes.com/taxonomy/term/1/feed',\n\t\t\t'http://www.recordcourier.com/csp/mediapool/sites/SwiftShared/assets/csp/rssCategoryFeed.csp?pub=RecordCourier&sectionId=694&sectionLabel=Local',\n\t\t\t'http://www.southvalleyjournal.com/categories/news.rss',\n\t\t\t'http://sparkstrib.com/category/news/feed/'\n\t\t\t)\n\n\tfor link in url:\n\t\td = feedparser.parse(link)\n\n\t\tfor data in d.entries:\n\n\n\t\t\ttitle = data.title\n\t\t\tlink = data.link\n\t\t\ttry:\n\t\t\t\ttime = data.published\n\t\t\texcept AttributeError:\n\t\t\t\ttry:\n\t\t\t\t\ttime = d.feed.published\n\t\t\t\texcept AttributeError:\n\t\t\t\t\ttime = data.updated\n\n\t\t\ttry: \n\t\t\t\timageUrl = data.links[1].href\n\t\t\texcept (IndexError, AttributeError): \n\t\t\t\ttry:\n\t\t\t\t\timageUrl = data.media_content[0]['url']\n\t\t\t\texcept (AttributeError, KeyError):\n\t\t\t\t\timageUrl = 'http://polar-spire-13485.herokuapp.com/static/img/logo3.png'\n\n\t\t\tsource = d.feed.title\n\t\t\tlocation = \"NV\"\n\n\t\t\ttry:\n\t\t\t\tcur.execute(\"\"\"INSERT INTO feeds_feeds(title, link, time, image, source, location) VALUES (%s, %s, %s, %s, %s, %s)\"\"\", (title, link, time, imageUrl, source, location))\n\t\t\t\tdbconnect.commit()\n\t\t\t\t\n\t\t\texcept psycopg2.IntegrityError:\n\t\t\t\tdbconnect.rollback()\n\n\t\t\t\t\n\t\t\t\t\n\n\t\t\t\t\n\n\t\t\t","sub_path":"feeder/states/nv.py","file_name":"nv.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"627409272","text":"from flask import Flask, request\nfrom flask_cors import CORS\nfrom classSlack import SlackApprovedService\nimport json\n\napp = Flask(__name__)\ncors = CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n\n\n@app.route(\"/slack/approved\", methods=['POST'])\ndef slack_service():\n    data = json.loads(request.data)\n    # slack.push(added_date, idi, description)\n\n    added_date = data.get(\"added_date\")\n    idi = data.get(\"id\")\n    description = data.get(\"description\")\n    slack.push_test(added_date, idi, description)\n\n    return \"Success\"\n\n\nif __name__ == '__main__':\n    slack = SlackApprovedService(\"config.yaml\")\n    # slack.message(\"START -> slack-approved-service\")\n    app.run(host=slack.host, port=slack.port)\n","sub_path":"slack-service-approved.py","file_name":"slack-service-approved.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"300946679","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# author: syaofox@gmail.com\nfrom PIL import Image, ImageChops\nfrom io import BytesIO\n\n\nclass VSMImage:\n    @classmethod\n    def trim(cls, im):\n        try:\n            bg = Image.new(im.mode, im.size, im.getpixel((0, 0)))\n            diff = ImageChops.difference(im, bg)\n            diff = ImageChops.add(diff, diff, 2.0, -100)\n            bbox = diff.getbbox()\n            if bbox:\n                return im.crop(bbox)\n        except Exception:\n            return im\n\n    @classmethod\n    def rotate_image(cls, in_bytes):\n        try:\n            out = BytesIO()\n            stream = BytesIO(in_bytes)\n            im = Image.open(stream)\n            img2 = im.transpose(Image.ROTATE_90)\n            img2.save(out, format='JPEG')\n            return out.getvalue()\n        except Exception:\n            return in_bytes\n\n    @classmethod\n    def tim_img_bytes(cls, in_bytes):\n        try:\n            out = BytesIO()\n            stream = BytesIO(in_bytes)\n            im = Image.open(stream)\n            im = cls.trim(im)\n            if not im:\n                return in_bytes\n            im.save(out, format='JPEG')\n            return out.getvalue()\n        except:\n            return in_bytes\n\n    @classmethod\n    def create_poster(cls, in_bytes, middle=False):\n        try:\n            out = BytesIO()\n            stream = BytesIO(in_bytes)\n            im = Image.open(stream)\n            im = cls.trim(im)\n            if not im:\n                return None\n            if im.size[0] < im.size[1]:\n                im.save(out, format='JPEG')\n                return out.getvalue()\n            if middle:\n                pos = im.size[0] // 4\n            else:\n                pos = im.size[0] * 420 // 800\n            box = pos, 0, im.size[0], im.size[1]\n            region = im.crop(box)\n            region.save(out, format='JPEG')\n            return out.getvalue()\n        except Exception:\n            return None\n\n    @classmethod\n    def IsValidImage(cls, indata):\n        bValid = True\n        buf = indata\n        if isinstance(indata, bytes):\n            buf = BytesIO(indata)\n\n        try:\n            Image.open(buf).verify()\n        except:\n            bValid = False\n\n        return bValid\n\n    @classmethod\n    def merge_image(cls, poster, bakdrop):\n        try:\n            out = BytesIO()\n\n            stream = BytesIO(poster)\n            im_poster = Image.open(stream)\n\n            stream = BytesIO(bakdrop)\n            im_bakdrop = Image.open(stream)\n\n            im_bakdrop = im_bakdrop.resize(im_poster.size, Image.ANTIALIAS)\n\n            new_im = Image.new('RGB', (im_poster.size[0] * 2, im_poster.size[1]))\n\n            new_im.paste(im_bakdrop, (0, 0))\n            new_im.paste(im_poster, (im_poster.size[0], 0))\n\n            new_im.save(out, format='JPEG')\n            return out.getvalue()\n        except Exception:\n            pass\n","sub_path":"models/imageEditor.py","file_name":"imageEditor.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"548464404","text":"from django.db import connection\n\n\nclass SQLCountMiddleware(object):\n\n    def process_template_response(self, request, response):\n        queries = connection.queries\n        sql_queries_count = len(queries)\n        sql_queries_time = 0.0\n        for x in queries:\n            sql_queries_time += float(x['time'])\n\n        response.context_data['sql_count'] = sql_queries_count\n        response.context_data['sql_time'] = sql_queries_time\n        return response\n","sub_path":"students/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"157974343","text":"#coding=utf-8\nimport time\nimport paramiko\nimport random\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom sys_info import *\nfrom 
selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n#电话弹屏创建警情立案-调派-结案\noptions = Options()\noptions.add_argument(\"--kiosk\") # 加载启动项页面全屏效果,相当于F11。\noptions.add_experimental_option(\"excludeSwitches\", ['enable-automation']) # 禁止谷歌弹出正在被自动化软件控制消息\ndriver = webdriver.Chrome(r\"E:\\lijie\\chromedriver.exe\", 0, options=options,keep_alive=True)\ndriver.get(\"http://192.168.7.7/ers-web/#/\")\ntime.sleep(2)\ndriver.find_element_by_xpath(\"//div/div[2]/form/div[1]/div/div[1]/input\").send_keys(\"kunming001\")\ndriver.find_element_by_xpath(\"//div/div[2]/form/div[2]/div/div[1]/input\").send_keys(\"keda123!\")\ndriver.find_element_by_css_selector(\"#keybtn\").click()\ndriver.implicitly_wait(10)\nall_handles = driver.window_handles\ndriver.switch_to.window(all_handles[-1])\ndriver.maximize_window()\ntime.sleep(3)\ndriver.refresh()\ntime.sleep(3)\n#print(driver.window_handles)\ni = 1\nwhile (i < 10000):\n\n try:\n ssh_client = paramiko.SSHClient()\n ssh_client.load_system_host_keys()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.connect('192.168.7.38', 22, 'root', 'kedacom888')\n std_in, std_out, std_err = ssh_client.exec_command('cd /opt/sipp-3.4.0/;sh uac_sendbye.sh 17751237537 1009 5080', get_pty=True)\n # 在command命令最后加上 get_pty=True,执行多条命令 的话用;隔开,另外所有命令都在一个大的单引号范围内引用\n std_in.write('PWD' + '\\n') # 执行输入命令,输入sudo命令的密码,会自动执行\n time.sleep(2)\n driver.find_element_by_xpath('//*[@id=\"app\"]/div/div[3]/div/div[3]/div/div/div').click() # 点击接听\n for line in std_out:\n print(line.strip('\\n'))\n ssh_client.close()\n except:\n print(\"voice send fail!\")\n\n #电话录音文件\n driver.implicitly_wait(20)\n j = random.randint(10000,50000)\n\n driver.find_element_by_xpath(\"//div/form/div[1]/div/div/div/div[1]/div[1]/div[1]/input\").send_keys(\"昆明长水国际机场\")\n time.sleep(3)\n driver.find_element_by_xpath(\"//div/form/div[1]/div/div/div/div[1]/div[1]/div[2]/div[1]/div[1]/ul/li[1]/span[1]\").click() # 选择地址\n time.sleep(3)\n\n driver.find_element_by_xpath(\"//div/form/div[3]/div[2]/div/div/div/div[1]/span/span/i\").click() # 下拉\n time.sleep(3)\n driver.find_element_by_xpath(\"//div/div/div/div[2]/div/div[1]/ul/div/div[1]/div/span[2]\").click() # 易燃易爆\n time.sleep(3)\n driver.find_element_by_xpath(\"/html/body/div[1]/div/div[2]/div/div[2]/div/div[1]/div/div[2]/section/div/div[2]/div/form/div[4]/div[2]/span\").click() # 案件描述自动生成\n time.sleep(2)\n driver.find_element_by_xpath(\"//div/form/div[7]/div[1]/div/div/div/input\").send_keys(\"188565\" + str(j)) # 报警电话\n time.sleep(2)\n driver.find_element_by_xpath(\"//div/form/div[8]/div[1]/div/div/div/input\").send_keys(\"张国庆\" + str(j)) # 报警人姓名\n time.sleep(2)\n\n driver.find_element_by_xpath(\"/html/body/div[1]/div/div[2]/div/div[2]/div/div[1]/div/div[3]/div/div/div/div/div[1]/div[1]/div/span[1]/span\").click() # 自定义调派\n time.sleep(2)\n driver.find_element_by_xpath(\"/html/body/div[1]/div/div[2]/div/div[2]/div/div[1]/div/div[3]/div/div/div/div/div[1]/div[2]/div[2]/div[3]/div/div[2]/div/ul[2]/li[1]/div[2]/span\").click() # 选择队伍\n time.sleep(2)\n driver.find_element_by_xpath(\"/html/body/div[1]/div/div[2]/div/div[2]/div/div[1]/div/div[2]/section/div/div[2]/div/div[1]/div/div[1]/span\").click() # 立案\n # driver.find_element_by_xpath(\"/html/body/div[1]/div/div[2]/div/div[2]/div/div[1]/div/div[2]/section/div/div[2]/div/div[1]/div/div[2]/span\").click() #存草稿\n driver.implicitly_wait(30)\n 
driver.find_element_by_xpath(\"/html/body/div[1]/div/div[2]/div/div[2]/div/div[1]/div/div[1]/div/div[2]/div/div/div[2]/div/div[1]/div/div[1]/label\").click()#当前灾情\n    time.sleep(3)\n    driver.find_element_by_xpath(\"/html/body/div[1]/div/div[2]/div/div[2]/div/div[1]/div/div[2]/div/div[2]/div/div/div[3]/div[1]/div[2]\").click()  # 编辑\n    time.sleep(3)\n    driver.find_element_by_id(\"select\").click()\n    time.sleep(3)\n    driver.find_element_by_xpath(\"/html/body/div[last()]/div/div[1]/ul/li[last()]/span\").click()  # 结案\n    time.sleep(4)\n\n    driver.find_element_by_xpath(\"/html/body/div[1]/div/div[1]/div/div/div[2]/ul/li[1]/span[2]\").click()\n    time.sleep(3)\n    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))\n    print(i)\n    print(\"cpu利用率: \" + str(get_cpu_info())+\"%\")\n    print(get_memory_info())\n    i = i + 1\nprint(\"Test Pass!\")\ndriver.quit()","sub_path":"UItest/Case/接处警2.0/语音+调度.py","file_name":"语音+调度.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"87443808","text":"from tkinter import *\n\nclass LoanCalculator:\n    def __init__(self):\n        window = Tk()\n        window.title(\"대출 계산기\")\n\n        Label(window, text = \"연이율\").grid(row = 1, column = 1, sticky = W)\n        Label(window, text = \"대출년수\").grid(row = 2, column = 1, sticky = W)\n        Label(window, text = \"대출금\").grid(row = 3, column = 1, sticky = W)\n        Label(window, text = \"월상환금\").grid(row = 4, column = 1, sticky = W)\n        Label(window, text = \"총상환금\").grid(row = 5, column = 1, sticky = W)\n\n        self.annualInterestRateVar = StringVar()\n        Entry(window, textvariable = self.annualInterestRateVar, justify = RIGHT).grid(row = 1, column = 2)\n        self.numberOfYearsVar = StringVar()\n        Entry(window, textvariable = self.numberOfYearsVar, justify = RIGHT).grid(row = 2, column = 2)\n        self.loanAmountVar = StringVar()\n        Entry(window, textvariable = self.loanAmountVar, justify = RIGHT).grid(row = 3, column = 2)\n        self.monthlyPaymentVar = StringVar()\n        lblMonthlyPayment = Label(window, textvariable = self.monthlyPaymentVar).grid(row = 4, column = 2, sticky = E)\n        self.totalPatmentVar = StringVar()\n        lblTotalPaymentVar = Label(window, textvariable = self.totalPatmentVar).grid(row = 5, column = 2, sticky = E)\n        btComputePayment = Button(window, text = \"상환금 계산하기\", command = self.computePayment).grid(row = 6, column = 2, sticky = E)\n\n        window.mainloop()\n\n    def computePayment(self):\n        monthlyPayment = self.getMonthlyPayment(\n            float(self.loanAmountVar.get()), float(self.annualInterestRateVar.get()) / 1200, int(self.numberOfYearsVar.get()))\n        self.monthlyPaymentVar.set(format(monthlyPayment, \"10.2f\"))\n        totalPayment = float(self.monthlyPaymentVar.get()) * 12 * int(self.numberOfYearsVar.get())\n        self.totalPatmentVar.set(format(totalPayment, \"10.2f\"))\n\n    def getMonthlyPayment(self, loanAmount, monthlyInterestRate, numberOfYears):\n        monthlyPayment = loanAmount * monthlyInterestRate / (1 - 1 / (1 + monthlyInterestRate)**(numberOfYears * 12))\n        return monthlyPayment\n\nLoanCalculator()","sub_path":"PythonProgramming/cp09/LoanCalculator.py","file_name":"LoanCalculator.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"343849167","text":"def levelOrder(self, root: TreeNode) -> List[List[int]]:\n    \n    # iterative solution is easier than recursion (bfs)\n    stack = [root]\n    levels = []\n    while stack:\n        nstack = stack\n        stack = []\n        level = []\n\n        # add to new stack left and right of 
each cur node if it is not None\n while nstack:\n cur = nstack.pop(0)\n if cur is not None:\n level.append(cur.val)\n stack.append(cur.left)\n stack.append(cur.right)\n if level != []:\n levels.append(level)\n return levels","sub_path":"binarytreelevelordertraversal.py","file_name":"binarytreelevelordertraversal.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"122820310","text":"from __future__ import unicode_literals\n\nimport csv\nimport logging\n\nfrom reports.serialize import to_simple\nfrom django.utils.encoding import smart_text, force_text\n\n\nlogger = logging.getLogger(__name__)\n\nLIST_DELIMITER_CSV = ';'\n\ndef from_csv(csvfile, list_delimiter=LIST_DELIMITER_CSV, list_keys=None):\n '''\n Returns an in memory matrix (array of arrays) for the input file\n \n @param list_keys overrides nested list eval for column keys; no brackets '[]' are \n needed to denote these columns as list columns - however, to comply with \n the csv standard, they still have to be quoted (if list_delimiter=csv_delimiter)\n NOTES: \n - nested lists are denoted by brackets, i.e. '[]',\n - to escape use '\\[...' (i.e. when embedding a regex expression)\n TODO: version 2 - read from a stream\n '''\n reader = csv.reader(csvfile)\n return from_csv_iterate(reader, list_delimiter=list_delimiter, list_keys=list_keys)\n \ndef csv_generator(iterable, list_delimiter=LIST_DELIMITER_CSV, list_keys=None):\n list_keys = list_keys or []\n list_keys = list(list_keys)\n i = 0 \n for row in iterable:\n if i == 0:\n keys = [x for x in row]\n else:\n item = dict(zip(keys,row))\n for key in item.keys():\n val = item[key]\n if val and len(val)> 1:\n if val[0] == '\\\\' and val[1] == '[':\n # this could denote an escaped bracket, i.e. for a regex\n item[key] = val[1:]\n elif key in list_keys or val[0]=='[':\n # due to the simplicity of the serializer, above, any \n # quoted string is a nested list\n list_keys.append(key)\n item[key] = [\n x.strip() \n for x in val.strip('\"[]').split(list_delimiter)]\n yield item\n i += 1\n logger.debug('read in data, count: %d', i ) \n \ndef from_csv_iterate(iterable, list_delimiter=LIST_DELIMITER_CSV, list_keys=None):\n '''\n Returns an in memory array of dicts for the iterable, representing a \n csv-like input matrix.\n - the first row is interpreted as the dict keys, unless a list_keys param is \n specified \n '''\n list_keys = list_keys or []\n data_result = []\n i = 0\n keys = []\n list_keys = list(list_keys) \n logger.debug('list_keys: %r', list_keys)\n for row in iterable:\n if i == 0:\n keys = [x for x in row]\n else:\n item = dict(zip(keys,row))\n for key in item.keys():\n val = item[key]\n if val and len(val)> 1:\n if val[0] == '\\\\' and val[1] == '[':\n # this could denote an escaped bracket, i.e. 
for a regex\n item[key] = val[1:]\n elif key in list_keys or val[0]=='[':\n # due to the simplicity of the serializer, above, any \n # quoted string is a nested list\n list_keys.append(key)\n item[key] = []\n for x in val.strip('\"[]').split(list_delimiter):\n x = x.strip()\n if x:\n item[key].append(x)\n data_result.append(item)\n i += 1\n logger.debug('read in data, count: ' + str(len(data_result)) ) \n return data_result\n\ndef string_convert(val):\n return csv_convert(val, delimiter=',')\n\ndef dict_to_rows(_dict):\n ''' Utility that converts a dict into a table for writing to a spreadsheet\n '''\n \n logger.debug('_dict: %r', _dict)\n values = []\n if isinstance(_dict, dict):\n for key,val in _dict.items():\n for row in dict_to_rows(val):\n if not row:\n values.append([key,None])\n else:\n keyrow = [key]\n if isinstance(row, basestring):\n keyrow.append(row)\n else:\n keyrow.extend(row)\n values.append(keyrow)\n else:\n values = (csv_convert(_dict),)\n return values\n\ndef csv_convert(val, delimiter=LIST_DELIMITER_CSV, list_brackets='[]'):\n delimiter = delimiter + ' '\n if isinstance(val, (list,tuple)):\n if list_brackets:\n return ( list_brackets[0] \n + delimiter.join([smart_text(to_simple(x)) for x in val]) \n + list_brackets[1] )\n else: \n return delimiter.join([smart_text(to_simple(x)) for x in val]) \n elif val != None:\n if type(val) == bool:\n if val:\n return 'TRUE'\n else:\n return 'FALSE'\n else:\n return force_text(to_simple(val))\n else:\n return None\n","sub_path":"reports/serialize/csvutils.py","file_name":"csvutils.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"228307798","text":"import boto3\nimport json\n\nregion = 'us-west-1'\nclient = boto3.client('config', region)\nssm = boto3.client('ssm', region)\namis = ssm.get_parameter(Name=\"/GoldenAMI/latest\")['Parameter']['Value']\nprint(type(amis))\nupdate_ami = {}\nupdate_ami['amiIds'] = amis\njson_update_ami = json.dumps(update_ami)\nprint(json_update_ami)\n\nresponse = client.put_config_rule(\n ConfigRule={\n 'ConfigRuleArn': 'arn:aws:config:us-west-1:811284348584:config-rule/config-rule-suojze',\n 'Source': {\n 'Owner': 'AWS',\n 'SourceIdentifier': 'APPROVED_AMIS_BY_ID',\n },\n 'InputParameters': json_update_ami\n # 'MaximumExecutionFrequency': 'One_Hour'|'Three_Hours'|'Six_Hours'|'Twelve_Hours'|'TwentyFour_Hours',\n },\n)","sub_path":"testbed/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"15947549","text":"\"\"\"\nA perfect number is a number for which the sum of its proper divisors is exactly equal to the number. For example, the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28, which means that 28 is a perfect number.\n\nA number n is called deficient if the sum of its proper divisors is less than n and it is called abundant if this sum exceeds n.\n\nAs 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, the smallest number that can be written as the sum of two abundant numbers is 24. By mathematical analysis, it can be shown that all integers greater than 28123 can be written as the sum of two abundant numbers. 
However, this upper limit cannot be reduced any further by analysis even though it is known that the greatest number that cannot be expressed as the sum of two abundant numbers is less than this limit.\n\nFind the sum of all the positive integers which cannot be written as the sum of two abundant numbers.\n\n\"\"\"\n\nimport math\n\n# Improved sum of divisor function\ndef sum_of_divisor(n):\n sum = 1\n p = 2\n while p*p <= n and n > 1:\n if n % p == 0:\n j = p * p\n n //= p\n while n % p == 0:\n j *= p\n n //=p\n sum *= (j - 1)\n sum //= (p - 1)\n if p == 2:\n p = 3 \n else:\n p += 2\n if n > 1:\n sum = sum * (n + 1)\n return sum\n\n# Sum of proper divisor (proper means with out itself :-))\n# as decribed in the solution manual\ndef sum_of_proper_divisors(n):\n return sum_of_divisor(n) - n\n\n\ndef main():\n # Bruteforce solution\n\n # Step 1: Calculate all abundanten number less or equal to 28123\n abundanten_numbers = []\n for i in range(1, 28123 + 1):\n if sum_of_proper_divisors(i) > i: \n abundanten_numbers.append(i)\n\n\n # Step 2: Cross out all number which are sums of abundant numbers\n is_no_abundant_sum = [True] * (28123+1)\n\n for i in range(0, len(abundanten_numbers)):\n for j in range(0, len(abundanten_numbers)):\n if (abundanten_numbers[i] + abundanten_numbers[j]) <= 28123:\n is_no_abundant_sum[abundanten_numbers[i] + abundanten_numbers[j]] = False\n\n # Step 3: Sum up all not crossed numbers \n sum = 0\n for i in range(1,28123+1):\n if is_no_abundant_sum[i] == True:\n sum += i\n\n print(sum)\n\nif __name__ == '__main__':\n main() ","sub_path":"Python/23Non-abundantSums.py","file_name":"23Non-abundantSums.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"493887066","text":"from rlberry.envs.classic_control import MountainCar\nfrom rlberry.wrappers import DiscretizeStateWrapper\n\ncont_env = MountainCar()\nenv = DiscretizeStateWrapper(cont_env, 10) # 10 bins per dimension\n\nprint(cont_env.observation_space)\nprint(env.observation_space)\nprint(\"reset in discrete environment gives initial state = \", env.reset())\n\nenv.enable_rendering()\nfor tt in range(20):\n next_s, _, _, _ = env.step(env.action_space.sample())\n env.sample(54, 1)\nenv.render()\n","sub_path":"examples/demo_discretize_state.py","file_name":"demo_discretize_state.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"480026274","text":"from packages.core.utils.singleton import SingletonClass as Singleton\nfrom packages.core.utils.app_loop import AppLoop\n\nimport itertools\nimport json\nimport asyncio\nimport re\nimport subprocess\nimport aiohttp\nimport logging\nimport sys\nimport concurrent\nfrom random import randint\n\nclass WebClient(metaclass=Singleton):\n\n def __init__(self, *args, **kwargs):\n self.all_sessions = None\n self.lock = asyncio.Lock()\n # faster\n # http://azenv.net/\n # http://httpheader.net/azenv.php\n # http://proxyjudge.us/azenv.php\n # http://www.proxyfire.net/fastenv\n\n # medium\n # http://httpbin.org/get?show_env\n # http://www.sbjudge3.com/azenv.php\n # https://httpbin.org/get?show_env\n\n # > 0.2 sec\n # http://www.proxy-listen.de/azenv.php\n # https://www.proxy-listen.de/azenv.php\n # http://www.sbjudge2.com/azenv.php\n # http://www.proxyjudge.info/azenv.php\n\n # ?\n # https://api.ipify.org?format=json\n # http://ip-api.com/json\n # http://httpbin.org/ip\n\n self.url_judges = 
(\"http://azenv.net/\", \"http://httpheader.net/azenv.php\")\n\n async def internet_check(self, session, skip=False):\n if skip:\n public_ip = session._connector._local_addr[0]\n self.ip_publics.append(public_ip)\n return session\n for url_judge in self.url_judges:\n async with session.get(url_judge, timeout=20) as resp:\n if resp:\n resp = await resp.text()\n public_ip = re.findall(r\"\\d+\\.\\d+\\.\\d+\\.\\d+\", resp)\n\n if public_ip not in self.ip_publics:\n self.ip_publics.append(public_ip)\n return session\n logging.getLogger('log_print').error(\n f\"internet_check error con: url_judge: {url_judge}, {session._connector._local_addr[0]}\"\n )\n\n await session.close()\n return\n\n async def starts(self):\n try:\n cmd = r\"ip -o -4 addr show|grep ' en\\| eth\\| wl'|awk '{print $4}'|cut -d/ -f1\" # deja solo las redes : \"enp|eth\" sin vpn sin docker\n ps = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n )\n ips = ps.communicate()[0].decode().strip().split()\n except Exception as e: # si es windows\n logging.getLogger('log_print').error(f\"Error, {e}, es windows?\")\n ips = [\"0.0.0.0\"]\n if not ips:\n raise Exception(\"no hay ips de salida disponibles\")\n\n self.sessions = []\n self.ip_publics = []\n coros = []\n for ip in ips:\n conn = aiohttp.connector.TCPConnector(\n local_addr=(ip, 0), limit=300, loop=AppLoop().get_loop()\n )\n session = AutoRetrySession(\n connector=conn,\n headers={\n \"User-Agent\": \"Mozilla/5.0 (compatible MSIE 9.0 Windows NT 6.1 Trident/5.0)\"\n },\n )\n coros.append(self.internet_check(session, skip=len(ips) > 10))\n\n self.sessions = filter(None, await asyncio.gather(*coros))\n if len(self.ip_publics) > 0:\n logging.getLogger(\"log_print\").info(\n f\"Usando {len(self.ip_publics)} Ips_rotativas\"\n )\n else:\n raise Exception(\n f\"Error, no hay ips disponibles con internet testeado con: {self.url_judges}\"\n )\n exit()\n\n self.all_sessions = self.get_all_sessions()\n\n async def do_request(\n self,\n uri:str,\n rq_type=\"get\",\n payload=dict(),\n params=None,\n return_data=\"json\",\n headers=dict(),\n cookies=dict(),\n **kwargs,\n ):\n if payload:\n payload = json.dumps(payload)\n\n max_reintents = 30\n i = 0\n while i < max_reintents: # max_reintents por si no recibe un Json\n i += 1\n async with (await self.get_session()).__getattribute__(rq_type)(\n uri,\n data=payload,\n params=params,\n headers=headers,\n verify_ssl=False,\n cookies=cookies\n ) as resp:\n if not resp:\n logging.getLogger(\"log_print\").error(\n f\"not resp {rq_type} {uri} {payload} {params}\"\n )\n return\n try:\n res_json = {}\n if return_data == \"json\":\n if resp.content_type == \"text/html\":\n logging.getLogger(\"log_print\").error(\n f\"{rq_type} {resp.status} {uri} \\\n error jot json response: {await resp.text()} \"\n )\n return\n\n res_json = await resp.json()\n if isinstance(res_json, (list, dict)):\n final_res = res_json\n elif return_data == \"text\":\n res_text = await resp.text()\n if res_text:\n final_res = res_text\n elif return_data is None:\n final_res = None\n\n if resp.status in (200, 201, 206):\n if isinstance(final_res, list) and any(\n 1 for i in final_res if i.get(\"code\") == 500 or i.get(\"status\") == 500\n ):\n logging.getLogger(\"log_print\").debug(\n f\"{rq_type}, {resp.status}, {resp.url}, some one with code:500\"\n )\n continue\n return final_res\n elif resp.status in (403,): # 403 = Forbidden (meli day limit)\n return final_res\n elif resp.status in (\n 429,\n 500,\n 501,\n 502,\n 409,\n 504,\n ): # 504=not 
found(temporaly), 409 =optimistic locking, 429 = toomany request\n if 0 < i < 5:\n logging.getLogger(\"log_print\").debug(\n f\"{rq_type} {resp.status} retrying No-{i} , too quikly? {0.2 * i}\"\n )\n elif 5 < i:\n logging.getLogger(\"log_print\").info(\n f\"{rq_type} {resp.status} retrying No-{i} , too quikly? {0.2 * i}\"\n )\n await asyncio.sleep(0.2 * i)\n continue\n # elif resp.status == 401: # expired_token\n # print(\n # f\"{rq_type} status 401, expired_token, forcing to refresh, {resp.url}\"\n # )\n # token = await self.get_token(force=True)\n # params[\"access_token\"] = token\n # max_reintents /= 3\n # continue\n elif resp.status in (404, 400, 401) and isinstance(res_json, dict):\n logging.getLogger(\"log_print\").debug(\n f\"{rq_type}, {resp.status}, {resp.url}, {res_json.get('message')}, {res_json.get('cause')}\"\n )\n return final_res\n else:\n if res_json:\n logging.getLogger(\"log_print\").info(\n f\"{rq_type}, {resp.status}, {resp.url}, -{await resp.text()}-\"\n )\n return final_res\n except Exception as e:\n logging.getLogger(\"log\").error(\n f\"Error on {rq_type} return_data:{return_data} {uri}, {e}\"\n )\n await asyncio.sleep(0.5)\n continue\n\n async def get(\n self, uri, payload={}, params=None, return_data=\"json\", headers={}, **kwargs\n ):\n return await self.do_request(\n rq_type=\"get\",\n uri=uri,\n payload=payload,\n params=params,\n return_data=return_data,\n headers=headers,\n **kwargs,\n )\n\n async def post(\n self, uri, payload={}, params=None, return_data=\"json\", headers={}, **kwargs\n ):\n return await self.do_request(\n rq_type=\"post\",\n uri=uri,\n payload=payload,\n params=params,\n return_data=return_data,\n headers=headers,\n **kwargs,\n )\n\n async def put(\n self, uri, payload={}, params=None, return_data=\"json\", headers={}, **kwargs\n ):\n return await self.do_request(\n rq_type=\"put\",\n uri=uri,\n payload=payload,\n params=params,\n return_data=return_data,\n headers=headers,\n **kwargs,\n )\n\n async def delete(\n self, uri, payload={}, params=None, return_data=\"json\", headers={}, **kwargs\n ):\n return await self.do_request(\n rq_type=\"delete\",\n uri=uri,\n payload=payload,\n params=params,\n return_data=return_data,\n headers=headers,\n **kwargs,\n )\n\n async def get_session(self):\n with await self.lock:\n if not self.all_sessions:\n await self.starts()\n return self.session\n\n @property\n def session(self):\n return next(self.all_sessions)\n\n def get_all_sessions(self):\n positions = itertools.cycle(self.sessions)\n for session in itertools.islice(\n positions, randint(0, len(self.ip_publics)), None\n ):\n yield session\n\nasync def retry_if_disconect(function, *args, **kwargs):\n for i in range(1, 16):\n try:\n return await function(*args, **kwargs)\n except asyncio.TimeoutError: # do not retry if timeout\n error = f\"{function.__name__}:error Timeout = {args}\"\n logging.getLogger(\"log_print\").debug(error)\n return\n except (\n aiohttp.client_exceptions.ClientConnectorError,\n aiohttp.client_exceptions.ServerDisconnectedError,\n aiohttp.client_exceptions.ClientResponseError,\n concurrent.futures.CancelledError,\n ):\n local_ip = function.__self__._connector._local_addr[0]\n logging.getLogger(\"log_print\").warning(\n f\"{function.__name__}: AutoRetrySession sleep({round(0.1 * i, 2)}),{sys.exc_info()[0]}, {args} {local_ip}\"\n )\n await asyncio.sleep(round(0.1 * i, 2))\n except aiohttp.client_exceptions.InvalidURL:\n error = f\"error InvalidURL = {args}\"\n raise Exception(error)\n except Exception as e:\n 
logging.getLogger('log_print').error(f\"Unexpected error ({e}):\", sys.exc_info()[0])\n\nclass GetRetry:\n def __init__(self, function, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n self.resp = None\n self.function = function\n\n async def __aenter__(self):\n self.resp = await retry_if_disconect(self.function, *self.args, **self.kwargs)\n return self.resp\n\n async def __aexit__(self, exc_type, exc, tb):\n if self.resp:\n await self.resp.release()\n\nclass AutoRetrySession(aiohttp.ClientSession):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def get(self, *args, **kwargs):\n return GetRetry(super().get, *args, **kwargs)\n\n def post(self, *args, **kwargs):\n return GetRetry(super().post, *args, **kwargs)\n\n def put(self, *args, **kwargs):\n return GetRetry(super().put, *args, **kwargs)\n\n def delete(self, *args, **kwargs):\n return GetRetry(super().delete, *args, **kwargs)","sub_path":"packages/core/utils/web_client.py","file_name":"web_client.py","file_ext":"py","file_size_in_byte":12602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"164200274","text":"\"\"\"\n Colorear una serie de cuadrados de distintos colores, cada uno dentro del otro.\n\"\"\"\n\nimport turtle\nfrom random import randint\nfrom math import hypot\n\ns = turtle.Screen()\nt = turtle.Turtle()\ns.bgcolor('black')\nt.hideturtle()\nt.speed(0)\n\n\ndef color_generator():\n \"\"\"Genera un color hexadecimal aleatorio\"\"\"\n return '#%02X%02X%02X' % (randint(0, 255), randint(0, 255), randint(0, 255))\n\n\ndef poligono(vertices, lado):\n \"\"\"Pinta un polígono regular para un lado y número de lados dado por el usuario\"\"\"\n originhead = t.heading()\n t.setheading(90)\n t.down()\n for i in range(vertices * 2):\n t.forward(lado // 2)\n if i % 2 == 0:\n t.right(360 / vertices)\n t.up()\n t.setheading(originhead)\n\n\ndef cuadrados_coloreados(min_size, num_cuadrados):\n \"\"\"Pinta una serie de cuadrados concéntricos de distinto color, cada uno más pequeño que el anterior\"\"\"\n for i in range(num_cuadrados, 1, -1):\n t.up()\n t.setpos(0, 0)\n apotema = min_size * (i * 0.5)\n lado = apotema // 2\n print(\"Apotema \" + str(apotema) + \" - Lado \" + str(lado))\n t.setpos(-lado // 2, 0)\n t.begin_fill()\n t.fillcolor(color_generator())\n poligono(4, lado)\n t.end_fill()\n\n\ncuadrados_coloreados(150, 55)\n\nturtle.done()\n","sub_path":"Ejercicios/SeguTrim/Ejercicio7/Subpaq2/Ejercicio4.py","file_name":"Ejercicio4.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"578818917","text":"import re\nfrom pkg_resources import iter_entry_points\n\n\ndef resolve_uri(uri):\n \"\"\"\n Returns a tuple, (factory, dbkw) where factory is a no-arg callable which\n returns a storage matching the spec defined in the uri. 
dbkw is a dict of\n keyword arguments that may be passed to ZODB.DB.DB.\n \"\"\"\n factory, dbkw = _resolve_uri(uri)\n return factory, _get_dbkw(dbkw)\n\n# _resolve_uri serves resolve_uri: it returns factory and original raw dbkw.\ndef _resolve_uri(uri):\n scheme = uri[:uri.find(':')]\n for ep in iter_entry_points('zodburi.resolvers'):\n if ep.name == scheme:\n resolver = ep.load()\n factory, dbkw = resolver(uri)\n return factory, dbkw\n else:\n raise KeyError('No resolver found for uri: %s' % uri)\n\nconnection_parameters = '''\n pool_size pool_timeout cache_size cache_size_bytes\n historical_pool_size historical_cache_size historical_cache_size_bytes\n historical_timeout large_record_size\n '''.strip().split()\n\nbytes_parameters = (\n 'cache_size_bytes', 'historical_cache_size_bytes', 'large_record_size')\n\nparameters = dict(database_name = 'database_name')\nfor parameter in connection_parameters:\n parameters['connection_' + parameter] = parameter\n\nhas_units = re.compile(r'\\s*(\\d+)\\s*([kmg])b\\s*$').match\nunits = dict(k=1<<10, m=1<<20, g=1<<30)\ndef _parse_bytes(s):\n m = has_units(s.lower())\n if m:\n v, uname = m.group(1, 2)\n return int(v) * units[uname]\n else:\n return int(s)\n\ndef _get_dbkw(kw):\n dbkw = {\n 'cache_size': 10000,\n 'pool_size': 7,\n 'database_name': 'unnamed',\n }\n for parameter in parameters:\n if parameter in kw:\n v = kw.pop(parameter)\n if parameter.startswith('connection_'):\n if not isinstance(v, int):\n if parameters[parameter] in bytes_parameters:\n v = _parse_bytes(v)\n else:\n v = int(v)\n dbkw[parameters[parameter]] = v\n\n if kw:\n raise KeyError('Unrecognized database keyword(s): %s' % ', '.join(kw))\n\n return dbkw\n","sub_path":"zodburi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"619664506","text":"import json\nimport re\n\n\ndef get_business_id_to_categories_dict():\n business_id_to_categories = {}\n with open('../data/yelp/yelp_academic_dataset_business.json', encoding='utf-8') as f:\n for line in f:\n old_dict = json.loads(line)\n business_id_to_categories[old_dict['business_id']] = old_dict['categories']\n return business_id_to_categories\n\n\ndef save_to_json(business_id_to_categories):\n with open('../data/yelp/review_and_categories.json', 'w', encoding='utf-8') as f1:\n with open('../data/yelp/yelp_academic_dataset_review.json', encoding='utf-8') as f2:\n for line in f2:\n old_dict = json.loads(line)\n\n text = re.sub('[\\n ]+', ' ', old_dict['text'])\n categories = business_id_to_categories[old_dict['business_id']]\n\n new_dict = {\n # 'business_id': old_dict['business_id'],\n 'text': text,\n 'categories': categories\n }\n f1.write(json.dumps(new_dict) + '\\n')\n\n\ndef save_to_txt(business_id_to_categories):\n with open('../data/yelp/reviews.txt', 'w', encoding='utf-8') as f1:\n with open('../data/yelp/labels.txt', 'w', encoding='utf-8') as f2:\n with open('../data/yelp/yelp_academic_dataset_review.json', encoding='utf-8') as f3:\n for line in f3:\n old_dict = json.loads(line)\n\n text = re.sub('[\\n ]+', ' ', old_dict['text'])\n categories = business_id_to_categories[old_dict['business_id']]\n categories = '' if categories is None else categories\n f1.write(text + '\\n')\n f2.write(categories + '\\n')\n\n\ndef parse_json():\n # business_id_to_categories = get_business_id_to_categories_dict()\n # save_to_txt(business_id_to_categories)\n\n categories_set = set()\n with 
open('../data/yelp/labels.txt', encoding='utf-8') as f:\n for line in f:\n if line == '\\n':\n continue\n for category in line.strip().split(', '):\n categories_set.add(category)\n\n with open('../data/yelp/categories.txt', 'w', encoding='utf-8') as f:\n for category in categories_set:\n f.write(category + '\\n')\n\n with open('sss.txt', 'w', encoding='utf-8') as f:\n f.write(str(categories_set))\n\n\ndef get_sorted_category_count():\n category_count = {}\n\n with open('../data/yelp/categories.txt', encoding='utf-8') as f:\n for line in f:\n category_count[line.strip()] = 0\n\n no_label_count = 0\n with open('../data/yelp/labels.txt', encoding='utf-8') as f:\n for line in f:\n if line == '\\n':\n no_label_count += 1\n continue\n for category in line.strip().split(', '):\n category_count[category] += 1\n\n print('no_label_count: ' + str(no_label_count) + '\\tremoved: 0')\n\n category_count = sorted(category_count.items(), key=lambda kv: kv[1], reverse=True)\n return category_count\n\n\ndef count_removed_docs(category_count):\n for keep_category_count in [10, 20]:\n category_count_set = set([cc[0] for cc in category_count[:10]])\n no_label_count = 0\n with open('../data/yelp/labels.txt', encoding='utf-8') as f:\n for i, line in enumerate(f):\n if line == '\\n':\n no_label_count += 1\n continue\n no_label = True\n for category in line.strip().split(', '):\n if category in category_count_set:\n no_label = False\n break\n\n if no_label:\n no_label_count += 1\n print('no_label_count: ' + str(no_label_count) + '\\tkeep category count: ' + str(keep_category_count))\n\n\ndef remove_docs(category_count, keep_category_count):\n category_count_set = set([cc[0] for cc in category_count[:keep_category_count]])\n no_label_count = 0\n with open('../data/yelp/new_reviews.txt', 'w', encoding='utf-8') as f_new_reviews:\n with open('../data/yelp/new_labels.txt', 'w', encoding='utf-8') as f_new_labels:\n with open('../data/yelp/reviews.txt', encoding='utf-8') as f_reviews:\n with open('../data/yelp/labels.txt', encoding='utf-8') as f_labels:\n for i, (review, label) in enumerate(zip(f_reviews, f_labels)):\n if label == '\\n':\n no_label_count += 1\n continue\n\n no_label = True\n new_label = []\n\n for category in label.strip().split(', '):\n if category in category_count_set:\n new_label.append(category)\n no_label = False\n\n if no_label:\n no_label_count += 1\n continue\n\n # only keep docs with greater than or equal to 4 categories\n if len(new_label) < 4:\n continue\n\n f_new_reviews.write(review)\n f_new_labels.write(', '.join(new_label) + '\\n')\n\n\ndef main():\n category_count = get_sorted_category_count()\n\n dict1 = {}\n for i, cc in enumerate(category_count):\n dict1[cc[0]] = {'id': i, 'count': cc[1]}\n\n with open('ffff.json', 'w', encoding='utf-8') as f:\n f.write(json.dumps(dict1))\n\n with open('../data/yelp/labels1.txt', 'w', encoding='utf-8') as f2:\n with open('../data/yelp/labels.txt', encoding='utf-8') as f:\n for i, line in enumerate(f):\n if line == '\\n':\n continue\n prefix = ''\n for category in line.strip().split(', '):\n f2.write(prefix + str(dict1[category]['id']))\n prefix = ', '\n f2.write('\\n')\n\n remove_docs(category_count, 10)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/preprocess/yelp.py","file_name":"yelp.py","file_ext":"py","file_size_in_byte":6130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"286156374","text":"import cv2\nfrom model import NN\nimport numpy as np\nimport torch\n\ncap = 
cv2.VideoCapture(0)\ni = 0 \nclassify = 1 \nlabels =[] \nModel = NN(batch_size = 1)\nModel.load_state_dict(torch.load(\"1\"))\nModel.eval()\ntardict = {1 : 'Face Detected' , 0 : 'Undetected' }\n\nwhile True:\n i += 1\n ret , frame = cap.read()\n gray = cv2.cvtColor(frame , cv2.COLOR_RGB2GRAY)\n gray = cv2.GaussianBlur(gray, (15,15), 0)\n cv2.imshow('feed' , frame)\n gray = torch.from_numpy(gray).view(1 , 1, 480 , 640).float()\n output = torch.round(Model.forward(gray))\n output = output.item()\n print (tardict[output])\n if output != 0:\n input()\n if cv2.waitKey(1) & 0xFF == ord('q') :\n break \n \n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"553217926","text":"#!python\n'''\noptimization algorithms\n'''\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport sys\nimport numpy as np\n\n\nfrom itertools import chain,repeat\n\ndef polynomial(w,x):\n return np.polyval(w[::-1],x)\n \ndef cost(w,x,y,N):\n result=0;\n for n in range(0,N):\n val=polynomial(w,x[n])-y[n] \n val=val*val\n result=result+val\n return (0.5*result/N) \n\ndef gradient(w,x,y,N):\n grad=w*0.\n I=len(w)-1\n pw = range(I+1)\n for n in range(0,N):\n #print(\"n : \" , N)\n val=polynomial(w,x[n])-y[n]\n phi=np.power(x[n],pw)\n grad=grad+phi*val\n #print(\"final\")\n return grad/N \n \ndef step(w,x,y,g,N):\n alpha=0.2\n return alpha\n\n#Algoritmo - Procura de Armijo com backtracking\n\n\n# Nt - Numero total de registos usados para treino\n# #Nb - Numero de registos que vão ser usadas para a amostra - este valor está a ser calculado\n # o Nb vai ser calculado com Nb = ...ceil(T**iterador)\n\n\ndef backtracking(w,xt,yt,Gb,Nt):\n\n #Initialize\n #alpha = alpha_bar\n\n alpha=1\n\n #alpha Gradiente Transposto * gradiente\n #gradiente total = gT\n Gt = gradient(w,xt,yt,Nt)\n\n #Mudar para produto interno\n #Gt*Gb\n\n\n inner_product = np.inner(Gt, np.transpose(Gb))\n cost_k = cost(w,xt,yt,Nt)\n\n w_aux = w\n while cost(w_aux,xt,yt,Nt) > (cost_k - alpha * inner_product):\n alpha = alpha/2\n\n if alpha * np.linalg.norm(Gb) <= 1.e-8 :\n break\n\n w_aux = w - alpha * gradient(w,xt,yt,Nb)\n\n return alpha\n\n\n\n\ndef get_data(xt,yt,Nt,Nb):\n xb=[];yb=[];\n for n in range(0,Nb):\n pos=math.ceil(np.random.rand()*(Nt-1))\n xb.append(xt[pos])\n yb.append(yt[pos])\n return xb,yb\n\n# In this function we need to select the records sequentially and not in a random way.\ndef ncycles(iterable, n):\n return chain.from_iterable(repeat(tuple(iterable), n))\n\ndef get_data_cycle(xt, yt, position, Nb):\n size = len(xt)\n cicle_step = (size % (position+Nb)) + 1\n cicle_list_x = list(ncycles(xt, cicle_step))\n cicle_list_y = list(ncycles(yt, cicle_step))\n return cicle_list_x[position:(position+Nb)], cicle_list_y[position:(position+Nb)], (position+Nb)\n\n\n\ndef get_data_sequencial(xt,yt,Nt,Nb,posicao_actual,posicao_actual_w_Nb):\n xb=[];yb=[];\n ok = True\n\n while ok:\n for n in range(0,Nb):\n pos = n + posicao_actual\n #posicao_actual_w_Nb =posicao_actual_w_Nb-1\n Nb =Nb -1\n if pos == Nt-1:\n #print(\"-----------\")\n #print(\"reiniciou porque pos = \", pos)\n #print(\"Nb está em = \", Nb)\n #print(\"posicao_actual_w_Nb está em = \",posicao_actual_w_Nb)\n #print(\"-----------\")\n posicao_actual = 1\n n=0\n pos = n + posicao_actual\n break # break here\n\n #print(\"pos está em = \",pos)\n xb.append(xt[pos])\n yb.append(yt[pos])\n return xb,yb\n\n if(Nb==0): 
ok=False;\n\n\n#=========== MAIN CODE ===============\n# read the data file\nx=[];y=[]; N=0\nwith open('P5-large_yes.csv', 'r') as file:\n    reader = csv.reader(file)\n    for u in reader:\n        x=np.append(x,float(u[0]))\n        y=np.append(y,float(u[1]))\n        N=N+1\nNt=math.floor(N*0.8)\nxt=x[0:Nt] \nyt=y[0:Nt] \nxv=x[Nt:N] \nyv=y[Nt:N] \nI=5;\n#Nb=math.floor(Nt*0.1)\nNb=1\n \n\n'''\nfig = plt.figure()\nplt.plot(x,y,'ro')\ns = np.linspace(0, 1,100, endpoint = True)\nr = 1/5-1/2*(s)+(s*s)-2/3*(s*s*s)+4*(s*s*s*s)-5*(s*s*s*s*s)\nplt.plot(s, r, '-g', label=r'$exact$')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('Polynomial Curve')\nplt.show()\n'''\n\nw=np.array([1]*(I+1));\n#w=np.array([1/5,-1/2,1,-2/3,4,-5])\n#xb,yb=get_data(xt,yt,Nt,Nb)\n#print(cost(w,xb,yb,Nb))\n#print(gradient(w,xb,yb,Nb))\n\n\nok=True; iter=0\n\n#current position, used to keep track of where we are in the dataset\nposicao_actual = 0\n\nwhile ok:\n    # The dataset has to be modified; what we have to do is the following:\n    # compute a random T between 1 and 2\n    import random\n    Theta = random.uniform(1, 2)\n\n    # Our Nb is computed dynamically (that is what this dynamic mini-batch method consists of)\n    # Nb is computed as Nb = ...ceil(T**iterator)\n    #Return the ceiling of x, the smallest integer value greater than or equal to x.\n    Nb = math.ceil(Theta**iter)\n\n    # another important detail is that Nb must always be the minimum between the computed value\n    # and the size of the whole training set: Nb = min(Nb, Nt)\n    # Nt - Total number of records used for training\n    Nb=min(Nb,Nt)\n\n\n    posicao_actual_w_Nb= Nb + posicao_actual\n\n\n\n    #print(posicao_actual_w_Nb)\n    #xb,yb = get_data_sequencial(xt,yt,Nt,Nb,posicao_actual,posicao_actual_w_Nb)\n\n    xb, yb, posicao_actual = get_data_cycle(xt,yt, posicao_actual,Nb)\n\n    #xb,yb=get_data(xt,yt,Nt,Nb)\n\n    # computation of the stochastic direction on the subset\n    Gb=gradient(w,xb,yb,Nb)\n\n\n    #alpha=step(w,xb,yb,g,Nb)\n\n\n    #Nt - Total number of records used for training\n    alpha=backtracking(w,xt,yt,Gb,Nt)\n\n    w=w-alpha*Gb\n\n    gT=gradient(w,xt,yt,Nt)\n    Norm_gT=np.linalg.norm(gT)\n\n    #print(w)\n    #print(g)\n    # stop when the iterator reaches 5000\n    if(iter>5000): ok=False;\n    # print the cost\n    print(iter,cost(w,xb,yb,Nb))\n    print(\"Norm gT : \",Norm_gT)\n\n    # stopping criterion, to be used:\n    #1 * 10 ^ -8, or .00000001\n\n    if(Norm_gT<1.e-8): ok=False\n\n    iter=iter+1\n\nprint(\"======================================\")\nprint(\"I={:d}\".format(I))\nprint(\"coefficient\",w)\nprint(\"Nt - total number of records used for training : \",Nt)\nprint(\"in error sample {:e}\".format(cost(w,xt,yt,Nt)))\nprint(\"out error sample {:e}\".format(cost(w,xv,yv,N-Nt)))\n\nfig = plt.figure()\nplt.plot(x,y,'ro')\ns = np.linspace(0, 1,100, endpoint = True)\nr = w[0]+w[1]*(s)+w[2]*(s*s)+w[3]*(s*s*s)+w[4]*(s*s*s*s)+w[5]*(s*s*s*s*s)\nplt.plot(s, r, '-b', linewidth=3,label=r'$exact$')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('Polynomial Curve')\nplt.show()\nprint('bye')\n
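\n# A minimal sketch of the dynamic mini-batch schedule used above (added for\n# illustration; batch_size_schedule is not part of the original lab code and\n# assumes a fixed theta in (1, 2) instead of one drawn per iteration):\ndef batch_size_schedule(theta, Nt, iters):\n    # Nb grows geometrically as ceil(theta**k) and is capped at Nt\n    return [min(math.ceil(theta ** k), Nt) for k in range(iters)]\n# e.g. batch_size_schedule(1.5, 100, 8) -> [1, 2, 3, 4, 6, 8, 12, 18]\n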
","sub_path":"material_laboratorio4/T4-amostragem_dinamica/optim_amostragem_dinamica.py","file_name":"optim_amostragem_dinamica.py","file_ext":"py","file_size_in_byte":6055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"209875360","text":"import torch\nfrom torch import nn\n\n\nclass ONet(nn.Module):\n    def __init__(self, pretrained=False):\n        super(ONet, self).__init__()\n        # input: 48x48\n        self.conv1 = nn.Conv2d(3, 32, kernel_size=3) # 46x46\n        self.bn1 = nn.BatchNorm2d(32)\n        self.prelu1 = nn.PReLU(32)\n        self.maxpool1 = nn.MaxPool2d(3, 2, ceil_mode=True) # 23x23\n        self.conv2 = nn.Conv2d(32, 64, kernel_size=3) # 21x21\n        self.bn2 = nn.BatchNorm2d(64)\n        self.prelu2 = nn.PReLU(64)\n        self.maxpool2 = nn.MaxPool2d(3, 2, ceil_mode=True) # 10x10\n        self.conv3 = nn.Conv2d(64, 64, kernel_size=3) # 8x8\n        self.bn3 = nn.BatchNorm2d(64)\n        self.prelu3 = nn.PReLU(64)\n        self.maxpool3 = nn.MaxPool2d(2, 2, ceil_mode=True) # 4x4\n        self.conv4 = nn.Conv2d(64, 128, kernel_size=2) # 3x3\n        self.bn4 = nn.BatchNorm2d(128)\n        self.prelu4 = nn.PReLU(128)\n        self.dense5 = nn.Linear(3*3*128, 256)\n        self.prelu5 = nn.PReLU(256)\n        self.dense6_1 = nn.Linear(256, 1)\n        self.sigmoid6_1 = nn.Sigmoid()\n        self.dense6_2 = nn.Linear(256, 4)\n        self.dense6_3 = nn.Linear(256, 10)\n\n        # self.training = False\n        #\n        if pretrained:\n            state_dict_path = 'weights/onet.pth'\n            state_dict = torch.load(state_dict_path)\n            self.load_state_dict(state_dict)\n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = self.bn1(x)\n        x = self.prelu1(x)\n        x = self.maxpool1(x)\n        x = self.conv2(x)\n        x = self.bn2(x)\n        x = self.prelu2(x)\n        x = self.maxpool2(x)\n        x = self.conv3(x)\n        x = self.bn3(x)\n        x = self.prelu3(x)\n        x = self.maxpool3(x)\n        x = self.conv4(x)\n        x = self.bn4(x)\n        x = self.prelu4(x)\n        x = x.permute(0, 3, 2, 1).contiguous()\n        x = self.dense5(x.view(x.shape[0], -1))\n        x = self.prelu5(x)\n        c = self.dense6_1(x)\n        c = self.sigmoid6_1(c)\n        b = self.dense6_2(x)\n        l = self.dense6_3(x)\n        return c, b, l\n\n\nif __name__ == '__main__':\n    a = torch.rand(128, 3, 48, 48)\n    net = ONet()\n    conf, reg, mark = net(a)\n    print(conf.shape)\n    print(reg.shape)\n    print(mark.shape)\n\n","sub_path":"modeling/layers/ONet.py","file_name":"ONet.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"377581377","text":"#Chef has opened his new restaurant and made the first meal free for everyone!\n#\n#You want to try the new restaurant, but since it is offering free meals, many people are coming and a huge queue has formed. Currently (at time T=0), there are M people waiting in the queue. You also know that there are N more people coming; let's denote the time when the i-th person stands at the back of the queue by Ai. You noticed that each exactly L seconds, one place in the restaurant will become vacant and the person currently at the front of the queue takes it, i.e. at time T=L, one person enters, then at time T=2L, another person enters and so on.\n#\n#You do not like to wait in queues, so you want to choose the time when you stand at the back of the queue in such a way that the time between this moment and the moment when you enter the restaurant is minimum possible. Assume that if you decide to stand at the back of the queue at the same moment as some other person, you will stand before them in the queue (closer to the restaurant). 
Also, you have to stand at the back of the queue no later than in the K-th second, otherwise you will arrive at home late.\n#\n#What is the minimum time you have to spend standing in the queue?\n\nfor y in range(int(input())):\n n,m,k,l=map(int, input().split())\n arr=list(map(int, input().split()))\n arr=sorted(arr)\n mini=(m+1)*l-(1%l)\n for i in range(n):\n s=m-(arr[i]//l)+i\n s=(s+1)*l-(arr[i]%l)\n if s\n\n\n\"\"\"Tests for assessment service handle.\"\"\"\nimport random\n\nimport ddt\n\nfrom ggrc.models import all_models\nfrom integration.ggrc import TestCase\nfrom integration.ggrc.query_helper import WithQueryApi\nfrom integration.ggrc.models import factories\nfrom integration.ggrc.api_helper import Api\nfrom integration.ggrc.generator import ObjectGenerator\n\n\n@ddt.ddt\nclass TestCollection(TestCase, WithQueryApi):\n\n \"\"\"Test for collection assessment objects.\"\"\"\n\n def setUp(self):\n super(TestCollection, self).setUp()\n self.client.get(\"/login\")\n self.clear_data()\n self.api = Api()\n self.generator = ObjectGenerator()\n\n @ddt.data(True, False)\n def test_order_by_test(self, desc):\n \"\"\"Order by fultext attr\"\"\"\n expected_ids = []\n with factories.single_commit():\n assessments = [factories.AssessmentFactory() for _ in range(10)]\n random.shuffle(assessments)\n with factories.single_commit():\n for idx, assessment in enumerate(assessments):\n comment = factories.CommentFactory(description=str(idx))\n factories.RelationshipFactory(source=assessment, destination=comment)\n expected_ids.append(assessment.id)\n query = self._make_query_dict(\n \"Assessment\", order_by=[{\"name\": \"comment\", \"desc\": desc}]\n )\n if desc:\n expected_ids = expected_ids[::-1]\n results = self._get_first_result_set(query, \"Assessment\", \"values\")\n self.assertEqual(expected_ids, [i['id'] for i in results])\n\n @ddt.data(\"Assessor\", \"Creator\", \"Verifier\")\n def test_delete_assessment_by_role(self, role_name):\n \"\"\"Delete assessment not allowed for based on Assignee Type.\"\"\"\n with factories.single_commit():\n assessment = factories.AssessmentFactory()\n context = factories.ContextFactory(related_object=assessment)\n assessment.context = context\n person = factories.PersonFactory()\n object_person_rel = factories.RelationshipFactory(\n source=assessment, destination=person)\n factories.RelationshipAttrFactory(\n relationship_id=object_person_rel.id,\n attr_name=\"AssigneeType\",\n attr_value=role_name,\n )\n assessment_id = assessment.id\n role = all_models.Role.query.filter(\n all_models.Role.name == \"Creator\"\n ).first()\n self.generator.generate_user_role(person, role, context)\n self.api.set_user(person)\n assessment = all_models.Assessment.query.get(assessment_id)\n resp = self.api.delete(assessment)\n self.assert403(resp)\n self.assertTrue(all_models.Assessment.query.filter(\n all_models.Assessment.id == assessment_id).one())\n\n @ddt.data(\n (all_models.Assessment.REWORK_NEEDED, True),\n (all_models.Assessment.DONE_STATE, True),\n (all_models.Assessment.FINAL_STATE, True),\n (all_models.Assessment.START_STATE, False),\n )\n @ddt.unpack\n def test_update_status_need_rework(self, status, is_valid):\n \"\"\"Update assessment state from need rework to valid or invalid states.\"\"\"\n with factories.single_commit():\n assessment = factories.AssessmentFactory(\n status=all_models.Assessment.REWORK_NEEDED\n )\n assessment_id = assessment.id\n resp = self.api.put(assessment, {\"status\": status})\n if is_valid:\n self.assert200(resp)\n check_status = status\n else:\n 
self.assert400(resp)\n check_status = all_models.Assessment.REWORK_NEEDED\n self.assertEqual(\n check_status, all_models.Assessment.query.get(assessment_id).status)\n","sub_path":"test/integration/ggrc/services/test_assessments.py","file_name":"test_assessments.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"650715434","text":"def arithmetic_arranger(problems, show_results=False):\n\n problems_list = []\n for p in problems:\n problems_list += p.split(\" \")\n\n problems_digits = [pr.replace(\"+\", '1') for pr in problems_list]\n problems_digits = [pr.replace(\"-\", '-1') for pr in problems_digits]\n\n test_total = 0\n\n # Test too many problems - ttmp -\n if len(problems) > 5:\n arranged_problems = \"Error: Too many problems.\"\n test_total += 1\n\n # Test Incorrect Operator - tio -\n count_pl = 1\n tio = 0\n\n while count_pl < len(problems_list):\n\n count_ele = problems_list[count_pl]\n\n if count_ele == \"+\":\n count_ele = count_ele.replace(\"+\", \"0\")\n if count_ele == \"-\":\n count_ele = count_ele.replace(\"-\", \"0\")\n if count_ele.isdigit():\n tio += 1\n\n count_pl += 3\n\n if tio < len(problems):\n arranged_problems = \"Error: Operator must be '+' or '-'.\"\n test_total += 1\n\n # Test Only Digits - tod -\n count_pl = 0\n tod = 0\n\n while count_pl < len(problems_list):\n count_ele = problems_list[count_pl]\n count_ele2 = problems_list[count_pl + 2]\n if count_ele.isnumeric() and count_ele2.isnumeric():\n tod += 1\n count_pl += 3\n\n if tod < len(problems):\n arranged_problems = \"Error: Numbers must only contain digits.\"\n test_total += 1\n\n # Test Too Many Digits - ttmd -\n ttmd = 0\n for pd in problems_digits:\n if len(pd) > 4:\n ttmd += 1\n if ttmd > 0:\n arranged_problems = \"Error: Numbers cannot be more than four digits.\"\n test_total += 1\n\n if test_total == 0:\n tt = 0\n first_row = \"\"\n second_row = \"\"\n underline_row = \"\"\n results = \"\"\n\n while tt < len(problems_digits):\n\n if len(problems_digits[tt]) > len(problems_digits[tt + 2]):\n longest_digit = len(problems_digits[tt])\n else:\n longest_digit = len(problems_digits[tt + 2])\n\n # First numbers row\n first_row_spaces = longest_digit - len(problems_digits[tt])\n first_row_formatted = (' ' * first_row_spaces) + problems_digits[tt]\n first_row += \" \" + first_row_formatted + \" \"\n\n # Second numbers row\n second_row += problems_list[tt + 1]\n second_row_spaces = longest_digit - len(problems_digits[tt + 2])\n second_row_formatted = (' ' * second_row_spaces) + problems_digits[tt + 2]\n second_row += \" \" + second_row_formatted + \" \"\n\n # Underline row\n underline_row_spaces = longest_digit\n underline_formatted = ('-' * underline_row_spaces)\n underline_row += \"--\" + underline_formatted + \" \"\n\n # Results row\n add = int(problems_digits[tt]) + int(problems_digits[tt + 1]) * int(problems_digits[tt + 2])\n result_spaces = (2 + longest_digit) - len(str(add))\n result_formatted = (' ' * result_spaces) + str(add)\n results += result_formatted + \" \"\n\n tt += 3\n\n test_arrangement = first_row[:-4] + \"\\n\" + second_row[:-4] + \"\\n\" + underline_row[:-4]\n test_solutions = test_arrangement + \"\\n\" + results[:-4]\n\n if show_results:\n arranged_problems = test_solutions\n else:\n arranged_problems = test_arrangement\n\n return 
arranged_problems\n","sub_path":"ArithmeticFormatter/arithmetic_arranger.py","file_name":"arithmetic_arranger.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"131030125","text":"import pandas as pd\nimport numpy as np\nimport os\nimport pdb\nimport re\n\nnew_lakes = pd.read_feather(\"../../../metadata/lake_metadata.feather\")\nnew_lakes = new_lakes['site_id']\nold_lakes = pd.read_csv(\"../../../metadata/lake_metadata.csv\")['nhd_id']\n\n\nold_ct = np.empty_like(old_lakes.values)\nnew_ct = np.empty((new_lakes.values.shape[0] - 5))\nold_tot = np.empty_like(old_lakes.values)\nnew_tot = np.empty((new_lakes.values.shape[0] - 5))\nold_ct[:] = np.nan\nold_tot[:] = np.nan\nnew_ct[:] = np.nan\nnew_tot[:] = np.nan \nfor i, lake in enumerate(old_lakes):\n\t# if lake == '1097324':\n\t\t# pdb.set_trace()\n\tprint(lake)\n\tobs = pd.read_feather(\"../../../data/raw/figure3/nhd_\"+str(lake)+\"_test_train.feather\")\n\tn_s = np.sum(obs['depth'] < 0.25)\n\tn_o = obs.shape[0] - n_s\n\told_ct[i] = n_s\n\told_tot[i] = n_o\n\t#total surface obs\n\nct = -1\nfor i, lake in enumerate(new_lakes):\n\tct += 1\n\tnid = lake\n\tmatch = re.search(\"nhdhr_(.+)\", str(lake))\n\tif nid == 'nhdhr_120018008' or nid == 'nhdhr_120020307' or nid == 'nhdhr_120020636' or nid == 'nhdhr_32671150' or nid =='nhdhr_58125241':\n\t\tct -= 1\n\t\tcontinue\n\n\tlake = match.group(1)\n\n\tobs = pd.read_feather(\"../../../data/raw/figure3/nhd_\"+str(lake)+\"_test_train.feather\")\n\tn_s = np.sum(obs['depth'] < 0.25)\n\tn_o = obs.shape[0] - n_s\n\tnew_ct[ct] = n_s\n\tnew_tot[ct] = n_o\n\n\n\nprint(\"old=\", np.sum(old_ct)/np.sum(old_tot), \", new=\", np.sum(new_ct)/np.sum(new_tot))\n\t#total surface obs\n\n\n","sub_path":"src/scripts/one-off/count_surface_obs.py","file_name":"count_surface_obs.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"289539484","text":"\nfrom django.contrib import admin\nfrom django.urls import path,include\nfrom .views import createV,deleteV,updateV,sentpostV,inboxpostV,pendingpostV,refusedpostV,confirmedpostV,adminconfirmedpostV,adminrefusedpostV,adminpendingpostV\n\napp_name='Post'\n\nurlpatterns = [\n\n path('create/',createV,name='create'),\n path('delete/',deleteV,name='delete'),\n path('update/',updateV,name='update'),\n path('sent/',sentpostV.as_view(),name='sent'),\n path('inbox/',inboxpostV.as_view(),name='inbox'),\n\n path('pending/',pendingpostV.as_view(),name='pending'),\n path('pending_all/',adminpendingpostV.as_view(),name='pending_all'),\n\n path('refused/',refusedpostV.as_view(),name='refused'),\n path('refused_all/',adminrefusedpostV.as_view(),name='refused_all'),\n\n path('confirmed/',confirmedpostV.as_view(),name='confirmed'),\n path('confirmed_all/',adminconfirmedpostV.as_view(),name='confirmed_all'),\n \n]\n","sub_path":"Post/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"70152853","text":"# -*- coding: utf-8 -*-\n\"\"\"\nRun probabilistic line search on a CIFAR-10 example.\n\"\"\"\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n\nimport tensorflow as tf\n\nfrom probls.tensorflow_interface.interface_sgd import ProbLSOptimizerSGDInterface\nfrom probls.line_search import ProbLSOptimizer\n\nimport cifar10\n\n#### Specify training specifics here 
##########################################\nfrom models import cifar10_2conv_3dense as model\nnum_steps = 4000\nbatch_size = 256\n###############################################################################\n\n\n# Set up model\ntf.reset_default_graph()\nimages, labels = cifar10.distorted_inputs(batch_size=batch_size)\nlosses, variables = model.set_up_model(images, labels)\n\n# Set up ProbLS optimizer\nopt_interface = ProbLSOptimizerSGDInterface()\nopt_interface.minimize(losses, variables)\nsess = tf.Session()\nopt_interface.register_session(sess)\nopt_ls = ProbLSOptimizer(opt_interface, alpha0=1e-3, cW=0.3, c1=0.05,\n target_df=0.5, df_lo=-0.1, df_hi=1.1, expl_policy=\"linear\", fpush=1.0,\n max_change_factor=10., max_steps=10, max_expl=10, max_dmu0=0.0)\n\n# Initialize variables and start queues\ncoord = tf.train.Coordinator()\nsess.run(tf.global_variables_initializer())\nthreads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n# Run ProbLS\nopt_ls.prepare()\nfor i in range(num_steps):\n print(opt_ls.proceed())\n\n# Stop queues\ncoord.request_stop()\ncoord.join(threads)","sub_path":"examples/run_probls_cifar10.py","file_name":"run_probls_cifar10.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"631539105","text":"import json\nimport sys\nprint(\"Creator.\")\nff = {}\nwhile 1:\n try:\n s = input(\"Save as: \")\n u = input(\"URL: \")\n ff[\"/home/gdrive/\" + str(s)] = str(u)\n except KeyboardInterrupt:\n print(\"Saving..\")\n fp = open(\"config.json\", 'w')\n json.dump(ff, fp)\n break\n\nsys.exit(0)\n","sub_path":"creator.py","file_name":"creator.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"316287903","text":"# -*- coding: utf-8 -*-\n\"\"\"\\\n==============\npipetestserver\n==============\n\nThis recipe describes how you can create / activate and kill a temporary HTTP\nserver with a WSGI app to provide unittest resources to a client software,\nthat's the target of your application.\n\nFor our demo, we create a stupid wsgi app that returns the double of the value\nprovided in a simple JSON structure.\n\n{\"value\": 5} -> {\"value\": 10}\n{\"value\": \"ta\"} -> {\"value\": \"tata\"}\n{\"value\": {}} -> {\"error\": \"TypeError\", \"traceback\": \"...\"}\n\nRun this module with either::\n\n $ python testserver.py\n $ python -m unittest discover -v\n\nNote that this will work only on an Unix box (use of select.select on a pipe).\nThis code works on Python 2.6 or 2.7 and needs some changes for Python 3.x\n\"\"\"\n\nimport httplib\nimport json\nimport os\nimport select\nimport StringIO\nimport threading\nimport traceback\nimport unittest\nimport urllib2\nimport wsgiref.simple_server\n\n# APPLICATION\n# ===========\n# This part is a portion of your application sw that includes an HTTP client\n\nENDPOINT = \"http://somehost.mydomain.com\"\n\n\ndef proxiedClient(value, endpoint=ENDPOINT):\n proxy = urllib2.ProxyHandler({'http': 'localhost:8500'})\n opener = urllib2.build_opener(proxy)\n urllib2.install_opener(opener)\n payload = json.dumps({'value': value})\n headers = {'Content-Type': 'application/json'}\n request = urllib2.Request(endpoint, payload, headers)\n payload = urllib2.urlopen(request).read()\n result = json.loads(payload)\n if 'value' in result:\n return result['value']\n else:\n return result # dict with 'error' and 'traceback' keys\n\n\ndef client(value, 
endpoint=ENDPOINT):\n    payload = json.dumps({'value': value})\n    headers = {'Content-Type': 'application/json'}\n    request = urllib2.Request(endpoint, payload, headers)\n    payload = urllib2.urlopen(request).read()\n    result = json.loads(payload)\n    if 'value' in result:\n        return result['value']\n    else:\n        return result  # dict with 'exception' and 'traceback' keys\n\n\n# RESOURCES\n# =========\n# This part sits typically in a tests/resources.py module\n# Make an \"application\" that suits your client and mocks a real web service\n\ndef application(environ, start_response):\n    \"\"\"The WSGI application that mocks a real server\n    \"\"\"\n    def make_status(value):\n        \"\"\"HTTP status (int) -> WSGI-suitable response status\n        \"\"\"\n        return \"{0} {1}\".format(value, httplib.responses[value])\n\n    headers = [('Content-Type', 'application/json')]\n    try:\n        if environ.get('REQUEST_METHOD') != 'POST':\n            start_response(make_status(httplib.METHOD_NOT_ALLOWED), headers)\n            return [\"Only the POST method is allowed\"]\n        try:\n            request_body_size = int(environ.get('CONTENT_LENGTH', 0))\n        except (ValueError):\n            request_body_size = 0\n        request_body = environ['wsgi.input'].read(request_body_size)\n        request_dict = json.loads(request_body)\n        value = request_dict[u'value']\n        result = 2 * value\n        response_body = json.dumps({'value': result})\n        status = httplib.OK\n\n    except Exception as exc:\n        tb_stream = StringIO.StringIO()\n        traceback.print_exc(file=tb_stream)\n        response = {\n            'exception': exc.__class__.__name__,\n            'traceback': tb_stream.getvalue()\n        }\n        response_body = json.dumps(response)\n        status = httplib.OK\n\n    headers.append(('Content-Length', str(len(response_body))))\n    start_response(make_status(status), headers)\n    return [response_body]\n\n# But you can copy this class as-is in your tests/resources.py module.\n\n\nclass ThreadedServerControl(object):\n\n    \"\"\"Will provide a temporary test server in another thread for your\n    application.\n\n    :param app: A wsgi application\n    :param host: Listening hostname or IP\n    :param port: Listening port preferably >= 1024 unless you're root\n    \"\"\"\n    __stop_marker = 'stop'\n\n    def __init__(self, app, host='localhost', port=8000):\n        self.app = app\n        self.host = host\n        self.port = port\n\n        # Communication pipe with the thread\n        self.stop_read, self.stop_write = os.pipe()\n        self.started = False\n        return\n\n    def __run(self):\n        httpd = wsgiref.simple_server.make_server(self.host, self.port,\n                                                  self.app)\n\n        # We don't want logs in the console\n        log_request = httpd.RequestHandlerClass.log_request\n        no_logging = lambda *args, **kwargs: None\n        httpd.RequestHandlerClass.log_request = no_logging\n\n        # Notify / unlock self.start()\n        self.ready.set()\n        while True:\n            ready, dummy, dummy = select.select(\n                [httpd, self.stop_read], [self.stop_write], []\n            )\n            # HTTP client request detected ?\n            if httpd in ready:\n                httpd.handle_request()\n\n            # self.stop() synch called ?\n            if self.stop_read in ready:\n                os.read(self.stop_read, len(self.__stop_marker))\n                # Re-enable console logging and exit\n                httpd.RequestHandlerClass.log_request = log_request\n                break\n\n    def start(self):\n        \"\"\"Launches the server in a thread\n        \"\"\"\n        # Bounce protection\n        if self.started:\n            return\n\n        # Threaded server and synch setup\n        self.ready = threading.Event()\n        self.server_thread = threading.Thread(target=self.__run)\n        self.server_thread.start()\n\n        # Wait for server readiness (if a client runs before -> raise URLError)\n        self.ready.wait()\n        self.started = True\n        return\n\n    def stop(self):\n        \"\"\"Stops and kills the 
server and thread\n        \"\"\"\n        # Bounce protection\n        if not self.started:\n            return\n\n        # Notify thread's suicide\n        os.write(self.stop_write, self.__stop_marker)\n\n        # Cleanup after thread's suicide\n        self.server_thread.join()\n        os.close(self.stop_write)\n        os.close(self.stop_read)\n        self.started = False\n        import time\n        time.sleep(0.01)\n        return\n\n\n# TESTS\n# =====\n# The usual tests suite in a tests/test_somemodule.py module. Look how we\n# start and stop the server respectively in setUpClass and tearDownClass\n\n\nclass ClientTest(object):\n\n    \"\"\"Common mixin test case\n    \"\"\"\n    endpoint = 'http://localhost:8000/'  # Our tests server\n\n    def test_int(self):\n        \"\"\"Integer * 2 -> OK\n        \"\"\"\n        result = client(2, endpoint=self.endpoint)\n        self.assertEqual(result, 4)\n        return\n\n    def test_str(self):\n        \"\"\"String * 2 -> OK\n        \"\"\"\n        result = client(\"co\", endpoint=self.endpoint)\n        self.assertEqual(result, \"coco\")\n        return\n\n    def test_err(self):\n        \"\"\"Dict * 2 -> TypeError (server)\n        \"\"\"\n        result = client({}, endpoint=self.endpoint)\n        self.assertTrue('exception' in result)\n        self.assertEqual(result['exception'], 'TypeError')\n        self.assertTrue('traceback' in result)\n        return\n\n\nclass SetUpClassTest(unittest.TestCase, ClientTest):\n\n    \"\"\"Server settings through setUpClass / tearDownClass\n    \"\"\"\n    @classmethod\n    def setUpClass(cls):\n        # Create and start the server\n        cls.server = ThreadedServerControl(application)\n        cls.server.start()\n        return\n\n    @classmethod\n    def tearDownClass(cls):\n        # Stop and delete the server\n        cls.server.stop()\n        return\n\n\nclass SetUpTest(unittest.TestCase, ClientTest):\n\n    \"\"\"Server settings through setUp / tearDown\n    \"\"\"\n\n    def setUp(self):\n        # Create and start the server\n        self.server = ThreadedServerControl(application)\n        self.server.start()\n        return\n\n    def tearDown(self):\n        # Stop and delete the server\n        self.server.stop()\n        return\n\n\ndef test_suite():\n    suite = unittest.TestSuite()\n    tests = [unittest.makeSuite(SetUpTest)]\n    if hasattr(unittest, 'skipIf'):\n        # New style unittest or unittest2\n        tests += [unittest.makeSuite(SetUpClassTest)]\n    suite.addTests(tests)\n    return suite\n\n\nif __name__ == '__main__':\n    unittest.TextTestRunner().run(test_suite())\n
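\n# A minimal usage sketch outside the test runner (added for illustration,\n# assuming port 8000 is free on localhost):\n#\n#   server = ThreadedServerControl(application)\n#   server.start()\n#   print(client(21, endpoint='http://localhost:8000/'))  # the mock app doubles it -> 42\n#   server.stop()\n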
","sub_path":"hoverpy/tests/pipetestserver.py","file_name":"pipetestserver.py","file_ext":"py","file_size_in_byte":8612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"511883909","text":"import pandas as pd\nimport numpy as np\nimport os\nimport pathlib\n\n\ndef clean_zips(df):\n    \"\"\"\n    A helper function to clean zip code columns\n    for Los Angeles Airbnb Data\n    \"\"\"\n\n    def first_ele(df):\n        for i in df:\n            return i\n\n    def first_five(df):\n        return df[2:7]\n\n    # Splits data for entries containing period\n    df['zipcode'] = df['zipcode'].str.split('.')\n\n    # Transform all values into strings\n    df['zipcode'] = df['zipcode'].apply(str)\n\n    # Returns only the first five characters\n    df['zipcode'] = df['zipcode'].map(first_five)\n\n    df = df.loc[df['zipcode'] != 'n']\n\n    return df\n\n\ndef clean_price(df):\n    df['price'] = df['price'].str.strip('$')\n    df['cleaning_fee'] = df['cleaning_fee'].str.strip('$')\n\n    df['price'] = df['price'].str.replace(',', '')\n    df['cleaning_fee'] = df['cleaning_fee'].str.replace(',', '')\n\n    df['cleaning_fee'] = df['cleaning_fee'].replace(np.nan, 0)\n\n    df['price'] = df['price'].astype(float)\n    df['cleaning_fee'] = df['cleaning_fee'].astype(float)\n\n    df['total_price'] = df['price'] + df['cleaning_fee']\n    df = df.drop(['price', 'cleaning_fee'], axis=1)\n\n    df = df[df['total_price'] > 1]\n\n    # Log transform total price\n    df['price_log'] = df['total_price'].apply(lambda x: np.log(x))\n\n    return df\n\n\ndef clean_data(df):\n    # Drop rows containing null values\n    df = df.dropna()\n\n    return df\n\n\ndef data_cleaning():\n    print(\"Info: Step 2 - Data Cleaning start ...\")\n\n    # Remove old data if present\n    file = pathlib.Path(\"data-clean.csv\")\n    if file.exists():\n        print(\"Info: Removing data-clean.csv\")\n        os.remove(\"data-clean.csv\")\n\n    df = pd.read_csv('data.csv')\n    df = clean_data(clean_price(clean_zips(df)))\n    df.to_csv('data-clean.csv', index=False)\n\n    print(\"Info: Data Cleaning completed ...\")\n\nif __name__ == '__main__':\n    data_cleaning()\n","sub_path":"eb-flask/data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"162883190","text":"import numpy as np\nfrom pydub import AudioSegment\nfrom scipy.io import wavfile\nimport random\nimport sys\nimport io\nimport os\nimport glob\nimport IPython\nfrom song_preprocessing import *\n\nfrom keras.layers import Bidirectional, Concatenate, Permute, Dot, Input, LSTM, Multiply\nfrom keras.layers import RepeatVector, Dense, Activation, Lambda\nfrom keras.optimizers import Adam\nfrom keras.utils import to_categorical\nfrom keras.models import load_model, Model\nimport keras.backend as K\nimport tensorflow as tf\n\n# IPython.display.Audio(\"./songs/chuyentinhtoi.wav\")\n# x = graph_spectrogram(\"./songs/chuyentinhtoi.wav\") # (101,156017)\n# x_y = graph_spectrogram(\"./songs/yemsao.wav\") # (101, 116171)\n# # number of column depends on the duration of the song\n# _, data = wavfile.read(\"./songs/chuyentinhtoi.wav\") # (12481536, 2)\n\ndef one_step_attention(a, s_prev):\n    # shape of a = (None,S,256)\n    s_prev = repeator(s_prev) # (None,S,256)\n    concat = concatenator([a, s_prev]) # (None,S,512)\n    e = densor1(concat) # (None,S,10)\n    energies = densor2(e) # (None,S,1)\n    alphas = activator(energies) # (None,S,1)\n    context = dotor([alphas, a]) # (None,1,256)\n    return context\n\n\ndef model(Tx, Ty, n_a, n_s, n_x, n_y):\n    X = Input(shape=(Tx, n_x))\n    s0 = Input(shape=(n_s,), name = 's0')\n    c0 = Input(shape=(n_s,), name = 'c0')\n    s = s0\n    c = c0\n\n    outputs = []\n\n    a = Bidirectional(LSTM(n_a, return_sequences = True))(X) #(None, Tx, n_a*2)\n\n    for t in range(Ty):\n\n        context = one_step_attention(a,s)\n        s, _, c = post_activation_LSTM(context, initial_state = [s,c])\n\n        out = output_layer(s)\n        outputs.append(out)\n\n    model = Model(inputs=[X,s0,c0], outputs = outputs)\n    return model\n\nif __name__ == \"__main__\":\n\n    S = 1000\n    Tx = get_Tx(\"./songs/\")\n    Ty = 1\n\n    _, n = get_songs(\"./songs/\")\n    x,y = preprocessing_data(\"./songs/\", Tx, Ty)\n\n    n_a = 128\n    n_s = 256\n    n_x = 101\n    n_y = n\n\n    repeator = RepeatVector(Tx)\n    concatenator = Concatenate(axis=-1)\n    sliding = tf.contrib.data.sliding_window_batch(S)\n    densor1 = Dense(10, activation = \"tanh\")\n    densor2 = Dense(1, activation = \"relu\")\n    activator = Activation(K.softmax, 
name=\"attention_weights\")\n dotor = Dot(axes= 1)\n\n post_activation_LSTM = LSTM(n_s, return_state = True)\n output_layer = Dense(n, activation=K.softmax)\n print(x.shape)\n print(y.shape)\n outputs = list(y.swapaxes(0,1))\n\n model = model(Tx, Ty, n_a, n_s, n_x, n_y)\n model.compile(optimizer = Adam(lr=0.005, beta_1 = 0.9, beta_2 = 0.999, decay =0.1), metrics = ['accuracy'], loss = 'categorical_crossentropy')\n s0 = np.zeros((n, n_s))\n c0 = np.zeros((n, n_s))\n model.fit([x, s0, c0], outputs, epochs = 100, batch_size = 1)\n","sub_path":"Supervised Learning/Song_Detector/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"145060287","text":"from pandas import DataFrame\nfrom ..utils import get_offset, verify_series\n\n\ndef ABSSIO(close, length=None, offset=None, **kwargs):\n \"\"\"Absolute Strength Index Oscillator\"\"\"\n\n # Validate Arguments\n close = verify_series(close)\n length = length if length and length > 0 else 14\n min_periods = int(\n kwargs['min_periods']) if 'min_periods' in kwargs and kwargs['min_periods'] is not None else length\n offset = get_offset(offset)\n \n # Calculate Result\n \n\n return ('Absolute Strength Index Oscilator')\n","sub_path":"pandas_ta_indicators/trend/ABSSIO.py","file_name":"ABSSIO.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"515723375","text":"import abc\n\nfrom pokeretriever.Ability import Ability\nfrom pokeretriever.Move import Move\nfrom pokeretriever.Pokemon import Pokemon\nfrom pokeretriever.Stats import Stats\n\n\nclass BaseFactory(abc.ABC):\n \"\"\"\n Abstract factory class for Pokedex Objects.\n \"\"\"\n @abc.abstractmethod\n def create_item(self, json_dict):\n \"\"\"\n Create a Pokedex Object.\n :param json_dict: a dictionary with necessary values to initialize a Pokedex Object\n :return: a PokedexObject\n \"\"\"\n pass\n\n\nclass AbilityFactory(BaseFactory):\n \"\"\"\n Factory class for Ability objects.\n \"\"\"\n def create_item(self, ability_dict):\n \"\"\"\n Return an Ability object.\n :param ability_dict: dictionary with key-value pairs necessary to initialize an Ability object\n :return: Ability\n \"\"\"\n generation = ability_dict[\"generation\"][\"name\"]\n if ability_dict[\"effect_entries\"]:\n effect = ability_dict[\"effect_entries\"][1][\"effect\"]\n short_effect = ability_dict[\"effect_entries\"][1][\"short_effect\"]\n else:\n effect = \"N/A\"\n short_effect = \"N/A\"\n pokemon = []\n for item in ability_dict[\"pokemon\"]:\n pokemon.append(item[\"pokemon\"][\"name\"])\n name = ability_dict[\"name\"]\n ability_id = ability_dict[\"id\"]\n return Ability(generation, effect, short_effect, pokemon, name, ability_id)\n\n\nclass MoveFactory(BaseFactory):\n \"\"\"\n Factory class for Move objects.\n \"\"\"\n def create_item(self, move_dict):\n \"\"\"\n Return a Move object.\n :param move_dict: dictionary with key-value pairs necessary to initialize a Move object\n :return: Move\n \"\"\"\n generation = move_dict[\"generation\"][\"name\"]\n accuracy = move_dict[\"accuracy\"]\n pp = move_dict[\"pp\"]\n power = move_dict[\"power\"]\n move_type = move_dict[\"type\"][\"name\"]\n damage_class = move_dict[\"damage_class\"][\"name\"]\n if move_dict[\"effect_entries\"]:\n short_effect = move_dict[\"effect_entries\"][0][\"short_effect\"]\n else:\n short_effect = \"N/A\"\n name = move_dict[\"name\"]\n move_id = 
move_dict[\"id\"]\n        return Move(generation, accuracy, pp, power, move_type, damage_class, short_effect, name, move_id)\n\n\nclass PokemonFactory(BaseFactory):\n    \"\"\"\n    Factory class for Pokemon objects.\n    \"\"\"\n    def create_item(self, pokemon_info):\n        \"\"\"\n        Return a Pokemon object.\n        :param pokemon_info: dictionary with key-value pairs necessary to initialize a Pokemon object\n        :return: Pokemon\n        \"\"\"\n        pokemon_info[\"types\"] = [pokemon_type[\"type\"][\"name\"] for pokemon_type in pokemon_info[\"types\"]]\n\n        if type(pokemon_info[\"stats\"][0]) is not Stats:\n            pokemon_info[\"stats\"] = [(stat[\"stat\"][\"name\"], stat[\"base_stat\"]) for stat in pokemon_info[\"stats\"]]\n        pokemon_info[\"abilities\"] = [ability[\"ability\"][\"name\"] for ability in pokemon_info[\"abilities\"]]\n        pokemon_info[\"moves\"] = [(\"Move name: \" + move[\"move\"][\"name\"], \"Level acquired: \" + str(move[\"version_group_details\"][0][\"level_learned_at\"])) for move in pokemon_info[\"moves\"]]\n\n        return Pokemon(**pokemon_info)\n\n\nclass StatsFactory(BaseFactory):\n    \"\"\"\n    Factory class for Stats objects.\n    \"\"\"\n    def create_item(self, stats_info):\n        \"\"\"\n        Return a Stats object.\n        :param stats_info: dictionary necessary to initialize a Stats object\n        :return: Stats\n        \"\"\"\n        return Stats(**stats_info)\n","sub_path":"pokedex/pokeretriever/Factory.py","file_name":"Factory.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"226766563","text":"class Solution:\n    def lengthOfLongestSubstring(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        start ,leng, dict= 0, 0, {}\n        for i in range(len(s)):\n            if s[i] in dict:\n                if dict[s[i]] >= start:\n                    start = dict[s[i]]+1\n            dict[s[i]] = i\n            leng = max(leng, i-start+1)\n        return leng\n\n\n# String problems usually only ask for the length, not the substring itself; in that case using a dict to record indices is very convenient. Traversing with a few pointers is also a common technique.\n# Using the dict keys to record the chars of the str and the values to record their indices is simply a killer trick","sub_path":"3. Longest Substring Without Repeating Characters.py","file_name":"3. Longest Substring Without Repeating Characters.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"183069121","text":"\"\"\"This module defines the text based template responses to be formatted\nand sent to users with the proper data\nThis is meant to be used with the sample weather agent for Dialogflow, located at\nhttps://console.dialogflow.com/api-client/#/agent//prebuiltAgents/Weather\n\"\"\"\n\nLIST_YES = [\n    'Meglio portarlo con te.',\n    'Non fa mai male portarlo con sè.',\n    'Considerando il meteo è meglio averlo con sè.'\n]\n\nLIST_NO = [\n    'No, puoi anche non portarlo.',\n    'Non penso sarà necessario...',\n    'Potresti anche portarlo ma dubito che ne avrai di bisogno.',\n    'A mio parere non ti servirà.'\n]\n\nLIST_COLD = [\n    'Fa freddissimo a {city}. La temperatura si aggira attorno a {temp}°C.',\n    '{temp}°C. Fa molto freddo direi a {city}...',\n    'Fa molto freddo a {city}, faresti meglio a non dimenticarti i guanti. Ci sono {temp}°C.',\n    'La temperatura è di {temp}°C a {city}.'\n]\n\nLIST_CHILLY = [\n    'Fa piuttosto freddo a {city}. La temperatura è di {temp}°C.',\n    'Ti servirebbe sicuramente una giacca con {temp}°C a {city}.',\n    'Non fa molto caldo a {city}. Ci sono {temp}°C.',\n    'La temperatura è di {temp}°C a {city}.'\n]\n\nLIST_WARM = [\n    'La temperatura è ottima a {city}. 
Si attesta attorno a {temp}°C.',\n 'Le temperature si aggirano intorno a valori ottimali a {city} con una media di {temp}°C.',\n 'La temperatura è di {temp}°C a {city}.'\n]\n\nLIST_HOT = [\n 'Oh, a {city} fa molto caldo! Ci sono {temp}°C.',\n 'Fa molto caldo a {city}. La temperatura è di {temp}°C.',\n 'Le temperature sono molto elevate a {city}, fa un gran caldo.',\n 'La temperatura è di {temp}°C a {city}.'\n]\n\nWEATHER_CURRENT = [\n 'A {city} {descr} con {temp}°C e venti da {direct} a {speed} km/h.',\n 'Adesso la temperatura è di {temp}°C a {city} con {descr}. Umidità al {umid}%.',\n 'Attualmente {descr} con {temp}°C a {city}. Venti a {speed} km/h provenienti da {direct}.',\n 'La temperatura a {place} è di {temp}°C con {descr} e venti da {direct}.',\n 'A {city} {descr} con {temp}°C. Umidità al {umid}% con venti da {direct} a {speed} km/h.'\n]\n\nWEATHER_DATE = [\n '{day} in {place} it will be around {temperature} and {condition}.',\n '{day} in {place} you can expect it to be around {temperature} and \\\n {condition}.',\n '{day} in {place} you can expect {condition}, with temperature around \\\n {temperature}.',\n '{day} in {place} it will be {condition}, {temperature}.',\n]\n\nWEATHER_WEEKDAY = [\n 'On {date} in {place} it will be {condition}, {temperature}.',\n 'On {date} in {place} it\\'s expected to be {condition}, {temperature}.',\n 'The forecast for {date} in {place} is {condition}, {temperature}.',\n '{date} in {place} is expected to be {condition}, {temperature}.'\n]\n\nWEATHER_DATE_TIME = [\n '{day} in {place} at {time} it will be around {temperature} and \\\n {condition}.',\n '{day} in {place} at {time} you can expect it to be around {temperature} \\\n and {condition}.',\n '{day} in {place} at {time} you can expect {condition}, with the \\\n temperature around {temperature}.',\n '{day} in {place} at {time} it will be {condition}, {temperature}.',\n 'At {time} on {day} in {place} it will be {temperature} and {condition}.'\n]\n\nWEATHER_TIME_PERIOD = [\n 'It will be {condition} in {city} and around {temp} on period from \\\n {time_start} till {time_end}.'\n]\n\nWEATHER_TIME_PERIOD_DEFINED = [\n 'This {time_period} in {place} it will be {temperature} and {condition}.',\n 'This {time_period} in {place} you can expect {condition}, with \\\n temperature around {temperature}.',\n 'Expect a {condition} {time_period} in {place}, with temperature around \\\n {temperature}.',\n 'It will be {condition} in {place} and around {temperature} this \\\n {time_period}.',\n]\n\nWEATHER_DATE_PERIOD_WEEKEND = [\n 'On Saturday in {city} it will be {condition_sat}, '\n 'with temperatures from {sat_temp_min} to {sat_temp_max}. '\n 'And Sunday should be {condition_sun}, '\n 'with a low of {sun_temp_min} and a high of {sun_temp_max}.'\n]\n\nWEATHER_DATE_PERIOD = [\n 'During period from {date_start} till {date_end}'\n ' in {city} you can expect {condition}, '\n 'with a low of {degree_list_min} and a high of {degree_list_max}.'\n]\n\nWEATHER_ACTIVITY_YES = [\n 'What a nice weather for {activity}!'\n]\n\nWEATHER_ACTIVITY_NO = [\n 'Not the best weather for {activity}.'\n]\n\nRESPONSE_WEATHER_CONDITION = [\n 'Chance of {condition_original} is {condition} percent.'\n]\n\nRESPONSE_WEATHER_OUTFIT = [\n 'Chance of {condition_original} is {condition} percent. 
{answer}'\n]\n","sub_path":"responses.py","file_name":"responses.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"548075540","text":"#\n# Copyright 2011, Intel Inc.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; version 2 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Library General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n'''\nCreated on 29 sept. 2011\n\n@author: ronan\n'''\nimport os\n\nimport ObsLightOsc\nimport ObsLightErr\nimport ObsLightTools\nimport ObsLightPrintManager\n\nclass ObsLightServer(object):\n\n def __init__(self,\n serverWeb=\"\",\n serverAPI=None,\n serverRepo=\"\",\n alias=None,\n user=None,\n passw=None,\n fromSave=None):\n '''\n Create a reference to a OBS server\n '''\n self.__alias = None\n self.__serverRepo = None\n self.__isReachable = None\n\n self.__projectRreadOnlyBuffer = {}\n\n if fromSave != None:\n if \"isOBSConnected\" in fromSave.keys():\n self.__isOBSConnected = fromSave[\"isOBSConnected\"]\n if \"serverWeb\" in fromSave.keys():\n self.__serverWeb = fromSave[\"serverWeb\"]\n if \"serverAPI\" in fromSave.keys():\n self.__serverAPI = str(fromSave[\"serverAPI\"])\n if \"serverRepo\" in fromSave.keys():\n self.__serverRepo = fromSave[\"serverRepo\"]\n if \"alias\" in fromSave.keys():\n self.__alias = fromSave[\"alias\"]\n if \"user\" in fromSave.keys():\n self.__user = fromSave[\"user\"]\n if \"passw\" in fromSave.keys():\n self.__passw = fromSave[\"passw\"]\n else:\n self.__isOBSConnected = False\n self.__serverWeb = serverWeb\n self.__serverAPI = str(serverAPI)\n self.__serverRepo = serverRepo\n\n self.__alias = alias\n\n self.__user = user\n self.__passw = passw\n\n if (self.__alias == None) or (len(self.__alias) < 1):\n self.__alias = self.__serverAPI\n\n ObsLightOsc.getObsLightOsc().initConf(api=self.__serverAPI,\n user=self.__user,\n passw=self.__passw,\n alias=self.__alias)\n\n def testServer(self):\n self.__isReachable = (self.testServerAPI() and\n self.testServerRepo() and\n self.testServerWeb())\n return self.__isReachable\n\n def testServerAPI(self):\n return ObsLightTools.testHost(self.__serverAPI)\n\n def testServerRepo(self):\n return ObsLightTools.testHost(self.__serverRepo)\n\n def testServerWeb(self):\n return ObsLightTools.testHost(self.__serverWeb)\n\n def isReachable(self):\n if self.__isReachable == None:\n self.__isReachable = self.testServer()\n return self.__isReachable\n\n def getObsProjectPackageList(self, projectObsName):\n return ObsLightOsc.getObsLightOsc().getPackageList(apiurl=self.__serverAPI,\n projectLocalName=projectObsName)\n\n def getFilesListPackage(self,\n projectObsName,\n package):\n return ObsLightOsc.getObsLightOsc().getFilesListPackage(apiurl=self.__serverAPI,\n projectObsName=projectObsName,\n package=package)\n\n\n def getPackageBuildRequires(self,\n projectObsName,\n projectTarget,\n arch,\n specFile,\n extraPackages=[]):\n buildInfoXml = 
ObsLightOsc.getObsLightOsc().getPackageBuildRequires(self.__serverAPI,\n projectObsName,\n projectTarget,\n arch,\n specFile,\n extraPackages)\n\n buildInfoCli = ObsLightOsc.getObsLightOsc().createBuildInfoObj(buildInfoXml, self.__serverAPI)\n buildInfoCli = ObsLightOsc.getObsLightOsc().updateCache(buildInfoCli, self.__serverAPI)\n\n return buildInfoCli\n\n def getObsServerParameter(self, parameter=None):\n '''\n return the value of the parameter \"parameter\"\n the valid parameter is:\n isOBSConnected\n serverWeb\n serverAPI\n serverRepo\n alias\n user\n passw\n '''\n\n if parameter == \"isOBSConnected\":\n return self.__isOBSConnected\n elif parameter == \"serverWeb\":\n return self.__serverWeb\n elif parameter == \"serverAPI\":\n return str(self.__serverAPI)\n elif parameter == \"serverRepo\":\n return self.__serverRepo\n elif parameter == \"alias\":\n return self.__alias\n elif parameter == \"user\":\n return self.__user\n elif parameter == \"passw\":\n return self.__passw\n\n def getProjectParameter(self, project, parameter):\n '''\n Get the value of a project parameter.\n Valid parameter are:\n title\n description\n remoteurl\n maintainer\n bugowner\n arch\n repository \n '''\n if not parameter in [\"title\",\n \"description\",\n \"remoteurl\",\n \"maintainer\",\n \"bugowner\",\n \"arch\",\n \"repository\",\n \"readonly\"]:\n mess = \"\\\"%s\\\" is not a parameter of a OBS project\" % parameter\n raise ObsLightErr.ObsLightObsServers(mess)\n\n if not project in self.getLocalProjectList(raw=True):\n message = \"Can't return the project parameter,\\n\"\n message += \" '%s' is not a project on obs '%s'\"\n message = message % (project , self.__serverAPI)\n\n raise ObsLightErr.ObsLightObsServers(message)\n\n if parameter == \"readonly\":\n if project in self.__projectRreadOnlyBuffer.keys():\n return self.__projectRreadOnlyBuffer[project]\n res = ObsLightOsc.getObsLightOsc().getProjectParameter(projectObsName=project,\n apiurl=self.__serverAPI,\n parameter=\"maintainer\")\n ro = not self.__user in res\n self.__projectRreadOnlyBuffer[project] = ro\n return ro\n else:\n res = ObsLightOsc.getObsLightOsc().getProjectParameter(projectObsName=project,\n apiurl=self.__serverAPI,\n parameter=parameter)\n return res\n\n def getAPI(self):\n return str(self.__serverAPI)\n\n def getPackageParameter(self, project, package, parameter):\n '''\n Get the value of a package parameter.\n Valid parameter are:\n title\n description\n url\n status \n listFile \n '''\n\n if not parameter in [\"title\",\n \"description\",\n \"url\",\n \"status\",\n \"listFile\"]:\n raise ObsLightErr.ObsLightObsServers(parameter + \" is not a parameter of a OBS package\")\n\n# FIXME: is that necessary ? 
It's really slow on public OBS servers...\n#        if not project in self.getLocalProjectList(raw=True):\n#            message = \"Can't return the package parameter,\\n\"\n#            message += \"'%s' is not a project on obs '%s'\"\n#            message = message % (project, self.__serverAPI)\n#\n#            raise ObsLightErr.ObsLightObsServers(message)\n\n        if not package in self.getObsProjectPackageList(projectObsName=project):\n            message = \"Can't return the package parameter,\\n\"\n            message += \"'%s' is not a package of project '%s' on obs '%s'\"\n            message = message % (package, project, self.__serverAPI)\n            raise ObsLightErr.ObsLightObsServers(message)\n\n        if parameter in [\"title\",\n                         \"description\",\n                         \"url\"]:\n            return ObsLightOsc.getObsLightOsc().getPackageMetaParameter(projectObsName=project,\n                                                                        package=package,\n                                                                        apiurl=self.__serverAPI,\n                                                                        parameter=parameter)\n        elif parameter in [\"listFile\"]:\n            return ObsLightOsc.getObsLightOsc().getPackageParameter(projectObsName=project,\n                                                                    package=package,\n                                                                    apiurl=self.__serverAPI,\n                                                                    parameter=parameter)\n\n\n    def getObsPackageRev(self,\n                         projectObsName,\n                         package):\n        return ObsLightOsc.getObsLightOsc().getObsPackageRev(apiurl=self.__serverAPI,\n                                                             projectObsName=projectObsName,\n                                                             package=package)\n\n    def getOscPackageRev(self, workingdir):\n        return ObsLightOsc.getObsLightOsc().getOscPackageRev(workingdir=workingdir)\n\n    def setObsServerParameter(self, parameter=None, value=None):\n        '''\n        change the value of the parameter \"parameter\"\n        the valid parameter is:\n            isOBSConnected\n            serverWeb\n            serverAPI\n            serverRepo\n            alias\n            user\n            passw\n        '''\n        if value == None:\n            raise ObsLightErr.ObsLightObsServers(\"value is not valid for setObsServerParameter\")\n        if parameter == \"isOBSConnected\":\n            self.__isOBSConnected = value\n        elif parameter == \"serverWeb\":\n            self.__serverWeb = value\n        elif parameter == \"serverAPI\":\n            ObsLightOsc.getObsLightOsc().changeAPI(api=self.__serverAPI,\n                                                   newApi=value)\n            self.__serverAPI = str(value)\n        elif parameter == \"serverRepo\":\n            self.__serverRepo = value\n        elif parameter == \"alias\":\n            self.__alias = value\n        elif parameter == \"user\":\n            ObsLightOsc.getObsLightOsc().changeUser(api=self.__serverAPI,\n                                                    user=value)\n            self.__user = value\n        elif parameter == \"passw\":\n            ObsLightOsc.getObsLightOsc().changePassw(api=self.__serverAPI,\n                                                     passw=value)\n            self.__passw = value\n        else:\n            raise ObsLightErr.ObsLightObsServers(\"parameter is not valid for setObsServerParameter\")\n        return None\n\n    def initConfigProject(self, projet, repos, lastResult=None):\n        lastResult = lastResult or {}\n        #if the repository is linked to a listDepProject\n        res = ObsLightOsc.getObsLightOsc().getDepProject(apiurl=self.__serverAPI,\n                                                         projet=projet,\n                                                         repos=repos)\n        #the listDepProject must be trusted (added to .oscrc)\n        if res != None:\n            ObsLightOsc.getObsLightOsc().trustRepos(api=self.__serverAPI,\n                                                    listDepProject=res)\n\n            listProject = res.keys()\n\n            dicoProject = dict(lastResult)\n            for aprojet in listProject:\n                dicoProject[aprojet] = res[aprojet]\n\n            for aprojet in listProject:\n                if (aprojet != projet) or (res[aprojet] != repos) :\n                    if (aprojet not in lastResult.keys()):\n                        self.initConfigProject(projet=aprojet,\n                                               repos=res[aprojet],\n                                               lastResult=dicoProject)\n\n    def getDic(self):\n        '''\n        return a description of the object in a dictionary \n        '''\n        aDic = {}\n        aDic[\"isOBSConnected\"] = self.__isOBSConnected\n        aDic[\"serverWeb\"] = self.__serverWeb\n        aDic[\"serverAPI\"] = str(self.__serverAPI)\n        aDic[\"serverRepo\"] = self.__serverRepo\n        aDic[\"alias\"] = self.__alias\n        aDic[\"user\"] = 
self.__user\n aDic[\"passw\"] = self.__passw\n return aDic\n\n def getName(self):\n '''\n return the OBS server name.\n '''\n return self.__alias\n\n def getPackageList(self, projectLocalName=None):\n return ObsLightOsc.getObsLightOsc().getPackageList(apiurl=self.__serverAPI,\n projectLocalName=projectLocalName)\n\n def checkoutPackage(self, projectObsName=None, package=None, directory=None):\n ObsLightOsc.getObsLightOsc().checkoutPackage(obsServer=self.__serverAPI,\n projectObsName=projectObsName,\n package=package,\n directory=directory)\n\n def getPackageStatus(self,\n project=None,\n package=None,\n repo=None,\n arch=None):\n return ObsLightOsc.getObsLightOsc().getPackageStatus(obsServer=self.__serverAPI,\n project=project,\n package=package,\n repo=repo,\n arch=arch)\n\n def getDependencyRepositories(self, projectObsName, target, arch):\n '''\n Return the list of the dependency repositories.\n '''\n\n\n result1 = ObsLightOsc.getObsLightOsc().getDependencyProjects(self.__serverAPI,\n projectObsName, target)\n\n result2 = {}\n for prj in result1.keys():\n url = os.path.join(self.__serverRepo, prj.replace(\":\", \":/\"), result1[prj])\n\n alias = ObsLightOsc.getObsLightOsc().getAliasOfRepo(url + \"/\" + prj + \".repo\")\n if alias != None:\n result2[alias] = url\n\n listUrl = ObsLightOsc.getObsLightOsc().getDODUrl(self.__serverAPI, prj, arch)\n if listUrl != None:\n for url in listUrl:\n alias = ObsLightOsc.getObsLightOsc().getAliasOfRepo(url)\n if alias != None:\n result2[alias] = url\n\n return result2\n\n def getRepo(self):\n if self.__serverRepo != None:\n return self.__serverRepo\n else:\n raise ObsLightErr.ObsLightObsServers(\"In \" + self.__alias + \" there is no repo\")\n\n\n def getLocalProjectList(self,\n maintainer=False,\n bugowner=False,\n arch=None,\n remoteurl=False,\n raw=True):\n logger = ObsLightPrintManager.getLogger()\n logger.info(\"Getting project list from %s\" % self.__serverAPI)\n\n if (\"https://api.meego.com\" in self.__serverAPI):\n msg = \"api.meego.com doesn't support search request, Bug 24979\"\n logger.warning(msg)\n\n obsLightOsc = ObsLightOsc.getObsLightOsc()\n if (not raw) and not (\"https://api.meego.com\" in self.__serverAPI):\n aBugowner = None\n if bugowner:\n aBugowner = self.__user\n\n aMaintainer = None\n if maintainer:\n aMaintainer = self.__user\n\n return obsLightOsc.getFilteredProjectListFromServer(self.__serverAPI,\n aMaintainer,\n aBugowner,\n arch,\n remoteurl)\n else:\n return obsLightOsc.getProjectListFromServer(self.__serverAPI)\n\n def getTargetList(self, projectObsName=None):\n return ObsLightOsc.getObsLightOsc().getTargetList(obsServer=self.__serverAPI,\n projectObsName=projectObsName)\n\n def getArchitectureList(self,\n projectObsName=None,\n projectTarget=None):\n return ObsLightOsc.getObsLightOsc().getArchitectureList(obsServer=self.__serverAPI ,\n projectObsName=projectObsName,\n projectTarget=projectTarget)\n\n\n\n def getUrlServerWeb(self):\n return self.__serverWeb\n\n def getProjectTitle(self, projectObsName):\n return ObsLightOsc.getObsLightOsc().getProjectParameter(projectObsName=projectObsName,\n apiurl=self.__serverAPI,\n parameter=\"title\")\n\n def getProjectDescription(self, projectObsName):\n return ObsLightOsc.getObsLightOsc().getProjectParameter(projectObsName=projectObsName,\n apiurl=self.__serverAPI,\n parameter=\"description\")\n\n\n def createObsProject(self, projectObsName, title=\"\", description=\"\"):\n return ObsLightOsc.getObsLightOsc().createObsProject(self.__serverAPI,\n projectObsName,\n self.__user,\n 
title,\n description)\n\n\n def createObsPackage(self, projectObsName, package, title=\"\", description=\"\"):\n return ObsLightOsc.getObsLightOsc().createObsPackage(self.__serverAPI,\n projectObsName,\n package,\n title,\n description)\n\n def saveProjectConfig(self, projectObsName, target):\n return ObsLightOsc.getObsLightOsc().saveProjectConfig(self.__serverAPI,\n projectObsName,\n target)\n\n","sub_path":"obslight/ObsLight/ObsLightServer.py","file_name":"ObsLightServer.py","file_ext":"py","file_size_in_byte":19951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"38607227","text":"# coding=utf-8\nfrom socket import *\nfrom multiprocessing import *\n# 导入正则\nimport re\n\n# 静态文件根目录\nHTML_ROOT_DIR=\"./html\"\n\ndef handle_conn(client_socket,client_addr):\n\n # 接收数据\n request_data = client_socket.recv(1024);\n print(\"request data:\",request_data)\n\n if(len(request_data)<=0):\n client_socket.close()\n return\n\n # 解析http数据报文\n # 按照换行符进行分割\n request_lines = request_data.splitlines()\n for line in request_lines:\n print(line)\n\n # 提取请求方式\n # 获取第一行\n # 'GET / HTTP/1.1'\n request_start_line = request_lines[0]\n # 提取请求路径\n # 使用正则表达式进行划分\n # \\w+ 多个字母开头 \\s一个或者多个空格 /[^\\s]*表示/后面非空格的多个值\n file_name = re.match(r\"\\w+\\s+(/[^\\s]*)\\s\",request_start_line.decode(\"utf-8\")).group(1)\n # 可以获得第一个/后面的值\n if \"/\" == file_name:\n # 判断默认路径\n file_name = \"/index.html\"\n\n # 打开文件,有可能打开的是图片,以二进制的方式读取\n try:\n file = open(HTML_ROOT_DIR+file_name,\"rb\")\n except IOError:\n response_start_line = \"HTTP/1.1 404 Not Found\\r\\n\"\n response_body=\"the file is not found\"\n else:\n file_data = file.read()\n # 返回响应的数据 http 响应格式\n \"\"\"\n HTTP 1.1 200 OK\\r\\n\n \\r\\n\n hello world\n \"\"\"\n # 构造响应数据\n response_start_line=\"HTTP/1.1 200 OK\\r\\n\"\n response_body=file_data.decode(\"utf-8\")\n\n response_headers=\"Server: My Server\\r\\n\"\n response = response_start_line+response_headers+\"\\r\\n\"+response_body\n print(\"response data:\",response)\n\n # 向客户端返回响应数据\n # client_socket.send(response.encode())\n client_socket.send(bytes(response,\"utf-8\"))\n client_socket.close()\n\nif __name__ == '__main__':\n\n server = socket(AF_INET, SOCK_STREAM)\n # 修改socket级别参数值reuseaddr,重用ip地址\n server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n # 设置端口号\n server.bind(('', 8090))\n # 设置监听队列\n server.listen(128)\n\n while True:\n client_socket,client_addr = server.accept()\n # print(\"[%s %s] 已经连接上了\"%(client_addr[0],client_addr[1]))\n # 等价于\n print(\"[%s %s] 已经连接上了\"%client_addr)\n p = Process(target=handle_conn,args=(client_socket,client_addr))\n p.start()\n # 注意关闭client的socket 由于p进程已经收到client的socket,原先的关闭\n client_socket.close()\n","sub_path":"code/03.web服务器/02.web静态服务简单实现-返回静态页面.py","file_name":"02.web静态服务简单实现-返回静态页面.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"649320665","text":"from django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.views import generic\nimport datetime\nimport calendar\nfrom django.db.models import Q\nfrom sjdl.news.models import News\nfrom mezzanine.galleries.models import Gallery\nfrom mezzanine.galleries.models import GalleryImage\n\ndef listing(request):\n news = News.latest_news_list()[:8]\n\n archives = News.latest_news_list()\n\n return render(request, 'news/index.html', {'news':news,'archives':archives})\n\ndef 
archives(request, year, month):\n monthrange = calendar.monthrange(int(year), int(month))\n \n dateMin = datetime.date(int(year),int(month),1)\n dateMax = datetime.date(dateMin.year, dateMin.month, monthrange[1])\n\n news = News.objects.filter(\n Q(publish_date__range = (dateMin,dateMax)) | Q(publish_date__isnull = True)\n ).filter(\n Q(expiry_date__gte = dateMax) | Q(expiry_date__isnull = True)\n ).filter(\n status = 2\n ).order_by('-publish_date')\n\n archives = News.latest_news_list()\n\n return render(request, 'news/index.html', {'news':news,'archives':archives})\n\ndef detail(request, slug):\n news = get_object_or_404(News, slug=slug)\n images = None\n\n if news.ik_diapo_id is not None:\n gallery = Gallery.objects.get(id=news.ik_diapo_id)\n images = GalleryImage.objects.all().filter(gallery_id=gallery.id)\n\n archives = News.latest_news_list()\n\n return render(request, 'news/detail.html', {'news':news, 'images':images,'archives':archives})","sub_path":"news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"393223166","text":"\"\"\"Parsing of Storm Prediction Center SAW Product\n\nThis does not process the legacy SAW products that did not have LAT...LON\n\"\"\"\nimport re\nimport datetime\n\nfrom shapely.geometry import Polygon as ShapelyPolygon\nfrom shapely.geometry import MultiPolygon\nfrom pyiem.nws.product import TextProduct\nfrom pyiem.util import utc\nfrom pyiem.exceptions import SAWException\n\nLATLON = re.compile(r\"LAT\\.\\.\\.LON\\s+((?:[0-9]{8}\\s+)+)\")\nNUM_RE = re.compile(\n r\"WW ([0-9]*) (TEST)?\\s?\" \"(SEVERE TSTM|TORNADO|SEVERE THUNDERSTORM)\"\n)\nREPLACES_RE = re.compile(\"REPLACES WW ([0-9]*)\")\nDBTYPES = [\"TOR\", \"SVR\"]\nTYPE2STRING = [\"Tornado\", \"Severe Thunderstorm\"]\n\n\nclass SAWProduct(TextProduct):\n \"\"\"Class representing a SAW Product\"\"\"\n\n (TORNADO, SEVERE_THUNDERSTORM) = range(2)\n (ISSUES, CANCELS) = range(2)\n\n def __init__(self, text, utcnow=None):\n \"\"\"Constructor\n\n Args:\n text (str): text to parse\n \"\"\"\n TextProduct.__init__(self, text, utcnow=utcnow)\n self.saw = int(self.afos[3:].strip())\n self.action = self.find_action()\n self.geometry = self.find_polygon()\n self.ww_num = self.find_ww_num()\n (self.sts, self.ets) = self.find_time()\n self.ww_type = self.find_ww_type()\n self.affected_wfos = []\n\n def find_action(self):\n \"\"\"Figure out if this is an issuance or cancells statement\n\n Return:\n (int): either ISSUES or CANCELS\n \"\"\"\n if re.findall(\"CANCELLED\", self.unixtext):\n return self.CANCELS\n return self.ISSUES\n\n def compute_wfos(self, txn):\n \"\"\"Figure out who is impacted by this watch\"\"\"\n if self.geometry is None:\n return\n txn.execute(\n \"SELECT distinct wfo from ugcs WHERE \"\n f\"ST_Contains('SRID=4326;{self.geometry.wkt}', geom) \"\n \"and end_ts is null\"\n )\n for row in txn.fetchall():\n self.affected_wfos.append(row[0])\n\n def sql(self, txn):\n \"\"\"Do the necessary database work\n\n Args:\n (psycopg2.transaction): a database transaction\n \"\"\"\n if self.action == self.ISSUES:\n # Delete any current entries\n txn.execute(\n \"DELETE from watches WHERE num = %s and \"\n \"extract(year from issued) = %s\",\n (self.ww_num, self.sts.year),\n )\n # Insert into the main watches table\n giswkt = \"SRID=4326;%s\" % (MultiPolygon([self.geometry]).wkt,)\n sql = (\n \"INSERT into watches (sel, issued, expired, type, report, \"\n \"geom, num) 
VALUES(%s,%s,%s,%s,%s,%s,%s)\"\n )\n args = (\n \"SEL%s\" % (self.saw,),\n self.sts,\n self.ets,\n DBTYPES[self.ww_type],\n self.unixtext,\n giswkt,\n self.ww_num,\n )\n txn.execute(sql, args)\n # Update the watches_current table\n sql = (\n \"UPDATE watches_current SET issued = %s, expired = %s, \"\n \"type = %s, report = %s, geom = %s, num = %s WHERE sel = %s\"\n )\n args = (\n self.sts,\n self.ets,\n DBTYPES[self.ww_type],\n self.unixtext,\n giswkt,\n self.ww_num,\n \"SEL%s\" % (self.saw,),\n )\n txn.execute(sql, args)\n # Is this a replacement?\n if REPLACES_RE.findall(self.unixtext):\n rnum = REPLACES_RE.findall(self.unixtext)[0][0]\n txn.execute(\n \"UPDATE watches SET expired = %s \"\n \"WHERE num = %s and extract(year from expired) = %s\",\n (self.valid, rnum, self.sts.year),\n )\n elif self.action == self.CANCELS:\n for table in (\"watches\", \"watches_current\"):\n txn.execute(\n f\"UPDATE {table} SET expired = %s \"\n \"WHERE num = %s and extract(year from expired) = %s\",\n (self.valid, self.ww_num, self.valid.year),\n )\n if table == \"watches\" and txn.rowcount != 0:\n self.warnings.append(\n \"Expiration of watch resulted in \"\n f\"update of {txn.rowcount} rows, instead of 1.\"\n )\n\n def find_time(self):\n \"\"\"Find the start and end valid time of this watch\n\n Returns:\n (datetime, datetime): representing the time of this watch\n \"\"\"\n if self.action == self.CANCELS:\n return (None, None)\n tokens = re.findall(\n \"([0-3][0-9])([0-2][0-9])([0-6][0-9])Z - \"\n \"([0-3][0-9])([0-2][0-9])([0-6][0-9])Z\",\n self.unixtext,\n )\n\n day1 = int(tokens[0][0])\n hour1 = int(tokens[0][1])\n minute1 = int(tokens[0][2])\n day2 = int(tokens[0][3])\n hour2 = int(tokens[0][4])\n minute2 = int(tokens[0][5])\n\n sts = utc(self.utcnow.year, self.utcnow.month, day1, hour1, minute1)\n ets = utc(self.utcnow.year, self.utcnow.month, day2, hour2, minute2)\n\n # If we are near the end of the month and the day1 is 1, add 1 month\n if self.utcnow.day > 27 and day1 == 1:\n sts += datetime.timedelta(days=+35)\n sts = sts.replace(day=1)\n if self.utcnow.day > 27 and day2 == 1:\n ets += datetime.timedelta(days=+35)\n ets = ets.replace(day=1)\n return (sts, ets)\n\n def find_ww_num(self):\n \"\"\"Find the Weather Watch Number\n\n Returns:\n (int): The Weather Watch Number\n \"\"\"\n tokens = NUM_RE.findall(self.unixtext)\n if not tokens:\n raise SAWException(\"Could not locate Weather Watch Number\")\n return int(tokens[0][0])\n\n def is_test(self):\n \"\"\"Is this a test watch?\n\n Returns:\n boolean if this SAW is a test or not\n \"\"\"\n tokens = NUM_RE.findall(self.unixtext)\n if not tokens:\n raise SAWException(\"Could not locate Weather Watch Number\")\n return tokens[0][1] == \"TEST\"\n\n def find_ww_type(self):\n \"\"\"Find the Weather Watch Type\n\n Returns:\n (int): The Weather Watch Type\n \"\"\"\n tokens = NUM_RE.findall(self.unixtext)\n if not tokens:\n raise SAWException(\"Could not locate Weather Watch Type\")\n if tokens[0][2] == \"TORNADO\":\n return self.TORNADO\n return self.SEVERE_THUNDERSTORM\n\n def find_polygon(self):\n \"\"\"Search out the text for the LAT...LON polygon\n\n Returns:\n (str): Well Known Text (WKT) representation\n \"\"\"\n if self.action == self.CANCELS:\n return\n tokens = LATLON.findall(self.unixtext.replace(\"\\n\", \" \"))\n if not tokens:\n raise SAWException(\"Could not parse LAT...LON geometry\")\n pts = []\n for pair in tokens[0].split():\n lat = float(pair[:4]) / 100.0\n lon = 0 - float(pair[4:]) / 100.0\n if lon > -40:\n lon = lon - 100.0\n 
pts.append((lon, lat))\n return ShapelyPolygon(pts)\n\n def get_jabbers(self, uri, _uri2=None):\n \"\"\"Generate the jabber messages for this Product\n\n NOTE: In the past, the messages generated here have tripped twitter's\n spam logic, so we are careful to craft unique messages\n\n Args:\n uri (str): un-used in this context\n \"\"\"\n res = []\n url = (\"https://www.spc.noaa.gov/products/watch/%s/ww%04i.html\") % (\n self.valid.year,\n self.ww_num,\n )\n spc_channels = f\"SPC,SPC.{DBTYPES[self.ww_type]}WATCH\"\n if self.action == self.CANCELS:\n plain = (\n \"Storm Prediction Center cancels Weather Watch Number %s \" \"%s\"\n ) % (self.ww_num, url)\n html = (\n '<p><a href=\"%s\">Storm Prediction Center</a> cancels '\n \"Weather Watch Number %s</p>\"\n ) % (url, self.ww_num)\n res.append(\n [plain, html, dict(channels=spc_channels, twitter=plain)]\n )\n # Now create templates\n plain = (\n \"Storm Prediction Center cancels Weather Watch Number %s \"\n \"for portions of %%s %s\"\n ) % (self.ww_num, url)\n html = (\n '<p><a href=\"%s\">Storm Prediction Center</a> cancels '\n \"Weather Watch Number %s for portions of %%s</p>\"\n ) % (url, self.ww_num)\n elif self.action == self.ISSUES:\n plain = (\"SPC issues %s Watch %s till %sZ\") % (\n TYPE2STRING[self.ww_type],\n self.ww_num,\n self.ets.strftime(\"%-H:%M\"),\n )\n html = (\n \"<p>Storm Prediction Center issues \"\n '<a href=\"https://www.spc.noaa.gov/products/watch/ww%04i.html\">%s Watch %s</a> '\n \"till %s UTC\"\n ) % (\n int(self.ww_num),\n TYPE2STRING[self.ww_type],\n self.ww_num,\n self.ets.strftime(\"%-H:%M\"),\n )\n if REPLACES_RE.findall(self.unixtext):\n rtext = (\"WW %s \") % (\n REPLACES_RE.findall(self.unixtext)[0][0].strip(),\n )\n plain += \", new watch replaces \" + rtext\n html += \", new watch replaces \" + rtext\n\n plain2 = \"%s %s\" % (plain, url)\n plain2 = \" \".join(plain2.split())\n html2 = html + (\n ' (<a href=\"%s?year=%s&amp;num=%s\">Watch ' \"Quickview</a>)</p>\"\n ) % (uri, self.sts.year, self.ww_num)\n res.append(\n [plain2, html2, dict(channels=spc_channels, twitter=plain2)]\n )\n # Now create templates\n plain += \" for portions of %%s %s\" % (url,)\n html += (\n \" for portions of %%s \"\n '(<a href=\"%s?year=%s&amp;num=%s\">Watch '\n \"Quickview</a>)</p>
\"\n ) % (uri, self.sts.year, self.ww_num)\n\n plain = \" \".join(plain.split())\n for wfo in self.affected_wfos:\n res.append(\n [\n plain % (wfo,),\n html % (wfo,),\n dict(channels=wfo, twitter=(plain % (wfo,))),\n ]\n )\n return res\n\n\ndef parser(text, utcnow=None):\n \"\"\"parser of raw SPC SAW Text\n\n Args:\n text (str): the raw text to parse\n utcnow (datetime): the current datetime with timezone set!\n\n Returns:\n SAWProduct instance\n \"\"\"\n return SAWProduct(text, utcnow=utcnow)\n","sub_path":"src/pyiem/nws/products/saw.py","file_name":"saw.py","file_ext":"py","file_size_in_byte":10866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"104576026","text":"\"\"\"Memory related utils.\n\"\"\"\nimport getpass\nimport sys\nimport math\nfrom collections import deque\nimport time\nfrom argparse import ArgumentParser, Namespace\nimport numpy as np\nimport psutil\nfrom loguru import logger\n\nUSER = getpass.getuser()\n\n\ndef get_memory_usage(user: str = USER) -> int:\n \"\"\"Get the memory usage of the specified user.\n\n :param user: The user whose memory usage to get.\n :return: The memory usage of the user in bytes.\n \"\"\"\n STATUS = (\n psutil.STATUS_RUNNING,\n psutil.STATUS_SLEEPING,\n psutil.STATUS_DISK_SLEEP,\n psutil.STATUS_WAKING,\n psutil.STATUS_PARKED,\n psutil.STATUS_IDLE,\n psutil.STATUS_WAITING,\n )\n try:\n return sum(\n p.memory_info().rss for p in psutil.process_iter()\n if p.username() == USER and p.status() in STATUS\n )\n except:\n return get_memory_usage(user)\n\n\ndef monitor_memory_usage(seconds: float = 1, user: str = USER):\n \"\"\"Log out the memory usage of the specified user in a specified frequency.\n\n :param seconds: The number of seconds to wait before the next logging.\n :param user: The user whose memory usage to monitor.\n \"\"\"\n while True:\n time.sleep(seconds)\n logger.info(\"Memory used by {}: {:,}\", user, get_memory_usage(user=user))\n\n\ndef match_memory_usage(\n target: float,\n arr_size: int = 1_000_000,\n sleep_min: float = 1,\n sleep_max: float = 30\n):\n \"\"\"Match a user's memory usage to the specified value.\n The memory usage will gradually increase to the specified value \n if it is smaller than the specified value.\n Otherwise, \n the memory usage drops immediately to match the specified value.\n\n :param target: The target memory in bytes.\n :param arr_size: The size of integer arrays for consuming memory.\n :param sleep_min: The minimum time of sleeping.\n :param sleep_max: The maximum time of sleeping.\n \"\"\"\n logger.info(\"Target memory: {:,.0f}\", target)\n # define an template array\n arr = list(range(arr_size))\n size = sys.getsizeof(arr)\n # deque for consuming memory flexibly\n dq = deque()\n # define 2 points for linear interpolation of sleep seconds\n xp = (0, 10)\n yp = (sleep_max, sleep_min)\n while True:\n mem = get_memory_usage(USER)\n logger.info(\n \"Current used memory by {}: {:,} out of which {:,} is contributed by the memory matcher\",\n USER, mem, size * len(dq)\n )\n diff = (target - mem) / size\n if diff > 0:\n logger.info(\"Consuming more memory ...\")\n dq.append(arr.copy())\n time.sleep(np.interp(diff, xp, yp))\n else:\n count = min(math.ceil(-diff), len(dq))\n logger.info(\"Releasing memory ...\")\n for _ in range(count):\n dq.pop()\n time.sleep(np.interp(count, xp, yp))\n\n\ndef parse_args(args=None, namespace=None) -> Namespace:\n \"\"\"Parse command-line arguments.\n\n :param args: The arguments to parse. 
\n If None, the arguments from command-line are parsed.\n :param namespace: An inital Namespace object.\n :return: A namespace object containing parsed options.\n \"\"\"\n parser = ArgumentParser(\n description=\"Make memory consumption match the specified target.\"\n )\n mutex = parser.add_mutually_exclusive_group()\n mutex.add_argument(\n \"-g\",\n dest=\"target\",\n type=lambda s: int(s) * 1073741824,\n help=\"Specify target memory in gigabytes.\"\n )\n mutex.add_argument(\n \"-m\",\n dest=\"target\",\n type=lambda s: int(s) * 1048576,\n help=\"Specify target memory in megabytes.\"\n )\n return parser.parse_args(args=args, namespace=namespace)\n\n\ndef main():\n \"\"\"The main function for scripting usage.\n \"\"\"\n args = parse_args()\n match_memory_usage(args.target)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"dsutil/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"330551532","text":"import random, string\n\nnum = \"0123456789\"\nupp = \"ABCDEFGHIJKLMNOPQRSTYVWXYZ\"\nlow = \"abcdefghijklmnopqrstuvwxyz\"\n\nen = \"BNZQ:1l36de9583w5516fv3b8691102224f3e\"\nres = \"\"\ncnt = 0\nfor i in range(len(en)):\n c = en[i]\n if c.islower():\n for j in range(len(low)):\n random.seed(\"random\")\n for k in range(cnt):\n random.randrange(0, 1)\n val = chr((ord(low[j])-ord('a')+random.randrange(0,26))%26 + ord('a'))\n if val == c:\n res += low[j]\n cnt += 1\n break\n elif c.isupper():\n for j in range(len(upp)):\n random.seed(\"random\")\n for k in range(cnt):\n random.randrange(0, 1)\n val = chr((ord(upp[j])-ord('A')+random.randrange(0,26))%26 + ord('A'))\n if val == c:\n res += upp[j]\n cnt += 1\n break\n elif c.isdigit():\n for j in range(len(num)):\n random.seed(\"random\")\n for k in range(cnt):\n random.randrange(0, 1)\n val = chr((ord(num[j])-ord('0')+random.randrange(0,10))%10 + ord('0'))\n if val == c:\n res += num[j]\n cnt += 1\n break\n else:\n res += c\n\nprint(res)\n","sub_path":"2017_picoctf/LEVEL_2/Cryptography/SoRandom-75/sorandom_solve.py","file_name":"sorandom_solve.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"410740382","text":"import torch\nfrom torchvision import datasets, transforms\nfrom torch.utils.data.dataset import Dataset\nfrom torch.utils.data import DataLoader\nimport os\nfrom PIL import Image\nimport pandas as pd\nimport numpy as np\nimport cv2\nfrom elastic_transform import ElasticTransform\n\nclass MyDigitsDataset(Dataset):\n def __init__(self, csv_path,img_path, transform=None):\n \"\"\"\n Args:\n csv_path (string): path to csv file\n img_path (string): path to the folder where images are\n transform: Optional transform to be applied on a sample.\n \"\"\"\n self.data = pd.read_csv(csv_path,sep=\";\")\n self.name_files = np.asarray(self.data.iloc[:, 0])\n self.labels = np.asarray(self.data.iloc[:, 1])\n self.img_path = img_path\n self.transform = transform\n \n def __getitem__(self, index):\n # stuff\n single_image_label = self.labels[index] -1\n if single_image_label==-1:\n single_image_label=9\n\n img_name = os.path.join(self.img_path,self.name_files[index])\n data = Image.open(img_name).convert('RGB')\n #data = data.resize((32, 32)) \n #data=cv2.imread(img_name)\n #data = cv2.resize(data, (32, 32)) \n #data = np.asarray(img)\n #data = np.transpose(data,(2,0,1))\n if self.transform is not None:\n data = 
self.transform(data)\n # If the transform variable is not empty\n # then it applies the operations in the transforms with the order that it is created.\n return (data, single_image_label)\n\n def __len__(self):\n return len(self.labels) \n\ndef get(batch_size, csv_path='', data_root='/tmp/public_dataset/pytorch', train=True, val=True, show=False, **kwargs):\n #data_root = os.path.expanduser(os.path.join(data_root, 'svhn-data'))\n num_workers = kwargs.setdefault('num_workers', 0)\n kwargs.pop('input_size', None)\n #print(\"Building SVHN data loader with {} workers\".format(num_workers))\n\n def target_transform(target):\n return int(target[0]) - 1\n\n ds = []\n if train:\n train_loader = torch.utils.data.DataLoader(\n MyDigitsDataset(\n csv_path=csv_path, img_path=data_root,\n transform=transforms.Compose([\n transforms.RandomApply(\n [transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.3),\n transforms.RandomRotation(15),\n transforms.RandomAffine(0,scale=(0.7,0.9)),\n transforms.RandomAffine(0,scale=(1.1,1.2)),\n transforms.RandomAffine(0,shear=10),\n ElasticTransform(1000,30)],p=0.5),\n transforms.Resize((32,32)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n ),\n batch_size=batch_size, shuffle=True, **kwargs)\n ds.append(train_loader)\n\n if val:\n test_loader = torch.utils.data.DataLoader(\n MyDigitsDataset(\n csv_path=csv_path, img_path=data_root,\n transform=transforms.Compose([\n transforms.Resize((32,32)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n ), \n batch_size=batch_size, shuffle=False, **kwargs)\n ds.append(test_loader)\n \n\n if show:\n show_loader = torch.utils.data.DataLoader(\n MyDigitsDataset(\n csv_path=csv_path, img_path=data_root,\n transform=transforms.Compose([ \n transforms.RandomApply(\n [transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.3),\n transforms.RandomRotation(15),\n transforms.RandomAffine(0,scale=(0.7,0.9)),\n transforms.RandomAffine(0,scale=(1.1,1.2)),\n transforms.RandomAffine(0,shear=10),\n ElasticTransform(1000,30)],p=0.5),\n #transforms.Resize((32,32)),\n transforms.ToTensor()\n ])\n ),\n batch_size=batch_size, shuffle=True, **kwargs)\n ds.append(show_loader)\n ds = ds[0] if len(ds) == 1 else ds\n return ds\n\n","sub_path":"svhn/dataset_digits.py","file_name":"dataset_digits.py","file_ext":"py","file_size_in_byte":4462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"334010734","text":"import numpy as np\n\n\ndef rbf(d, eps):\n return np.exp(-(d * eps) ** 2)\n\n\ndef distance(x, y):\n return np.sum(np.abs(x - y))\n\n\ndef pairwise_distances(X):\n D = np.zeros((len(X), len(X)))\n\n for i in range(len(X)):\n for j in range(len(X)):\n if i == j:\n continue\n\n d = distance(X[i], X[j])\n\n D[i][j] = d\n D[j][i] = d\n\n return D\n\n\ndef taxicab_sample(n, r):\n sample = []\n\n for _ in range(n):\n spread = r - np.sum([np.abs(x) for x in sample])\n sample.append(spread * (2 * np.random.rand() - 1))\n\n return np.random.permutation(sample)\n\n\nclass RBFUnderSampler:\n def __init__(self, gamma=0.05, n=None, scaled=False):\n self.gamma = gamma\n self.n = n\n self.scaled = scaled\n\n def fit_sample(self, X, y):\n if self.scaled:\n gamma = self.gamma * np.sqrt(X.shape[1])\n else:\n gamma = self.gamma\n\n classes = np.unique(y)\n sizes = [sum(y == c) for c in classes]\n majority_class = classes[np.argmax(sizes)]\n minority_class = classes[np.argmin(sizes)]\n\n if 
self.n is None:\n n = sum(y == majority_class) - sum(y == minority_class)\n else:\n n = self.n\n\n D = pairwise_distances(X)\n\n scores = np.zeros(len(X))\n\n for i in range(len(X)):\n if y[i] == minority_class:\n scores[i] = -np.inf\n else:\n score = 0.0\n\n for j in range(len(X)):\n multiplier = (1 if y[j] == majority_class else -1)\n score += multiplier * rbf(D[i][j], 1.0 / gamma)\n\n scores[i] = score\n\n discarded = []\n\n while len(discarded) < n:\n candidate = np.argmax(scores)\n discarded.append(candidate)\n\n for i in range(len(X)):\n scores[i] -= rbf(D[i][candidate], 1.0 / gamma)\n\n scores[candidate] = -np.inf\n\n return np.delete(X, discarded, axis=0), np.delete(y, discarded, axis=0)\n\n\nclass RBFOverSampler:\n def __init__(self, gamma=0.05, epsilon=0.05, n=None, n_samples=10, minimize=False, scaled=False):\n self.gamma = gamma\n self.epsilon = epsilon\n self.n = n\n self.n_samples = n_samples\n self.minimize = minimize\n self.scaled = scaled\n\n def fit_sample(self, X, y):\n if self.scaled:\n gamma = self.gamma * np.sqrt(X.shape[1])\n else:\n gamma = self.gamma\n\n classes = np.unique(y)\n sizes = [sum(y == c) for c in classes]\n majority_class = classes[np.argmax(sizes)]\n minority_class = classes[np.argmin(sizes)]\n\n if self.n is None:\n n = sum(y == majority_class) - sum(y == minority_class)\n else:\n n = self.n\n\n appended = []\n\n while len(appended) < n:\n points = []\n scores = []\n\n for _ in range(self.n_samples):\n idx = np.random.choice(range(len(X[y == minority_class])))\n point = X[y == minority_class][idx]\n point += self.epsilon * (2 * np.random.rand(*point.shape) - 1)\n score = 0.0\n\n for i in range(len(X)):\n multiplier = (1 if y[i] == majority_class else -1)\n score += multiplier * rbf(distance(X[i], point), 1.0 / gamma)\n\n points.append(point)\n scores.append(score)\n\n if self.minimize:\n i = np.argmin(scores)\n else:\n i = np.argmax(scores)\n\n appended.append(points[i])\n\n return np.concatenate([X, appended]), np.concatenate([y, minority_class * np.ones(len(appended))])\n\n\nclass RBFCombined:\n def __init__(self, alpha=0.05, gamma=0.05, epsilon=0.05, n_samples=10, minimize=False, scaled=False):\n self.alpha = alpha\n self.gamma = gamma\n self.epsilon = epsilon\n self.n_samples = n_samples\n self.minimize = minimize\n self.scaled = scaled\n\n def fit_sample(self, X, y):\n classes = np.unique(y)\n sizes = [sum(y == c) for c in classes]\n majority_class = classes[np.argmax(sizes)]\n minority_class = classes[np.argmin(sizes)]\n n = sum(y == majority_class) - sum(y == minority_class)\n n_undersample = int(self.alpha * n)\n n_oversample = n - n_undersample\n\n undersampler = RBFUnderSampler(gamma=self.gamma, n=n_undersample, scaled=self.scaled)\n X_undersampled, y_undersampled = undersampler.fit_sample(X, y)\n oversampler = RBFOverSampler(gamma=self.gamma, epsilon=self.epsilon, n=n_oversample, n_samples=self.n_samples,\n minimize=self.minimize, scaled=self.scaled)\n X_oversampled, y_oversampled = oversampler.fit_sample(X_undersampled, y_undersampled)\n\n return X_oversampled, y_oversampled\n\n\nclass RBFADASYN:\n def __init__(self, gamma=0.05, epsilon=0.025, n=None, n_samples=10, minimize=False, scaled=False):\n self.gamma = gamma\n self.epsilon = epsilon\n self.n = n\n self.n_samples = n_samples\n self.minimize = minimize\n self.scaled = scaled\n\n def fit_sample(self, X, y):\n if self.scaled:\n gamma = self.gamma * np.sqrt(X.shape[1])\n else:\n gamma = self.gamma\n\n classes = np.unique(y)\n sizes = [sum(y == c) for c in classes]\n majority_class = 
classes[np.argmax(sizes)]\n minority_class = classes[np.argmin(sizes)]\n minority = X[y == minority_class]\n\n if self.n is None:\n n = sum(y == majority_class) - sum(y == minority_class)\n else:\n n = self.n\n\n scores = []\n\n for observation in minority:\n score = 0.0\n\n for i in range(len(X)):\n multiplier = (1.0 if y[i] == majority_class else -1.0)\n score += multiplier * rbf(distance(X[i], observation), 1.0 / gamma) / len(X)\n\n scores.append(score)\n\n confidence = np.array(scores) - np.min(scores) + np.max([np.abs(np.max(scores) - np.min(scores)), 1e-6])\n confidence /= np.sum(confidence)\n synthetic_samples = [int(np.round(c * n)) for c in confidence]\n appended = []\n\n for i in range(len(minority)):\n observation = minority[i]\n\n for _ in range(synthetic_samples[i]):\n points = []\n scores = []\n\n for _ in range(self.n_samples):\n point = observation + self.epsilon * (2 * np.random.rand(*observation.shape) - 1)\n score = 0.0\n\n for j in range(len(X)):\n multiplier = (1.0 if y[j] == majority_class else -1.0)\n score += multiplier * rbf(distance(X[j], point), 1.0 / gamma) / len(X)\n\n points.append(point)\n scores.append(score)\n\n if self.minimize:\n j = np.argmin(scores)\n else:\n j = np.argmax(scores)\n\n appended.append(points[j])\n\n return np.concatenate([X, appended]), np.concatenate([y, minority_class * np.ones(len(appended))])\n\n\nclass CCR:\n def __init__(self, energy=None, n=None):\n self.energy = energy\n self.n = n\n\n def fit_sample(self, X, y):\n classes = np.unique(y)\n sizes = [sum(y == c) for c in classes]\n\n assert len(classes) == len(set(sizes)) == 2\n\n minority_class = classes[np.argmin(sizes)]\n majority_class = classes[np.argmax(sizes)]\n minority = X[y == minority_class]\n majority = X[y == majority_class]\n\n if self.n is None:\n n = len(majority) - len(minority)\n else:\n n = self.n\n\n if self.energy is None:\n energy = 0.25 * np.sqrt(X.shape[1])\n else:\n energy = self.energy\n\n distances = np.zeros((len(minority), len(majority)))\n\n for i in range(len(minority)):\n for j in range(len(majority)):\n distances[i][j] = distance(minority[i], majority[j])\n\n radii = np.zeros(len(minority))\n translations = np.zeros(majority.shape)\n\n for i in range(len(minority)):\n minority_point = minority[i]\n remaining_energy = energy\n r = 0.0\n sorted_distances = np.argsort(distances[i])\n current_majority = 0\n\n while True:\n if current_majority == len(majority):\n if current_majority == 0:\n radius_change = remaining_energy / (current_majority + 1.0)\n else:\n radius_change = remaining_energy / current_majority\n\n r += radius_change\n\n break\n\n radius_change = remaining_energy / (current_majority + 1.0)\n\n if distances[i, sorted_distances[current_majority]] >= r + radius_change:\n r += radius_change\n\n break\n else:\n if current_majority == 0:\n last_distance = 0.0\n else:\n last_distance = distances[i, sorted_distances[current_majority - 1]]\n\n radius_change = distances[i, sorted_distances[current_majority]] - last_distance\n r += radius_change\n remaining_energy -= radius_change * (current_majority + 1.0)\n current_majority += 1\n\n radii[i] = r\n\n for j in range(current_majority):\n majority_point = majority[sorted_distances[j]]\n d = distances[i, sorted_distances[j]]\n\n if d < 1e-20:\n majority_point += (1e-6 * np.random.rand(len(majority_point)) + 1e-6) * np.random.choice([-1.0, 1.0], len(majority_point))\n d = distance(minority_point, majority_point)\n\n translation = (r - d) / d * (majority_point - minority_point)\n 
translations[sorted_distances[j]] += translation\n\n majority += translations\n\n appended = []\n\n for i in range(len(minority)):\n minority_point = minority[i]\n synthetic_samples = int(np.round(1.0 / (radii[i] * np.sum(1.0 / radii)) * n))\n r = radii[i]\n\n for _ in range(synthetic_samples):\n appended.append(minority_point + taxicab_sample(len(minority_point), r))\n\n return np.concatenate([majority, minority, appended]), \\\n np.concatenate([np.tile([majority_class], len(majority)),\n np.tile([minority_class], len(minority) + len(appended))])\n","sub_path":"algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":10790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"13788806","text":"# =====================================================================================\n# PROBLEM A2 \n#\n# Build a Neural Network Model for Horse or Human Dataset.\n# The test will expect it to classify binary classes. \n# Your input layer should accept 150x150 with 3 bytes color as the input shape.\n# Don't use lambda layers in your model.\n#\n# The dataset used in this problem is created by Laurence Moroney (laurencemoroney.com).\n#\n# Desired accuracy and validation_accuracy > 83%\n# ======================================================================================\n\nimport urllib.request\nimport zipfile\nfrom numpy.lib.financial import rate\nimport tensorflow as tf\nimport os\nfrom keras_preprocessing.image import ImageDataGenerator\nfrom tensorflow import keras\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.python.keras.backend import dropout\n\n\ndef solution_A2():\n #data_url_1 = 'https://dicodingacademy.blob.core.windows.net/picodiploma/Simulation/machine_learning/horse-or-human.zip'\n #urllib.request.urlretrieve(data_url_1, 'horse-or-human.zip')\n #local_file = 'horse-or-human.zip'\n #zip_ref = zipfile.ZipFile(local_file, 'r')\n #zip_ref.extractall('data/horse-or-human')\n\n #data_url_2 = 'https://dicodingacademy.blob.core.windows.net/picodiploma/Simulation/machine_learning/validation-horse-or-human.zip'\n #urllib.request.urlretrieve(data_url_2, 'validation-horse-or-human.zip')\n #local_file = 'validation-horse-or-human.zip'\n #zip_ref = zipfile.ZipFile(local_file, 'r')\n #zip_ref.extractall('data/validation-horse-or-human')\n #zip_ref.close()\n\n\n TRAINING_DIR = 'data/horse-or-human'\n VALIDATION_DIR = 'data/validation-horse-or-human'\n\n # YOUR CODE HERE\n train_datagen = ImageDataGenerator(rescale = 1./255)\n\n train_generator = train_datagen.flow_from_directory(\n TRAINING_DIR, \n target_size=(150, 150), \n batch_size=128,\n class_mode='binary')\n\n test_datagen = ImageDataGenerator(rescale = 1./255 )\n\n validation_generator = test_datagen.flow_from_directory(VALIDATION_DIR,\n batch_size = 32,\n class_mode = 'binary', \n target_size = (150, 150)) \n\n model = tf.keras.models.Sequential([\n # YOUR CODE HERE, end with a Neuron Dense, activated by sigmoid\n tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(150, 150, 3)),\n tf.keras.layers.MaxPooling2D(2,2),\n tf.keras.layers.Conv2D(32, (3,3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2,2),\n tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2,2),\n tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2,2),\n tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2,2),\n tf.keras.layers.Flatten(),\n 
tf.keras.layers.Dense(512, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n ])\n from tensorflow.keras.optimizers import Adam\n\n model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0003), metrics=['accuracy'])\n model.fit(train_generator,steps_per_epoch=8, epochs=100, verbose=1, validation_data = validation_generator, validation_steps= 8 )\n\n return model\n\n\n# The code below is to save your model as a .h5 file.\n# It will be saved automatically in your Submission folder.\nif __name__ == '__main__':\n model = solution_A2()\n model.save(\"model_A2.h5\")","sub_path":"Submission A/Problem_A2.py","file_name":"Problem_A2.py","file_ext":"py","file_size_in_byte":3748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"607667514","text":"from pathlib import Path\n\nimport numpy as np\nimport torch\nfrom itertools import chain\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.nn.utils import parameters_to_vector\nfrom itertools import chain\nfrom math import ceil\n\n\nclass RBM():\n\n @classmethod\n def from_weights(cls, weights_dir):\n pathdir = Path(weights_dir)\n visible_bias = torch.load(pathdir / 'visible_bias.pt')\n hidden_bias = torch.load(pathdir / 'hidden_bias.pt')\n n_vis = visible_bias.shape[0]\n n_hin = hidden_bias.shape[0]\n rbm = RBM(n_vis=n_vis, n_hin=n_hin)\n rbm.load_params(weights_dir)\n return rbm\n\n def to(self, param):\n self.weights = self.weights.to(param)\n self.hidden_bias = self.hidden_bias.to(param)\n self.visible_bias = self.visible_bias.to(param)\n return self\n\n def __init__(self, n_vis, n_hin):\n super(RBM, self).__init__()\n self.n_vis = n_vis\n self.n_hin = n_hin\n\n self.initialize_parameters()\n\n def initialize_parameters(self):\n self.weights = torch.randn(\n self.n_hin,\n self.n_vis,\n dtype=torch.double\n ) / np.sqrt(self.n_vis)\n\n self.visible_bias = torch.zeros(self.n_vis, dtype=torch.double)\n self.hidden_bias = torch.zeros(self.n_hin, dtype=torch.double)\n\n def effective_energy(self, v):\n v = v.to(self.weights)\n visible_bias_term = torch.matmul(v, self.visible_bias)\n hid_bias_term = F.softplus(F.linear(v, self.weights, self.hidden_bias)).sum(-1)\n\n return -(visible_bias_term + hid_bias_term)\n\n def effective_energy_gradient(self, v):\n v = (v.unsqueeze(0) if v.dim() < 2 else v).to(self.weights)\n prob = self.prob_h_given_v(v)\n\n W_grad = -torch.matmul(prob.transpose(0, -1), v)\n vb_grad = -torch.sum(v, 0)\n hb_grad = -torch.sum(prob, 0)\n return W_grad, vb_grad, hb_grad\n\n def prob_v_given_h(self, h):\n return (\n torch.matmul(h, self.weights.data, out=None)\n .add_(self.visible_bias.data)\n .sigmoid_()\n .clamp_(min=0, max=1)\n )\n\n def prob_h_given_v(self, v):\n return (\n torch.matmul(v, self.weights.data.t(), out=None)\n .add_(self.hidden_bias.data)\n .sigmoid_()\n .clamp_(min=0, max=1)\n )\n\n def sample_v_given_h(self, h):\n v = self.prob_v_given_h(h)\n v = torch.bernoulli(v) # overwrite v with its sample\n return v\n\n def sample_h_given_v(self, v):\n h = self.prob_h_given_v(v)\n h = torch.bernoulli(h) # overwrite h with its sample\n return h\n\n def draw_samples(self, k, initial_state):\n v = (initial_state.clone()).to(self.weights)\n h = torch.zeros(*v.shape[:-1], self.n_hin).to(self.weights)\n\n for _ in range(k):\n h = self.sample_h_given_v(v)\n v = self.sample_v_given_h(h)\n\n return v\n\n def wavefunction(self, v):\n return (-self.effective_energy(v)).exp().sqrt()\n\n def gradients(self, batch):\n grads_W, grads_vb, 
grads_hb = self.effective_energy_gradient(batch)\n grads_W /= float(batch.shape[0])\n grads_vb /= float(batch.shape[0])\n grads_hb /= float(batch.shape[0])\n\n return grads_W, grads_vb, grads_hb\n\n def compute_batch_gradients(self, k, pos_phase_batch, neg_phase_batch):\n grads_W, grads_vb, grads_hb = self.gradients(pos_phase_batch)\n\n vk = self.draw_samples(k, neg_phase_batch)\n neg_grads_W, neg_grads_vb, neg_grads_hb = self.gradients(vk)\n\n grads_W -= neg_grads_W\n grads_vb -= neg_grads_vb\n grads_hb -= neg_grads_hb\n\n return grads_W, grads_vb, grads_hb\n\n def shuffle_data(self, data, batch_size):\n permutation = torch.randperm(data.shape[0])\n data = [\n data[batch_start: (batch_start + batch_size)]\n for batch_start in range(0, len(data), batch_size)\n ]\n return data\n\n def params(self):\n return self.weights, self.visible_bias, self.hidden_bias\n\n def save_params(self, dir):\n pathdir = Path(dir)\n if not pathdir.exists():\n pathdir.mkdir(parents=True)\n\n torch.save(self.weights, pathdir / 'weights.pt')\n torch.save(self.visible_bias, pathdir / 'visible_bias.pt')\n torch.save(self.hidden_bias, pathdir / 'hidden_bias.pt')\n\n def load_params(self, dir):\n pathdir = Path(dir)\n self.weights = torch.load(pathdir / 'weights.pt')\n self.visible_bias = torch.load(pathdir / 'visible_bias.pt')\n self.hidden_bias = torch.load(pathdir / 'hidden_bias.pt')\n\n def update_params(self, grads, lr):\n self.weights -= lr * grads[0]\n self.visible_bias -= lr * grads[1]\n self.hidden_bias -= lr * grads[2]\n\n def train(self, input_data, k=10, batch_size=100, lr=0.01):\n\n num_batches = ceil(input_data.shape[0] / batch_size)\n pos_batches = self.shuffle_data(input_data, batch_size)\n neg_batches = self.shuffle_data(input_data, batch_size)\n\n for b in range(num_batches):\n all_gradients = self.compute_batch_gradients(k, pos_batches[b], neg_batches[b])\n\n self.update_params(all_gradients, lr)\n\n def partition_function(self, space):\n logZ = (-self.effective_energy(space)).logsumexp(0)\n return logZ.exp()\n\n def generate_hilbert_space(self):\n dim = np.arange(2 ** self.n_vis)\n space = ((dim[:, None] & (1 << np.arange(self.n_vis))) > 0)[:, ::-1]\n space = space.astype(int)\n return torch.tensor(space, dtype=torch.double)\n\n def psi(self):\n space = self.generate_hilbert_space()\n return self.wavefunction(space) / self.partition_function(space).sqrt()\n\n def log_likelihood(self, data):\n return self.effective_energy(data).sum().item() / len(data)\n","sub_path":"Project_1_RBM_and_Tomography/RBM_helper.py","file_name":"RBM_helper.py","file_ext":"py","file_size_in_byte":5933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"208574835","text":"#!/usr/bin/env python\n\"\"\"\nString-related functions.\n\nHistory\n create - Feng Zhou (zhfe99@gmail.com), 2014-12\n modify - Feng Zhou (zhfe99@gmail.com), 2016-01\n\"\"\"\nimport re\nfrom cell import zeros\n\n\ndef strLstPre(lst0, pre):\n \"\"\"\n Return a sub-list of string that starts with the specified prefix.\n\n Input\n lst0 - original string list, 1 x n0\n pre - prefix\n\n Output\n lst - new string list, 1 x n\n \"\"\"\n lst = []\n for str0 in lst0:\n if str0[: len(pre)] == pre:\n lst.append(str0)\n\n return lst\n\n\ndef strLstPat(lst0, pats):\n \"\"\"\n Return a sub-list of string that match with the specified pattern.\n\n Input\n lst0 - original string list, n0 x\n pats - pattern list, m x\n\n Output\n lst - new string list, n x\n \"\"\"\n lst = []\n for str0 in lst0:\n for pat in pats:\n if 
re.match(pat, str0):\n lst.append(str0)\n break\n\n return lst\n\n\ndef strDelSub(name0):\n \"\"\"\n Remove subfix from a file name.\n\n Input\n name0 - original name\n\n Output\n name - new name\n \"\"\"\n tail = name0.find('.')\n if tail == -1:\n name = name0\n else:\n name = name0[: tail]\n\n return name\n\n\ndef strGetSub(name):\n \"\"\"\n Get subfix from a file name.\n\n Input\n name - file name\n\n Output\n subx - subfix\n \"\"\"\n tail = name.find('.')\n if tail == -1:\n subx = ''\n else:\n subx = name[tail + 1:]\n\n return subx\n\n\ndef strRepSub(name0, subx):\n \"\"\"\n Replace subfix to the given one.\n\n Input\n name0 - original name\n subx - new subx\n\n Output\n name - new name\n \"\"\"\n name = strDelSub(name0)\n\n return name + '.' + subx\n\n\ndef strNumCo(s):\n \"\"\"\n Count the number character in a given string.\n\n Example\n in: s = 'a32b'\n call: co = strNumCo()\n out: co = 2\n\n Input\n s - string\n\n Output\n co - #number character\n \"\"\"\n co = 0\n for c in s:\n if c >= '0' and c < '9':\n co += 1\n return co\n\n\ndef strLst2Float(arrS):\n \"\"\"\n Convert a string list to a float list.\n\n Input\n arrS - string list, n x 1 (str)\n\n Output\n arrF - float list, n x 1 (float)\n \"\"\"\n arrF = [float(s.strip()) for s in arrS]\n return arrF\n\n\ndef strLst1NotIn2(arr1, arr2):\n \"\"\"\n Compare two string lists to find the strings in arr1 that\n are not contained in arr2.\n\n Input\n arr1 - array 1, n1 x 1\n arr2 - array 2, n2 x 1\n\n Output\n arrD - different elements in arr1, nD x 1\n \"\"\"\n arrD = []\n m2 = len(arr2)\n vis = zeros(m2)\n for s1 in arr1:\n found = False\n for i2, s2 in enumerate(arr2):\n if vis[i2]:\n continue\n\n if s1 == s2:\n vis[i2] = 1\n found = True\n break\n if not found:\n arrD.append(s1)\n return arrD\n\n\ndef str2ran(s):\n \"\"\"\n Convert a string range to an integer list.\n\n Example 1\n input: s = '1'\n call: lst = str2ran(s)\n output: lst = 1\n\n Example 2\n input: s = '2:10'\n call: lst = str2ran(s)\n output: lst = [2, 3, 4, 5, 6, 7, 8, 9]\n\n Example 3\n input: s = '2:10:2'\n call: lst = str2ran(s)\n output: lst = [2, 4, 6, 8]\n\n Example 4\n input: s = '1,3'\n call: lst = str2ran(s)\n output: lst = [1,3]\n\n Example 5\n input: s = ''\n call: lst = str2ran(s)\n output: lst = []\n\n Input\n s - string\n\n Output\n lst - an integer list\n \"\"\"\n if len(s) == 0:\n lst = []\n\n elif ':' in s:\n parts = s.split(':')\n a = [int(part) for part in parts]\n\n if len(parts) == 1:\n lst = a\n elif len(parts) == 2:\n lst = range(a[0], a[1])\n elif len(parts) == 3:\n lst = range(a[0], a[1], a[2])\n else:\n raise Exception('unsupported')\n\n elif ',' in s:\n parts = s.split(',')\n lst = [int(part) for part in parts]\n\n else:\n lst = [int(s)]\n\n return lst\n","sub_path":"str.py","file_name":"str.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"276378213","text":"from rest_framework.views import APIView\r\nfrom rest_framework.response import Response\r\nfrom rest_framework import status\r\nfrom products.serializers import ProductSerializer\r\n\r\n\r\n# Create your views here.\r\n\r\n\r\nclass PostMultiple(APIView):\r\n def post(self, request, format=None):\r\n\r\n rejected = []\r\n accepted = []\r\n for req in request.data:\r\n\r\n serializer = ProductSerializer(data=req)\r\n\r\n if serializer.is_valid():\r\n serializer.save()\r\n accepted.append(serializer.data)\r\n else:\r\n rejected.append(serializer.data)\r\n if len(rejected) == 0:\r\n 
return Response(accepted, status=status.HTTP_201_CREATED)\r\n elif len(accepted) == 0:\r\n return Response(rejected, status=status.HTTP_400_BAD_REQUEST)\r\n return Response(rejected, status=status.HTTP_207_MULTI_STATUS)\r\n","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"294035499","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport person_pb2\n\n# 为 all_person 填充数据\npers = person_pb2.all_person()\np1 = pers.Per.add()\np1.id = 1\np1.name = 'xieyanke'\np2 = pers.Per.add()\np2.id = 2\np2.name = 'pythoner'\n\n\n# gen protobuf\ndata = pers.SerializeToString()\n#print data\n\n# parse protobuf\ntarget = person_pb2.all_person()\ntarget.ParseFromString(data)\nprint(target.Per[1].name)\n\n\n\n","sub_path":"protobuf/main_person.py","file_name":"main_person.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"3184355","text":"\"\"\"Pydantic attempts to provide \"strongly typed\" data structures to Python.\n\nFeatures include:\n\n* Type hinting at runtime\n* Data validation\n\nPydantic has two model object base classes: BaseModel and dataclass. By default,\nyou'll want to use `BaseModel`.\n\npydantic.dataclasses are a drop in replacement for Python dataclasses which adds\ntype validation.\n\nDifferences between BaseModel and dataclasses include:\n\n* Mutable field initializers. dataclass requires a default_factory\n* BaseModel handles extra fields\n\"\"\"\n\n#\n# `from __future__ import annotations`` allows you to reference a type as an\n# annotation within the type declaration.\n#\n# For example, we define the attr `children` as a list of `Person`. In order to\n# refer to ``Person`, we must import annotations.`\n#\nfrom __future__ import annotations\n\nimport pydantic\nimport pytest\n\n\ndef test_frozen() -> None:\n class Person(pydantic.BaseModel):\n # Example recursive model\n name: str\n children: list[Person] = []\n\n class Config:\n frozen = True\n\n p = Person(name=\"damon\")\n p.children.append(Person(name=\"kari\"))\n\n with pytest.raises(TypeError):\n p.name = \"no\"\n\n\n\n\ndef test_pydantic() -> None:\n class A(pydantic.BaseModel):\n # By default, Pydantic does not reuse field initializers across\n # instances\n lst: list[int] = pydantic.Field(\n default_factory=list, description=\"A list\", max_items=1\n )\n\n class Config:\n \"\"\"Config allows you to customize Pydantic's behavior\"\"\"\n\n # whether to `ignore`, `allow`, or `forbid` extra attributes during\n # model initialization. default: pydantic.Extra.ignore\n extra = pydantic.Extra.allow\n validate_all = True # whether to validate field defaults\n\n @pydantic.dataclasses.dataclass\n class B:\n lst: list[int] = pydantic.Field(default_factory=list)\n\n a, a2 = A(extra=\"123\"), A()\n b, b2 = B(), B()\n\n a.lst.append(1)\n b.lst.append(2)\n\n assert a.extra == \"123\"\n assert a.lst == [1]\n assert a2.lst == []\n\n assert b.lst == [2]\n assert b2.lst == []\n\n #\n # Pydantic's field validation and rules are only enforced during\n # initialization set directly. For example, appending to a mutable list will\n # *not* raise validation errors.\n #\n # Is this configurable? 
Shouldn't validation be ran for each field set by\n # default?\n #\n a.lst.append(2)\n assert len(a.lst) == 2\n a.lst = [1, 2]\n\n with pytest.raises(ValueError) as ve:\n A(lst=[1, 2])\n assert ve.match(\"max_items\")\n","sub_path":"tests/libraries/test_pydantic.py","file_name":"test_pydantic.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"560160546","text":"import hashlib, pickle\n\ndef UTXO_Scan(pub_addr):\n with open ('ledger.txt', 'rb') as file:\n k = pickle.load(file)\n ledger = list(k)\n \n block_num = 0\n utxo_scan = True\n while utxo_scan == True:\n \n try:\n # Searches for Public Addr in ledger\n obj = ledger[block_num][2][0][2]\n if obj == str(pub_addr):\n utxo_scan = False\n return (ledger[block_num][2][0][0])\n else:\n block_num = int(block_num) + 1\n\n except IndexError:\n return ('Invalid Transaction')\n\ndef Make_Hash(obj):\n return str(hashlib.sha256(codecs.encode(str(obj), 'utf-8')).hexdigest())\n\n\nwith open ('ledger.txt', 'rb') as file:\n k = pickle.load(file)\n\nprint(k)\n","sub_path":"ledger_scan_function.py","file_name":"ledger_scan_function.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"187630475","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 15 10:58:52 2019\n\n@author: parthgoyal123\n\"\"\"\n\n''' --------- K-Means Clustering ----------- '''\n\n# ====== Preprocessing ====== #\n\n# Importing the required libraries\n# ---> Numpy arrays are the most convinient way to work on Machine Learning models\n# ---> Matplotlib allows us to visualise our model in form of various plots/figures\n# ---> Pandas allows us to import the dataset efficiently\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset using pandas library\n# ---> pandas create a dataframe of the dataset\n# ---> iloc : It locates the column by its index. 
In other words, using ’iloc’ allows us to take columns by just taking their index.\n# ---> .values : It returns the values of the column (by their index) inside a Numpy array(way more efficient than a list)\ndataset = pd.read_csv(\"Mall_Customers.csv\")\nX = dataset.iloc[:, [3,4]].values.astype(float)\n\n\"\"\"\n# Fixing the missing values from the dataset using sklearn.impute\n# ---> Importing the SimpleImputer class from the sklearn.impute library\n# ---> .fit : The fit part is used to extract some info of the data on which the object is applied\n# ---> .transform : the transform part is used to apply some transformation\nfrom sklearn.impute import SimpleImputer\nimputer = SimpleImputer(missing_values = np.nan, strategy = 'mean')\nX[:, [1,2]] = imputer.fit_transform(X[:, [1,2]])\n\n# Encoding categorical data (optional part, depends on the dataset)\n# ---> LabelEncoder encodes the categorical data to [0, n_values]\n# ---> OneHotEncoder seperates the LabelEncoded values to different columns, appended leftmost of the dataset\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_x = LabelEncoder()\nlabelencoder_y = LabelEncoder()\nonehotencoder = OneHotEncoder(categorical_features= [-1])\nX[:, -1] = labelencoder_x.fit_transform(X[:, -1])\nX = onehotencoder.fit_transform(X).toarray()\n\n# When the categorical types >= 3, we need to avoid the dummy variable trap\nX = X[:, 1:]\n\"\"\"\n\n# Dividing the dataset to training and test set\n# ---> train_test_split : function of the sklearn.model_selection library that splits the dataset to training and test sets\n# ---> random_state : this parameter is essential to get the same training and test splits(when improving the model time by time)\nfrom sklearn.model_selection import train_test_split\nX_train, X_test = train_test_split(X, test_size = 0.1, random_state = 1)\n\n\"\"\"\n# Feature Scaling the data\n# ---> StandardScaler : no parameters are to be passed to this class, only an object is to be made\nfrom sklearn.preprocessing import StandardScaler\nscale_X = StandardScaler()\nX_train = scale_X.fit_transform(X_train)\nX_test = scale_X.fit_transform(X_test)\n\"\"\"\n\n# ========= Clustering ========= #\n\n# ---- K-Means Clustering ----- #\n\n# Using the Elbow method to get the correct number of clusters\n# ---> wcss : within cluster sum of squares\n# ---> init : 'kmeans++' is an algorithm for initialising the n_init clusters\nfrom sklearn.cluster import KMeans\nwcss = []\nfor i in range(1,11):\n k_means = KMeans(n_clusters = i, init = 'k-means++', n_init = 10, max_iter = 300)\n k_means.fit(X_train)\n wcss.append(k_means.inertia_)\n\n# Plotting the Elbow curve to get the optimal number of clusters\nfig = plt.figure(dpi = 100, figsize = (8,6))\nplt.plot(range(1,11), wcss, color = 'blue')\nplt.title('Elbow Mehtod for Optimal number of clusters')\nplt.xlabel('Number of Clusters')\nplt.ylabel('WCSS')\nplt.show()\nfig.savefig('Elbow_Method.png')\n\n# ---> From elbow method, we observe that the major difference occurs at Number of clusters = 5\n# Applying K-means with correct number of clusters\nk_means = KMeans(n_clusters = 5, init = 'k-means++', n_init = 10, max_iter = 300)\nk_means.fit(X)\ny_kmeans = k_means.predict(X_train)\ny_pred_test = k_means.predict(X_test)\n\n# Visualising the Clusters of Training set \nfig =plt.figure(dpi = 100, figsize = (8,6))\nplt.scatter(X_train[y_kmeans == 0, 0], X_train[y_kmeans == 0, 1], color = 'red', s=30, marker = '*', label = 'Cluster 1')\nplt.scatter(X_train[y_kmeans == 1, 0], 
X_train[y_kmeans == 1, 1], color = 'blue', s=30, marker = '*', label = 'Cluster 2')\nplt.scatter(X_train[y_kmeans == 2, 0], X_train[y_kmeans == 2, 1], color = 'green', s=30, marker = '*', label = 'Cluster 3')\nplt.scatter(X_train[y_kmeans == 3, 0], X_train[y_kmeans == 3, 1], color = 'magenta', s=30, marker = '*', label = 'Cluster 4')\nplt.scatter(X_train[y_kmeans == 4, 0], X_train[y_kmeans == 4, 1], color = 'cyan', s=30, marker = '*', label = 'Cluster 5')\nplt.scatter(k_means.cluster_centers_[:, 0], k_means.cluster_centers_[:, 1], color = 'yellow', s=100, label = 'Centroid')\nplt.title('K-Means Clustering')\nplt.xlabel('')\nplt.ylabel('')\nplt.legend()\nplt.show()\nfig.savefig('K-Means_training.png')\n\n# Visualising the Clusters of Test set \nfig =plt.figure(dpi = 100, figsize = (8,6))\nplt.scatter(X_test[y_pred_test == 0, 0], X_test[y_pred_test == 0, 1], color = 'red', s=30, marker = '*', label = 'Cluster 1')\nplt.scatter(X_test[y_pred_test == 1, 0], X_test[y_pred_test == 1, 1], color = 'blue', s=30, marker = '*', label = 'Cluster 2')\nplt.scatter(X_test[y_pred_test == 2, 0], X_test[y_pred_test == 2, 1], color = 'green', s=30, marker = '*', label = 'Cluster 3')\nplt.scatter(X_test[y_pred_test == 3, 0], X_test[y_pred_test == 3, 1], color = 'magenta', s=30, marker = '*', label = 'Cluster 4')\nplt.scatter(X_test[y_pred_test == 4, 0], X_test[y_pred_test == 4, 1], color = 'cyan', s=30, marker = '*', label = 'Cluster 5')\nplt.scatter(k_means.cluster_centers_[:, 0], k_means.cluster_centers_[:, 1], color = 'yellow', s=100, label = 'Centroid')\nplt.title('K-Means Clustering')\nplt.xlabel('')\nplt.ylabel('')\nplt.legend()\nplt.show()\nfig.savefig('K-Means_test.png')\n\n# ============ K-Means Clustering Complete ============ #","sub_path":"4. Clustering/1. K-Means Clustering/K-Means_Clustering.py","file_name":"K-Means_Clustering.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"539096160","text":"import os\nimport shutil\n\nimport numpy as np\nimport cv2\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom pathlib import Path\nfrom typing import Tuple, Union, Dict\n\nfrom ptf import const\n\n\ndef remove_directory(dir: Union[str, Path]) -> None:\n shutil.rmtree(str(dir), ignore_errors=True)\n\n\ndef load_image(\n path: Union[str, Path],\n to_rgb: bool = True\n) -> np.ndarray:\n image = cv2.imread(str(path), cv2.IMREAD_COLOR)\n if to_rgb:\n return image[:, :, ::-1] # BGR -> RGB\n else:\n return image\n\n\ndef load_mask(path: Union[str, Path]) -> np.ndarray:\n mask = cv2.imread(str(path), cv2.IMREAD_GRAYSCALE)\n return mask\n\n\ndef save_img(\n img: np.ndarray,\n name: Union[str, int],\n output_dir: Union[str, Path],\n img_ext: str = '.jpg'\n) -> bool:\n # create outpud directory if needed\n output_dir = Path(output_dir)\n output_dir.mkdir(parents=True, exist_ok=True)\n # result path\n img_path = output_dir / f'{name}{img_ext}'\n return cv2.imwrite(str(img_path), img)\n\n\ndef show_img(\n img: np.ndarray,\n mask: np.ndarray = None,\n fig_size: Tuple[float, float] = (15.0, 15.0),\n to_rgb: bool = False\n) -> None:\n new_fig_size = list(fig_size)\n orig_fig_size = list(matplotlib.rcParams['figure.figsize'])\n if new_fig_size != orig_fig_size:\n matplotlib.rcParams['figure.figsize'] = new_fig_size\n\n if mask is not None:\n img = cv2.bitwise_and(img, img, mask=mask)\n\n img_shape = img.shape\n\n if len(img_shape) == 2:\n plt.imshow(img, cmap='gray')\n else:\n if img_shape[2] == 4:\n img = 
img[..., 0:3]\n if to_rgb:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n plt.imshow(img)\n\n plt.show()\n\n\ndef num_cpus() -> int:\n \"Get number of cpus\"\n try:\n return len(os.sched_getaffinity(0))\n except AttributeError:\n return os.cpu_count()\n","sub_path":"ptf/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"160453792","text":"import math\n\n\n#2.3-2\ndef merge(A, p, q, r):\n L = A[p:q]\n R = A[q:r]\n\n i, j, k = 0, 0, p\n while i < len(L) and j < len(R):\n if L[i] < R[j]:\n A[k] = L[i]\n i += 1\n else:\n A[k] = R[j]\n j += 1\n k += 1\n\n if i < len(L):\n while i < len(L):\n A[k] = L[i]\n k += 1\n i += 1\n elif j < len(R):\n while j < len(R):\n A[k] = R[j]\n k += 1\n j += 1\n\n return A\n\n\ndef merge_sort(A, p, r):\n if r - p > 1:\n q = math.floor((p + r) / 2)\n merge_sort(A, p, q)\n merge_sort(A, q, r)\n ret = merge(A, p, q, r)\n print(ret)\n return ret\n\n\nprint(merge_sort([3, 41, 52, 26, 38, 57, 9, 49], 0, 8))\n\n\n#2.3-5\ndef binary_search(A, p, q, val):\n if q >= len(A):\n q = len(A) - 1\n if q < p:\n return -1\n mid = math.floor((p + q) / 2)\n if val == A[mid]:\n return mid\n elif val < A[mid]:\n return binary_search(A, p, mid - 1, val)\n else:\n return binary_search(A, mid + 1, q, val)\n\n\n","sub_path":"chpter2/prob2.3.py","file_name":"prob2.3.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"522480614","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nUSAGE\n$ python make_synapse.py [target_directory]\n\"\"\"\n\nimport os\nimport sys\nimport random\n\n\"\"\"\ncomps_to_ln[300_301 or 301_300][pre or post] by Park\ncomps_to_pn[300_200 or 301_200][pre or post] by Park\n\"\"\"\n\ncomps_to_ln = \\\n[[[21015,14644, 8240,15191, 8804,21375, 8239,15706,15171,21088],\\\n[11079,10877,11534,11093,11257,10351,11580,11879,11949,11038]],\\\n[[10351,10877,11083,10380,11422,11681,10351,11083,11219,11093],\\\n[21374,15191, 8242,21309, 8239, 9486,21436, 8804,21088,15070]]]\n\ncomps_to_pn = \\\n[[[2739, 2730, 2366, 2363, 2055, 2050, 1809, 1801, 1590, 1579],\\\n[110, 107, 104, 101, 98, 97, 94, 114, 109, 102]],\\\n[[3572, 4958, 5487, 1846, 6691, 7140, 3610, 3524, 2733, 4517],\\\n[101, 102, 94, 98, 125, 119, 114, 108, 103, 93]]]\n\n\ndef make_synapse_Park():\n gid_to_ln = 3000000\n gid_to_pn = 2000000\n\n n = 10\n\n for file in files:\n pre_cell, post_cell, _ = file.split(\"_\")\n if post_cell[0] == \"3\":\n with open(target + file, \"w\") as f:\n f.write(\"$ PRE_CELL %s\\n\" % pre_cell)\n f.write(\"$ POST_CELL %s\\n\" % post_cell)\n f.write(\"$ NCONNECTIONS %d\\n\" % n)\n index = int(pre_cell[2])\n for i in xrange(n):\n f.write(\"%d %d %d\\n\" % (comps_to_ln[index][0][i], comps_to_ln[index][1][i], gid_to_ln+i))\n gid_to_ln += n\n elif post_cell[0] == \"2\":\n with open(target + file, \"w\") as f:\n f.write(\"$ PRE_CELL %s\\n\" % pre_cell)\n f.write(\"$ POST_CELL %s\\n\" % post_cell)\n f.write(\"$ NCONNECTIONS %d\\n\" % n)\n index = int(pre_cell[2])\n for i in xrange(10):\n f.write(\"%d %d %d\\n\" % (comps_to_pn[index][0][i], comps_to_pn[index][1][i], gid_to_pn+i))\n gid_to_pn += n\n\n\ndef make_synapse_Arase(n):\n gid_to_ln = 3000000\n gid_to_pn = 2000000\n\n for file in files:\n pre_cell, post_cell, _ = file.split(\"_\")\n if post_cell[0] == \"3\":\n with open(target + file, \"w\") as f:\n f.write(\"$ PRE_CELL %s\\n\" % pre_cell)\n f.write(\"$ POST_CELL %s\\n\" % 
post_cell)\n f.write(\"$ NCONNECTIONS %d\\n\" % nconnections)\n pre_index = int(pre_cell[2])\n post_index = int(post_cell[2])\n for i in xrange(n):\n if i < len(comps_to_ln[pre_index][0]):\n f.write(\"%d %d %d\\n\" % (comps_to_ln[pre_index][0][i], comps_to_ln[pre_index][1][i], gid_to_ln+i))\n else:\n while True:\n pre_comp = random.randint(1, n_comps[pre_index])\n post_comp = random.randint(1, n_comps[post_index])\n if not (pre_comp in comps_to_ln[pre_index][0] and post_comp in comps_to_ln[pre_index][1]):\n break\n f.write(\"%d %d %d\\n\" % (pre_comp, post_comp, gid_to_ln+i))\n gid_to_ln += n\n elif post_cell[0] == \"2\":\n with open(target + file, \"w\") as f:\n f.write(\"$ PRE_CELL %s\\n\" % pre_cell)\n f.write(\"$ POST_CELL %s\\n\" % post_cell)\n f.write(\"$ NCONNECTIONS %d\\n\" % n)\n index = int(pre_cell[2])\n for i in xrange(10):\n f.write(\"%d %d %d\\n\" % (comps_to_pn[index][0][i], comps_to_pn[index][1][i], gid_to_pn+i))\n gid_to_pn += n\n\n\nif __name__ == \"__main__\":\n target = os.path.abspath(sys.argv[1]) + \"/\"\n files = os.listdir(target)\n\n nconnections = 10\n n_comps = [22928-5, 12525-5] #[300, 301]\n\n # make_synapse_Park()\n make_synapse_Arase(nconnections)\n","sub_path":"input/synapse_list/make_synapse.py","file_name":"make_synapse.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"383298103","text":"\"\"\"Write the function valid_email(...) to check if the input string is a valid email address or not.\n\nAn email is a string (a subset of ASCII characters) separated into two parts by @ symbol, a “user_info” and a domain_info, that is personal_info@domain_info:\nin case of correct email the function should be displayed the corresponding message – \"Email is valid\"\nin case of incorrect email the function should be displayed the corresponding message – \"Email is not valid\"\n\nNote: in the function you must use the \"try except\" construct.\n\n\nFunction example:\n\nvalid_email(\"trafik@ukr.tel.com\") #output: \"Email is valid\"\n\nvalid_email(\"trafik@ukr_tel.com\") #output: \"Email is not valid\"\n\nvalid_email(\"tra@fik@ukr.com\") #output: \"Email is not valid\"\n\nvalid_email(\"ownsite@our.c0m\") #output: \"Email is not valid\"\n\n\n\"\"\"\n\ndef valid_email(email):\n import re\n pattern = \"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$\"\n try:\n if (re.search(pattern,email)):\n return f\"Email is valid\"\n else:\n raise ValueError(\"Email is not valid\")\n except ValueError as e:\n return e\n","sub_path":"sprint_05_[Exception handling]/Task_1.py","file_name":"Task_1.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"314316446","text":"from random import choice\nfrom time import sleep\nfrom os import path\n\n\nclass Player:\n\n def __init__(self, name, money=200):\n self.name = name\n self.money = money\n self.cards = [] # [suit, number]\n self.current_hand = [] # number only\n self.win = 0\n self.high_score = 0\n self.test = []\n self.max_money = 200\n self.bet = 0\n self.load_data()\n\n def set_up(self, deck):\n self.current_hand = []\n self.cards = []\n self.draw_card(deck)\n self.draw_card(deck)\n self.current_hand = [card[1] for card in self.cards]\n # self.display_cards()\n\n def draw_card(self, deck, display=False):\n deck.check_deck()\n random_suit = choice(list(deck.cards))\n random_number = choice(deck.cards[random_suit])\n 
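valid_email in the record above raises and immediately catches its own ValueError, so both outcomes come back as return values rather than exceptions. Note also that its TLD class [a-zA-Z0-9-.]+ accepts digits, so the docstring's fourth example, ownsite@our.c0m, actually matches and reports valid, contrary to the advertised output. A quick check, assuming that valid_email definition:

for addr in ("trafik@ukr.tel.com",   # valid
             "trafik@ukr_tel.com",   # underscore in domain -> not valid
             "tra@fik@ukr.com",      # second '@' -> not valid
             "ownsite@our.c0m"):     # docstring says not valid, but the
                                     # pattern allows digits in the TLD
    print(addr, "->", valid_email(addr))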
deck.cards[random_suit].remove(random_number)\n self.cards.append([random_suit, random_number])\n self.current_hand.append(random_number)\n if display:\n print('--------------------------------')\n print('{name} draws {number} of {suit}\\n'.format(name=self.name, number=random_number, suit=random_suit))\n return random_suit, random_number\n\n def display_cards(self):\n print('-------------------------')\n print(\"{name}'s cards are\\n\".format(name=self.name))\n for card in self.cards:\n if card[1] == 11 or card[1] == 1:\n print('{number} of {suit}'.format(number='Ace', suit=card[0]))\n else:\n print('{number} of {suit}'.format(number=card[1], suit=card[0]))\n\n def player_bet(self):\n print('You have {money} to bet'.format(money=self.money))\n while True:\n try:\n bet = int(input('what is your bet: '))\n except ValueError:\n print('Bet must be a number!')\n else:\n while bet > self.money:\n try:\n bet = int(input('Must bet {} or less: '.format(self.money)))\n except ValueError:\n print('Bet must be a number!')\n self.money -= bet\n self.bet = bet\n break\n return bet\n\n def check_blackjack(self, display=True):\n if len(self.current_hand) == 2:\n if 'Ace' in self.current_hand and 10 in self.current_hand:\n self.current_hand = [21]\n self.win += 1\n self.high_score += 1\n if display:\n print('BlackJack!\\n\\nYou win {bet}\\n'.format(bet=self.bet * 2))\n if self.win >= self.high_score:\n print('New win streak, {streak} games won in a row! '.format(streak=self.high_score))\n print('-------------------------')\n self.money += self.bet * 2\n return False\n elif 'Ace' in self.current_hand and 'Queen' in self.current_hand:\n self.current_hand = [21]\n self.win += 1\n self.high_score += 1\n print('BlackJack!\\n\\nYou win {bet}\\n'.format(bet=self.bet * 2))\n if display:\n if self.win >= self.high_score:\n print('New win streak, {streak} games won in a row! '.format(streak=self.high_score))\n print('-------------------------')\n self.money += self.bet * 2\n return False\n elif 'Ace' in self.current_hand and 'King' in self.current_hand:\n self.current_hand = [21]\n self.win += 1\n self.high_score += 1\n if display:\n print('BlackJack!\\n\\nYou win {bet}\\n'.format(bet=self.bet * 2))\n if self.win >= self.high_score:\n print('New win streak, {streak} games won in a row! '.format(streak=self.high_score))\n print('-------------------------')\n self.money += self.bet * 2\n return False\n elif 'Ace' in self.current_hand and 'Jack' in self.current_hand:\n self.current_hand = [21]\n self.win += 1\n self.high_score += 1\n if display:\n print('BlackJack!\\n\\nYou win {bet}\\n'.format(bet=self.bet * 2))\n if self.win >= self.high_score:\n print('New win streak, {streak} games won in a row! 
'.format(streak=self.high_score))\n print('-------------------------')\n self.money += self.bet * 2\n return False\n return True\n\n def check_cards(self, deck):\n sleep(2)\n print('-------------------------')\n if len(self.current_hand) == 1: # Check for one card meaning cards were split\n self.draw_card(deck, display=True)\n if not self.check_blackjack(self.bet):\n return False\n self.current_hand = []\n for number in self.cards: # sorting cards and evaluating values\n if number[1] == 'Ace':\n continue\n else:\n self.current_hand.append(deck.values[number[1]])\n for number in self.cards:\n if number[1] == 'Ace':\n if sum(self.current_hand) >= 11:\n self.current_hand.append(deck.values[number[1]][0])\n else:\n self.display_cards()\n print('Current total is {total} '.format(total=sum(self.current_hand)))\n answer = ''\n while answer != 11 and answer != 1:\n try:\n answer = int(input('Would you like your ace to be an 11 or 1? '))\n except ValueError:\n print('Must be a number!')\n number[1] = answer\n self.clear()\n sleep(2)\n self.current_hand.append(answer)\n total = sum(self.current_hand)\n if total == 21:\n self.display_cards()\n print('\\nYou have 21\\n')\n print('-------------------------')\n sleep(2)\n return False\n elif total > 21:\n print('\\nBust!\\nYou had {total}\\n'.format(total=total))\n print('-------------------------')\n return False\n else:\n self.display_cards()\n print('\\nTotaling {total}\\n'.format(total=total))\n print('-------------------------')\n return True\n\n def hit(self, deck):\n if self.money >= self.bet:\n answer = input('Hit or Stand or Double Down (h/s/dd) \\n')\n while answer != 'h' and answer != 's' and answer != 'dd':\n answer = input('Must choose (h) for Hit and (s) for Stand or (dd) for Double Down')\n if answer == 'h':\n self.clear()\n card_suit, card_number = self.draw_card(deck)\n print('{name} draws a'.format(name=self.name))\n print('{number} of {suit}\\n'.format(suit=card_suit, number=card_number))\n return True\n elif answer == 'dd':\n self.money -= self.bet\n self.bet = self.bet * 2\n self.clear()\n card_suit, card_number = self.draw_card(deck)\n print('{name} draws a'.format(name=self.name))\n print('{number} of {suit}\\n'.format(suit=card_suit, number=card_number))\n self.check_cards(deck)\n sleep(2)\n return False\n else:\n answer = input('Hit or Stand (h/s) \\n')\n while answer != 'h' and answer != 's':\n answer = input('Must choose (h) for Hit and (s) for Stand ')\n if answer == 'h':\n self.clear()\n card_suit, card_number = self.draw_card(deck)\n print('{name} draws a'.format(name=self.name))\n print('{number} of {suit}\\n'.format(suit=card_suit, number=card_number))\n return True\n sleep(2)\n return False\n\n def check_split(self):\n if self.money == 0:\n return False\n if self.current_hand.count(self.current_hand[0]) == 2:\n return True\n return False\n\n def clear(self):\n print('\\n' * 50)\n\n def load_data(self):\n # load high score\n HS_FILE = 'highscore.txt'\n self.dir = path.dirname(__file__)\n try:\n open(path.join(self.dir, HS_FILE), 'r+')\n except FileNotFoundError:\n open(path.join(self.dir, HS_FILE), 'w+')\n\n with open(path.join(self.dir, HS_FILE), 'r+') as f:\n try:\n self.highscore = int(f.read())\n except:\n self.highscore = 0\n\n\nclass Dealer:\n\n def __init__(self):\n self.name = 'Dealer'\n self.cards = []\n self.current_hand = []\n\n def draw_card(self, deck, display=False):\n deck.check_deck()\n random_suit = choice(list(deck.cards))\n random_number = choice(deck.cards[random_suit])\n 
deck.cards[random_suit].remove(random_number)\n self.cards.append([random_suit, random_number])\n self.current_hand.append(random_number)\n if display:\n print('{name} draws {number} of {suit}'.format(name=self.name, number=random_number, suit=random_suit))\n return random_suit, random_number\n\n def set_up(self, deck):\n self.current_hand = []\n self.cards = []\n self.draw_card(deck)\n self.draw_card(deck)\n self.current_hand = [card[1] for card in self.cards]\n self.display_cards()\n\n def display_cards(self): # Dealer can only show one card at first\n self.clear()\n sleep(1)\n print('-------------------------')\n print('The Dealer is showing')\n print('{number} of {suit}'.format(number=self.cards[1][1], suit=self.cards[1][0]))\n print('-------------------------')\n\n def display_all_cards(self):\n print('-------------------------')\n print('The Dealer cards are:')\n for card in self.cards:\n print('{number} of {suit}'.format(number=card[1], suit=card[0]))\n\n def check_blackjack(self):\n if len(self.cards) == 2:\n temp = [card[1] for card in self.cards]\n if 'Ace' in temp and 10 in temp:\n print('Dealer BlackJack!')\n return True\n elif 'Ace' in temp and 'Queen' in temp:\n print('Dealer BlackJack!')\n return True\n elif 'Ace' in temp and 'King' in temp:\n print('Dealer BlackJack!')\n return True\n elif 'Ace' in temp and 'Jack' in temp:\n print('Dealer BlackJack!')\n return True\n\n def check_cards(self): # Dealer's Ace will be 11 if sum of cards is less than 11. Else 1\n temp = []\n for number in self.current_hand:\n index = self.current_hand.index(number)\n if number == \"Ace\":\n continue\n if number == 'Queen' or number == 'Jack' or number == 'King':\n self.current_hand.remove(number)\n self.current_hand.insert(index, 10)\n temp.append(10)\n else:\n temp.append(number)\n for number in self.current_hand: # Deciding what to do with the Ace\n total = sum(temp)\n index = self.current_hand.index(number)\n if number == 'Ace':\n self.current_hand.remove(number)\n if total < 11:\n self.current_hand.insert(index, 11)\n temp.append(11)\n else:\n self.current_hand.insert(index, 1)\n temp.append(1)\n total = sum(self.current_hand)\n # Evaluate hand\n if total == 21:\n self.display_all_cards()\n print('Dealer has 21')\n print('-------------------------')\n sleep(2)\n return False\n elif total > 21:\n self.display_all_cards()\n print('\\nDealer Bust!')\n print('-------------------------')\n sleep(2)\n return False\n else:\n self.display_all_cards()\n print('Totaling {total}\\n'.format(total=total))\n print('-------------------------')\n sleep(2)\n return True\n\n def hit(self, deck):\n dealer_total = sum(self.current_hand)\n if dealer_total < 17:\n card_suit, card_number = self.draw_card(deck)\n print('{name} draws {number} of {suit}\\n'.format(name=self.name, suit=card_suit, number=card_number))\n sleep(2)\n return True\n return False\n\n def clear(self):\n print('\\n' * 50)\n\n\nclass Deck:\n\n def __init__(self):\n self.cards = {'clubs': [2, 3, 4, 5, 6, 7, 8, 9, 10, 'Jack', 'Queen', 'King', 'Ace'],\n 'spades': [2, 3, 4, 5, 6, 7, 8, 9, 10, 'Jack', 'Queen', 'King', 'Ace'],\n 'hearts': [2, 3, 4, 5, 6, 7, 8, 9, 10, 'Jack', 'Queen', 'King', 'Ace'],\n 'diamonds': [2, 3, 4, 5, 6, 7, 8, 9, 10, 'Jack', 'Queen', 'King', 'Ace']}\n\n self.values = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11,\n 'Jack': 10, 'Queen': 10, 'King': 10, 'Ace': [1, 11]}\n\n def check_deck(self):\n for number, suit in self.cards.items():\n if not suit:\n print('\\n---New deck added---\\n')\n self.cards = 
{'clubs': [2, 3, 4, 5, 6, 7, 8, 9, 10, 'Jack', 'Queen', 'King', 'Ace'],\n 'spades': [2, 3, 4, 5, 6, 7, 8, 9, 10, 'Jack', 'Queen', 'King', 'Ace'],\n 'hearts': [2, 3, 4, 5, 6, 7, 8, 9, 10, 'Jack', 'Queen', 'King', 'Ace'],\n 'diamonds': [2, 3, 4, 5, 6, 7, 8, 9, 10, 'Jack', 'Queen', 'King', 'Ace']}\n","sub_path":"variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":14083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"111921955","text":"# Train model following the official document\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\n\nfrom models import return_pytorch04_xception, return_pytorch04_xception_ft\nfrom transform import xception_default_data_transforms\n\nprint(\"PyTorch Version: \", torch.__version__)\nprint(\"Torchvision Version: \", torchvision.__version__)\n\n# Top level data directory. Here we assume the format of the directory conforms\n# to the ImageFolder structure\n# data_dir = \"/home/jc/Faceforensics_onServer/Final_Faceforensics++\"\ndata_dir = \"/home/jc/Faceforensics_onServer/Final_Faceforensics++no_NT-Big\" # no NT\n\n# Models to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception]\nmodel_name = \"squeezenet\"\n\n# Number of classes in the dataset(to fine-tune)\nnum_classes = 2\n\n# Batch size for training (change depending on how much memory you have)\nbatch_size = 32\n\n\ndef train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False):\n since = time.time()\n\n val_acc_history = []\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n # Get model outputs and calculate loss\n # Special case for inception because in training it has an auxiliary output. 
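The four Ace branches of check_blackjack in the blackjack record are copies that differ only in which ten-valued card accompanies the Ace, and the Ace/Queen branch prints its message even when display is False, unlike the other three; check_cards also calls check_blackjack(self.bet), passing the bet where the display flag is expected. A set-membership test expresses the rule once; a sketch assuming the record's current_hand representation:

TEN_VALUE_CARDS = {10, 'Jack', 'Queen', 'King'}

def is_blackjack(current_hand):
    # Exactly two cards: an Ace plus any ten-valued card -- the same
    # test the four duplicated branches above perform one card at a time.
    return (len(current_hand) == 2
            and 'Ace' in current_hand
            and any(card in TEN_VALUE_CARDS for card in current_hand))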
In train\n # mode we calculate the loss by summing the final output and the auxiliary output\n # but in testing we only consider the final output.\n if is_inception and phase == 'train':\n # From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958\n outputs, aux_outputs = model(inputs)\n loss1 = criterion(outputs, labels)\n loss2 = criterion(aux_outputs, labels)\n loss = loss1 + 0.4*loss2\n else:\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n\n _, preds = torch.max(outputs, 1)\n\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n epoch_loss = running_loss / len(dataloaders[phase].dataset)\n epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))\n\n # deep copy the model\n if phase == 'val' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n if phase == 'val':\n val_acc_history.append(epoch_acc)\n\n print()\n\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n print('Best val Acc: {:4f}'.format(best_acc))\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return model, val_acc_history\n\n\n# if we are feature extracting and only want to compute gradients for the newly initialized layer\n# then we want all of the other parameters to not require gradients. This will make more sense later.\ndef set_parameter_requires_grad(model, feature_extracting):\n if feature_extracting:\n # for param in model.parameters():\n # param.requires_grad = False\n for para in list(model.parameters())[:-1]:\n para.requires_grad = False\n\n\nif __name__ == '__main__':\n print(\"Initializing Datasets and Dataloaders...\")\n\n # Create training and validation datasets\n image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), xception_default_data_transforms[x])\n for x in ['train', 'val']}\n # Create training and validation dataloaders\n dataloaders_dict = {\n x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=0)\n for x in ['train', 'val']}\n\n # Detect if we have a GPU available\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n p = parser = argparse.ArgumentParser(description='Process some integers.')\n p.add_argument('--pretrain3epochs', action='store_true')\n args = p.parse_args()\n\n if args.pretrain3epochs:\n pretrain3epoch = True\n num_epochs = 0\n print('True')\n else:\n pretrain3epoch = False\n num_epochs = 15\n print('False')\n\n\n model_ft = return_pytorch04_xception_ft(True, pretrain3epoch)\n set_parameter_requires_grad(model_ft, pretrain3epoch)\n if pretrain3epoch:\n num_ftrs = model_ft.last_linear.in_features\n model_ft.last_linear = nn.Linear(num_ftrs, num_classes)\n\n print(model_ft)\n\n model_ft = model_ft.to(device)\n\n # Gather the parameters to be optimized/updated in this run. If we are\n # finetuning we will be updating all parameters. However, if we are\n # doing feature extract method, we will only update the parameters\n # that we have just initialized, i.e. 
the parameters with requires_grad\n # is True.\n params_to_update = model_ft.parameters()\n print(\"Params to learn:\")\n if pretrain3epoch:\n params_to_update = []\n for name, param in model_ft.named_parameters():\n\n if param.requires_grad == True:\n params_to_update.append(param)\n print(\"\\t\", name)\n else:\n for name, param in model_ft.named_parameters():\n if param.requires_grad == True:\n print(\"\\t\", name)\n\n\n # Observe that all parameters are being optimized\n optimizer_ft = optim.SGD(params_to_update, lr=0.001, momentum=0.9)\n\n # Setup the loss fxn\n criterion = nn.CrossEntropyLoss()\n\n # Train and evaluate\n model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=num_epochs)\n\n if pretrain3epoch:\n # torch.save(model_ft.state_dict(), '/home/jc/Faceforensics_onServer/Model/xception-b5690688-after3epochs-noNT-Big.pth')\n torch.save(model_ft.state_dict(),\n '/home/jc/Faceforensics_onServer/Model/xception-b5690688-after0epochs-noNT-Big.pth')\n else:\n torch.save(model_ft.state_dict(),\n '/home/jc/Faceforensics_onServer/Model/xception-b5690688-after15epochs-noNT-Big.pth')\n","sub_path":"TrainModel/TrainModel_2.py","file_name":"TrainModel_2.py","file_ext":"py","file_size_in_byte":7638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"247457387","text":"import Simulation as Games\n\nNUMBER_GAMES = 1000 # number of games within each simulation\n\nNUMBER_FLIPS = 20 # number of coin flips within each game\nHEADS_PROB = 0.4 # probability of heads\n\n# Create simulation called myMoney\nmyMoney = Games.Simulation(1, NUMBER_GAMES, HEADS_PROB)\n# Run each game within myMoney\nmyMoney.simulate(NUMBER_FLIPS)\n\n# Calculate the Expected Value\nprint(myMoney.get_expected_value())","sub_path":"Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"88611071","text":"from django.conf import settings\n\n\nclass SecurityHeaderMiddleware(object):\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n response = self.get_response(request)\n\n response['X-XSS-Protection'] = '1; mode=block'\n response['X-DNS-Prefetch-Control'] = 'off'\n response['X-Download-Options'] = 'noopen'\n response['X-Content-Type-Options'] = 'nosniff'\n if not settings.DEBUG:\n response['Strict-Transport-Security'] = 'max-age=15552000; includeSubDomains'\n \n return response\n","sub_path":"main/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"470279464","text":"# *-* encoding: utf-8 *-*\nimport argparse,json,os,csv,sys,datetime,re,unicodedata,itertools\n\ndef parserTweetDate(filein,fileout_hsh,fileout_usrhsh,fileout_hshhsh,date):\n counter_sk = 0\n counter_notw = 0\n counter_ok = 0\n counter_line = 0\n\n for line in filein.readlines():\n counter_line += 1\n if counter_line % 100000 == 0:\n print(counter_line,'lines treated')\n try:\n tw = json.loads(line)\n except:\n counter_sk += 1\n continue\n if 'twitter' not in tw.keys():\n counter_notw += 1\n continue\n\n d = [int(i) for i in re.findall(r'[0-9]+',tw['twitter']['created_at'])]\n dt = datetime.datetime(*d[0:6])\n ts = dt.timestamp()\n\n if ts < date:\n if 'hashtags' in tw['twitter']:\n uid = tw['twitter']['user']['id']\n hashtags = tw['twitter']['hashtags']\n hashtags_f = []\n 
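set_parameter_requires_grad in the fine-tuning record freezes every parameter except the last one in parameters() order (typically the final bias tensor), so on its own it does not leave a whole trainable head; the script's main block compensates by replacing last_linear afterwards, whose fresh parameters default to requires_grad=True. The two steps combined into one hypothetical helper, assuming an Xception-style model exposing last_linear as in the record:

import torch.nn as nn

def freeze_for_feature_extraction(model, num_classes):
    # Freeze every existing parameter...
    for param in model.parameters():
        param.requires_grad = False
    # ...then swap in a new head; nn.Linear parameters are created with
    # requires_grad=True, so only the head is trained.
    num_ftrs = model.last_linear.in_features
    model.last_linear = nn.Linear(num_ftrs, num_classes)
    return model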
for h in hashtags:\n h_f = ''.join(c for c in unicodedata.normalize('NFD', h) if unicodedata.category(c) != 'Mn').lower()\n t = re.sub(r'[^a-zA-Z0-9]+','',h_f)\n if t == h_f:\n hashtags_f.append(h_f)\n fileout_usrhsh.write(str(uid)+','+h_f+','+str(ts)+'\\n')\n fileout_hsh.write(h_f+','+str(ts)+'\\n')\n if len(hashtags_f) > 1:\n for el in itertools.combinations(hashtags_f,2):\n fileout_hshhsh.write(el[0]+','+el[1]+','+str(ts)+'\\n')\n fileout_hshhsh.write(el[1]+','+el[0]+','+str(ts)+'\\n')\n counter_ok+=1\n\n print('Finished treating file',filein.name,'for date',date)\n print(counter_line,'lines treated')\n print(counter_sk,'lines with no json format')\n print(counter_notw,'lines with no tweet format')\n print(counter_ok,'lines for which entries were created')\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i','--inputfile',\n type=argparse.FileType('r'),\n default=sys.stdin,\n help=\"Input file, json format, tweets\")\n parser.add_argument('-o1','--outputHashes',\n type=argparse.FileType('a'),\n required=True,\n help=\"Output file for hash per period\")\n parser.add_argument('-o2','--outputUserHashes',\n type=argparse.FileType('a'),\n required=True,\n help=\"Output file for user hash per period\")\n parser.add_argument('-o3','--outputHashesHashes',\n type=argparse.FileType('a'),\n required=True,\n help=\"Output file for hash co occurences per period\")\n parser.add_argument('-d','--date',\n type=float,\n required=True,\n help=\"Limit period (epoch time)\")\n\n args = parser.parse_args()\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n parserTweetDate(args.inputfile,args.outputHashes,args.outputUserHashes,args.outputHashesHashes,args.date)\n\n args.inputfile.close()\n args.outputHashes.close()\n args.outputUserHashes.close()\n args.outputHashesHashes.close()\n\nif __name__ == '__main__':\n main()\n\n \n\n","sub_path":"version_20160401.dir/study_period_4_weeks_3_week_ovlerap.dir/evolvingnetwork.dir/intervalfiles.dir/crawlerperperiod.dir/extract_hash_per_period.py","file_name":"extract_hash_per_period.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"630401755","text":"import requests\nimport urllib3\nimport json\nimport urllib\nimport urllib.request\n\nheaders = {\n 'Accept': '*/*',\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',\n 'Referer':\"https://item.jd.com/100000177760.html#comment\"\n}\n\n# flag\nflag = True\n\n# https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv102&productId=100006154988&score=3&sortType=5&page=0&pageSize=10&isShadowSku=0&fold=1\n\n\ndef getjsontxt(url):\n try:\n r = requests.get(url, timeout = 30, headers = headers)\n r.raise_for_status()\n\n return r.text\n except:\n return \"存在异常\"\n\n\nstr = \"https://sclub.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv102&productId=100006154988&score=3&sortType=5&page=0&pageSize=100&isShadowSku=0&fold=0\"\nres = getjsontxt(str)\nres = res.split('(')[1].split(')')[0]\nprint(res)\n\ndef json_op(result):\n global flag\n j = json.loads(result)\n comments = j[\"comments\"]\n # print(comments)\n for i in comments:\n text = i[\"content\"]\n if flag:\n flag = False\n with open(\"D://CCCC.txt\",'w', encoding='UTF-8') as file:\n file.write(text + \"\\n-----------------------\\n\")\n file.close()\n else:\n with open(\"D://CCCC.txt\", 'a', 
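The hashtag loop in the tweet-extraction record strips accents with an NFD decomposition and keeps a tag only if nothing but ASCII alphanumerics remains after cleanup. Isolated into a standalone, runnable helper (the function name is mine, not the record's):

import re
import unicodedata

def normalize_hashtag(h):
    # NFD splits accented letters into base letter + combining mark
    # (category 'Mn'); dropping the marks leaves the bare letters.
    h_f = ''.join(c for c in unicodedata.normalize('NFD', h)
                  if unicodedata.category(c) != 'Mn').lower()
    # Accept the tag only if it is purely [a-zA-Z0-9] after cleanup.
    return h_f if re.sub(r'[^a-zA-Z0-9]+', '', h_f) == h_f else None

print(normalize_hashtag('Élection'))  # -> 'election'
print(normalize_hashtag('東京'))       # -> None (non-Latin is rejected)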
encoding='UTF-8') as file:\n file.write(text + \"\\n-----------------------\\n\")\n file.close()\n\njson_op(res)","sub_path":"CrawlerSpider.py","file_name":"CrawlerSpider.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"266499365","text":"\"\"\"empty message\n\nRevision ID: 4b2424d9f6a6\nRevises: b54b3d223e13\nCreate Date: 2017-04-02 22:50:01.833232\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4b2424d9f6a6'\ndown_revision = 'b54b3d223e13'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('session_levels',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('session_id', sa.Text(length=128), nullable=True),\n sa.Column('phone_number', sa.String(length=25), nullable=True),\n sa.Column('level', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('session_id')\n )\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=64), nullable=True),\n sa.Column('phone_number', sa.String(length=64), nullable=False),\n sa.Column('city', sa.String(length=64), nullable=True),\n sa.Column('registration_date', sa.DateTime(), nullable=True),\n sa.Column('password_hash', sa.String(length=128), nullable=True),\n sa.Column('account', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_users_phone_number'), 'users', ['phone_number'], unique=True)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_users_phone_number'), table_name='users')\n op.drop_table('users')\n op.drop_table('session_levels')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/4b2424d9f6a6_.py","file_name":"4b2424d9f6a6_.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"481579076","text":"class EnumMeta(type):\n def __new__(cls, name, bases, dict):\n\n dict['__mapping__'] = {}\n members = {k: v for (k, v) in dict.items() if not (k.startswith('__') and k.endswith('__'))}\n enum = super().__new__(cls, name, bases, dict)\n for key, value in members.items():\n value = enum(value)\n value.name = key\n setattr(enum, key, value)\n return enum\n\n def __iter__(self):\n return (self.__name__ + \".\" + name for name in self.__dict__.keys() if\n not (name.startswith('__') and name.endswith('__')))\n\n def __getitem__(self, item):\n try:\n return self.__dict__[item]\n except KeyError:\n return \"KeyError: '{}'\".format(item)\n\n\nclass Enum(metaclass=EnumMeta):\n __mapping__ = {}\n\n def __new__(cls, value):\n if value in cls.__mapping__:\n return cls.__mapping__[value]\n v = super().__new__(cls)\n v.value = value\n v.name = ''\n cls.__mapping__[value] = v\n return v\n\n def __repr__(self):\n if self.name in Direction.__dict__:\n return '<{}.{}: {}>'.format(self.__class__.__name__, self.name, self.value)\n else:\n return \"ValueError: {} is not a valid Direction\".format(self.value)\n\n\nclass Direction(Enum):\n north = 0\n east = 90\n south = 180\n west = 270\n\n\nprint(Direction.north)\nprint(Direction.south)\nprint(Direction.north.name)\nprint(Direction.north.value)\nprint(Direction(0))\nprint(Direction(30))\nDirection(30)\n\nfor d in Direction:\n 
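The JD crawler above unwraps the JSONP padding with res.split('(')[1].split(')')[0], which truncates the payload at the first ')' inside any review text. Slicing between the first '(' and the last ')' is the safer form of the same idea; a sketch using the record's real callback name but an invented sample payload:

import json

def strip_jsonp(payload):
    # fetchJSON_comment98vv102({...});  ->  {...}
    start = payload.index('(') + 1    # first '(' opens the padding
    end = payload.rindex(')')         # last ')' closes it
    return json.loads(payload[start:end])

sample = 'fetchJSON_comment98vv102({"comments": [{"content": "好评(推荐)"}]});'
print(strip_jsonp(sample)["comments"][0]["content"])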
print(d)\n\nprint(\"id of Direction.north: \" + str(id(Direction.north)))\nprint(\"id of Direction(0): \" + str(id(Direction(0))))\n\nprint(Direction['west'])\nprint(Direction['north-west'])\n","sub_path":"lesson OOP-2/Bogucharov_HW2.py","file_name":"Bogucharov_HW2.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"96360883","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport os\nimport sys\nfrom pathlib import Path\nimport pystache\nimport datetime\nimport base64\nimport html\n\nnow = datetime.datetime.now()\n\nhandles = {\n 'YEAR': now.year\n}\n\ntry:\n handles['REPO_TITLE'] = os.environ['REPO_TITLE']\n handles['REPO_NAME'] = os.environ['REPO_NAME']\n handles['RELEASE'] = os.environ['RELEASE']\n handles['MINOR'] = os.environ['MINOR']\n handles['PATCH'] = os.environ['PATCH']\n handles['REPO_PATH'] = os.environ['REPO_PATH']\n handles['DEVELOPER'] = os.environ['DEVELOPER']\nexcept:\n print(\"Error: %s\\n\" % sys.exc_info()[0])\n print(\"Is direnv working correctly?\\n\")\n os.Exit(1)\n\n\ndef load_snippets():\n res = subprocess.getoutput(\"cd / && find %s/repo/snippets -type f -not -path '*/\\.*'\" % handles['REPO_PATH'])\n overlay = str.split(res, '\\n')\n\n for file in overlay:\n # Reverse the full path, cut the short name off, reverse back\n v = str.split(file[::-1],'/')[0][::-1]\n handles[v] = Path(file).read_text()\n\ndef load_commit_count():\n handles['COMMIT_COUNT'] = int(subprocess.getoutput(\"git rev-list --count HEAD\"))\n\ndef load_branch():\n handles['BRANCH'] = subprocess.getoutput(\"git branch | egrep '^\\*' | rev | cut -d'/' -f1 | cut -d' ' -f1 | rev\")\n\n\ndef load_version():\n branch = str.split(subprocess.getoutput(\"git branch|grep '^*'|cut -d' ' -f2\"),'/')\n if branch[0] == 'master' or branch[0] == 'release':\n handles['VERSION'] = \"%s.%s.%s\" % (handles['RELEASE'], handles['MINOR'], handles['PATCH'])\n handles['DEV_VERSION'] = \"\"\n else:\n handles['VERSION'] = \"%s.%s.%s-%s%d\" % (handles['RELEASE'], handles['MINOR'], handles['PATCH'], handles['BRANCH'], handles['COMMIT_COUNT'])\n handles['DEV_VERSION'] = base64.b32encode(bytearray(\"%s.%s\" % (handles['DEVELOPER'], now),'ascii')).decode('utf-8')\n\ndef update_repo():\n res = subprocess.getoutput(\"cd / && find %s/repo/root -type f -not -path '*/\\.*'\" % handles['REPO_PATH'])\n overlay = str.split(res, '\\n')\n\n for template in overlay:\n t = Path(template).read_text()\n d = str.split(template,'repo/root/')[1]\n #print(\"%s -> %s/%s:\\n%s\" % (template, handles['REPO_PATH'], d, pystache.render(t,handles)))\n Path(\"%s/%s\" % (handles['REPO_PATH'], d)).write_text(html.unescape(pystache.render(t,handles)))\n subprocess.getoutput(\"git add %s/%s\" % (handles['REPO_PATH'], d))\n\nstart_dir = os.getcwd()\n\nload_commit_count()\nload_branch()\nload_version()\nload_snippets()\n\n#####\n\nupdate_repo()\n\n#print(handles)\n#fh = Path('../root/README.md').read_text()\n#print(pystache.render(fh,handles))\n\nos.chdir(start_dir)\n","sub_path":"repo/bin/overlay_repo.py","file_name":"overlay_repo.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"216435231","text":"#!/usr/bin/env python\n\n\"\"\"This module defines custom color schemes for pretty-printing colored\noutput to the console via Pygments. This requires ANSI colors, so it\n === WILL NOT WORK IN WINDOWS. ===\n\nTo add a style:\n\n 1. 
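The hand-rolled EnumMeta above reproduces the value interning that the standard library already provides -- Direction(0) is Direction.north holds for stdlib enums too; only the lenient lookups, which return error strings instead of raising, are custom behaviour. For contrast, the same enum with the standard library:

import enum

class Direction(enum.Enum):
    north = 0
    east = 90
    south = 180
    west = 270

print(Direction.north)                   # Direction.north
print(Direction(0) is Direction.north)   # True -- interned, like __mapping__
print(Direction['west'])                 # Direction.west
# Unlike the custom class, bad lookups raise instead of returning strings:
# Direction(30) raises ValueError; Direction['north-west'] raises KeyError.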
Create a class extending pygments.style.Style and follow the directions\n here: .\n\n See for a list of Pygments tokens and\n their meanings, and look through pygments.lexers.web.JsonLexer to see\n which ones the JSON lexer uses. TL;DR: DarkStyle (below) has everything\n you need to color JSON.\n\n 2. In get_style_by_name(), add an entry that points to your class. The\n name you give the key is what you'd pass to --style on the command\n line. If it contains a space, even just one space, absolutely nobody\n will like you.\n\n Nobody.\n\n So no spaces.\n\n\"\"\"\n\nfrom pygments.style import Style\nfrom pygments.styles import STYLE_MAP as builtin_styles\nfrom pygments.token import Keyword, Name, String, Number, Punctuation\n\n\n# This highlight scheme used when --style is omitted. Not to be confused with\n# the string 'default', which is passed to Pygments when the style that WAS\n# given with --style is not a valid name.\nDEFAULT_STYLE = 'monoclone'\n\n\n# If you don't add your style here, the --style argument won't pick it up. -----\n\n\ndef get_style_by_name(name):\n custom_styles = {\n 'monoclone': MonocloneStyle\n }\n\n try:\n return custom_styles[name]\n except KeyError:\n # 'default' is a magic default value used by Terminal256Formatter.\n return name if name in builtin_styles else 'default'\n\n\n# Custom styles begin here -----------------------------------------------------\n\n\nclass MonocloneStyle(Style):\n \"\"\"A clone of the Monokai scheme.\n\n \"\"\"\n default_style = ''\n\n styles = {\n # true, false, null\n Keyword: '#F92672',\n # { } : ,\n Punctuation: '#f8f8f2',\n # Key names, including their quotes\n Name: '#F92672',\n # Integers\n Number.Integer: '#AE81FF',\n # Floating-point numbers\n Number.Float: '#AE81FF',\n # String values, not including keys\n String.Double: '#E6DB74'\n }\n","sub_path":"styles.py","file_name":"styles.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"280937938","text":"import spacy\nfrom spacy.matcher import Matcher\n\nnlp = spacy.load(\"fr_core_news_sm\")\nmatcher = Matcher(nlp.vocab)\n\ndoc = nlp(\n \"Après avoir effectué la mise à jour d'iOS vous ne constaterez pas de \"\n \"renouveau radical : rien de commun avec le bouleversement que nous \"\n \"avions connu avec iOS 7. Globalement iOS 11 reste très semble à iOS 10. 
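get_style_by_name in the styles record hands back either a custom Style class or a builtin style name, and Pygments formatters accept both forms through their style argument. A usage sketch, assuming the record's module is importable as styles:

from pygments import highlight
from pygments.lexers import JsonLexer
from pygments.formatters import Terminal256Formatter

from styles import DEFAULT_STYLE, get_style_by_name

code = '{"name": "monoclone", "version": 1, "stable": true}'
style = get_style_by_name(DEFAULT_STYLE)   # -> MonocloneStyle class
print(highlight(code, JsonLexer(), Terminal256Formatter(style=style)))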
\"\n \"Mais vous découvrirez quelques changements en approfondissant un peu.\"\n)\n\n# Écris un motif des versions complètes d'iOS (\"iOS 7\", \"iOS 11\", \"iOS 10\")\npattern = [{\"TEXT\": \"iOS\"}, {\"IS_DIGIT\": True}]\n\n# Ajoute le motif au matcher et applique le matcher au doc\nmatcher.add(\"IOS_VERSION_PATTERN\", None, pattern)\nmatches = matcher(doc)\nprint(\"Nombre de correspondances trouvées :\", len(matches))\n\n# Itère sur les correspondances et affiche la portion de texte\nfor match_id, start, end in matches:\n print(\"Correspondance trouvée :\", doc[start:end].text)\n","sub_path":"exercises/fr/solution_01_12_01.py","file_name":"solution_01_12_01.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"143664815","text":"\"\"\"\n一周中上一次出现某天时的日期\n\"\"\"\nimport datetime as dt\nfrom dateutil.relativedelta import relativedelta\nfrom dateutil.rrule import *\n\nweekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n\n\ndef get_previous_by_day(day_name, start_date=None):\n if start_date is None:\n start_date = dt.datetime.today()\n day_num = start_date.weekday()\n day_num_target = weekdays.index(day_name)\n days_ago = (7 + day_num - day_num_target) % 7\n if days_ago == 0:\n days_ago = 7\n target_date = start_date - dt.timedelta(days=days_ago)\n return target_date\n\n\nnow = dt.datetime.now()\nprint(now)\nprint(now + relativedelta(weekday=MO))\nprint(now + relativedelta(weekday=MO(-1)))\n","sub_path":"3 数字、日期和时间/13 计算上周五的日期.py","file_name":"13 计算上周五的日期.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"356443283","text":"import configparser\nimport datetime\nimport json\nimport time\nimport traceback\nfrom collections import Counter, deque\nfrom random import random\n\nimport httplib2\nfrom googleapiclient.discovery import build\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.file import Storage\nfrom oauth2client.tools import run_flow\n\nimport pywemo\n\nSTORAGE = Storage(\"credentials.storage\")\n\nconfig = configparser.ConfigParser()\nconfig.read(\"config.ini\")\n\n# Console output will show the temperature in F or C.\nFAHRENHEIT = config.getboolean(\"DEFAULT\", \"Fahrenheit\")\nPOLLING_PERIOD_S = config.getint(\"DEFAULT\", \"PollingPeriodS\")\naux_heat_thresh = config.getint(\"DEFAULT\", \"AuxHeatThreshold\")\nAUX_HEAT_THRESHOLD_C = aux_heat_thresh * 5 / 9.0 if FAHRENHEIT else aux_heat_thresh\nHUMIDITY_PERCENT_TARGET = config.getint(\"DEFAULT\", \"HumidityPercentTarget\")\nHUMIDITY_PERCENT_THRESHOLD = config.getint(\"DEFAULT\", \"HumidityPercentThreshold\")\n\nGOOGLE_ENTERPRISE = config[\"google\"][\"Enterprise\"]\nGOOGLE_CLIENT_SECRET = config[\"google\"][\"ClientSecretFile\"]\nGOOGLE_SCOPE = \"https://www.googleapis.com/auth/sdm.service\"\n\nWEMO_HEATING_DEVICE_NAMES = set(json.loads(config.get(\"wemo\", \"HeatingDeviceNames\")))\nWEMO_COOLING_DEVICE_NAMES = set(json.loads(config.get(\"wemo\", \"CoolingDeviceNames\")))\nWEMO_AUXILLIARY_HEATING_DEVICE_NAMES = set(\n json.loads(config.get(\"wemo\", \"AuxiliaryHeatingDeviceNames\"))\n)\nWEMO_HUMIDIFIER_DEVICE_NAMES = set(json.loads(config.get(\"wemo\", \"HumidifierNames\")))\n\n\ndef authorize_credentials():\n \"\"\"Start the OAuth flow to retrieve credentials.\n This may require launching a browser, one time.\"\"\"\n # Fetch credentials from storage\n credentials = STORAGE.get()\n 
# If the credentials doesn't exist in the storage location then run the flow.\n if credentials is None or credentials.invalid:\n flow = flow_from_clientsecrets(GOOGLE_CLIENT_SECRET, scope=GOOGLE_SCOPE)\n http = httplib2.Http()\n credentials = run_flow(flow, STORAGE, http=http)\n return credentials\n\n\nservice = None\n\n\ndef nest_client():\n global service\n if service is None:\n credentials = authorize_credentials()\n http = credentials.authorize(httplib2.Http())\n service = build(\n serviceName=\"smartdevicemanagement.googleapis.com\",\n version=\"v1\",\n http=http,\n discoveryServiceUrl=\"https://{api}/$discovery/rest?version={apiVersion}\",\n )\n return service\n\n\ndef get_nest_devices():\n devices = (\n nest_client()\n .enterprises()\n .devices()\n .list(parent=\"enterprises/\" + GOOGLE_ENTERPRISE)\n .execute()\n )\n return devices[\"devices\"]\n\n\ndef get_thermostats():\n devices = get_nest_devices()\n return [x for x in devices if x[\"type\"] == \"sdm.devices.types.THERMOSTAT\"]\n\n\ndef get_nest_device(name):\n return nest_client().enterprises().devices().get(name=name).execute()\n\n\nthermostat_name = None\n\n\ndef get_first_thermostat():\n global thermostat_name\n if thermostat_name is not None:\n try:\n return get_nest_device(thermostat_name)\n except Exception as e:\n # Thermostat has changed?\n print(\"Unable to read thermostat {}: {}\".format(thermostat_name, e))\n thermostat_name = None\n # First time, or the old thermostat is offline\n if thermostat_name is None:\n thermostat_name = get_thermostats()[0][\"name\"]\n print(\"New thermostat discovered: {}\".format(thermostat_name))\n return get_nest_device(thermostat_name)\n\n\n# This tells how far back to remember a wemo device that isn't showing\n# up in discovery any more.\nWEMO_DISCOVERY_HISTORY_LEN = 10\nwemo_discovery_history = deque(maxlen=WEMO_DISCOVERY_HISTORY_LEN)\n# Once the discovery history is full, it refreshes much less frequently\n# (controlled by the refresh probability) and relies on the cached results.\nWEMO_REFRESH_PROB = 0.05\n\n\ndef get_wemo_devices():\n # Merges the last few discovery attempts, in case some wemos\n # intermittently fail to appear.\n if (\n len(wemo_discovery_history) < WEMO_DISCOVERY_HISTORY_LEN\n or random() < WEMO_REFRESH_PROB\n ):\n try:\n wemo_discovery_history.appendleft(pywemo.discover_devices())\n except:\n print(\"Wemo discovery exception:\")\n traceback.print_exc()\n # Merge and filter out duplicates by MAC address\n devices = set()\n macs = set()\n for discovery in wemo_discovery_history:\n for device in discovery:\n if device.mac not in macs:\n macs.add(device.mac)\n devices.add(device)\n return devices\n\n\ndevice_error_count = Counter()\nMAX_RETRIES = config.getint(\"wemo\", \"MaxPowerOffRetries\")\n\n\ndef reset_wemo_devices(device_set, skipping=None):\n \"\"\"Turns as set of wemos off, and removes them from the set if successful.\n Devices specified by 'skipping' are not turned off, although they are\n still removed from the set.\"\"\"\n\n if skipping:\n device_set.difference_update(skipping)\n toggled_successfully = set()\n for device in device_set:\n try:\n print(\"Turning {} off.\".format(device.name))\n device.off()\n toggled_successfully.add(device)\n except:\n print(\"Unable to toggle {}\".format(device.name))\n traceback.print_exc()\n device_error_count[device.mac] += 1\n if device_error_count[device.mac] > MAX_RETRIES:\n print(\n \"Giving up on {} after {} retries.\".format(device.name, MAX_RETRIES)\n )\n toggled_successfully.add(device)\n for device in 
toggled_successfully:\n device_set.discard(device)\n del device_error_count[device.mac]\n\n\nactivated_heating_devices = set()\nactivated_cooling_devices = set()\nactivated_humidifier_devices = set()\n\n\ndef power_off_unneeded_wemos(hvac_status):\n # Turns off wemos that aren't needed in the current state.\n # Should not mess with devices that were manually toggled, since it acts only\n # on devices that this script turned on.\n if hvac_status == \"COOLING\":\n reset_wemo_devices(\n activated_heating_devices, skipping=activated_humidifier_devices\n )\n elif hvac_status == \"HEATING\":\n reset_wemo_devices(\n activated_cooling_devices, skipping=activated_humidifier_devices\n )\n else:\n reset_wemo_devices(\n activated_heating_devices, skipping=activated_humidifier_devices\n )\n reset_wemo_devices(\n activated_cooling_devices, skipping=activated_humidifier_devices\n )\n\n\ndef power_on_needed_wemo(device, hvac_status):\n # powers on a wemo and adds it to an active set so we can remember\n # to turn if off later when HVAC status changes.\n print(\"Turning {} on for {}.\".format(device.name, hvac_status))\n try:\n device.on()\n if hvac_status == \"COOLING\":\n activated_cooling_devices.add(device)\n activated_heating_devices.discard(device)\n elif hvac_status == \"HEATING\":\n activated_heating_devices.add(device)\n activated_cooling_devices.discard(device)\n elif hvac_status == \"HUMIDIFYING\":\n activated_humidifier_devices.add(device)\n else:\n print(\"Unexpected hvac status to enable a wemo: {}\".format(hvac_status))\n except:\n print(\"Wemo powering exception:\")\n traceback.print_exc()\n\n\ndef aux_heat_is_needed(thermostat):\n # Actual room temperature.\n temperature_c = thermostat[\"traits\"][\"sdm.devices.traits.Temperature\"][\n \"ambientTemperatureCelsius\"\n ]\n # The temperature that the heater is \"set\" to.\n heat_temperature_c = thermostat[\"traits\"][\n \"sdm.devices.traits.ThermostatTemperatureSetpoint\"\n ][\"heatCelsius\"]\n hvac_status = thermostat[\"traits\"][\"sdm.devices.traits.ThermostatHvac\"][\"status\"]\n return (\n hvac_status == \"HEATING\"\n and heat_temperature_c - temperature_c > AUX_HEAT_THRESHOLD_C\n )\n\n\ndef forget_user_controlled_wemos(all_wemos):\n # If code turned a switch on but the user manually turned it off,\n # then forget about turning it off by code later. 
The user has taken\n # responsibility.\n global activated_heating_devices\n global activated_cooling_devices\n global activated_humidifier_devices\n activated_wemos = (\n activated_heating_devices\n | activated_cooling_devices\n | activated_humidifier_devices\n )\n user_toggled = set()\n\n for device in activated_wemos:\n if device.is_off():\n user_toggled.add(device)\n\n # As a second pass, also check for user-toggled devices by mac address.\n # This might be important if a wemo device's name is changed.\n activated_mac_to_wemo = {x.mac: x for x in activated_wemos}\n for device in all_wemos:\n if device.mac in activated_mac_to_wemo and device.is_off():\n user_toggled.add(activated_mac_to_wemo[device.mac])\n\n activated_heating_devices -= user_toggled\n activated_cooling_devices -= user_toggled\n activated_humidifier_devices -= user_toggled\n\n\ndef print_temp(thermostat):\n # Actual room temperature.\n temperature_c = thermostat[\"traits\"][\"sdm.devices.traits.Temperature\"][\n \"ambientTemperatureCelsius\"\n ]\n if FAHRENHEIT:\n temperature_f = (temperature_c * 9 / 5.0) + 32\n print(\n \"{} temperature: {:.1f} degrees F\".format(\n start.strftime(\"%Y-%m-%d %H:%M\"), temperature_f\n )\n )\n else:\n print(\n \"{} temperature: {:.1f} degrees C\".format(\n start.strftime(\"%Y-%m-%d %H:%M\"), temperature_c\n )\n )\n\n\n# Normally we don't take control of already-running devices since we don't want to override user\n# intent. But on first launch, we do. This prevents devices from getting orphaned on if the script\n# is restarted.\nfirst_iteration = True\nprev_hvac_status = None\naux_heat_engaged = False\nhumidifiers_engaged = False\nwhile True:\n # Detect when the HVAC status changes to heating, cooling, or neither.\n # Toggle Wemo switches accordingly.\n # Remember that some switches may be for both heating and cooling.\n start = datetime.datetime.now()\n try:\n wemos = get_wemo_devices()\n thermostat = get_first_thermostat()\n print_temp(thermostat)\n hvac_status = thermostat[\"traits\"][\"sdm.devices.traits.ThermostatHvac\"][\n \"status\"\n ]\n\n forget_user_controlled_wemos(wemos)\n\n if hvac_status != prev_hvac_status:\n # hvac status has changed. flick some switches.\n aux_heat_engaged = False\n for wemo in wemos:\n if hvac_status == \"COOLING\" and wemo.name in WEMO_COOLING_DEVICE_NAMES:\n power_on_needed_wemo(wemo, hvac_status)\n elif (\n hvac_status == \"HEATING\" and wemo.name in WEMO_HEATING_DEVICE_NAMES\n ):\n power_on_needed_wemo(wemo, hvac_status)\n\n # Humidifiers can kick on or off independent of the hvac\n humidity = thermostat[\"traits\"][\"sdm.devices.traits.Humidity\"][\n \"ambientHumidityPercent\"\n ]\n if (\n not humidifiers_engaged\n and humidity < HUMIDITY_PERCENT_TARGET - HUMIDITY_PERCENT_THRESHOLD\n ):\n humidifiers_engaged = True\n for wemo in wemos:\n if wemo.name in WEMO_HUMIDIFIER_DEVICE_NAMES and (\n wemo.is_off() or first_iteration\n ):\n # dummy hvac status, but our method understands it anyway.\n power_on_needed_wemo(wemo, \"HUMIDIFYING\")\n elif humidity > HUMIDITY_PERCENT_TARGET + HUMIDITY_PERCENT_THRESHOLD:\n humidifiers_engaged = False\n reset_wemo_devices(\n activated_humidifier_devices,\n skipping=activated_cooling_devices | activated_heating_devices,\n )\n\n # Auxiliary heat can kick on in the middle of a cycle, but only once per cycle.\n if aux_heat_is_needed(thermostat) and not aux_heat_engaged:\n aux_heat_engaged = True\n # aux heat includes stuff like little space heaters. 
If you turned one on\n # manually, I want to leave it out of automatic control so you can have\n # your room as toasty as you like. Hence the \"is_off()\" check before\n # starting automatic control here.\n for wemo in wemos:\n if wemo.name in WEMO_AUXILLIARY_HEATING_DEVICE_NAMES and (\n wemo.is_off() or first_iteration\n ):\n power_on_needed_wemo(wemo, hvac_status)\n power_off_unneeded_wemos(hvac_status)\n prev_hvac_status = hvac_status\n except:\n print(\"Top-level exception:\")\n traceback.print_exc()\n first_iteration = False\n first_iteration = False\n iteration_s = (datetime.datetime.now() - start).total_seconds()\n time.sleep(max(POLLING_PERIOD_S - iteration_s, 5))","sub_path":"wenestmo.py","file_name":"wenestmo.py","file_ext":"py","file_size_in_byte":13088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"215419952","text":"import json\nimport sys\n\nfrom flask import Flask, request\n\nimport consume\nimport parse\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\nsys.path.insert(1, '/home/fabian/cos/scrapi/')\n\napp = Flask(__name__)\n\n\n@app.route('/consume', methods=['GET', 'POST'])\ndef consume_day():\n return consume.consume()\n\n\n@app.route('/process', methods=['GET', 'POST'])\ndef parse_all():\n result = json.loads(request.args.get('doc'))\n timestamp = request.args.get('timestamp')\n return parse.parse(result, timestamp)\n\n\nif __name__ == '__main__':\n app.run(\n host=\"0.0.0.0\",\n port=1338,\n debug=True\n )\n","sub_path":"website/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"277864367","text":"import os\nimport time\nimport datetime\nimport collections\nimport socket\nfrom calendar import timegm\nfrom future.utils import iteritems\n\nimport json\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\ntry:\n from threading import get_ident\nexcept ImportError:\n from thread import get_ident\n\nfrom pandaharvester.harvesterconfig import harvester_config\nfrom pandaharvester.harvestercore import core_utils\nfrom pandaharvester.harvestercore.plugin_factory import PluginFactory\nfrom pandaharvester.harvestercore.db_proxy_pool import DBProxyPool as DBProxy\nfrom pandaharvester.harvestercore.db_interface import DBInterface\n\n# attribute list\n_attribute_list = ['id', 'item', 'score']\n\n# fifo object spec\nFifoObject = collections.namedtuple('FifoObject', _attribute_list, verbose=False, rename=False)\n\n# logger\n_logger = core_utils.setup_logger('fifos')\n\n\n# base class of fifo message queue\nclass FIFOBase(object):\n # constructor\n def __init__(self, **kwarg):\n for tmpKey, tmpVal in iteritems(kwarg):\n setattr(self, tmpKey, tmpVal)\n self.hostname = socket.gethostname()\n self.os_pid = os.getpid()\n self.dbProxy = DBProxy()\n self.dbInterface = DBInterface()\n\n # get process identifier\n def get_pid(self):\n thread_id = get_ident()\n if thread_id is None:\n thread_id = 0\n return '{0}_{1}-{2}'.format(self.hostname, self.os_pid, format(get_ident(), 'x'))\n\n # make logger\n def make_logger(self, base_log, token=None, method_name=None, send_dialog=True):\n if send_dialog and hasattr(self, 'dbInterface'):\n hook = self.dbInterface\n else:\n hook = None\n return core_utils.make_logger(base_log, token=token, method_name=method_name, hook=hook)\n\n # intialize fifo from harvester configuration\n def _initialize_fifo(self):\n self.fifoName = '{0}_fifo'.format(self.titleName)\n 
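get_wemo_devices in the thermostat script merges the last few discovery rounds and deduplicates by MAC address with a parallel macs set; dict.setdefault keyed on the MAC expresses the same first-sighting-wins merge more compactly. A sketch over plain dicts standing in for pywemo devices (the sample MACs and names are invented):

history = [
    [{'mac': 'aa:01', 'name': 'Heater'}, {'mac': 'aa:02', 'name': 'Fan'}],
    [{'mac': 'aa:02', 'name': 'Fan'}, {'mac': 'aa:03', 'name': 'Humidifier'}],
]

by_mac = {}
for discovery in history:           # newest round first, as in the deque
    for device in discovery:
        by_mac.setdefault(device['mac'], device)   # first sighting wins

print(sorted(by_mac))   # ['aa:01', 'aa:02', 'aa:03']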
self.config = getattr(harvester_config, self.titleName)\n if hasattr(self.config, 'fifoEnable') and self.config.fifoEnable:\n self.enabled = True\n else:\n self.enabled = False\n return\n pluginConf = vars(self.config).copy()\n pluginConf.update( {'titleName': self.titleName} )\n if hasattr(self.config, 'fifoModule') and hasattr(self.config, 'fifoClass'):\n pluginConf.update( {'module': self.config.fifoModule,\n 'name': self.config.fifoClass,} )\n else:\n if not hasattr(harvester_config, 'fifo'):\n return\n pluginConf.update( {'module': harvester_config.fifo.fifoModule,\n 'name': harvester_config.fifo.fifoClass,} )\n pluginFactory = PluginFactory()\n self.fifo = pluginFactory.get_plugin(pluginConf)\n\n # encode\n def encode(self, obj):\n obj_serialized = pickle.dumps(obj, -1)\n return obj_serialized\n\n # decode\n def decode(self, obj_serialized):\n obj = pickle.loads(obj_serialized)\n return obj\n\n # size of queue\n def size(self):\n mainLog = self.make_logger(_logger, 'id={0}-{1}'.format(self.fifoName, self.get_pid()), method_name='size')\n retVal = self.fifo.size()\n mainLog.debug('size={0}'.format(retVal))\n return retVal\n\n # enqueue\n def put(self, obj, score=None, encode_item=True):\n mainLog = self.make_logger(_logger, 'id={0}-{1}'.format(self.fifoName, self.get_pid()), method_name='put')\n if encode_item:\n # obj_serialized = json.dumps(obj, cls=PythonObjectEncoder)\n obj_serialized = self.encode(obj)\n else:\n obj_serialized = obj\n if score is None:\n score = time.time()\n retVal = self.fifo.put(obj_serialized, score)\n mainLog.debug('score={0}'.format(score))\n return retVal\n\n # enqueue by id, which is unique\n def putbyid(self, id, obj, score=None, encode_item=True):\n mainLog = self.make_logger(_logger, 'id={0}-{1}'.format(self.fifoName, self.get_pid()), method_name='putbyid')\n if encode_item:\n # obj_serialized = json.dumps(obj, cls=PythonObjectEncoder)\n obj_serialized = self.encode(obj)\n else:\n obj_serialized = obj\n if score is None:\n score = time.time()\n retVal = self.fifo.putbyid(id, obj_serialized, score)\n mainLog.debug('id={0} score={1}'.format(id, score))\n return retVal\n\n # dequeue to get the first fifo object\n def get(self, timeout=None, protective=False, decode_item=True):\n mainLog = self.make_logger(_logger, 'id={0}-{1}'.format(self.fifoName, self.get_pid()), method_name='get')\n object_tuple = self.fifo.get(timeout, protective)\n # retVal = json.loads(obj_serialized, object_hook=as_python_object)\n if object_tuple is None:\n retVal = None\n else:\n id, obj_serialized, score = object_tuple\n if obj_serialized is not None and decode_item:\n obj = self.decode(obj_serialized)\n else:\n obj = obj_serialized\n retVal = FifoObject(id, obj, score)\n mainLog.debug('called. protective={0}'.format(protective))\n return retVal\n\n # dequeue to get the last fifo object\n def getlast(self, timeout=None, protective=False, decode_item=True):\n mainLog = self.make_logger(_logger, 'id={0}-{1}'.format(self.fifoName, self.get_pid()), method_name='getlast')\n object_tuple = self.fifo.getlast(timeout, protective)\n # retVal = json.loads(obj_serialized, object_hook=as_python_object)\n if object_tuple is None:\n retVal = None\n else:\n id, obj_serialized, score = object_tuple\n if obj_serialized is not None and decode_item:\n obj = self.decode(obj_serialized)\n else:\n obj = obj_serialized\n retVal = FifoObject(id, obj, score)\n mainLog.debug('called. 
protective={0}'.format(protective))\n return retVal\n\n # get tuple of the first object and its score without dequeuing\n # If item is large un unnecessary to show int peek, set skip_item=True\n def peek(self, skip_item=False):\n mainLog = self.make_logger(_logger, 'id={0}-{1}'.format(self.fifoName, self.get_pid()), method_name='peek')\n object_tuple = self.fifo.peek(skip_item=skip_item)\n if object_tuple is None:\n retVal = None\n mainLog.debug('fifo empty')\n else:\n id, obj_serialized, score = object_tuple\n # retVal = (json.loads(obj_serialized, object_hook=as_python_object), score)\n if obj_serialized is None and score is None:\n retVal = FifoObject(None, None, None)\n else:\n if score is None:\n score = time.time()\n retVal = FifoObject(id, obj_serialized, score)\n mainLog.debug('score={0}'.format(score))\n return retVal\n\n # get tuple of the last object and its score without dequeuing\n def peeklast(self, skip_item=False):\n mainLog = self.make_logger(_logger, 'id={0}-{1}'.format(self.fifoName, self.get_pid()), method_name='peeklast')\n object_tuple = self.fifo.peeklast(skip_item=skip_item)\n if object_tuple is None:\n retVal = None\n mainLog.debug('fifo empty')\n else:\n id, obj_serialized, score = object_tuple\n # retVal = (json.loads(obj_serialized, object_hook=as_python_object), score)\n if obj_serialized is None and score is None:\n retVal = FifoObject(None, None, None)\n else:\n if score is None:\n score = time.time()\n retVal = FifoObject(id, obj_serialized, score)\n mainLog.debug('score={0}'.format(score))\n return retVal\n\n # get tuple of the object by id without dequeuing\n def peekbyid(self, id, temporary=False, skip_item=False):\n mainLog = self.make_logger(_logger, 'id={0}-{1}'.format(self.fifoName, self.get_pid()), method_name='peekbyid')\n object_tuple = self.fifo.peekbyid(id, temporary, skip_item=skip_item)\n if object_tuple is None:\n retVal = None\n mainLog.debug('fifo empty')\n else:\n id_gotten, obj_serialized, score = object_tuple\n # retVal = (json.loads(obj_serialized, object_hook=as_python_object), score)\n if obj_serialized is None and score is None:\n retVal = FifoObject(None, None, None)\n else:\n if score is None:\n score = time.time()\n retVal = FifoObject(id, obj_serialized, score)\n mainLog.debug('id={0} score={1} temporary={2}'.format(id, score, temporary))\n return retVal\n\n # remove objects by list of ids from temporary space, return the number of objects successfully removed\n def release(self, ids):\n mainLog = self.make_logger(_logger, 'id={0}-{1}'.format(self.fifoName, self.get_pid()), method_name='release')\n retVal = self.fifo.delete(ids)\n mainLog.debug('released {0} objects in {1}'.format(retVal, ids))\n return retVal\n\n # restore objects by list of ids from temporary space to fifo; ids=None to restore all objects\n def restore(self, ids=None):\n mainLog = self.make_logger(_logger, 'id={0}-{1}'.format(self.fifoName, self.get_pid()), method_name='restore')\n retVal = self.fifo.restore(ids)\n if ids is None:\n mainLog.debug('restored all objects')\n else:\n mainLog.debug('restored objects in {0}'.format(ids))\n return retVal\n\n\n# Special fifo base for non havester-agent\nclass SpecialFIFOBase(FIFOBase):\n # constructor\n def __init__(self, **kwarg):\n FIFOBase.__init__(self, **kwarg)\n self.fifoName = '{0}_fifo'.format(self.titleName)\n pluginConf = {}\n pluginConf.update( {'titleName': self.titleName} )\n pluginConf.update( {'module': harvester_config.fifo.fifoModule,\n 'name': harvester_config.fifo.fifoClass,} )\n pluginFactory = 
PluginFactory()\n self.fifo = pluginFactory.get_plugin(pluginConf)\n\n\n# Benchmark fifo\nclass BenchmarkFIFO(SpecialFIFOBase):\n titleName = 'benchmark'\n\n\n# monitor fifo\nclass MonitorFIFO(FIFOBase):\n titleName = 'monitor'\n\n # constructor\n def __init__(self, **kwarg):\n FIFOBase.__init__(self, **kwarg)\n self._initialize_fifo()\n\n def populate(self, seconds_ago=0, clear_fifo=False):\n \"\"\"\n Populate monitor fifo with all active worker chunks and timeNow as score from DB\n with modificationTime earlier than seconds_ago seconds ago\n object in fifo = [(queueName_1, [[worker_1_1], [worker_1_2], ...]), (queueName_2, ...)]\n \"\"\"\n if clear_fifo:\n self.fifo.clear()\n try:\n fifoMaxWorkersToPopulate = self.config.fifoMaxWorkersToPopulate\n except AttributeError:\n fifoMaxWorkersToPopulate = 2**32\n try:\n fifoMaxWorkersPerChunk = self.config.fifoMaxWorkersPerChunk\n except AttributeError:\n fifoMaxWorkersPerChunk = 500\n workspec_iterator = self.dbProxy.get_active_workers(fifoMaxWorkersToPopulate, seconds_ago)\n last_queueName = None\n workspec_chunk = []\n timeNow_timestamp = time.time()\n score = timeNow_timestamp\n for workspec in workspec_iterator:\n workspec.set_work_params({'lastCheckAt': timeNow_timestamp})\n if last_queueName is None:\n try:\n score = timegm(workspec.modificationTime.utctimetuple())\n except Exception:\n pass\n workspec_chunk = [[workspec]]\n last_queueName = workspec.computingSite\n elif workspec.computingSite == last_queueName \\\n and len(workspec_chunk) < fifoMaxWorkersPerChunk:\n workspec_chunk.append([workspec])\n else:\n self.put((last_queueName, workspec_chunk), score)\n try:\n score = timegm(workspec.modificationTime.utctimetuple())\n except Exception:\n pass\n workspec_chunk = [[workspec]]\n last_queueName = workspec.computingSite\n if len(workspec_chunk) > 0:\n self.put((last_queueName, workspec_chunk), score)\n\n def to_check_workers(self, check_interval=harvester_config.monitor.checkInterval):\n \"\"\"\n Justify whether to check any worker by the modificationTime of the first worker in fifo\n retVal True if OK to dequeue to check;\n retVal False otherwise.\n Return retVal, overhead_time\n \"\"\"\n mainLog = self.make_logger(_logger, 'id={0}-{1}'.format(self.fifoName, self.get_pid()), method_name='to_check_worker')\n retVal = False\n overhead_time = None\n timeNow_timestamp = time.time()\n peeked_tuple = self.peek(skip_item=True)\n if peeked_tuple is not None:\n # if False:\n # mainLog.warning('False. Got a null object but with score in FIFO')\n # try:\n # obj_gotten = self.get(timeout=1, protective=False, decode_item=False)\n # if obj_gotten is None:\n # mainLog.debug('Got nothing. Skipped')\n # elif obj_gotten.item is None:\n # mainLog.info('Removed a null object')\n # else:\n # self.put(obj_gotten.item, score=obj_gotten.score, encode_item=False)\n # mainLog.debug('Released an non-null object and put it back')\n # except Exception as _e:\n # mainLog.warning('Error when trying to remove a null object: {0} . Skipped'.format(_e))\n score = peeked_tuple.score\n overhead_time = timeNow_timestamp - score\n if overhead_time > 0:\n retVal = True\n if score < 0:\n mainLog.debug('True. Preempting')\n overhead_time = None\n else:\n mainLog.debug('True')\n mainLog.info('Overhead time is {0} sec'.format(overhead_time))\n else:\n mainLog.debug('False. Workers too young to check')\n mainLog.debug('Overhead time is {0} sec'.format(overhead_time))\n else:\n mainLog.debug('False. 
Got nothing in FIFO')\n        return retVal, overhead_time\n","sub_path":"pandaharvester/harvestercore/fifos.py","file_name":"fifos.py","file_ext":"py","file_size_in_byte":14680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}\n{"seq_id":"223948892","text":"import os, stat\nallFiles = os.listdir('.')\nscripts = [name for name in allFiles if name.endswith(('.sh', '.py'))]\nprint(scripts)\n\n# show file info\nprint(os.stat('a.sh'))\n# os.stat('e.py').st_mode  shows the file permission bits\n# oct(os.stat('e.py').st_mode)  shows the file permissions in human-readable octal form\n# os.chmod('e.py', os.stat('e.py').st_mode | stat.S_IXUSR)  adds execute permission for the owner\n","sub_path":"第三章/2nd.py","file_name":"2nd.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}\n{"seq_id":"232201801","text":"from weather import Weather, Unit\nimport datetime\nimport click\n\n\ndef get_scale_info(scale):\n    if scale == 'c':\n        unit = Unit.CELSIUS\n        scale_name = 'celsius'\n    elif scale == 'f':\n        unit = Unit.FAHRENHEIT\n        scale_name = 'fahrenheit'\n    return unit, scale_name\n\n\ndef forecast_length(forecast):\n    split_forecast = forecast.split(\"+\")\n    if len(split_forecast) == 1:\n        forecast_len = 0\n    else:\n        forecast_len = int(split_forecast[1])\n    return forecast_len\n\n\ndef print_weather_by_city(city, forcasts_list, scale_name, forecast_len = 0):\n    print('The weather in {0} today is {1} with temperatures trailing from {2} - {3} {4}'\n          .format(city, forcasts_list[0].text, forcasts_list[0].low, forcasts_list[0].high, scale_name))\n\n    if forecast_len > 0:\n        print('Forecast for the next {0} days:'.format(forecast_len))\n        for i in range(1,forecast_len + 1):\n            print('{0} {1} with temperatures trailing from {2} - {3} {4}'\n                  .format(forcasts_list[i].date, forcasts_list[i].text, forcasts_list[i].low, forcasts_list[i].high, scale_name))\n\n\n@click.command()\n@click.option('--city', help='Name of the city')\n@click.option('--forecast', default='TODAY', help='TODAY or TODAY+n for future forecast, where n = [1-9]',show_default=True)\n@click.option('--scale', type=click.Choice(['c', 'f']), default='c', help='Temperature unit')\n\ndef main(city, forecast, scale):\n    unit, scale_name = get_scale_info(scale)\n    weather = Weather(unit)\n    city_weather_info = weather.lookup_by_location(city)\n    forecasts_list = city_weather_info.forecast\n    forecast_len = forecast_length(forecast)\n    print_weather_by_city(city, forecasts_list, scale_name, forecast_len)\n\nif __name__ == '__main__':\n    main()","sub_path":"home-assignments/session2/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}\n{"seq_id":"161121974","text":"import numpy as np\nfrom vision import *\nimport cv2\nimport matplotlib.pyplot as plt\n\nclass CameraPlayer():\n\t\"\"\" \n\tPlayer object that deals with a given image and the Amazons game logic.\n\tNote that the board matrix coordinates differ from the Amazons\n\tboard, i.e., the rows are flipped.\n\tAlso, note that the board shown on PC is different from the real-world\n\tboard, i.e., the cols are flipped.\n\t\"\"\"\n\tdef __init__(self, params, mac, edge, delay=2):\n\t\t\"\"\"\n\t\t:param: PARAMS: Classifier parameters.\n\t\t:param: MAC: MAC address.\n\t\t:param: EDGE: Edge coordinates.\n\t\t:param: DELAY: Delay time for camera capturing.\n\t\t\"\"\"\n\t\tself.params = params\n\t\tself.video = 'http://admin:admin@%s:8081' % mac\n\t\tself.delay = delay
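\n\t\t# NOTE (added): board cells hold 0 for empty, 1/2 for the two sides' queens, and 3 for arrows (see update_board below)\n\t\tself.board = np.zeros((SIZE, 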
SIZE))\n\t\tself.edge = edge\n\t\t# BOARD init. Keep it synchronized with Java.\n\t\tif SIZE == 10:\n\t\t\tself.board[6][0] = 1\n\t\t\tself.board[6][9] = 1\n\t\t\tself.board[9][3] = 1\n\t\t\tself.board[9][6] = 1\n\t\t\tself.board[3][0] = 2\n\t\t\tself.board[3][9] = 2\n\t\t\tself.board[0][3] = 2\n\t\t\tself.board[0][6] = 2\n\t\telse:\n\t\t\tif SIZE != 7:\n\t\t\t\tprint('error: unsupported SIZE encountered')\n\t\t\t\texit(-1)\n\t\t\tself.board[2][0] = 2\n\t\t\tself.board[2][6] = 2\n\t\t\tself.board[0][2] = 2\n\t\t\tself.board[0][4] = 2\n\t\t\tself.board[4][0] = 1\n\t\t\tself.board[4][6] = 1\n\t\t\tself.board[6][2] = 1\n\t\t\tself.board[6][4] = 1\n\t\tself.turn = 0 # Not used.\n\t\t\n\tdef get_img(self):\n\t\tdelay = self.delay\n\t\tcapture = cv2.VideoCapture(self.video)\n\t\tprint('delay for %f seconds...' % delay)\n\t\tcv2.namedWindow('camera')\n\t\tdelay = delay * 1000\n\t\tperiod = 0\n\t\twhile period < delay:\n\t\t\tsuccess, img = capture.read() # Have to read it continuously \n\t\t\t # to keep the camera activated,\n\t\t\t # else the picture is blackened.\n\t\t\tif not success:\n\t\t\t\tprint('video capturing failed')\n\t\t\t\tcapture.release()\n\t\t\t\texit(0)\n\t\t\tcv2.imshow('camera', img)\n\t\t\tcv2.waitKey(100)\n\t\t\tperiod += 100\n\t\tcv2.destroyWindow('camera')\n\t\tsuccess, img = capture.read()\n\t\tif not success:\n\t\t\tprint('video capturing failed')\n\t\t\tcapture.release()\n\t\t\texit(0)\n\t\tcapture.release()\n\t\treturn img\n\t\t\n\t\t\n\tdef parse_img(self, img):\n\t\t\"\"\"\n\t\tReturns a board matrix given a image.\n\t\t\"\"\"\n\t\tboard = find_board(img, thresh=(115, 170), edge=self.edge, test=False)\n\t\tddw = self.params['ddw']\n\t\tddh = self.params['ddh']\n\t\tbins = self.params['bins']\n\t\though_vote = self.params['hough_vote']\n\t\tvectors = self.params['vectors']\n\t\tcls = CellClassifier(board, ddw=ddw, ddh=ddh, bins=bins, \n\t\t hough_vote=hough_vote, vectors=vectors)\n\t\tcls.parse_board()\n\t\treturn cls.board\n\t\t\n\tdef update_board(self, movement):\n\t\t\"\"\" \n\t\tUpdate my board given a movement. 
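For example, the (illustrative) movement string 'd1-d4(g7)' moves a queen from d1 to d4 and then shoots an arrow to g7, using the file-letter/rank-number encoding of parse_string. 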
Check the movement before using it.\n\t\t\"\"\"\n\t\tstart, temp = movement.split('-')\n\t\tend, spear = temp.split('(')\n\t\tspear = spear[:-1]\n\t\t\n\t\tx, y = self.parse_string(start)\n\t\tside = int(self.board[SIZE - 1 - y][x])\n\t\tif side != 1 and side != 2: # Start is not a queen.\n\t\t\tprint('Illegal movement!')\n\t\t\texit(-1)\n\t\tself.board[SIZE - 1 - y][x] = 0\n\t\tx, y = self.parse_string(end)\n\t\tself.board[SIZE - 1 - y][x] = side\n\t\tx, y = self.parse_string(spear)\n\t\tself.board[SIZE - 1 - y][x] = 3\n\t\treturn movement\n\t\t\n\tdef check_legal_move(self, move):\n\t\t\"\"\"\n\t\tA movement is legal iff start to end is unblocked and \n\t\tend to spear is unblocked given start as empty.\n\t\t\"\"\"\n\t\tstart, temp = move.split('-')\n\t\tend, spear = temp.split('(')\n\t\tspear = spear[:-1]\n\t\tstart = self.parse_string(start)\n\t\tend = self.parse_string(end)\n\t\tspear = self.parse_string(spear)\n\t\treturn self.isUnblockedMove(start, end, None) \\\n\t\t       and self.isUnblockedMove(end, spear, start)\n\t\n\tdef parse_board(self, board, me=1):\n\t\t\"\"\"\n\t\tME represents the human player.\n\t\tReturns whether the new movement is legal, along with the parsed movement.\n\t\tThe movement is parsed by simply comparing the differences after \n\t\tmaking sure the board is legal.\n\t\tNote that SELF.BOARD is the old one, while BOARD is the one prepared to update.\n\t\t\"\"\"\n\t\topponent = 1 if me == 2 else 2\n\t\tif board[board == me].shape[0] != 4 \\\n\t\t   or board[board == opponent].shape[0] != 4:\n\t\t\tprint('queens lost!')\n\t\t\treturn False, None\n\t\t# Old spears should be left unmoved.\n\t\tif not np.all(board[self.board == 3] == 3):\n\t\t\tprint('spear inconsistent!')\n\t\t\treturn False, None\n\t\t# Opponent should stay unmoved.\n\t\tif not np.all(board[self.board == opponent] == opponent):\n\t\t\tprint('opponent inconsistent!')\n\t\t\treturn False, None\n\t\t# Find new spear.\n\t\tspears = list(np.argwhere(board == 3)) # Bugs happen if it is an ndarray instead of a list.\n\t\told_spears = list(np.argwhere(self.board == 3))\n\t\tspears = [[x[0], x[1]] for x in spears]\n\t\told_spears = [[x[0], x[1]] for x in old_spears]\n\t\tspear = None\n\t\tdifference = 0\n\t\tfor sp in spears:\n\t\t\tif sp not in old_spears:\n\t\t\t\tspear = sp\n\t\t\t\tdifference += 1\n\t\tif difference != 1:
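\n\t\t\t# NOTE (added): exactly one new arrow must appear on the board each turn\n\t\t\tprint('spear num error! 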
difference:', difference)\n\t\t\treturn False, None\n\t\t# Find the moved queen.\t\t\t\n\t\tmy_queens = list(np.argwhere(board == me))\n\t\tmy_old_queens = list(np.argwhere(self.board == me))\n\t\tmy_queens = [[x[0], x[1]] for x in my_queens]\n\t\tmy_old_queens = [[x[0], x[1]] for x in my_old_queens]\n\t\tdifference = 0\n\t\tend = None\n\t\tfor q in my_queens:\n\t\t\tif q not in my_old_queens:\n\t\t\t\tend = q\n\t\t\t\tdifference += 1\n\t\tif difference > 1:\n\t\t\tprint('you CHEATED!')\n\t\t\treturn False, None\n\t\t\t\n\t\tstart = None\n\t\tfor q in my_old_queens:\n\t\t\tif q not in my_queens:\n\t\t\t\tstart = q\n\t\tassert end and start and spear\n\t\t\n\t\t### transform coordination ###\n\t\tstart = [start[1], SIZE - 1 - start[0]]\n\t\tend = [end[1], SIZE - 1 - end[0]]\n\t\tspear = [spear[1], SIZE - 1 - spear[0]]\n\t\t### encode movement ###\n\t\tstart_str = chr(start[0] + 97) + str(start[1] + 1)\n\t\tend_str = chr(end[0] + 97) + str(end[1] + 1)\n\t\tspear_str = chr(spear[0] + 97) + str(spear[1] + 1)\n\t\tmove = start_str + '-' + end_str + '(' + spear_str + ')'\n\t\tresult = self.check_legal_move(move)\n\t\tif result:\n\t\t\treturn True, move\n\t\treturn False, None\n\t\t\n\tdef play(self, img=None, software=False):\n\t\t\"\"\"\n\t\t:param: IMG: Use the img instead of camera capturing if it is not none.\n\t\t:param: SOFTWARE: Manually input the movement iff true.\n\t\t\"\"\"\n\t\tif software:\n\t\t\tmove = input('Pure PC mode:')\n\t\t\tif self.check_legal_move(move): return self.update_board(move)\n\t\t\telse:\n\t\t\t\tprint('illegal input, retrying')\n\t\t\t\treturn self.play(img, software)\n\t\tif img is None:\n\t\t\timg = self.get_img()\n\t\t\t\n\t\tboard = self.parse_img(img)\n\t\tlegal, move = self.parse_board(board)\n\t\tif not legal:\n\t\t\tplt.subplot(1, 2, 1)\n\t\t\tplt.imshow(self.board)\n\t\t\tplt.subplot(1, 2, 2)\n\t\t\tplt.imshow(board)\n\t\t\tplt.show()\n\t\t\tcmd = input('how to deal with it: (manual / retry)')\n\t\t\tif cmd == 'retry': \n\t\t\t\timg = self.get_img()\n\t\t\t\treturn self.play(img, software)\n\t\t\telif cmd == 'manual':\n\t\t\t\tmove = input('type your movement here:')\n\t\t\t\tif self.check_legal_move(move): return self.update_board(move)\n\t\t\t\telse:\n\t\t\t\t\timg = self.get_img()\n\t\t\t\t\tprint('illegal input, retrying')\n\t\t\t\t\treturn self.play(img, software)\n\t\t\telif cmd == 'quit': exit(0)\n\t\t\telse:\n\t\t\t\tprint('illegal input, retrying')\n\t\t\t\timg = self.get_img()\n\t\t\t\treturn self.play(img, software)\n\t\treturn self.update_board(move)\n\t\t \n ### Adapted from Java program, these are lower level functions ###\t\t\n\tdef parse_string(self, string):\n\t\tx = ord(string[0]) - ord('a')\n\t\ty = int(string[1:]) - 1\n\t\treturn x, y\n\t\t\t\n\tdef isUnblockedMove(self, start, end, asempty=None):\n\t\tcolor = 0\n\t\tif asempty:\n\t\t\tcolor = self.board[SIZE - 1 - asempty[1]][asempty[0]]\n\t\t\tself.board[SIZE - 1 - asempty[1]][asempty[0]] = 0\n\t\tresult = self.isLegal(start, end)\n\t\tif asempty:\n\t\t\tself.board[SIZE - 1 - asempty[1]][asempty[0]] = color\n\t\treturn result\n\t\t\n\tdef isLegal(self, start, end):\n\t\t\"\"\"\n\t\tCheck if routes from start to end is unblocked.\n\t\t\"\"\"\n\t\tdirections = [\n\t\t [0, 1], [1, 1], [1, 0], [1, -1],\n\t\t [0, -1], [-1, -1], [-1, 0], [-1, 1]\n\t\t] # [dx, dy]\n\t\tdx, dy = [e - s for e, s in zip(end, start)]\n\t\tdx = dx / abs(dx) if dx != 0 else 0\n\t\tdy = dy / abs(dy) if dy != 0 else 0\n\t\tdir = -1\n\t\tfor i in range(8):\n\t\t\tif [dx, dy] == directions[i]: \n\t\t\t\tdir = 
i\n\t\t\t\tbreak\n\t\tif dir == -1 or self.check_beset(start) is False:\n\t\t\treturn False\n\t\tcolor = self.board[SIZE - 1 - start[1]][start[0]]\n\t\tminx, maxx = min(start[0], end[0]), max(start[0], end[0])\n\t\tminy, maxy = min(start[1], end[1]), max(start[1], end[1])\n\t\tself.board[SIZE - 1 - start[1]][start[0]] = 0 # temporarily set it empty\n\t\tresult = True\n\t\tif dir % 4 == 0:\n\t\t\tfor i in range(miny, maxy + 1):\n\t\t\t\tif self.board[SIZE - 1 - i][minx] != 0:\n\t\t\t\t\tresult = False\n\t\telif dir % 4 == 2:\n\t\t\tfor i in range(minx, maxx + 1):\n\t\t\t\tif self.board[SIZE - 1 - miny][i] != 0:\n\t\t\t\t\tresult = False\n\t\telif dir % 4 == 1:\n\t\t\tfor i, j in zip(range(minx, maxx + 1), range(miny, maxy + 1)):\n\t\t\t\tif self.board[SIZE - 1 - j][i] != 0:\n\t\t\t\t\tresult = False\n\t\telif dir % 4 == 3:\n\t\t\tfor i, j in zip(range(minx, maxx + 1), \n\t\t\t                reversed(range(miny, maxy + 1))):\n\t\t\t\tif self.board[SIZE - 1 - j][i] != 0:\n\t\t\t\t\tresult = False\n\t\tself.board[SIZE - 1 - start[1]][start[0]] = color\n\t\treturn result\n\t\t\n\t\n\tdef check_beset(self, pos):\n\t\t\"\"\"\n\t\tCheck if POS is surrounded by solid cells.\n\t\t\"\"\"\n\t\tx, y = pos\n\t\tnum = 0\n\t\tblocks = 0\n\t\tfor i in range(max(0, x-1), min(SIZE, x+2)):\n\t\t\tfor j in range(max(0, y-1), min(SIZE, y+2)):\n\t\t\t\tif i == x and j == y: continue\n\t\t\t\tnum += 1\n\t\t\t\tif self.board[SIZE - 1 - j][i] != 0:\n\t\t\t\t\tblocks += 1\n\t\tif blocks == num: return False\n\t\treturn True\n\ndef test(cam): # Just for testing, don't mind.\n\t# test 1: pass\n\t#img = cv2.imread('../python_vision/2/frames_2.jpg')\n\t#board = cam.parse_img(img)\n\t#plt.subplot(1, 2, 1)\n\t#plt.imshow(cam.board)\n\t#plt.subplot(1, 2, 2)\n\t#plt.imshow(board)\n\t#plt.show()\n\t# test 2: replay frames 3-12, alternating sides (odd frames: me=1, even frames: me=2)\n\tfor frame in range(3, 13):\n\t\timg = cv2.imread('../python_vision/2/frames_%d.jpg' % frame)\n\t\tboard = cam.parse_img(img)\n\t\tlegal, move = cam.parse_board(board, me=1 if frame % 2 else 2)\n\t\tprint(legal, move)\n\t\tplt.subplot(1, 2, 1)\n\t\tplt.imshow(cam.board)\n\t\tplt.subplot(1, 2, 2)\n\t\tplt.imshow(board)\n\t\tplt.show()\n\t\tcam.update_board(move)\n\nif __name__ == '__main__':\n\tvision_params = {\n\t    'ddw': 0,\n\t    'ddh': 0,\n\t    'bins': 16,\n\t    'hough_vote': 15,\n\t    'vectors': None # this must be changed\n\t}\n\tcam = CameraPlayer(vision_params, '0.0.0.0', (170, 224, 392, 461))\n\ttest(cam)\n\t\n","sub_path":"amazons/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":12453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}\n{"seq_id":"60464023","text":"#!/usr/bin/env python\nfrom bs4 import BeautifulSoup\nimport urllib2\nimport csv\nimport json\n\ndef load_json(file):\n    with open(file) as data_file:\n        data = json.load(data_file)\n    return data\n\ndef hit_wiki():\n    pres_dict = load_json('../data/presidents.json')\n    failures = []\n    for p in pres_dict:\n        p = clean_pres(p)\n        try:\n            slug = p['president'].replace(' ', '_')\n            url = 'http://en.wikipedia.org/wiki/'+slug\n            usock = urllib2.urlopen(url)\n            html_data = usock.read()\n            usock.close()\n            soup = BeautifulSoup(html_data)\n            images = soup.find(\"table\", class_=\"infobox vcard\").find_all('img')\n            p['image'] = images[0].get('src').replace('//', '')\n        except:\n            failures.append(p['president'])\n\n    dump_to_json(pres_dict)\n\ndef clean_pres(p):\n    if p[\"president\"] == \"George Bush\":
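\n        # NOTE (added): normalize to the 41st president's full name, presumably so the Wikipedia slug resolves to an article rather than a disambiguation page\n        p[\"president\"] = \"George H. W. 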
Bush\"\n return p\n\ndef dump_to_json(data_dict):\n with open('../data/presidents_img.json', 'w') as outfile:\n json.dump(data_dict, outfile)\n\nhit_wiki()\n","sub_path":"python/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"609881541","text":"import torch\nfrom typing import List, Tuple, Optional\nfrom .adam import Adam\n\ndef get_group_params(\n named_parameters: List[Tuple[str, torch.nn.Parameter]],\n weight_decay: float,\n no_decay: Optional[List[str]] = None\n):\n \"\"\"\n package the parameters in 2 groups for proper weight decay\n :param named_parameters: named parameters list\n :param weight_decay: weight decay to use\n :param no_decay: list of parameter with no decay\n :return:\n \"\"\"\n optimizer_grouped_parameters = [\n dict(\n params=[p for n, p in named_parameters if not any(nd in n for nd in no_decay)],\n weight_decay=weight_decay\n ), dict(\n params=[p for n, p in named_parameters if any(nd in n for nd in no_decay)],\n weight_decay=0.\n )\n ]\n return optimizer_grouped_parameters\n\ndef get_optimizer(**kwargs) -> torch.optim.Optimizer:\n method = kwargs.get(\"method\")\n params = kwargs.get(\"params\")\n\n if method == \"sgd\":\n optim_cls = torch.optim.sgd.SGD\n optim_params = dict(\n params=params,\n lr=kwargs.get(\"lr\", 0.001)\n )\n elif method == \"rmsprop\":\n optim_cls = torch.optim.rmsprop.RMSprop\n optim_params = dict(\n params=params,\n lr=kwargs.get(\"lr\", 0.001),\n momentum=kwargs.get(\"momentum\", 0.)\n )\n elif method == \"adam\":\n optim_cls = Adam\n optim_params = dict(\n params=params,\n lr=kwargs.get(\"lr\", 0.001),\n )\n else:\n raise NotImplementedError(f\"method {method} not implemented yet\")\n\n return optim_cls(**optim_params)","sub_path":"src/utils/optim/optimizers.py","file_name":"optimizers.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"313345563","text":"\"\"\"Implementation of Pollsters for ebay.\"\"\"\n__author__ = 'mizeng'\n\nfrom ceilometer.compute import plugin\nfrom ceilometer.compute.pollsters import util\nfrom ceilometer_ebay.compute.virt import inspector as virt_inspector\nfrom ceilometer.openstack.common.gettextutils import _ # noqa\nfrom ceilometer.openstack.common import log\nfrom ceilometer import sample\n\nLOG = log.getLogger(__name__)\n\n\nclass CPUPollster(plugin.ComputePollster):\n\n def get_samples(self, manager, cache, resources):\n for instance in resources:\n LOG.info(_('checking instance %s'), instance.id)\n try:\n cpu_info = manager.inspector.inspect_cpus(instance.id)\n LOG.info(_(\"CPUTIME USAGE: %(instance)s %(time)d\"),\n {'instance': instance.__dict__,\n 'time': cpu_info.time})\n cpu_num = {'cpu_number': cpu_info.number}\n yield util.make_sample_from_instance(\n instance,\n name='cpu',\n type=sample.TYPE_CUMULATIVE,\n unit='ns',\n volume=cpu_info.time,\n additional_metadata=cpu_num,\n )\n except virt_inspector.InstanceNotFoundException as err:\n # Instance was deleted while getting samples. 
Ignore it.\n                LOG.debug(_('Exception while getting samples %s'), err)\n            except NotImplementedError:\n                # Selected inspector does not implement this pollster.\n                LOG.debug(_('Obtaining CPU time is not implemented for %s'\n                            ), manager.inspector.__class__.__name__)\n            except Exception as err:\n                LOG.error(_('could not get CPU time for %(id)s: %(e)s') % (\n                    {'id': instance.id, 'e': err}))\n                LOG.exception(err)\n\n\nclass CPUUtilPollster(plugin.ComputePollster):\n\n    def get_samples(self, manager, cache, resources):\n        self._inspection_duration = self._record_poll_time()\n        for instance in resources:\n            LOG.debug(_('Checking CPU util for instance %s'), instance.id)\n            try:\n                cpu_info = manager.inspector.inspect_cpu_util(\n                    instance, self._inspection_duration)\n                LOG.debug(_(\"CPU UTIL: %(instance)s %(util)d\"),\n                          ({'instance': instance.__dict__,\n                            'util': cpu_info.util}))\n                yield util.make_sample_from_instance(\n                    instance,\n                    name='cpu_util',\n                    type=sample.TYPE_GAUGE,\n                    unit='%',\n                    volume=cpu_info.util,\n                )\n            except virt_inspector.InstanceNotFoundException as err:\n                # Instance was deleted while getting samples. Ignore it.\n                LOG.debug(_('Exception while getting samples %s'), err)\n            except NotImplementedError:\n                # Selected inspector does not implement this pollster.\n                LOG.debug(_('Obtaining CPU Util is not implemented for %s'\n                            ), manager.inspector.__class__.__name__)\n            except Exception as err:\n                LOG.error(_('Could not get CPU Util for %(id)s: %(e)s'), (\n                    {'id': instance.id, 'e': err}))\n","sub_path":"ceilometer_ebay/compute/pollsters/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}\n{"seq_id":"177205194","text":"# -*- coding: utf-8 -*-\n\nfrom nose.tools import raises\n\nfrom rod.parser import ParserConf as conf\nfrom rod.exception import ParserConfFileNotFound\n\n\ndef test_parser_conf_init():\n    conf()\n\n    assert conf._ParserConf__conf_instance is not None\n    assert conf.get('xpath.maintenance_msg') is not None\n\n    cf = conf.get_config()\n    assert cf.get('xpath', 'maintenance_msg') is not None\n\n\n\"\"\"\nDefault config file is conf/parser.ini\n\"\"\"\ndef test_parser_conf_default_conf_file():\n\n    conf('conf/parser.ini')\n    assert conf._ParserConf__conf_instance is not None\n\n\n\"\"\"\nWhen invalid file path is given,\nParserConf raises ParserConfFileNotFound exception.\n\"\"\"\n@raises(ParserConfFileNotFound)\ndef test_parser_conf_invalid_conf_file():\n    conf('')\n","sub_path":"tests/test_parser_conf.py","file_name":"test_parser_conf.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}\n{"seq_id":"513934309","text":"#!/usr/bin/env python\nimport sys\nsys.path.append(\"/Users/bdaudert/EE/nasa-roses-datastore\")\n\nimport logging\nimport json\nimport urllib2\nimport hashlib\nfrom datetime import datetime\n\nimport ee\n# Needed to add data to datastore from outside app engine\nfrom google.cloud import datastore\n\n\nimport config\nimport Utils\n\n# Set logging level\nlogging.basicConfig(level=logging.DEBUG)\n\n\ndef populate_datastore(region, ds, et_model, compute=True):\n    yr_range = config.statics['all_years'][ds]\n    for yr in range(int(yr_range[0]), int(yr_range[1])):\n        year = str(yr)\n        msg = 'PROCESSING Region/Year/Dataset/Model ' + region + '/' + year + '/' + ds + '/' + et_model\n        logging.info(msg)\n        ET_helper = ET_Util(region, year, ds, et_model)
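\n        # NOTE (added): returns one DATA entity per (feature, variable) and one METADATA entity per feature; both kinds are written below\n        data_entities, meta_entities = 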
ET_helper.get_data_and_set_db_entities(compute=compute)\n        ET_helper.add_to_db(data_entities)\n        ET_helper.add_to_db(meta_entities)\n\n\nclass ET_Util(object):\n    '''\n    Computes ET statistics for all temporal resolutions\n    Args:\n        :region Unique ID of geojson object, e.g. USFields\n        :geoFName geojson file name\n        :year year of geojson dataset, might be ALL if not USFields\n              USField geojsons change every year\n        :dataset MODIS, Landsat or gridMET\n        :et_model Evapotranspiration model, e.g. SIMS, SSEBop, METRIC\n    '''\n    def __init__(self, region, year, dataset, et_model):\n        self.region = region\n        self.year = year\n        if self.region in ['Mason', 'US_fields']:\n            self.geoFName = region + '_' + year + '_GEOM.geojson'\n        else:\n            self.geoFName = region + '_GEOM.geojson'\n        self.dataset = dataset\n        self.et_model = et_model\n        self.missing_value = -9999\n        self.geo_bucket_url = config.GEO_BUCKET_URL\n        self.data_bucket_url = config.DATA_BUCKET_URL\n        # Needed to add data to datastore from outside app engine\n        self.client = datastore.Client(config.PROJECT_ID)\n\n    def read_data_from_bucket(self, fl_path):\n        contents = json.load(urllib2.urlopen(fl_path))\n        return contents\n\n    def get_collection(self, t_res):\n        '''\n        Gets the ee collection (by name)\n        :param dataset: MODIS or LANDSAT\n        :param model: et model: SSEBop, SIMS, METRIC etc\n        :return: ee.ImageCollection\n        '''\n        ds = self.dataset\n        m = self.et_model\n        coll_name = config.statics['ee_coll_name'][ds][m][t_res]\n        logging.info('EE CALL: ee.ImageCollection({})'.format(coll_name))\n        coll = ee.ImageCollection(coll_name)\n        return coll\n\n    def filter_coll_by_dates(self, coll, dS_str, dE_str):\n        '''\n        Gets the ee collection (by name and tem_res)\n        and filters by variable and start/end dates\n\n        :param coll ee.ImageCollection\n        :param variable:\n            \"et\": \"Actual ET\",\n            \"etrf\": \"Fractional ET\",\n            \"etr\": \"Reference ET\",\n            \"pr\": \"Precipitation\"\n        :param dS_str start date, format yyyy-mm-dd\n        :param dE_str end date, format yyyy-mm-dd\n        :return: ee.ImageCollection filtered by variable and dates\n        '''\n        dS_obj = ee.Date(dS_str, 'GMT')\n        dE_obj = ee.Date(dE_str, 'GMT')\n        # logging.debug('EE CALL: collection.filterDate({}, {})'\n        #               .format(dS_str, dE_str))\n        f_coll = coll.map(lambda x: x.double())\\\n            .filterDate(dS_obj, dE_obj.advance(1, 'day'))\n        return f_coll\n\n    def filter_coll_by_var(self, coll, variable):\n        '''\n        Gets the ee collection (by name and tem_res)\n        and filters by variable and start/end dates\n\n        :param coll ee.ImageCollection\n        :param variable:\n            \"et\": \"Actual ET\",\n            \"etrf\": \"Fractional ET\",\n            \"etr\": \"Reference ET\",\n            \"pr\": \"Precipitation\"\n        :return: ee.ImageCollection filtered by variable\n        '''\n        # logging.debug('EE CALL: collection.select({})'.format(variable))\n        return coll.select([variable], [variable])\n\n    def reduce_collection_to_img(self, coll, stat):\n        '''\n        Reduces the ee.ImageCollection to a single ee image by applying\n        the statistic stat\n\n        :param coll ee.ImageCollection\n        :param stat statistic: max, min, mean, median\n        :return: ee.Image\n        '''\n\n        if stat == 'Median':\n            img = coll.median()\n        elif stat == 'Mean':\n            img = coll.mean()\n        elif stat == 'Max':\n            img = coll.max()\n        elif stat == 'Min':\n            img = coll.min()\n        elif stat == 'Total':\n            img = coll.sum()\n        else:\n            img = coll.mean()\n        return img\n\n    def set_meta_properties(self, geo_props, geom):\n        '''\n        Populates metadata from the geo properties\n        Defined in the geojson data file\n        '''\n        props = {}\n        for prop in config.statics['geo_meta_cols'][self.region]:
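\n            # NOTE (added): copy only the whitelisted geojson properties into the METADATA entity\n            if prop in 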
geo_props.keys():\n props[prop] = geo_props[prop]\n return props\n\n def compute_et_stats(self, coll, var, geom):\n '''\n Computes annual, seasonal (April - Sept) and monthly et stats\n :param coll:\n :param var:\n :param geom:\n :return:\n '''\n def average_over_region(img):\n '''\n Averages the ee.Image over all pixels of ee.Geometry\n '''\n reduced_image_data = img.reduceRegion(\n ee.Reducer.mean(),\n geometry=geom,\n scale=1000,\n tileScale=1,\n crs='EPSG:4326',\n crsTransform=None,\n bestEffort=True\n )\n return ee.Feature(None, reduced_image_data)\n\n etdata = {}\n imgs = []\n for res in config.statics['start_end_mon_days_by_res'].keys():\n # logging.info('PROCESSING STATISTIC ' + res)\n # Filer collection by date\n dS_str = str(self.year) + '-' +\\\n config.statics['start_end_mon_days_by_res'][res][0]\n dE_str = str(self.year) + '-' +\\\n config.statics['start_end_mon_days_by_res'][res][1]\n coll_t = self.filter_coll_by_dates(coll, dS_str, dE_str)\n temporal_stat = config.statics['t_stat_by_var'][var]\n img = self.reduce_collection_to_img(coll_t, temporal_stat)\n # feats = ee.FeatureCollection(average_over_region(img))\n imgs.append(img)\n ee_imgs = ee.ImageCollection(imgs)\n feats = ee.FeatureCollection(ee_imgs.map(average_over_region))\n\n try:\n f_data = feats.getInfo()\n except Exception as e:\n f_data = {'features': []}\n logging.error(e)\n\n for res_idx, res in enumerate(config.statics['start_end_mon_days_by_res'].keys()):\n if 'features' not in f_data.keys() or not f_data['features']:\n etdata['data_' + res] = -9999\n continue\n try:\n feat = f_data['features'][res_idx]\n except:\n etdata['data_' + res] = -9999\n continue\n\n if 'properties' not in feat.keys():\n etdata['data_' + res] = -9999\n continue\n try:\n val = feat['properties'][var + '_' + res]\n etdata['data_' + res] = round(val, 4)\n except:\n etdata['data_' + res] = -9999\n continue\n return etdata\n\n def set_db_data_entity(self, UNIQUE_ID, feat_idx, etdata, var):\n '''\n sets up datastore client and datastore entity belonging to DATA\n Args:\n UNIQUE_ID,: unique identity of the feature, used to define the db key\n f_idx: feature index, need this to query for multiple features\n etdata: dictionary of etdata\n Returns:\n datstore entitity\n '''\n # Instantiates a client and the datastore kind DATA\n db_key = self.client.key('DATA', UNIQUE_ID,)\n entity = datastore.Entity(key=db_key)\n entity.update({\n 'feat_idx': feat_idx,\n 'region': self.region,\n 'year': int(self.year),\n 'dataset': self.dataset,\n 'et_model': self.et_model,\n 'variable': var\n })\n # Set the etdata\n for key, val in etdata.iteritems():\n entity.update({\n key: val\n })\n return entity\n\n def set_db_metadata_entity(self, UNIQUE_ID, feat_idx, meta_props):\n '''\n sets up datastore client and datastore entity belonging to METADATA\n Args:\n UNIQUE_ID,: unique identity of the feature, used to define the db key\n f_idx: feature index, need this to query for multiple features\n etdata: dictionary of etdata\n Returns:\n datstore entitity\n '''\n # Instantiates a client and the datastore kind DATA\n\n db_key = self.client.key('METADATA', UNIQUE_ID, )\n entity = datastore.Entity(key=db_key)\n entity.update({\n 'feat_idx': feat_idx,\n 'region': self.region,\n 'year': int(self.year)\n })\n # Set the metadata\n for key, val in meta_props.iteritems():\n entity.update({\n key: val\n })\n return entity\n\n\n def add_to_db(self, entity_list):\n '''\n Adds multiple data to datastore via the datastore client\n NOTES:\n can be run outside of app engine\n we 
can only add 500 entries to db at a time\n        '''\n        ent_len = len(entity_list)\n        num_chunks = ent_len // 500\n        if ent_len % 500 != 0:\n            end_chunk_len = ent_len % 500\n            num_chunks += 1\n        num_added = 0\n        count = 0\n        while num_added < ent_len:\n            count +=1\n            logging.info('ADDING CHUNK {0} of {1}'.format(str(count), str(num_chunks)))\n            start = num_added\n            end = start + 500\n            if end > ent_len:\n                end = start + end_chunk_len\n            ents_to_add = entity_list[start:end]\n            self.client.put_multi(ents_to_add)\n            num_added = end\n\n    def get_data_and_set_db_entities(self, compute=True):\n        '''\n        Gets geo features from geojson file\n        and computes the et stats for all variables\n        and temporal resolutions\n\n        if compute is True, we compute the et stats in EE\n        if compute is False, we read the et data from a local\n        data file (et data was provided in the data file)\n        '''\n        # FIX ME: add more vars as data comes online\n        # MODIS SSEBop only has et right now\n        t_res_list = config.statics['all_t_res']\n        var_list = ['et']\n        # Read the geo data from the bucket\n        fl_path = self.geo_bucket_url + self.geoFName\n        geo_data = self.read_data_from_bucket(fl_path)\n        if 'features' not in geo_data.keys():\n            logging.error('NO DATA FOUND IN BUCKET, FILE: ' + self.geoFName)\n\n        data_entities = []\n        meta_entities = []\n        if not compute:\n            # Get the etdata from the local file\n            fl_name = self.region + '_' + self.year + '_DATA.json'\n            fl_path = self.data_bucket_url + self.et_model + '/' + fl_name\n            j_data = self.read_data_from_bucket(fl_path)\n            local_etdata = j_data['features']\n        else:\n            # Get the collections so we don't have to do it for each feature\n            colls = {}\n            for t_res in t_res_list:\n                coll = self.get_collection(t_res)\n                for var in var_list:\n                    coll = self.filter_coll_by_var(coll, var)\n                    colls[t_res + '_' + var] = coll\n\n        for f_idx, geo_feat in enumerate(geo_data['features']):\n            feat_coords = geo_feat['geometry']['coordinates']\n            if geo_feat['geometry']['type'] == 'MultiPolygon':\n                geom_coords = Utils.orient_polygons_ccw(feat_coords)\n                geom = ee.Geometry.MultiPolygon(geom_coords)\n            elif geo_feat['geometry']['type'] == 'Polygon':\n                geom_coords = [Utils.orient_polygon_ccw(c) for c in feat_coords]\n                geom = ee.Geometry.Polygon(geom_coords)\n            else:\n                continue\n            # Add metadata to METADATA Datastore entity\n            meta_props = self.set_meta_properties(geo_feat['properties'], geo_feat['geometry'])\n            unique_meta_str = ('-').join([self.region, self.year, str(f_idx)])\n            UNIQUE_META_ID = hashlib.md5(unique_meta_str).hexdigest()\n            meta_entities.append(self.set_db_metadata_entity(UNIQUE_META_ID, str(f_idx), meta_props))\n            for var in var_list:\n                unique_str = ('-').join([self.region, self.dataset, self.et_model, self.year, var, str(f_idx)])\n                UNIQUE_ID = hashlib.md5(unique_str).hexdigest()\n                if compute:\n                    etdata = self.compute_et_stats(coll, var, geom)\n                else:\n                    etdata = {}\n                    for res in config.statics['start_end_mon_days_by_res'].keys():\n                        etdata_key = var + '_' + res\n                        new_key = 'data_' + res\n                        try:\n                            etdata[new_key] = local_etdata[f_idx]['properties'][etdata_key]\n                        except:\n                            etdata[new_key] = -9999\n                data_entities.append(self.set_db_data_entity(UNIQUE_ID, f_idx, etdata, var))\n        return data_entities, meta_entities\n\n###################################################\n# M A I N\n###################################################\nif __name__ == \"__main__\":\n    startTime = datetime.now()\n    print(startTime)\n    for region in ['US_states_west_500k', 'US_counties_west_500k']:\n        # for region in ['US_states_west_500k']:\n        if region == 'Mason':
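\n            # NOTE (added): only the small Mason geojson is computed live in Earth Engine; the regions in the loop above fall back to pre-computed local data\n            # Compute ee 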
stats in EE\n            comp = True\n        else:\n            # use fake data on localhost\n            comp = False\n        for ds in ['MODIS']:\n            for et_model in ['SSEBop']:\n                populate_datastore(region, ds, et_model, compute=comp)\n    print(datetime.now() - startTime)\n\n\n","sub_path":"mypython/standalone_populate_datastore.py","file_name":"standalone_populate_datastore.py","file_ext":"py","file_size_in_byte":14346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}\n{"seq_id":"540637458","text":"import socket,time,datetime,hashlib\r\n'''\r\nSocket is the most important module for network binding\r\n'''\r\ns = socket.socket() # Creating a socket object\r\nprint(\"-------------------Dred-----------------------\")\r\nuserin = input(\"\\n1.New User\\n2.Existing User\\n3.Check History\\n4.Help\\nChoose \")\r\nif userin == \"4\": # For Help\r\n    print(\"\\nWhat if you choose the New User option?\\nYou will be redirected to a new, fresh network between server and client.\\nIf you choose option 2 then you will be able to access the history of data.\\nIf you choose 1 then you will lose the history in your file.\")\r\n    print(\"\\nThis works as a server connector to clients, where the client is strongly connected to the server and transmits data to it\")\r\n    exit()\r\nif userin == \"3\": # For accessing history directly from the command line.\r\n    with open(\"LapyData.txt\",mode=\"r\") as read:\r\n        ac = read.read()\r\n    print(\"\\nHistory--\")\r\n    print(ac)\r\n    read.close()\r\n    exit()\r\nport = 6380 # Creating a port\r\ns.bind(('', port))\r\n'''\r\ns.bind() binds to any IP address on port 6380\r\n'''\r\nprint(\"\\nBound\")\r\ns.listen(5) # Listen with a backlog of up to 5 queued connections\r\nprint(\"Connecting\")\r\nwhile True: # For new messages and connections\r\n    c,addr = s.accept()\r\n    print(\"\\nGot Connection\",addr,datetime.datetime.now())\r\n    if userin == \"2\":\r\n        History = ()\r\n        with open(\"LapyData.txt\",mode = \"r\") as f: # For a returning user\r\n            f.read()\r\n            f.close()\r\n        ap = \"Server - Thanks For Connecting\"\r\n        c.send(ap.encode())\r\n        recieve = str(c.recv(1024))\r\n        print(recieve,\"\\n\")\r\n        while True:\r\n            recieve2 = str(c.recv(1024))\r\n            with open(\"LapyData.txt\" ,mode=\"a\") as file:\r\n                file.write(\"\\n\"+recieve2+\" \"+str(datetime.datetime.now()))\r\n                file.close()\r\n            print(recieve2,datetime.datetime.now())\r\n    if userin == \"1\":\r\n        History = ()\r\n        with open(\"LapyData.txt\", mode=\"w\") as f: # For a new user\r\n            f.write(str(History))\r\n            f.close()\r\n        ap = \"Server - Thanks For Connecting\"\r\n        c.send(ap.encode())\r\n        recieve = str(c.recv(1024))\r\n        print(recieve, \"\\n\")\r\n        while True:\r\n            recieve2 = str(c.recv(1024))\r\n            with open(\"LapyData.txt\", mode=\"a\") as file:\r\n                file.write(\"\\n\" + recieve2+\" \"+str(datetime.datetime.now()))\r\n                file.close()\r\n            print(recieve2, datetime.datetime.now())\r\n    c.close()","sub_path":"Client Server.py","file_name":"Client Server.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}\n{"seq_id":"609533640","text":"#!/usr/bin/env python3\n\nfrom lexed_to_parsed import *\nfrom lifted_to_rust import *\n\nimport sys\n\nfrom parsed_passes import BINOPS, UNIOPS\n\n\ndef flatten(root, exp, inputs, consts):\n\n    def _flatten(exp):\n        if type(exp[0]) is list:\n            return _flatten(exp[1])\n        if exp[0] in ['InputInterval', 'Interval']:\n            l = _flatten(exp[1]).replace('[','').replace(']','')\n            r = _flatten(exp[2]).replace('[','').replace(']','')\n            return \"[{}, {}]\".format(l,r)\n        if exp[0] in ['Float', 'Integer']:
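\n            # NOTE (added): numeric literals become one-point intervals\n            return 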
\"[{}]\".format(exp[1])\n if exp[0] in ['Variable', 'Input']:\n tlist = [t for t in inputs if t[0] == exp[1]]\n if len(tlist) == 1:\n return tlist[0][1]\n else:\n return _flatten(lookup(exp[1]))\n if exp[0] == 'Const':\n return consts[int(exp[1])]\n if exp[0] == 'Return':\n return _flatten(exp[1])\n if exp[0] in ops:\n return \"({}{}{})\".format(_flatten(exp[1]), ops[exp[0]], _flatten(exp[2]))\n if exp[0] in funcs:\n return \"{}({})\".format(funcs[exp[0]], _flatten(exp[1]))\n if exp[0] == 'abs':\n return \"abs({})\".format(_flatten(exp[1]))\n if exp[0] == 'Neg':\n return \"-({})\".format(_flatten(exp[1]))\n if exp[0] == 'pow':\n return \"pow({},{})\".format(_flatten(exp[1]), _flatten(exp[2]))\n if exp[0] == \"ipow\":\n c = consts[int(exp[2][1])]\n return \"pow({},{})\".format(_flatten(exp[1]), c)\n if exp[0] == \"sqrt\":\n return\"sqrt({})\".format(_flatten(exp[1]))\n print(\"Error flattening '{}'\".format(exp))\n sys.exit(-1)\n\n def lookup(var):\n tmp = root\n while tmp[0] != 'Return':\n assign = tmp[0]\n new_var = assign[1][1]\n if new_var == var:\n return assign[2]\n tmp = tmp[1]\n print(\"Invalid lookup: {}\\nIn:{}\\n\".format(var, root))\n sys.exit(-1)\n\n return _flatten(exp)\n\n \n \n\ndef runmain():\n ''' Wrapper to allow constant lifter to run with direct\n command line input '''\n try:\n filename = sys.argv[1]\n with open(filename, 'r') as f:\n data = f.read()\n except IndexError:\n sys.stdout.write('Reading from standard input (type EOF to end):\\n')\n data = sys.stdin.read()\n\n exp = function_parser.parse(data)\n inputs = lift_inputs(exp)\n consts = lift_constants(exp, inputs)\n flattened = flatten(exp, exp, inputs, consts)\n\n print(\"flattened:\")\n print(flattened)\n print()\n print(\"expresions:\")\n while type(exp[0]) is list:\n print(exp[0])\n exp = exp[1]\n print(exp)\n print()\n print(\"inputs:\")\n for i in inputs:\n print(i)\n print()\n print(\"constants:\")\n for c in consts:\n print(c)\n\n# On call run as a util, taking in text and printing the constant lifted version\nif __name__ == \"__main__\":\n runmain()\n","sub_path":"src/frontend/function_transforms/parsed_flatten_pass.py","file_name":"parsed_flatten_pass.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"187802019","text":"# -*- coding: utf-8 -*-\n\n# Preparation for running locally:\n# pip install kaggle numpy pandas tensorflow transformers\n# mkdir -p ~/.kaggle\n# cp kaggle.json ~/.kaggle/\n# ls ~/.kaggle\n# chmod 600 /root/.kaggle/kaggle.json\n# kaggle competitions download -c nlp-getting-started -p input\n\nimport math\nimport os\nimport sys\nimport time\nimport numpy as np\nimport pandas as pd\nimport shutil\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"1\"\nimport tensorflow as tf\nfrom transformers import BertConfig, BertTokenizer, TFBertForSequenceClassification\n\nall = True\nbatch_size = 16\ntokens = 84\ntrain_ratio = .75\nval_ratio = .25\nmax_epochs = 80\nlearning_rate = 2e-5\n\nassert train_ratio + val_ratio <= 1\n\n\ndef read(fname, labeled):\n dtype = {\n \"id\": str,\n #\"keyword\": str,\n \"text\": str,\n #\"location\": str,\n }\n if labeled:\n dtype[\"target\"] = np.int32\n p = os.path.join(os.pardir, \"input\", fname)\n return pd.read_csv(p, dtype=dtype)\n\n\ndef bert_encode_text(df, tokenizer):\n d = tokenizer(text=list(df[\"text\"]),\n padding=\"max_length\",\n truncation=True,\n max_length=tokens,\n return_tensors=\"tf\",\n verbose=1)\n return {\n \"input_ids\": d.input_ids,\n 
\"attention_mask\": d.attention_mask,\n \"token_type_ids\": d.token_type_ids,\n }\n\n\ndef solve(model, timestamp, train_x, train_y, val_x, val_y, test_x):\n log_dir = os.path.join(os.pardir, \"logs\", timestamp)\n min_val_loss = math.inf\n epochs_with_worse_val_loss = 0\n epochs_since_braking = 0\n test_y = None\n\n def inspect(epoch, logs):\n loss = model.evaluate(train_x, train_y, verbose=0)[0]\n if logs is not None:\n print(f\"\\n\\tloss reported : {logs['loss']:.4f}\")\n print(f\"\\tloss evaluated: {loss:.4f}\")\n\n def settle_down(epoch, logs):\n nonlocal min_val_loss, epochs_with_worse_val_loss, epochs_since_braking, test_y\n val_loss = logs[\"val_loss\"]\n if min_val_loss >= val_loss:\n min_val_loss = val_loss\n epochs_with_worse_val_loss = 0\n if epoch >= 2:\n test_y = np.argmax(model.predict(test_x).logits, axis=1)\n else:\n epochs_with_worse_val_loss += 1\n if epochs_with_worse_val_loss > 2:\n model.stop_training = True\n\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n optimizer = tf.keras.optimizers.Adam(learning_rate,\n epsilon=learning_rate * 1e-03)\n model.compile(loss=loss, optimizer=optimizer)\n model.fit(\n x=train_x,\n y=train_y,\n epochs=max_epochs,\n batch_size=batch_size,\n callbacks=[\n tf.keras.callbacks.TensorBoard(log_dir=log_dir),\n #tf.keras.callbacks.LambdaCallback(on_epoch_end=inspect),\n tf.keras.callbacks.LambdaCallback(on_epoch_end=settle_down),\n ],\n validation_data=(val_x, val_y),\n verbose=1)\n return test_y\n\n\ndef main():\n timestamp = time.strftime(\"%Y%m%d-%H%M\")\n shutil.copyfile(sys.argv[0], timestamp + \".py\")\n\n labeled_df = read(\"train.csv\", labeled=True)\n test_df = read(\"test.csv\", labeled=False)\n\n train_size = int(len(labeled_df) * train_ratio)\n val_size = int(len(labeled_df) * val_ratio)\n if not all:\n train_size = 300\n val_size = 100\n test_df = test_df[:10]\n np.random.seed(int(19680516 * (train_ratio + val_ratio)))\n labeled_pick = np.random.permutation(labeled_df.index)\n train_df = labeled_df.iloc[labeled_pick[:train_size]]\n val_df = labeled_df.iloc[labeled_pick[train_size:train_size + val_size]]\n print(len(train_df), \"samples to train on, bincount\",\n np.bincount(train_df[\"target\"]))\n print(len(val_df), \"samples to validate, bincount\",\n np.bincount(val_df[\"target\"]))\n print(len(test_df), \"samples to test\")\n\n model_name = 'bert-base-multilingual-cased'\n tokenizer = BertTokenizer.from_pretrained(model_name)\n bert_config, unused_kwargs = BertConfig.from_pretrained(\n model_name,\n return_unused_kwargs=True,\n output_attentions=False,\n output_hidden_states=False,\n )\n assert not unused_kwargs, unused_kwargs\n train_x = bert_encode_text(train_df, tokenizer)\n val_x = bert_encode_text(val_df, tokenizer)\n test_x = bert_encode_text(test_df, tokenizer)\n train_y = train_df[\"target\"].to_numpy()\n val_y = val_df[\"target\"].to_numpy()\n\n strategy = tf.distribute.get_strategy()\n with strategy.scope():\n bert_inputs = [\n tf.keras.Input(name=name,\n shape=(tokens, ),\n dtype=tf.int32,\n ragged=True) for name in tokenizer.model_input_names\n ]\n assert bert_config.num_labels == 2\n bert_model = TFBertForSequenceClassification(config=bert_config)\n bert_output = bert_model(bert_inputs)\n model = tf.keras.Model(inputs=bert_inputs, outputs=[bert_output])\n model.summary()\n test_y = solve(model=model,\n timestamp=timestamp,\n train_x=train_x,\n train_y=train_y,\n val_x=val_x,\n val_y=val_y,\n test_x=test_x)\n\n submission = test_df[[\"id\"]].assign(target=test_y)\n 
submission.to_csv(\"submission.csv\", index=False)\n\n\nmain()\n","sub_path":"submission8.py","file_name":"submission8.py","file_ext":"py","file_size_in_byte":5474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"272233284","text":"#!usr/bin/env python3\n #run this in any directory add -v for verbose \n#get Pillow (fork of PIL) from pip before running --> pip install Pillow\n\nimport os\nimport sys\nfrom PIL import Image\nimport shutil\n\n\nsave_path = \"d_C2/\"# change here accordingly\n\ndef compressMe(file, verbose=False):\n\tglobal save_path\n\tfilepath = os.path.join(sys.argv[1], file)\n\toldsize = os.stat(filepath).st_size\n\tprint(\"old:\",oldsize)\n\tpicture = Image.open(filepath)\n\tdim = picture.size\n\t\n\t#set quality= to the preferred quality. \n\t#I found that 85 has no difference in my 6-10mb files and that 65 is the lowest reasonable number\n\tpicture.save(save_path+file,\"JPEG\",optimize=True,quality= 85)\n\t\n\tnewsize = os.stat(save_path+file).st_size\n\t#print(\"new:\",newsize)\n\tpercent = (oldsize-newsize)/float(oldsize)*100\n\tif (verbose):\n\t\tprint(\"File compressed from {0} to {1} or {2}%\".format(oldsize,newsize,percent))\n\treturn percent\n\ndef main():\n\tverbose = False\n\t#checks for verbose flag\n\tif (len(sys.argv)>1):\n\t\tif (sys.argv[1].lower()==\"-v\"):\n\t\t\tverbose = True\n\n\tif not os.path.exists(save_path):\n\t\tos.mkdir(save_path)\n\ttot = 0\n\tnum = 0\n\t\n\tfor file in os.listdir(sys.argv[1]):\n\t\tif os.path.splitext(file)[1].lower() in ('.jpg', '.jpeg'):\n\t\t\tnum += 1\n\t\t\tif os.stat(sys.argv[1]+'/'+file).st_size >2500000:#only compress image greater than 2.5mb\n\t\t\t\ttot += compressMe(file, verbose)\n\t\t\telse:\n\t\t\t\tshutil.copy(sys.argv[1]+'/'+file, save_path+file)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"Project/Code/utils/image_compression.py","file_name":"image_compression.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"344241281","text":"import machine\nimport utime as time\n\nhall_sensor_a = machine.ADC(0) #ADC connector\nhall_sensor_d = machine.Pin(4, machine.Pin.IN, machine.Pin.PULL_UP)\n\nnetworkpin = machine.Pin(2, machine.Pin.OUT)\nnetworkpin.off()\n\nwhile True:\n print('DBG: sensor D(): {}, '\n 'sensor A(): {} '\n 'at: {}'.format(hall_sensor_d.value(),\n hall_sensor_a.read(),\n time.time()))\n networkpin.off()\n time.sleep(0.1)\n if ((hall_sensor_d.value() == 0)or(hall_sensor_a.read() < 1024)):\n networkpin.on()\n\n","sub_path":"demo-app/demo-app-010/testing/syncholl01/sensortest.py","file_name":"sensortest.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"404159475","text":"# =============================================================================\n# Author: Shuo Zhou, szhou20@sheffield.ac.uk\n# Haiping Lu, h.lu@sheffield.ac.uk or hplu@ieee.org\n# =============================================================================\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef _none2dict(kwarg):\n if kwarg is None:\n return {}\n else:\n return kwarg\n\n\ndef plot_weights(\n weight_img, background_img=None, color_marker_pos=\"rs\", color_marker_neg=\"gs\", im_kwargs=None, marker_kwargs=None\n):\n \"\"\"Visualize model weights\n\n Args:\n weight_img (array-like): Model weight/coefficients in 2D, could be a 2D slice of a 3D or higher 
order tensor.\n        background_img (array-like, optional): 2D background image. Defaults to None.\n        color_marker_pos (str, optional): Color and marker for weights in positive values. Defaults to red \"rs\".\n        color_marker_neg (str, optional): Color and marker for weights in negative values. Defaults to green \"gs\".\n        im_kwargs (dict, optional): Keyword arguments for background images. Defaults to None.\n        marker_kwargs (dict, optional): Keyword arguments for markers. Defaults to None.\n\n    Returns:\n        [matplotlib.figure.Figure]: Figure to plot.\n    \"\"\"\n    if type(weight_img) != np.ndarray:\n        weight_img = np.array(weight_img)\n    if len(weight_img.shape) != 2:\n        raise ValueError(\n            \"weight_img is expected to be a 2D matrix, but got an array in shape %s\" % str(weight_img.shape)\n        )\n    im_kwargs = _none2dict(im_kwargs)\n    marker_kwargs = _none2dict(marker_kwargs)\n    fig = plt.figure()\n    ax = fig.add_subplot()\n    if background_img is not None:\n        ax.imshow(background_img, **im_kwargs)\n        weight_img[np.where(background_img == 0)] = 0\n\n    weight_pos_coords = np.where(weight_img > 0)\n    weight_neg_coords = np.where(weight_img < 0)\n\n    ax.plot(weight_pos_coords[1], weight_pos_coords[0], color_marker_pos, **marker_kwargs)\n    ax.plot(weight_neg_coords[1], weight_neg_coords[0], color_marker_neg, **marker_kwargs)\n\n    return fig\n\n\ndef plot_multi_images(images, n_cols=10, n_rows=None, marker_locs=None, im_kwargs=None, marker_kwargs=None):\n    \"\"\"Plot multiple images with markers in one figure.\n\n    Args:\n        images (array-like): Images to plot, shape(n_samples, dim1, dim2)\n        n_cols (int, optional): Number of columns for plotting multiple images. Defaults to 10.\n        n_rows (int, optional): Number of rows for plotting multiple images. If None, n_rows = n_samples / n_cols.\n        marker_locs (array-like, optional): Locations of markers, shape (n_samples, 2 * n_markers). Defaults to None.\n        im_kwargs (dict, optional): Keyword arguments for plotting images. Defaults to None.\n        marker_kwargs (dict, optional): Keyword arguments for markers. 
Defaults to None.\n\n    Returns:\n        [matplotlib.figure.Figure]: Figure to plot.\n    \"\"\"\n    if n_rows is None:\n        n_rows = int(images.shape[0] / n_cols) + 1\n    im_kwargs = _none2dict(im_kwargs)\n    marker_kwargs = _none2dict(marker_kwargs)\n    fig = plt.figure(figsize=(20, 36))\n\n    for i in range(images.shape[0]):\n        fig.add_subplot(n_rows, n_cols, i + 1)\n        plt.axis(\"off\")\n        plt.imshow(images[i, ...], **im_kwargs)\n        if marker_locs is not None:\n            coords = marker_locs[i, :].reshape((-1, 2))\n            n_landmark = coords.shape[0]\n            for j in range(n_landmark):\n                ix = coords[j, 0]\n                iy = coords[j, 1]\n                plt.plot(ix, iy, **marker_kwargs)\n        plt.title(i + 1)\n\n    return fig\n","sub_path":"kale/interpret/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}\n{"seq_id":"181879283","text":"import tensorflow as tf\nimport numpy as np\nfrom tensorflow.python.framework import ops\n\ndef binaryRound(x):\n    \"\"\"\n    Rounds a tensor whose values are in [0,1] to a tensor with values in {0, 1},\n    using the straight through estimator for the gradient.\n    ref: https://r2rt.com/binary-stochastic-neurons-in-tensorflow.html\n    \"\"\"\n    g = tf.get_default_graph()\n\n    with ops.name_scope(\"BinaryRound\") as name:\n        with g.gradient_override_map({\"Round\": \"Identity\"}):\n            return tf.round(x, name=name)\n\n#As per Xavier init, this should be 2/n(input), though many different initializations can be tried. \ndef init_weights(shape,stddev=.1):\n    \"\"\" Weight initialization \"\"\"\n    weights = tf.random_normal(shape, stddev=stddev)\n    return tf.Variable(weights)\n\ndef init_bias(shape, stddev=.1):\n    \"\"\" Bias initialization \"\"\"\n    biases = tf.random_normal([shape], stddev=stddev)\n    return tf.Variable(biases)\n\ndef save_weights(weights,biases,output_folder,weight_name_save,num_layers):\n    for i in range(0, num_layers+1):\n        weight_i = weights[i].eval()\n        np.savetxt(output_folder+weight_name_save+\"/w_\"+str(i)+\".txt\",weight_i,delimiter=',')\n        bias_i = biases[i].eval()\n        np.savetxt(output_folder+weight_name_save+\"/b_\"+str(i)+\".txt\",bias_i,delimiter=',')\n    return\n\ndef load_weights(output_folder,weight_load_name,num_layers):\n    weights = []\n    biases = []\n    for i in range(0, num_layers+1):\n        weight_i = np.loadtxt(output_folder+weight_load_name+\"/w_\"+str(i)+\".txt\",delimiter=',')\n        w_i = tf.Variable(weight_i,dtype=tf.float32)\n        weights.append(w_i)\n        bias_i = np.loadtxt(output_folder+weight_load_name+\"/b_\"+str(i)+\".txt\",delimiter=',')\n        b_i = tf.Variable(bias_i,dtype=tf.float32)\n        biases.append(b_i)\n    return weights , biases\n\ndef forwardprop(X, weights, biases, num_layers,):\n    for i in range(0, num_layers):\n        if i ==0:\n            htemp = tf.nn.sigmoid(tf.add(tf.matmul(X, weights[i]), biases[i]))\n        else:\n            htemp = tf.nn.sigmoid(tf.add(tf.matmul(htemp, weights[i]), biases[i]))\n    yval = tf.add(tf.matmul(htemp, weights[-1]), biases[-1])\n    return yval","sub_path":"FCNN_1THz/dbr_core.py","file_name":"dbr_core.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}\n{"seq_id":"625411246","text":"from importlib import import_module\n\nfrom db.session import session\n\nadditional_value_type = {\n    \"book\": (\"Author\", \"authors\"),\n    \"author\": (\"Book\", \"books\")\n}\n\n\ndef get_cls(package, cls):\n    return getattr(import_module(package), cls)\n\n\ndef prepare_search_response(query_type, objects):\n    \"\"\"Serializing function for objects set 
based on\n query type ( in this app there's only 2 query\n types - book and author) for search requests.\n\n :param query_type:\n request query type : book or author\n :param objects:\n requested type objects\n\n \"\"\"\n return {\n \"results\": [{\n \"id\": obj.id,\n \"name\": obj.name,\n additional_value_type.get(query_type)[1]: [{\n \"id\": a_obj.id,\n \"name\": a_obj.name\n } for a_obj in getattr(obj, additional_value_type.get(query_type)[1])]\n } for obj in objects]\n }\n\n\ndef prepare_view_response(query_type, object_):\n \"\"\"Serializing function for object based on query type\n\n :param query_type:\n request query type : book or author\n :param object_:\n request object\n\n \"\"\"\n if not object_:\n return {\n \"type\": query_type,\n \"name\": \"\",\n \"id\": \"\",\n additional_value_type.get(query_type)[1]: []\n }\n return {\n \"type\": query_type,\n \"id\": object_.id,\n \"name\": object_.name,\n additional_value_type.get(query_type)[1]: [{\n \"id\": a_obj.id,\n \"name\": a_obj.name\n } for a_obj in getattr(object_, additional_value_type.get(query_type)[1])]\n }\n\n\ndef get_additional_objects_set(cls, data):\n \"\"\"Prepopulates additional objects set for create/update\n requests.\n\n :param cls:\n Objects class to be prepopulated\n :param data:\n Request data. Contains list of deserialized objects.\n Every object contains id ( may be empty, in this case\n new object must be created ) and name.\n \"\"\"\n db_session = session()\n existing_objects_ids = [obj.get(\"id\") for obj\n in filter(lambda x: \"id\" in x.keys(), data)]\n existing_objects_set = db_session.query(cls).\\\n filter(cls.id.in_(existing_objects_ids)).\\\n all()\n new_objects_set = [cls(obj.get(\"name\")) for obj\n in filter(lambda x: \"id\" not in x.keys(), data)]\n return existing_objects_set + new_objects_set\n\n\ndef save_object(data):\n \"\"\"Saves object for create/update request.\n\n :param data:\n Request data. 
Contains :\n - id : May be empty ( in this case new object\n must be created )\n -name : Object name attr\n -extra : Deserialized related objects set\n -type : Object type\n\n \"\"\"\n db_session = session()\n object_cls = get_cls(\"db.models\", data.get(\"type\").capitalize())\n additional_object_cls = get_cls(\n \"db.models\", additional_value_type.get(data.get(\"type\"))[0]\n )\n object_id = data.get(\"id\", None)\n if object_id:\n object_ = db_session.query(object_cls).\\\n filter(object_cls.id == object_id).\\\n one()\n object_.name = data.get(\"name\")\n else:\n object_ = object_cls(data.get(\"name\"))\n setattr(\n object_,\n additional_value_type.get(data.get(\"type\"))[1],\n get_additional_objects_set(\n additional_object_cls, data.get(\"additionalObjects\")\n )\n )\n db_session.add(object_)\n db_session.commit()\n return {\"type\": data.get(\"type\"), \"id\": object_.id}","sub_path":"library/extensions/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"632243768","text":"import sentencepiece as spm\nimport numpy as np\nimport os\nimport pickle\nimport fasttext\nimport argparse\ndef train_sentencepiece_model(path_to_text_data,vocab_size,model_name='model_sentencepiece'):\n \n spm.SentencePieceTrainer.Train('--input='+str(path_to_text_data)+' --model_prefix='+str(model_name)+' --vocab_size='+str(vocab_size))\n# spm.SentencePieceTrainer.Train('--input=/home/sriharshkamma/final/BERT-pytorch/data/version1_test_corpus.small --model_prefix='+str(model_name)+' --vocab_size=10000')\n \ndef create_vocab_file(sentence_piece_model_path,directory_path,vocab_file_name):\n sp=spm.SentencePieceProcessor()\n sp.load(sentence_piece_model_path)\n vocab_sentence_piece=[]\n for i in range(sp.get_piece_size()):\n vocab_sentence_piece.append(sp.id_to_piece(i))\n print(directory_path+'/temp.small')\n with open(directory_path+'/temp.small','w') as f:\n for i in vocab_sentence_piece:\n f.write(i+'\\n')\n print('python dataset/vocab.py -c '+directory_path+'/temp.small'+' -o '+directory_path+'/'+vocab_file_name+'.small')\n os.system('bert-vocab -c '+directory_path+'/temp.small'+' -o '+directory_path+'/'+vocab_file_name+'.small')\n os.system('rm -r '+directory_path+'/temp.small')\n \n \ndef prepare_data(i):\n if len(i.strip())!=0 and len(i.split())>2:\n te=''\n m=len(i.split())\n c=0\n for j in i.split():\n if c==(m//2) -1 :\n te+=j+' \\t '\n else:\n te+=j+' '\n c+=1\n return te.strip().replace('.','')\n else:\n return ''\n\ndef change_data_to_input_format(file_path_input,file_path_output,sentence_piece_model_path):\n with open(file_path_input,'r') as f:\n data=f.read()\n data=data.split('\\n')[:-1]\n sp=spm.SentencePieceProcessor()\n sp.load(sentence_piece_model_path)\n req_data=[]\n for i in data: \n changed_data=prepare_data(' '.join(sp.EncodeAsPieces(i)))\n if changed_data!='':\n req_data.append(changed_data)\n with open(file_path_output,'w') as f:\n for i in req_data:\n f.write(i+'\\n')\n \ndef load_vocab(vocab_path: str):\n with open(vocab_path, \"rb\") as f:\n return pickle.load(f)\n \ndef create_fasttext_embeddings(vocab_file_path,fasttext_model_bin_path,path_to_save_fasttext_embeddings):\n vocab_data=load_vocab(vocab_file_path)\n model=fasttext.load_model(fasttext_model_bin_path)\n arr=[]\n for i in vocab_data.stoi:\n arr.append(model.get_sentence_vector(i))\n arr=np.array(arr)\n np.save(path_to_save_fasttext_embeddings,arr)\n \ndef 
train_fasttext_model(input_text_file,save_model_path): \n model=fasttext.train_unsupervised(input_text_file,model='skipgram',wordNgrams=3,verbose=1,dim=100,epoch=10)\n model.save_model(save_model_path)\ndef train():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-path_to_text_data\", \"--path_to_text_data\", required=True, type=str, help=\"path to text data\")\n parser.add_argument(\"-vocab_size\", \"--vocab_size\", required=True, type=int,help=\"vocab_size\")\n parser.add_argument(\"-path_to_modified_text_data\", \"--path_to_modified_text_data\", type=str, help=\"path to modified data\")\n parser.add_argument(\"-path_to_fasttext_model\", \"--path_to_fasttext_model\", type=str, help=\"path to fasttext model\")\n \n args = parser.parse_args()\n \n# train_sentencepiece_model(args.path_to_text_data,args.vocab_size)\n# create_vocab_file(os.getcwd()+'/model_sentencepiece.model',os.getcwd(),'vocabfile')\n# print('vocab file created')\n# change_data_to_input_format(args.path_to_text_data,args.path_to_modified_text_data,os.getcwd()+'/model_sentencepiece.model')\n# print('input format created')\n train_fasttext_model(args.path_to_modified_text_data,args.path_to_fasttext_model)\n print('fasttext model created')\n create_fasttext_embeddings(os.getcwd()+'/vocabfile.small',args.path_to_fasttext_model,'./fasttext_vectors.npy')\n #'/home/sriharshkamma/model_filename_big.bin'\n \nif __name__ == \"__main__\":\n train() \n#python preprocessing.py -path_to_text_data /home/sriharshkamma/final/BERT-pytorch/data/version1_test_corpus.small -vocab_size 100 -path_to_modified_text_data ./modified_text_data.small -path_to_fasttext_model ./fasttext.bin","sub_path":"bert_pytorch/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"300369530","text":"\nimport mystery_word as mw\n\n# import pdb\nimport random\nimport re\nimport sys\nimport webbrowser\n\n\nclass DemonWord(mw.MysteryWord):\n \"\"\"DemonWord class is a mystery word game which evilly dodges user guesses\n word_length is the number of letters in word to guess\n difficulty is 'easy'/'medium'/'hard'/'evil'\n\n medium = normal hangman game, computer picks a mystery word\n hard = computer dodges your guesses, always maximizing the number\n of possible words\n evil = same as hard, but hints are misleading\n (suggests worst possible guess)\n easy = same AI as hard mode, but tries to maximize\n player's chance of correct guesses\n\n\n \"\"\"\n def __init__(self, word_length=6, difficulty='evil'):\n \"\"\"Init for DemonWord class\"\"\"\n super(DemonWord, self).__init__()\n self.word_length = 6\n self.regexp = '.'*6\n self.word = None\n self.debug_output = False\n self.hint = ''\n self.word_families = {}\n self.word_list = ['echo', 'heal', 'best', 'lazy']\n self.difficulty = difficulty\n self.current_guess = ''\n self.lying_hints = False\n\n def set_word_length(self, word_length=6):\n \"\"\"Sets the word_length and initial blanke regexp,\n as well as filtering the word_list for the number of characters\n \"\"\"\n self.word_length = word_length\n self.regexp = '.' * word_length\n self.word_list = self.filter_word_list(self.word_list, self.regexp)\n\n def filter_word_list(self, word_list, regexp):\n \"\"\"Converts our simplified regexp to proper python regexp syntax\n Regexp consists of any character that has been correctly guessed\n or '.' 
if location is as yet unassigned\n \"\"\"\n word_list = [word for word in word_list if len(word) == len(regexp)]\n regexp = ''.join(['[a-z]' if char == '.' else char for char in regexp])\n regextp = ' ' + regexp + ' '\n return re.findall(regexp, ' '.join(word_list))\n\n def attempt_guess(self, letter):\n \"\"\"Return False if invalid, otherwise add to guesses list and return True\n This also triggers re-evaluation of the current word_list\n \"\"\"\n if self.difficulty == 'easy': # irrelevant in medium/normal mode\n evil = False\n else:\n evil = True\n # pdb.set_trace()\n if not self.is_valid_guess(letter):\n return False\n letter = letter.lower()\n\n old_regexp = self.regexp\n self.word_families = self.find_word_families(self.regexp,\n self.word_list, letter)\n self.word_list = self.pick_word_family(self.word_families,\n letter, evil)\n self.guesses.append(letter)\n possible_word = self.word_list[0]\n self.regexp = self.find_word_family(self.regexp, possible_word, letter)\n\n if self.regexp == old_regexp:\n print('Incorrect guess.\\n')\n self.num_guesses_left -= 1\n else:\n print('Correct!\\n')\n\n if not self.check_win():\n self.word = self.pick_single_word() # The final lie\n\n return True\n\n def find_word_families(self, regexp, word_list, guess):\n \"\"\"Given current regexp game state, the current word list, and letter\n guess, returns dictionary containing lists of words indexed by the\n regexp which would include them (if that word family is chosen)\n \"\"\"\n word_families = {}\n family_members = []\n for word in word_list:\n word_family = self.find_word_family(regexp, word, guess)\n family_members = word_families.get(word_family, [])\n family_members.append(word)\n word_families[word_family] = family_members\n return word_families\n\n def find_word_family(self, current_regexp, word, guess):\n \"\"\"Returns the regexp which would leave word in play\n with given guess letter\n \"\"\"\n # assert game.find_word_family('.....', 'river', 'r') == 'r...r'\n # output_list = [self.display_regexp_char(letter, word)\n # for letter in word]\n new_regexp = list(current_regexp)\n if self.debug_output:\n print('current_regexp: {}, word: {}'.format(repr(current_regexp),\n repr(word)))\n for slot, char in enumerate(current_regexp):\n # pdb.set_trace()\n if word[slot] == guess:\n new_regexp[slot] = guess\n output = ''.join(new_regexp)\n return output\n\n def pick_word_family(self, word_families, guess='a', evil=True):\n \"\"\"Picks 'hardest' word list based on word_families dictionary\"\"\"\n max = 0\n word_family = ''\n if (not evil) and len(word_families) > 1:\n # print('word_families: {}'.format(word_families))\n if self.current_guess in ''.join(word_families):\n # if guessed letter is somewhere in the keys\n try:\n # temp = word_families[self.regexp]\n del(word_families[self.regexp])\n # remove incorrect guesses as an option\n except:\n # word_families[self.regexp] = temp\n pass\n # Dirty hack for bug with easy, long, 'q', 'u'\n # -- index out of range\n if self.debug_output:\n print('word_families:{}'.format(word_families))\n for key, value in word_families.items():\n # Refactor this with a lambda\n if len(value) > max:\n max = len(value)\n word_family = key\n if self.debug_output:\n print('{},'.format(len(value)), end='')\n if self.debug_output:\n print('\\nword_family: {}, return word list: {}'.format(\n repr(word_family), repr(word_families[word_family])))\n # consider adding check if it is the last turn to force a loss\n\n return word_families[word_family]\n\n def pick_best_letter(self, 
lie=False):\n \"\"\"Recommend best letter for user to pick if lie=False, otherwise worst\n \"\"\"\n alphabet = 'abcdefghijklmnopqrstuvwxyz'\n available = [letter for letter in alphabet\n if letter not in self.guesses]\n letter_scores = {}\n\n def simple_pick():\n \"\"\"Just pick a letter known to be within >= 1 word_list words\"\"\"\n word_list_set = set(''.join(self.word_list)) # removes duplicates\n available_hits = [letter for letter in available\n if letter in word_list_set]\n self.hint = random.choice(available_hits)\n\n if len(self.word_list) < 3 and not lie:\n simple_pick()\n return\n\n for letter in available:\n # # word_families = self.find_word_families(self.regexp,\n # # self.word_list, letter)\n # # print('word_families: {}'.format(self.word_families))\n # # letter_scores[letter] = ([word_families[x]\n # # for x in word_families])\n # possible_word = self.word_list[0]\n # potential_regexp = self.find_word_family(self.regexp,\n # possible_word, letter)\n # potential_wordlist = self.filter_word_list(self.word_list,\n # potential_regexp)\n # letter_scores[letter] = len(potential_wordlist)\n potential_word_families = self.find_word_families(self.regexp,\n self.word_list,\n letter)\n potential_word_list = self.pick_word_family(potential_word_families,\n letter)\n # possible_word = potential_word_list[0]\n # possible_regexp = self.find_word_family(self.regexp,\n # possible_word, letter)\n\n letter_scores[letter] = potential_word_list\n if self.debug_output:\n print('letter scores: {}'.format([len(letter_scores[x])\n for x in letter_scores]))\n\n try:\n min_score = min([len(letter_scores[letter]) for letter in available\n if letter in ''.join(letter_scores[letter])])\n max_score = max([len(letter_scores[letter]) for letter in available\n if letter not in ''.join(letter_scores[letter])])\n\n except:\n min_score = min([len(letter_scores[letter])\n for letter in available])\n max_score = max([len(letter_scores[letter])\n for letter in available])\n\n if (max_score == 1 and lie is True) or (min_score == 1\n and lie is False):\n simple_pick()\n return\n\n for letter in letter_scores:\n if lie is True and len(letter_scores[letter]) == max_score:\n self.hint = letter\n\n elif lie is False and len(letter_scores[letter]) == min_score:\n self.hint = letter\n\n def display_word(self):\n \"\"\"Returns a string showing which letters from letter_list are in word\n \"\"\"\n output_list = [self.display_letter(letter) for letter in self.regexp]\n output = ' '.join(output_list)\n return output\n\n def is_word_complete(self):\n \"\"\"Returns True if all letters in word are in letter_list\"\"\"\n for letter in self.regexp:\n if letter == '.':\n return False\n return True\n\n def pick_single_word(self):\n \"\"\"Returns a randomly selected word in self.word_list\"\"\"\n return random.choice(self.word_list)\n\n def quick_play(self, silent=False, lying_hint=False):\n \"\"\"Not yet implemented\"\"\"\n pass\n '''\n for _ in range(self.num_guesses_left):\n letter = self.pick_best_letter(lie=lying_hint)\n if not silent:\n print('You guessed {}'.format(letter))\n self.attempt_guess(letter)\n if not silent:\n print(self)\n if self.check_win() is not None:\n break\n '''\n\n\ndef user_interface(show_hint=False, lying_hints=False, show_debug_output=False):\n \"\"\"Gets input from user to conduct a DemonWords game\n show_hint=True shows hints at each turn (overridden by user menu)\n lying_hints=True shows hints that make it harder to win\n (may be overriden by user menu)\n debug_output=True provides prints extra 
information about each turn\n (may be overriden by command line options)\n \"\"\"\n def guess_prompt():\n guess = ''\n while not game.is_valid_guess(guess):\n guess = input('Please choose a letter: ').lower()\n if not game.is_valid_guess(guess):\n print('Invalid letter, try again...')\n game.current_guess = guess\n return guess\n\n def welcome_menu():\n print('Welcome to Mystery Word!')\n print(\"Please select from the following options.\")\n\n def select_difficulty_menu():\n game.difficulty = one_key_menu(\n choices={'e': 'easy', 'm': 'medium', 'h': 'hard', 'v': 'evil'},\n prompt='Choose a difficulty level -- [E]asy, [M]edium, '\n '[H]ard, e[V]il: ',\n default='m',\n force_compliance=True,\n force_msg='Please choose from the listed options, or q to exit.',\n exit_words=['q', 'quit', 'end', 'exit'])\n\n def choose_hints_menu():\n return one_key_menu(choices={'y': True, 'n': False},\n prompt='Would you like friendly hints? [y/N] : ',\n default='n',\n force_compliance=False,\n force_msg='',\n exit_words=['q', 'quit', 'end', 'exit'])\n\n def word_length_menu():\n valid_choices = 'sml'\n choice = ' '\n while (choice not in valid_choices) or choice == '':\n choice = input('Please choose word length: '\n '[S]hort [M]edium or [L]ong: ').lower()\n if choice == 's':\n game.set_word_length(random.randrange(4, 7))\n if choice == 'm':\n game.set_word_length(random.randrange(6, 9))\n if choice == 'l':\n game.set_word_length(random.randrange(8, 13))\n # Move these elsewhere, if possible:\n if game.difficulty == 'medium':\n\n game.word = random.choice([x for x in game.word_list\n if len(x) == game.word_length])\n game.word_list = [game.word]\n\n if game.difficulty == 'evil':\n game.lying_hints = True\n\n def game_loop():\n while True:\n guess = guess_prompt()\n game.attempt_guess(guess)\n print(game)\n if show_hint:\n show_hints()\n if game.check_win() is not None:\n break\n\n def define_word_menu():\n my_word = game.word if game.check_win() is False else game.regexp\n my_prompt = \"Would you like to know what the heck '{}' means? [y/N]: \"\n my_prompt = my_prompt.format(my_word)\n show_definition = one_key_menu(\n choices={'y': True, 'n': False},\n prompt=my_prompt,\n default='n',\n force_compliance=False,\n force_msg='',\n exit_words=['q', 'quit', 'end', 'exit'])\n if show_definition:\n my_url = 'https://search.yahoo.com/search;?p=define%3A+' + my_word\n webbrowser.open(my_url)\n\n def play_again():\n define_word_menu()\n return one_key_menu(choices={'y': True, 'n': False},\n prompt='Play again [Y/n]?',\n default='y',\n force_compliance=False,\n force_msg='',\n exit_words=['q', 'quit', 'end', 'exit'])\n\n def show_hints():\n if game.check_win() is None:\n game.pick_best_letter(game.lying_hints)\n s = 's' if len(game.word_list) > 1 else ''\n print('Current word list has {} word{}. '.format(\n len(game.word_list), s), end='')\n print(\"Might I recommend you try '{}'?\\n\".format(game.hint))\n\n def one_key_menu(choices={'y': True, 'n': False}, prompt='Y/n?',\n default='y', force_compliance=False,\n force_msg='Please try again. 
\\n',\n exit_words=['quit', 'end', 'exit']):\n \"\"\"Function for capturing case-insensitive single letter input\n Probably could also be used for >1 letter input with a list input\n into acceptabld choices is an iterable that contains all valid input\n options, must be all lowercase\n\n prompt is the text to display on the line taking input\n\n default is the value to choose on blank or bogus input,\n must be lowercase\n\n force_compliance set to True loops the input prompt until an\n acceptable answer is met\n\n force_msg is a message to display on improper input, including\n newlines if needed\n\n exit_words contains allowed input for exiting the loop,\n must be lowercase\n \"\"\"\n kb_input = input(prompt).lower()\n if kb_input in exit_words:\n sys.exit('Exiting game by user request.')\n\n if kb_input not in choices:\n if force_compliance:\n print(force_msg)\n return one_key_menu()\n else:\n return default\n else:\n return choices[kb_input]\n\n game = DemonWord()\n game.debug_ouput = show_debug_output\n game.import_word_list('/usr/share/dict/words')\n if game.debug_output:\n game.word_list = game.word_list[:1000]\n welcome_menu()\n select_difficulty_menu()\n word_length_menu()\n show_hint = choose_hints_menu()\n print('The Mystery Word contains {} letters.'.format(len(game.regexp)))\n print(game)\n if show_hint:\n show_hints()\n game_loop()\n while(play_again()):\n game = DemonWord()\n game.import_word_list('/usr/share/dict/words')\n if game.debug_output:\n game.word_list = game.word_list[:1000]\n select_difficulty_menu()\n word_length_menu()\n show_hint = choose_hints_menu()\n print('The Mystery Word contains {} letters.'.format(len(game.regexp)))\n print(game)\n if show_hint:\n show_hints()\n game_loop()\n\nif __name__ == '__main__':\n \"\"\"Use 'debug' command line option to enter debug mode\"\"\"\n to_debug = False\n try:\n if len(sys.argv) > 1 and sys.argv[1] == 'debug':\n to_debug = True\n print('Running in debug mode...')\n except:\n pass\n user_interface(show_hint=True, lying_hints=False,\n show_debug_output=to_debug)\n","sub_path":"demon_words.py","file_name":"demon_words.py","file_ext":"py","file_size_in_byte":17482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"365460341","text":"import sys\nimport random\nimport matplotlib\n\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton\nfrom matplotlib import pyplot\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\n\nmatplotlib.use('Qt5Agg')\n\n\nclass MplCanvas(FigureCanvas):\n\n def __init__(self, parent=None, width=5, height=4, dpi=100):\n # fig = Figure(figsize=(width, height), dpi=dpi)\n fig, self.ax = pyplot.subplots(figsize=(15, 7.5))\n # self.ax = fig.add_subplot(111)\n super(MplCanvas, self).__init__(fig)\n\n\nclass MainWindow(QtWidgets.QMainWindow):\n\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n\n self.canvas = MplCanvas(self, width=30, height=20, dpi=100)\n self.setCentralWidget(self.canvas)\n\n n_data = 50\n self.xdata = list(range(n_data))\n self.ydata = [random.randint(0, 10) for i in range(n_data)]\n self.update_plot()\n\n button1 = QPushButton(self.canvas)\n button1.setText(\"Button1\")\n button1.move(64, 16)\n button1.clicked.connect(self.update_plot)\n\n self.show()\n\n # Setup a timer to trigger the redraw by calling update_plot.\n # self.timer = QtCore.QTimer()\n # 
self.timer.setInterval(100)\n # self.timer.timeout.connect(self.update_plot)\n # self.timer.start()\n\n\n def update_plot(self):\n # Drop off the first y element, append a new one.\n self.ydata = self.ydata[1:] + [random.randint(0, 10)]\n self.canvas.ax.cla() # Clear the canvas.\n self.canvas.ax.plot(self.xdata, self.ydata, 'r')\n # Trigger the canvas to update and redraw.\n self.canvas.draw()\n\n\napp = QtWidgets.QApplication(sys.argv)\nw = MainWindow()\napp.exec_()\n","sub_path":"src/qt.py","file_name":"qt.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"512609313","text":"# from PyQt5.QtWidgets import *\n# import sys\n#\n# class Window(QWidget):\n# def __init__(self):\n# QWidget.__init__(self)\n# layout = QGridLayout()\n# self.setLayout(layout)\n#\n# radiobutton = QRadioButton(\"Australia\")\n# radiobutton.setChecked(True)\n# radiobutton.value = 1\n# radiobutton.toggled.connect(self.onClicked)\n# layout.addWidget(radiobutton, 0, 0)\n#\n# radiobutton = QRadioButton(\"China\")\n# radiobutton.value = 2\n# radiobutton.toggled.connect(self.onClicked)\n# layout.addWidget(radiobutton, 0, 1)\n#\n# radiobutton = QRadioButton(\"Japan\")\n# radiobutton.value = 3\n# radiobutton.toggled.connect(self.onClicked)\n# layout.addWidget(radiobutton, 0, 2)\n#\n# def onClicked(self):\n# radioButton = self.sender()\n# if radioButton.isChecked():\n# print(\"Country is %s\" % (radioButton.value))\n#\n#\n# app = QApplication(sys.argv)\n# screen = Window()\n# screen.show()\n# sys.exit(app.exec_())\n\n# import sys\n# import time\n#\n# from PyQt5.QtGui import QPixmap\n# from PyQt5.QtWidgets import (QApplication, QDialog,\n# QProgressBar, QPushButton, QMessageBox)\n# from PyQt5.QtCore import QThread, pyqtSignal\n#\n# TIME_LIMIT = 100\n#\n#\n# class External(QThread):\n# \"\"\"\n# Runs a counter thread.\n# \"\"\"\n#\n# countChanged = pyqtSignal(int)\n#\n# def run(self):\n# count = 0\n# while count < TIME_LIMIT:\n# count += 1\n# time.sleep(0.05)\n# self.countChanged.emit(count)\n# QMessageBox.setIconPixmap(QMessageBox.information(None, \"Concluído\", \"10 imagens rotacionadas.\"),\n# QPixmap(\":verificado.png\"))\n# # QMessageBox.iconPixmap(QMessageBox.information(None, \"Concluído\", \"10 imagens rotacionadas.\"))\n#\n# class Actions(QDialog):\n# \"\"\"\n# Simple dialog that consists of a Progress Bar and a Button.\n# Clicking on the button results in the start of a timer and\n# updates the progress bar.\n# \"\"\"\n#\n# def __init__(self):\n# super().__init__()\n# self.initUI()\n#\n# def initUI(self):\n# self.setWindowTitle('Progress Bar')\n# self.progress = QProgressBar(self)\n# self.progress.setGeometry(0, 0, 300, 25)\n# self.progress.setMaximum(100)\n# self.button = QPushButton('Start', self)\n# self.button.move(0, 30)\n# self.show()\n#\n# self.button.clicked.connect(self.onButtonClick)\n#\n# def onButtonClick(self):\n# self.calc = External()\n# self.calc.countChanged.connect(self.onCountChanged)\n# self.calc.start()\n#\n# def onCountChanged(self, value):\n# self.progress.setValue(value)\n#\n# if __name__ == \"__main__\":\n# app = QApplication(sys.argv)\n# window = Actions()\n# sys.exit(app.exec_())\n\nfrom PyQt5 import QtGui\nfrom PyQt5.QtWidgets import QApplication, QDialog, QProgressBar, QPushButton, QVBoxLayout\nimport sys\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal\nimport time\n\n\n# class MyThread(QThread):\n# # Create a counter thread\n# change_value = pyqtSignal(int)\n#\n# def run(self):\n# cnt = 
0\n# while cnt &100:\n# cnt += 1\n# time.sleep(0.3)\n# self.change_value.emit(cnt)\n#\n# class Window(QDialog):\n# def __init__(self):\n# super().__init__()\n# self.title = \"PyQt5 ProgressBar\"\n# self.top = 200\n# self.left = 500\n# self.width = 300\n# self.height = 100\n# self.setWindowIcon(QtGui.QIcon(\"icon.png\"))\n# self.setWindowTitle(self.title)\n# self.setGeometry(self.left, self.top, self.width, self.height)\n# vbox = QVBoxLayout()\n# self.progressbar = QProgressBar()\n# # self.progressbar.setOrientation(Qt.Vertical)\n# self.progressbar.setMaximum(100)\n# self.progressbar.setStyleSheet(\"QProgressBar {border: 2px solid grey;border-radius:8px;padding:1px}\"\n# \"QProgressBar::chunk {background:yellow}\")\n# # qlineargradient(x1: 0, y1: 0.5, x2: 1, y2: 0.5, stop: 0 red, stop: 1 white);\n# # self.progressbar.setStyleSheet(\"QProgressBar::chunk {background: qlineargradient(x1: 0, y1: 0.5, x2: 1, y2: 0.5, stop: 0 red, stop: 1 white); }\")\n# # self.progressbar.setTextVisible(False)\n# vbox.addWidget(self.progressbar)\n# self.button = QPushButton(\"Start Progressbar\")\n# self.button.clicked.connect(self.startProgressBar)\n# self.button.setStyleSheet('background-color:yellow')\n# vbox.addWidget(self.button)\n# self.setLayout(vbox)\n# self.show()\n#\n# def startProgressBar(self):\n# self.thread = MyThread()\n# self.thread.change_value.connect(self.setProgressVal)\n# self.thread.start()\n#\n# def setProgressVal(self, val):\n# self.progressbar.setValue(val)\n#\n#\n# App = QApplication(sys.argv)\n# window = Window()\n# sys.exit(App.exec())\n\nimport logging\nimport random\nimport sys\nimport time\n\nfrom PyQt5.QtCore import QRunnable, Qt, QThreadPool\nfrom PyQt5.QtWidgets import (\n QApplication,\n QLabel,\n QMainWindow,\n QPushButton,\n QVBoxLayout,\n QWidget,\n)\n\nlogging.basicConfig(format=\"%(message)s\", level=logging.INFO)\n\n# 1. Subclass QRunnable\nclass Runnable(QRunnable):\n def __init__(self, n):\n super().__init__()\n self.n = n\n\n def run(self):\n # Your long-running task goes here ...\n for i in range(5):\n logging.info(f\"Working in thread {self.n}, step {i + 1}/5\")\n time.sleep(random.randint(700, 2500) / 1000)\n\nclass Window(QMainWindow):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setupUi()\n\n def setupUi(self):\n self.setWindowTitle(\"QThreadPool + QRunnable\")\n self.resize(250, 150)\n self.centralWidget = QWidget()\n self.setCentralWidget(self.centralWidget)\n # Create and connect widgets\n self.label = QLabel(\"Hello, World!\")\n self.label.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n countBtn = QPushButton(\"Click me!\")\n countBtn.clicked.connect(self.runTasks)\n # Set the layout\n layout = QVBoxLayout()\n layout.addWidget(self.label)\n layout.addWidget(countBtn)\n self.centralWidget.setLayout(layout)\n\n def runTasks(self):\n threadCount = QThreadPool.globalInstance().maxThreadCount()\n self.label.setText(f\"Running {threadCount} Threads\")\n pool = QThreadPool.globalInstance()\n for i in range(threadCount):\n # 2. Instantiate the subclass of QRunnable\n runnable = Runnable(i)\n # 3. 
Call start()\n pool.start(runnable)\n\napp = QApplication(sys.argv)\nwindow = Window()\nwindow.show()\nsys.exit(app.exec())","sub_path":"test/teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":6917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"528441058","text":"import pygame\nimport math\n\npygame.init()\nwin = pygame.display.set_mode((500, 500))\nclock = pygame.time.Clock()\n# The correct method to solve for intersection points of two circles is algebraically.\n# NOT using points (x, y coordinates) because of infinite precision of coordinate system (real numbers).\n\n\ndef get_intersections(x0, y0, r0, x1, y1, r1):\n # circle 1: (x0, y0), radius r0\n # circle 2: (x1, y1), radius r1\n d = math.sqrt((x1-x0)**2 + (y1-y0)**2)\n # non intersecting\n if d > r0 + r1:\n return None\n # One circle within other\n if d < abs(r0-r1):\n return None\n # coincident circles\n if d == 0 and r0 == r1:\n return None\n else:\n a = (r0**2-r1**2+d**2)/(2*d)\n h = math.sqrt(r0**2-a**2)\n x2 = x0+a*(x1-x0)/d\n y2 = y0+a*(y1-y0)/d\n x3 = x2+h*(y1-y0)/d\n y3 = y2-h*(x1-x0)/d\n\n x4 = x2-h*(y1-y0)/d\n y4 = y2+h*(x1-x0)/d\n\n return (x3, y3, x4, y4)\n\n\nh, k = 200, 200\nr = 100\nrun = True\nwhile run:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n if event.type == pygame.KEYDOWN:\n print(pygame.key.name(event.key))\n\n win.fill((66, 67, 69))\n x0, y0 = 0, 0\n r0 = 5\n x1, y1 = 2, 2\n r1 = 5\n\n # intersecting with (x1, y1) but not with (x0, y0)\n x2, y2 = -1, 0\n r2 = 2.5\n\n circle1 = plt.Circle((x0, y0), r0, color='b', fill=False)\n circle2 = plt.Circle((x1, y1), r1, color='b', fill=False)\n circle3 = plt.Circle((x2, y2), r2, color='b', fill=False)\n\n fig, ax = plt.subplots()\n ax.set_xlim((-10, 10))\n ax.set_ylim((-10, 10))\n ax.add_artist(circle1)\n ax.add_artist(circle2)\n ax.add_artist(circle3)\n\n intersections = get_intersections(x0, y0, r0, x1, y1, r1)\n if intersections is not None:\n i_x3, i_y3, i_x4, i_y4 = intersections\n plt.plot([i_x3, i_x4], [i_y3, i_y4], '.', color='r')\n\n intersections = get_intersections(x0, y0, r0, x2, y2, r2)\n if intersections is not None:\n i_x3, i_y3, i_x4, i_y4 = intersections\n plt.plot([i_x3, i_x4], [i_y3, i_y4], '.', color='r')\n\n intersections = get_intersections(x1, y1, r1, x2, y2, r2)\n if intersections is not None:\n i_x3, i_y3, i_x4, i_y4 = intersections\n plt.plot([i_x3, i_x4], [i_y3, i_y4], '.', color='r')\n\n plt.gca().set_aspect('equal', adjustable='box')\n\n pygame.display.flip()\n","sub_path":"Code/Visualize_Tools/test_pygame.py","file_name":"test_pygame.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"352424581","text":"import pandas as pd\n\n\n# Excelファイルを開く\nfilename = 'population.xlsx'\nsheet_name = 'list-sjis.csv'\nbook = pd.read_excel(filename, sheet_name=sheet_name)\n\n# データを人口順に表示\nbook.sort_values(by='法定人口', ascending=False)\nprint(book)\n","sub_path":"3_Data/3-1_excel_pandas.py","file_name":"3-1_excel_pandas.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"486283412","text":"\"\"\"eval URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. 
Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\n\nfrom main.api import UserProfileResource, UserResource, RecipeResource, StepResource, AllergyResource, \\\n IngredientResource, RecipeFavouriteResource, StepIngredientResource\nfrom main.views import RecipeList, IndexView, RecipeDetail, IngredientList, AllergiesList, Recipe2Detail, RecipeNotView\nfrom tastypie.api import Api\n\nv1_api = Api(api_name='v1')\nv1_api.register(UserProfileResource())\nv1_api.register(UserResource())\nv1_api.register(RecipeResource())\nv1_api.register(StepResource())\nv1_api.register(AllergyResource())\nv1_api.register(IngredientResource())\nv1_api.register(RecipeFavouriteResource())\nv1_api.register(StepIngredientResource())\n\nurlpatterns = [\n # Auth\n url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),\n url(r'^login/$', auth_views.login, {'template_name': 'login.html'}, name='login'),\n url(r'^admin/', admin.site.urls),\n url(r'^admin', admin.site.urls),\n url(r'^$', IndexView.as_view(), name='index'),\n url(r'^recipes/$', RecipeList.as_view(), name='recipes'),\n url(r'^ingredients/$', IngredientList.as_view(), name='ingredients'),\n url(r'^allergies/$', AllergiesList.as_view(), name='allergies'),\n url(r'^recipe/(?P[-\\w]+)/$', RecipeDetail.as_view(), name='recipe-detail'),\n url(r'^recipe2/(?P[-\\w]+)/$', Recipe2Detail.as_view(), name='recipe-detail2'),\n url(r'^recipe-not/$', RecipeNotView.as_view(), name='recipe-not'),\n\n # API section\n url(r'^api/', include(v1_api.urls)),\n\n # Catch all non existent pages\n url(r'^.*$', IndexView.as_view(), name='index'),\n\n]\n","sub_path":"seku/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"137536712","text":"import re\nimport logging\n\n\nfrom model.dao.gym_dao import GymDAO\n\nfrom exceptions import Error, InvalidData\n\n\nclass GymController:\n\t\n\tdef __init__(self, database_engine):\n\t\tself._database_engine = database_engine\n\n\tdef create_gym(self, data):\n\t\ttry:\n\t\t\twith self._database_engine.new_session() as session:\n\t\t\t\tdao = GymDAO(session)\n\t\t\t\tgym = dao.create(data)\n\t\t\t\tgym_data = gym.to_dict()\n\t\t\t\treturn gym_data\n\t\texcept Error as error:\n\t\t\traise error\n\n\tdef list_gyms(self, person_type=None):\n\t\twith self._database_engine.new_session() as session:\n\t\t\tgyms = GymDAO(session).get_all()\n\t\t\tprint(gyms)\n\t\t\tgyms_data = [gym.to_dict('dict') for gym in gyms]\n\t\treturn gyms_data\n\n\tdef get_gym(self, gym_id):\n\t\twith self._database_engine.new_session() as session:\n\t\t\tgym = GymDAO(session).get(gym_id)\n\t\t\tgym_data = gym.to_dict()\n\t\treturn gym_data\n\n\tdef update_gym(self, gym_id, gym_data):\n\t\twith self._database_engine.new_session() as session:\n\t\t\tdao = GymDAO(session)\n\t\t\tgym = dao.get(gym_id)\n\t\t\tgym = dao.update(gym, gym_data)\n\t\t\treturn gym.to_dict()\n\n\tdef delete_gym(self, gym_id):\n\t\twith self._database_engine.new_session() as session:\n\t\t\tdao = 
GymDAO(session)\n\t\t\tgym = dao.get(gym_id)\n\t\t\tdao.delete(gym)\n\n\tdef search_gym(self, name):\n\t\twith self._database_engine.new_session() as session:\n\t\t\tdao = GymDAO(session)\n\t\t\tgym = dao.get_by_name(name)\n\t\t\treturn gym.to_dict()","sub_path":"GLPOO_Project/controller/gym_controller.py","file_name":"gym_controller.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"397878486","text":"\n# coding: utf-8\n\n# In[ ]:\n\n\nimport os\nimport sys\nimport unittest\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nimport datetime\nfrom func.sprint4Func import *\nfrom func.loadData import readAndSaveToList\n\ndef load_data():\n file_name = 'nov10.ged'\n # create list for individual and families\n ilist = []\n flist = []\n\n # input sample\n filePath = os.path.join(os.getcwd(), \"Sample/\" + file_name)\n\n # read file to create individual and families\n if os.path.exists(filePath):\n readAndSaveToList(filePath, ilist, flist)\n else:\n print(\"File doesn't exist\")\n\n return ilist, flist\n\nclass TestSprint4Func(unittest.TestCase):\n \"\"\"Test mathfuc.py\"\"\"\n \n def test_US38_upcomingBirthdays(self):\n ilist, flist = load_data()\n for indi in ilist:\n if indi.ID == \"I10\":\n self.assertTrue(US38_upcomingBirthdays(indi))\n def test_US39_upcomingAnniversaries(self):\n ilist, flist = load_data()\n for fm in flist:\n if fm.ID == \"F3\":\n self.assertTrue(US39_upcomingAnniversaries(fm))\n \n def test_us29(self):\n ilist, flist = load_data()\n for pp in ilist:\n if US29(pp.ID, pp.Name ,pp.Alive)!=None:\n self.assertTrue(US29(pp.ID, pp.Name ,pp.Alive))\n \n def test_us30(self):\n ilist, flist = load_data()\n for fm in flist:\n if US30(fm.ID, fm.Divorced, fm.HusbandID, fm.WifeID,ilist)!=None:\n self.assertTrue(US30(fm.ID, fm.Divorced, fm.HusbandID, fm.WifeID,ilist))\n\n def test_ListMultipleBirths(self):\n ilist, flist = load_data()\n\n tp = ListMultipleBirths(ilist)\n \n self.assertEqual(len(tp), 8)\n\n def test_ListLivingSingle(self):\n\n ilist, flist = load_data()\n\n tp = ListLivingSingle(ilist)\n\n self.assertEqual(len(tp), 0) \n def test_US35(self):\n day= '2018-11-11'\n self.assertTrue(US35(day))\n def test_US36(self):\n day= '2018-11-11'\n self.assertTrue(US36(day))\nif __name__ == '__main__':\n suite = unittest.TestSuite()\n\n tests = [TestSprint4Func(\"test_US38_upcomingBirthdays\"),TestSprint4Func(\"test_US39_upcomingAnniversaries\"),\\\n TestSprint4Func(\"test_us29\"),TestSprint4Func(\"test_us30\"),TestSprint4Func(\"test_US35\"),TestSprint4Func(\"test_US36\")]\n \n suite.addTests(tests)\n \n\n tests = [TestSprint4Func(\"test_ListMultipleBirths\"),TestSprint4Func(\"test_ListLivingSingle\")]\n suite.addTests(tests)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)\n\n","sub_path":"Project04/module/test/sprint4Test.py","file_name":"sprint4Test.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"506699922","text":"#!/usr/bin/env python3\n\nfrom player import Player\nfrom point_guard import PointGuard\nfrom center import Center\nfrom small_forward import SmallForward\nfrom shooting_guard import ShootingGuard\nfrom power_forward import PowerForward\n\ndef main():\n\n pg = PointGuard()\n c = Center()\n sf = SmallForward()\n sg = ShootingGuard()\n pf = PowerForward()\n pg.run_play(\"I'll dribble penetrate\")\n c.run_play(\"I'll cut 
to the corner\")\n sf.run_play(\"I'll set the first pick\")\n sg.run_play(\"I'll run to the wing\")\n pf.run_play(\"I'll set the second pick\")\n pg.run_play(\"I'll dribble past the screens to the post\")\n c.run_play(\"I'll cut to the top of the key\")\n sf.run_play(\"I'll set the first pick and roll to the basket\")\n sg.run_play(\"I'll maneuver to the low block and set a screen\")\n pf.run_play(\"I'll set the second pick and screen the shooting guard\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"P1/ex05/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"228650827","text":"\"\"\"\nVarious ways to plot lines with Matplotlib.\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nx = [1, 2, 3, 4, 5, 6, 7, 8]\ny = [2, 3, 7, 6, 7, 8, 9, 4]\n\ndata_obj = {\n 'Items': x,\n 'Quantity': y\n}\n\n\ndef plt_lines(x, y, **kwargs):\n plt.figure()\n plt.plot(x, y)\n plt.xlabel(kwargs['xlabel'])\n plt.ylabel(kwargs['ylabel'])\n plt.title(kwargs['title'])\n plt.box(on=False)\n plt.grid(color='0.8')\n plt.tick_params(color='0.8')\n\n\nplt.close('all')\n\n# example 1\nplt.figure()\nplt.plot(x, y)\nplt.xlabel('Items')\nplt.ylabel('Quantity')\nplt.title('Example 1')\nplt.box(on=False)\nplt.grid(color='0.85')\nplt.tick_params(color='0.85')\n\n# example 2\nplt.figure()\nplt.plot('Items', 'Quantity', data=data_obj)\nplt.xlabel('Items')\nplt.ylabel('Quantity')\nplt.title('Example 2')\nplt.box(on=False)\nplt.grid(color='0.85')\nplt.tick_params(color='0.85')\n\n# example 3\nfig, ax = plt.subplots()\nax.plot(x, y)\nax.set_xlabel('Items')\nax.set_ylabel('Quantity')\nax.set_title('Example 3')\nplt.box(on=False)\nplt.grid(color='0.85')\nplt.tick_params(color='0.85')\n\n# example 4 that uses above function to plot x and y\nplt_lines(x, y, xlabel='Items', ylabel='Quantity', title='Example 4')\n\nplt.show()\n","sub_path":"matplotlib_lines.py","file_name":"matplotlib_lines.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"513798297","text":"import math\n\nclass Parametric:\n step = -1\n edges = None\n\n def __init__(self, edgeMatrix, step=0.01):\n self.edges = edgeMatrix\n self.step = step\n\n def arc(self, center, radius, angle=2*math.pi):\n x = lambda t: center[0] + radius * math.cos(angle * t)\n y = lambda t: center[1] + radius * math.sin(angle * t)\n z = lambda t: center[2]\n self.add(x, y, z)\n\n def hermite(self, p0, p1, r0, r1):\n f = lambda i: lambda t: (2 * p0[i] - 2 * p1[i] + r0[i] + r1[i]) * t ** 3 + (-3 * p0[i] + 3 * p1[i] - 2 * r0[i] - r1[i]) * t ** 2 + r0[i] * t + p0[i]\n x = f(0)\n y = f(1)\n self.add(x, y)\n\n def bezier(self, p0, p1, p2, p3):\n f = lambda i: lambda t: (-p0[i] + 3 * p1[i] - 3 * p2[i] + p3[i]) * t ** 3 + (3 * p0[i] - 6 * p1[i] + 3 * p2[i]) * t ** 2 + (-3 * p0[i] + 3 * p1[i]) * t + p0[i]\n x = f(0)\n y = f(1)\n self.add(x, y)\n\n def add(self, x, y, z=lambda t: 0):\n step = self.step\n t = 0\n while t < 1:\n self.edges.addEdge((x(t), y(t), 0), (x(min(t + step, 1)), y(min(t + step, 1)), 0))\n t += step\n","sub_path":"engine/parametric/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"39611888","text":"\"\"\"Creates a card object when passed a suit and rank.\"\"\"\n\nimport collections\n\n\nclass Card(object):\n \"\"\"A card object 
with two attributes, suit and rank.\n\n The suit and rank values are provided when the deck is constructed, since\n different decks have different ranks and values for the cards they contain.\n \"\"\"\n\n SUITS = ('Clubs', 'Diamonds', 'Hearts', 'Spades')\n RANK = collections.namedtuple('Rank', 'value name')\n RANKS = (\n RANK(2, '2'),\n RANK(3, '3'),\n RANK(4, '4'),\n RANK(5, '5'),\n RANK(6, '6'),\n RANK(7, '7'),\n RANK(8, '8'),\n RANK(9, '9'),\n RANK(10, '10'),\n RANK(11, 'Jack'),\n RANK(12, 'Queen'),\n RANK(13, 'King'),\n RANK(14, 'Ace')\n )\n\n def __init__(self, suit, rank):\n self.suit = suit\n self.rank = rank\n\n def __str__(self):\n return '{0:>7} of {1}'.format(self.rank.name, self.suit)\n","sub_path":"card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"557795335","text":"# -*- coding: utf-8 -*-\n\nimport json\n\nfrom diary import storage\nfrom os import path\n\nif __name__ == '__main__':\n \n storage.show_menu()\n db_name = 'diary.db'\n storage.conn = storage.connect(db_name)\n storage.initialize(storage.conn)\n \n #if path.exists(db_name) == False:\n # storage.initialize(conn)\n \n \n actions = {\n '1': storage.show_diary,\n '2': storage.add_activity,\n '3': storage.change_activity,\n '4': storage.finish_activity,\n '5': storage.return_activity,\n '6': storage.action_exit\n }\n \n #task = storage.show_diary()\n \n while True:\n cmd = input()\n \n action = actions.get(cmd)\n \n if action:\n action()","sub_path":"HW5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"17425058","text":"import uuid\n\n\nclass Objective:\n __objective_map = {}\n\n @staticmethod\n def get_by_id(objective_id):\n return Objective.__objective_map[objective_id]\n\n def __init__(self, location, name, description, gameKey):\n self.id = str(uuid.uuid1())\n self.location = location\n self.name = name\n self.description = description\n self.gameKey = gameKey\n self.players_completed = []\n\n Objective.__objective_map[self.id] = self\n\n def __del__(self):\n del Objective.__objective_map[self.id]\n\n def player_complete(self, player_id):\n self.players_completed.append(player_id)\n\n def serialize(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"description\": self.description,\n \"location\": self.location\n }\n","sub_path":"SH-server/app/library/Objective.py","file_name":"Objective.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"292256890","text":"import os\nfrom pyspark.sql import SparkSession\nspark = SparkSession.builder.getOrCreate()\n\ndata_path = os.getcwd() + '\\\\data'\n\njson_df2_path = data_path + '\\\\example_5.json'\n# df1 = spark.read.format(\"json\").load(json_df1_path)\n\ndf2 = spark.read.json(json_df2_path, multiLine=True)\ndf2.count()\ndf2.show()\n\ndf2.columns\n\ndf2_sample = df2.sample(False, fraction=0.1)\n\ndf2_sample.show()\n\ndf2_sorted = df2.sort(\"color\")\ndf2_sorted.show()\n","sub_path":"03.03 - Reading json files 2.py","file_name":"03.03 - Reading json files 2.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"547356830","text":"# OmrCard\r\nclass OmrCard:\r\n # 생성자\r\n def __init__(self, name, school_id):\r\n self.name = 
name\r\n        self.school_id = school_id\r\n        self.answer_list = list()\r\n\r\n    # Method that records the marked answers\r\n    def marking_answer(self, *answers):\r\n        for answer in answers:\r\n            self.answer_list.append(answer)\r\n\r\n# OmrCardReader\r\nclass OmrCardReader:\r\n    # Answer key\r\n    def __init__(self, *answer):\r\n        self.real_answer_list = list()\r\n        for answer in answer:\r\n            self.real_answer_list.append(answer)\r\n\r\n    # Read a card, compute the name, student ID and score on it => print\r\n    def read_omr_card(self, omr_card):\r\n        i = 0 # index\r\n        score = 0\r\n        for real_answer in self.real_answer_list: # 0 1 2 3 4\r\n            if real_answer == omr_card.answer_list[i]: # if the answer is correct\r\n                # score per question: 100 / number of questions\r\n                score += 100 / len(self.real_answer_list)\r\n            \r\n            i += 1 # increase index by 1\r\n\r\n        print(\"Name:%s, Student ID:%d, Score:%g\" % (omr_card.name, omr_card.school_id, score))\r\n\r\n# create objects\r\nomrCard1 = OmrCard(\"신보람\", 1)\r\nomrCard1.marking_answer(1, 2, 3, 4, 5)\r\nprint(omrCard1.answer_list)\r\n\r\nomrCardReader = OmrCardReader(1, 2, 2, 4, 5)\r\nprint(omrCardReader.real_answer_list)\r\nomrCardReader.read_omr_card(omrCard1)\r\n\r\nomrCard2 = OmrCard(\"김바다\", 2)\r\nomrCard2.marking_answer(1, 2, 2, 4, 5)\r\n\r\nomrCardReader.read_omr_card(omrCard2)\r\n","sub_path":"11_class/quiz04/Quiz04.py","file_name":"Quiz04.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"348390916","text":"# Disable warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport pandas as pd\nimport numpy as np\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport math\nfrom math import sqrt\nfrom scipy import stats\nfrom sklearn.metrics import mean_squared_error, r2_score, explained_variance_score\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.feature_selection import f_regression \nfrom pydataset import data\n\n##########################################################################\n\ndef plot_residuals(df, y, yhat):\n    '''\n    This function takes in a dataframe, the y target variable \n    and the yhat (model predictions) and creates columns for residuals\n    and baseline residuals. 
It returns a graph of both residual columns.\n '''\n\n # create a residual column\n df['residual'] = (yhat - y)\n\n # create a residual baseline column\n df['residual_baseline'] = (y.mean() - y)\n \n fig, ax = plt.subplots(figsize=(13,7))\n\n ax.hist(df.residual_baseline, label='baseline residuals', alpha=.6)\n ax.hist(df.residual, label='model residuals', alpha=.6)\n ax.legend()\n\n plt.show()\n\n###############################################################################\n\ndef regression_errors(df, y, yhat):\n '''\n \n '''\n \n SSE = mean_squared_error(y, yhat)*len(df)\n\n\n MSE = mean_squared_error(y, yhat)\n\n\n RMSE = sqrt(mean_squared_error(y, yhat))\n\n\n ESS = sum((yhat - y.mean())**2)\n TSS = sum((y - y.mean())**2)\n\n # compute explained variance\n R2 = ESS / TSS\n \n print('SSE is:', SSE)\n print('ESS is:', ESS)\n print('TSS is:', TSS)\n print('R2 is:', R2)\n print('MSE is:', MSE)\n print('RMSE is:', RMSE) \n\n######################################################################\n\n\ndef baseline_mean_errors(df, y, yhat_baseline):\n \n SSE_baseline = mean_squared_error(y, yhat_baseline)*len(df)\n \n MSE_baseline = mean_squared_error(y, yhat_baseline)\n \n RMSE_baseline = sqrt(mean_squared_error(y, yhat_baseline))\n \n \n print('Baseline SSE is:', SSE_baseline)\n print('Baseline MSE is:', MSE_baseline)\n print('Baseline RMSE is:', RMSE_baseline)\n\n#######################################################################\n\n\ndef better_than_baseline(df, y, yhat, yhat_baseline):\n\n RMSE = sqrt(mean_squared_error(y, yhat))\n \n RMSE_baseline = sqrt(mean_squared_error(y, yhat_baseline))\n \n if RMSE < RMSE_baseline:\n print('True - The model performs better than the baseline')\n \n elif RMSE > RMSE_baseline:\n print('False - The baseline performs better than the model')\n \n return RMSE, RMSE_baseline","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"152325324","text":"import pickle\nimport os\nimport sys\nimport uuid\nimport psycopg2\nfrom flask import Flask, request, redirect, send_from_directory, Response, render_template, url_for\nfrom Show_Objects import cartoon_show_object, anime_show_object, show_object\nfrom htmltemplate import Template\nfrom PIL import Image\nimport base64\n\napp = Flask(__name__)\napp.secret_key = os.getenv('cartoon_secret_key')\n\nParent_Object = show_object()\nParent_Object.cartoon_dict = {}\nParent_Object.anime_dict = {}\n\n\ndef save_database():\n conn = psycopg2.connect(os.getenv('cartoon_database_url'))\n\n cursor = conn.cursor()\n cursor.execute('truncate \"ShowPickle\"')\n conn.commit()\n\n cursor.execute('INSERT INTO \"ShowPickle\"(cartoon_pickle_data) VALUES (%s)',\n (psycopg2.Binary(pickle.dumps(Parent_Object)),))\n\n conn.commit()\n\n\n# Load the Player list database\ndef load_database():\n try:\n global Parent_Object\n\n conn = psycopg2.connect(os.getenv('cartoon_database_url'))\n\n cursor = conn.cursor()\n cursor.execute('select cartoon_pickle_data from \"ShowPickle\" LIMIT 1') #\n mypickle = cursor.fetchone()[0]\n Parent_Object = pickle.loads(mypickle)\n\n except TypeError as err:\n print(\"Unexpected error:\", err)\n pass # do nothing. 
no database to load\n except:\n pass #this might happen if there's no schema deployed\n\n\ndef shrink_image(file):\n if file is not None and file.filename != '':\n image_to_shrink = Image.open(file)\n image_to_shrink.thumbnail((300, 300))\n image_to_shrink.save(file.filename)\n image_to_shrink.close()\n image_to_shrink = open(file.filename, 'rb')\n read_image = image_to_shrink.read()\n os.remove(file.filename)\n return read_image\n else:\n return None\n\n\n@app.route('/initdb')\ndef setup_database():\n conn = psycopg2.connect(os.getenv('cartoon_database_url'))\n\n cursor = conn.cursor()\n cursor.execute('''\n CREATE TABLE public.\"ShowPickle\"\n (\n cartoon_pickle_data bytea, \n anime_pickle_data bytea\n )\n with (\n OIDS = FALSE\n );\n ''')\n conn.commit()\n\n\n@app.route('/cartoon_list', methods=['GET',])\ndef cartoon_list():\n load_database()\n cartoon_list_page = open(app.root_path + '/CartoonList.html').read()\n\n def render_Cartoon_template(node):\n node.Cartoon_Attribute.repeat(render_cartoonAtr, Parent_Object.cartoon_dict)\n\n\n\n def render_cartoonAtr(node, cartoonsection):\n\n if Parent_Object.cartoon_dict[cartoonsection].showimage is not None \\\n and Parent_Object.cartoon_dict[cartoonsection].showimage != '':\n data64 = u'data:%s;base64, %s' % (\n 'image/jpg', base64.encodebytes(Parent_Object.cartoon_dict[cartoonsection].showimage).decode('utf8'))\n else:\n data64 = None\n\n node.Cartoon_Title_Attribute.text = Parent_Object.cartoon_dict[cartoonsection].showname\n node.Cartoon_Title_Attribute.atts['href'] = Parent_Object.cartoon_dict[cartoonsection].showlink\n node.Cartoon_Edit_Attribute.atts['href'] = '/cartoon/' + str(Parent_Object.cartoon_dict[cartoonsection].id)\n node.Cartoon_Delete_Attribute.atts['href'] = '/cartoon/' + str(Parent_Object.cartoon_dict[cartoonsection].id) + '/delete'\n node.Cartoon_Logo_Attribute.atts['src'] = data64\n\n cartoon_list_template = Template(cartoon_list_page)\n return cartoon_list_template.render(render_Cartoon_template)\n\n\n@app.route('/anime_list', methods=['GET',])\ndef anime_list():\n load_database()\n anime_list_page = open(app.root_path + \"/AnimeList.html\").read()\n\n\n def render_anime_template(node):\n node.Anime_Attribute.repeat(render_animeAtr, Parent_Object.anime_dict)\n\n def render_animeAtr(node, animesection):\n if Parent_Object.anime_dict[animesection].showimage is not None \\\n and Parent_Object.anime_dict[animesection].showimage != '':\n data64 = u'data:%s;base64, %s' % (\n 'image/jpg', base64.encodebytes(Parent_Object.anime_dict[animesection].showimage).decode('utf8'))\n else:\n data64 = None\n\n node.Anime_Title_Attribute.text = Parent_Object.anime_dict[animesection].showname\n node.Anime_Title_Attribute.atts['href'] = Parent_Object.anime_dict[animesection].showlink\n node.Anime_Edit_Attribute.atts['href'] = '/anime/' + str(Parent_Object.anime_dict[animesection].id)\n node.Anime_Delete_Attribute.atts['href'] = '/anime/' +str(Parent_Object.anime_dict[animesection].id) + '/delete'\n node.Anime_Logo_Attribute.atts['src'] = data64\n\n\n anime_list_template = Template(anime_list_page)\n return anime_list_template.render(render_anime_template)\n\n\n@app.route('/')\ndef send_js(path):\n return send_from_directory('', path)\n\n\n@app.route('/cartoon/', methods=['GET',])\ndef get_cartoon(id):\n global Parent_Object\n load_database()\n\n id_as_uuid = uuid.UUID(id)\n cartoon_object_from_dictionary = Parent_Object.cartoon_dict[id_as_uuid]\n\n if cartoon_object_from_dictionary.showimage is not None\\\n and 
cartoon_object_from_dictionary.showimage != '':\n data64 = u'data:%s;base64, %s' % (\n 'image/jpg', base64.encodebytes(cartoon_object_from_dictionary.showimage).decode('utf8'))\n else:\n data64 = None\n\n edit_page = open('Cartoon_Edit.html').read()\n\n def render_cartoon(node, cartoon_object):\n node.ActionPathAtr.atts['action'] = '/cartoon/' + str(id_as_uuid) + '/update'\n node.ActionPathAtr.Cartoon_Link_Attribute.atts['value'] = cartoon_object.showlink\n node.ActionPathAtr.Cartoon_Title_Attribute.atts['value'] = cartoon_object.showname\n node.ActionPathAtr.DisplayImgAtr.atts['src'] = data64\n\n cartoon_template = Template(edit_page)\n return cartoon_template.render(render_cartoon, cartoon_object_from_dictionary)\n\n\n@app.route('/anime/', methods=['GET',])\ndef get_anime(id):\n global Parent_Object\n load_database()\n\n id_as_uuid = uuid.UUID(id)\n anime_object_from_dictionary = Parent_Object.anime_dict[id_as_uuid]\n\n if anime_object_from_dictionary.showimage is not None\\\n and anime_object_from_dictionary.showimage != '':\n data64 = u'data:%s;base64, %s' % (\n 'image/jpg', base64.encodebytes(anime_object_from_dictionary.showimage).decode('utf8'))\n\n else:\n data64 = None\n\n edit_page = open('Anime_Edit.html').read()\n\n def render_anime(node, anime_object):\n node.ActionPathAtr.atts['action'] = '/anime/' + str(id_as_uuid) + '/update'\n node.ActionPathAtr.Anime_Link_Attribute.atts['value'] = anime_object.showlink\n node.ActionPathAtr.Anime_Title_Attribute.atts['value'] = anime_object.showname\n node.ActionPathAtr.DisplayImgAtr.atts['src'] = data64\n\n cartoon_template = Template(edit_page)\n return cartoon_template.render(render_anime, anime_object_from_dictionary)\n\n\n@app.route('/cartoon//delete')\ndef delete_cartoon(id):\n global Parent_Object\n load_database()\n id_as_uuid = uuid.UUID(id)\n del Parent_Object.cartoon_dict[id_as_uuid]\n save_database()\n return redirect('/cartoon_list')\n\n\n@app.route('/anime//delete')\ndef delete_anime(id):\n global Parent_Object\n load_database()\n id_as_uuid = uuid.UUID(id)\n del Parent_Object.anime_dict[id_as_uuid]\n save_database()\n return redirect('/anime_list')\n\n\n@app.route('/cartoon//update', methods=['POST',])\ndef update_cartoon(id):\n global Parent_Object\n load_database()\n\n id_as_uuid = uuid.UUID(id)\n\n cartoon_to_update = Parent_Object.cartoon_dict[id_as_uuid]\n\n try:\n file = request.files['Image_Input']\n except:\n file = None\n\n cartoon_to_update.showname = request.form['Cartoon_Title_Input']\n\n if file is not None:\n cartoon_to_update.showimage = shrink_image(file)\n\n cartoon_to_update.showlink = request.form['Cartoon_Link_Input']\n\n Parent_Object.cartoon_dict[id_as_uuid] = cartoon_to_update\n save_database()\n return redirect('/')\n\n\n@app.route('/anime//update', methods=['POST',])\ndef update_anime(id):\n global Parent_Object\n load_database()\n\n id_as_uuid = uuid.UUID(id)\n\n anime_to_update = Parent_Object.anime_dict[id_as_uuid]\n\n try:\n file = request.files['Image_Input']\n except:\n file = None\n\n anime_to_update.showname = request.form['Anime_Title_Input']\n\n if file is not None:\n anime_to_update.showimage = shrink_image(file)\n anime_to_update.showlink=request.form['Anime_Link_Input']\n\n Parent_Object.anime_dict[id_as_uuid] = anime_to_update\n save_database()\n return redirect('/')\n\n\n@app.route('/')\ndef home():\n this_folder = os.path.dirname(os.path.abspath(__file__))\n home_page = os.path.join(this_folder, 'Home.html')\n with open(home_page) as home:\n list_page = open(home_page).read()\n return 
list_page\n\n\n@app.route('/cartoon/new/', methods=['POST',])\ndef add_cartooon():\n try:\n file = request.files['Image_Input']\n except:\n file = None\n\n # instantiate a new show object and populate it from request.form\n New_Cartoon = cartoon_show_object(\n showname=request.form['Cartoon_Title_Input'],\n showimage=shrink_image(file),\n showlink=request.form['Cartoon_Link_Input'])\n\n Parent_Object.cartoon_dict[New_Cartoon.id] = New_Cartoon\n save_database()\n return redirect('/')\n\n\n@app.route('/cartoon/new/', methods=['GET',])\ndef get_add_cartooon_form():\n\n def render_cartoon_page(node):\n node.ActionPathAtr.atts['action'] = '/cartoon/new/'\n\n this_folder = os.path.dirname(os.path.abspath(__file__))\n add_cartoon_page = os.path.join(this_folder, 'Cartoon_Edit.html')\n cartoon_template = Template(open(add_cartoon_page).read())\n return cartoon_template.render(render_cartoon_page)\n #return open(add_cartoon_page).read()\n\n\n@app.route('/anime/new/', methods=['GET',])\ndef get_add_anime_form():\n\n def render_anime_page(node):\n node.ActionPathAtr.atts['action'] = '/anime/new/'\n\n this_folder = os.path.dirname(os.path.abspath(__file__))\n add_anime_page = os.path.join(this_folder, 'Anime_Edit.html')\n anime_template = Template(open(add_anime_page).read())\n return anime_template.render(render_anime_page)\n #return open(add_anime_page).read()\n\n\n@app.route('/anime/new/', methods=['POST',])\ndef add_anime():\n try:\n file = request.files['Image_Input']\n except:\n file = None\n\n # instantiate a new show object and populate it from request.form\n New_Anime = anime_show_object(\n showname=request.form['Anime_Title_Input'],\n showimage=shrink_image(file),\n showlink=request.form['Anime_Link_Input'])\n\n Parent_Object.anime_dict[New_Anime.id] = New_Anime\n save_database()\n return redirect('/')\n\n\n@app.route('/search/', methods=['POST', ])\ndef search():\n load_database()\n search_result_list2 = {}\n\n for animekey, animeitem in Parent_Object.anime_dict.items():\n if animeitem.showname == request.form['searchbox']:\n search_result_list2[animekey] = animeitem\n\n for cartoonkey, cartoonitem in Parent_Object.cartoon_dict.items():\n if cartoonitem.showname == request.form['searchbox']:\n search_result_list2[cartoonkey] = cartoonitem\n\n this_folder = os.path.dirname(os.path.abspath(__file__))\n search_results_page = os.path.join(this_folder, 'Search_Results.html')\n\n def render_anime_template(node):\n node.Anime_Attribute.repeat(render_animeAtr, search_result_list2)\n\n def render_animeAtr(node, animesection):\n if search_result_list2[animesection].showimage is not None \\\n and search_result_list2[animesection].showimage != '':\n data64 = u'data:%s;base64, %s' % (\n 'image/jpg', base64.encodebytes(search_result_list2[animesection].showimage).decode('utf8'))\n else:\n data64 = None\n\n node.Anime_Logo_Attribute.atts['src'] = data64\n node.Anime_Title_Attribute.text = search_result_list2[animesection].showname\n node.Anime_Title_Attribute.atts['href'] = search_result_list2[animesection].showlink\n if type(search_result_list2[animesection]) == anime_show_object:\n node.Anime_Edit_Attribute.atts['href'] = '/anime/' + str(search_result_list2[animesection].id)\n elif type(search_result_list2[animesection]) == cartoon_show_object:\n node.Anime_Edit_Attribute.atts['href'] = '/cartoon/' + str(search_result_list2[animesection].id)\n\n\n search_template = Template(open(search_results_page).read())\n return search_template.render(render_anime_template)\n\n\nif __name__ == '__main__':\n 
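# Pull the persisted show database into Parent_Object before serving, then
# start the Flask development server; debug=True turns on the auto-reloader
# and interactive tracebacks, so it should be disabled in production.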
load_database()\n app.run(debug=True)\n","sub_path":"Cartoon-main.py","file_name":"Cartoon-main.py","file_ext":"py","file_size_in_byte":12777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"200262894","text":"# -*- coding:utf-8 -*-\n# @Time:2021/4/13 20:20\n# @Author: explorespace\n# @Email: cyberspacecloner@qq.com\n# @File: model.py\n# software: PyCharm\n\nfrom tensorflow.keras import layers, models, Model, Sequential\n\n\ndef VGG(feature, im_height=224, im_width=224, num_classes=1000):\n # tensorflow 中 tensor 通道顺序为:N H W C\n input_image = layers.Input(shape=(im_height, im_width, 3), dtype=\"float32\")\n x = feature(input_image)\n x = layers.Flatten()(x)\n x = layers.Dropout(rate=0.5)(x)\n x = layers.Dense(2048, activation='relu')(x)\n x = layers.Dropout(rate=0.5)(x)\n x = layers.Dense(2048, activation='relu')(x)\n x = layers.Dense(num_classes)(x)\n output = layers.Softmax()(x)\n model = models.Model(inputs=input_image, outputs=output)\n return model\n\n\ndef features(cfg):\n feature_layers = []\n for v in cfg:\n if v == 'M':\n feature_layers.append(layers.MaxPool2D(pool_size=2, strides=2))\n else:\n conv2d = layers.Conv2D(v, kernel_size=3, padding='SAME', activation='relu')\n feature_layers.append(conv2d)\n return Sequential(feature_layers, name='feature')\n\n\ncfgs = {\n 'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\ndef vgg(model_name='vgg16', **kwargs):\n assert model_name in cfgs.keys(), \"not support model {}\".format(model_name)\n cfg = cfgs[model_name]\n model = VGG(features(cfg), **kwargs)\n\n return model","sub_path":"CV/tf/VGG/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"174912693","text":"\"\"\"\nContains various utility functions for PyTorch model training and saving.\n\"\"\"\nimport torch\nfrom pathlib import Path\nfrom torch.utils.tensorboard import SummaryWriter\n\ndef save_model(model: torch.nn.Module,\n target_dir: str,\n model_name: str):\n \"\"\"Saves a PyTorch model to a target directory.\n\n Args:\n model: A target PyTorch model to save.\n target_dir: A directory for saving the model to.\n model_name: A filename for the saved model. 
Should include\n either \".pth\" or \".pt\" as the file extension.\n\n Example usage:\n save_model(model=model_0,\n target_dir=\"models\",\n model_name=\"05_going_modular_tingvgg_model.pth\")\n \"\"\"\n # Create target directory\n target_dir_path = Path(target_dir)\n target_dir_path.mkdir(parents=True,\n exist_ok=True)\n\n # Create model save path\n assert model_name.endswith(\".pth\") or model_name.endswith(\".pt\"), \"model_name should end with '.pt' or '.pth'\"\n model_save_path = target_dir_path / model_name\n\n # Save the model state_dict()\n print(f\"[INFO] Saving model to: {model_save_path}\")\n torch.save(obj=model.state_dict(),\n f=model_save_path)\n\ndef create_writer(experiment_name: str, \n model_name: str, \n extra: str=None) -> torch.utils.tensorboard.writer.SummaryWriter():\n \"\"\"Creates a torch.utils.tensorboard.writer.SummaryWriter() instance saving to a specific log_dir.\n\n log_dir is a combination of runs/timestamp/experiment_name/model_name/extra.\n\n Where timestamp is the current date in YYYY-MM-DD format.\n\n Args:\n experiment_name (str): Name of experiment.\n model_name (str): Name of model.\n extra (str, optional): Anything extra to add to the directory. Defaults to None.\n\n Returns:\n torch.utils.tensorboard.writer.SummaryWriter(): Instance of a writer saving to log_dir.\n\n Example usage:\n # Create a writer saving to \"runs/2022-06-04/data_10_percent/effnetb2/5_epochs/\"\n writer = create_writer(experiment_name=\"data_10_percent\",\n model_name=\"effnetb2\",\n extra=\"5_epochs\")\n # The above is the same as:\n writer = SummaryWriter(log_dir=\"runs/2022-06-04/data_10_percent/effnetb2/5_epochs/\")\n \"\"\"\n from datetime import datetime\n import os\n\n # Get timestamp of current date (all experiments on certain day live in same folder)\n timestamp = datetime.now().strftime(\"%Y-%m-%d\") # returns current date in YYYY-MM-DD format\n\n if extra:\n # Create log directory path\n log_dir = os.path.join(\"runs\", timestamp, experiment_name, model_name, extra)\n else:\n log_dir = os.path.join(\"runs\", timestamp, experiment_name, model_name)\n \n print(f\"[INFO] Created SummaryWriter, saving to: {log_dir}...\")\n return SummaryWriter(log_dir=log_dir)\n","sub_path":"ds_utils/pytorch_utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"559963897","text":"import json\nimport csv\nimport re\nfrom bs4 import BeautifulSoup\nimport lxml.html\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem.wordnet import WordNetLemmatizer\n\n\ndrugFiles = ['drug-label-0001-of-0006.json']##,'drug-label-0002-of-0006.json','drug-label-0002-of-0006.json','drug-label-0004-of-0006.json','drug-label-0005-of-0006.json','drug-label-0006-of-0006.json',]\n\t\t\nbody_system = ['Gastrointestinal','Central Nervous System', 'Gastrointestinal System','Cardiovascular System','Psychiatric and Paradoxical Reactions','Urogenital System','Skin and Appendages']\nproduct_data_array = []\nprod_dic_by_company = {}\ncompany_keys = prod_dic_by_company.keys()\nndc_keys = []\nunique_keys = {}\nmed_by_pharma = {}\ndrug_mono_array = []\ndrug_mono_dict = {}\n\nhtml_template_string = '\\nFDA Monographs\\n\\n\\n

<body>\n<h1>FDA Monograph list</h1>\n
'\n\nclass ProductSummary:\n\tdef __init__(self,prod_items):\n\t\tself.product_items = prod_items\n\t\tself.isValid = 'NO'\n\t\tif(len(self.product_items) > 0):\n\t\t\tself.isValid = 'YES'\n\t\t\tself.productID = self.product_items[0]\n\t\t\tself.companyID = self.productID.split('_')[1]\n\t\t\tself.productNDC = self.product_items[1]\n\t\t\tself.productTypeName = self.product_items[2]\n\t\t\tself.productProprietary = self.product_items[3]\n\t\t\tself.productProprietarySuffix = self.product_items[4]\n\t\t\tself.productNonProprietaryName = self.product_items[5]\n\t\t\t##added\n\t\t\tself.productDosageFormName = self.product_items[6]\n\t\t\t##added\n\t\t\tself.productRouteName = self.product_items[7]\n\t\t\t##added\n\t\t\tself.productLabelerName= self.product_items[12]\n\t\t\tself.productStartMarketingDate = self.product_items[8]\n\t\t\tself.productEndMarketingDate = self.product_items[9]\n\t\t\tself.productMarketingCategoryName= self.product_items[10]\n\t\t\tself.productApplicationNumber = self.product_items[11]\n\t\t\tself.productSubstanceName = self.product_items[13]\n\t\t\tself.productActiveNumeratorStrength = self.product_items[14]\n\t\t\tself.productActiveIngredientUnit = self.product_items[15]\n\t\t\tself.productPharmClasses = self.product_items[16]\n\n\t\t##DEASCHEDULE\n\t\t\nclass DrugMonograph:\n\tdef __init__(self,drug_info):\n\t\t\n\t\t\n\t\tdrug_keys= drug_info.keys()\n\t\tdrug_fda = drug_info['openfda']\n\t\tself.id = drug_info['id']\n\n\t\tdrug_monograph = drug_fda\n\t\tif len(drug_keys) < 5:\n\t\t\tprint(str(drug_monograph))\n\n\t\tself.product_ndc = 'No PRODUCT NDC'\n\t\tself.rxcui = 'No RXCUI'\n\t\tself.spl_id = 'No SPL id'\n\t\tself.spl_set_id = 'No SPL set id'\t\t\n\t\tself.product_id = 'No product_id'\n\t\tself.generic_name = 'No generic'\n\t\tself.brand_name = 'No brand name'\n\t\tself.pharm_class = 'No Pharma' \n\t\tself.pharm_class_pe = 'No Pharma PE'\n\t\tself.product_type = 'No Product Type'\n\t\tself.manufacturer_name = 'No Mfr'\n\t\tself.brand_name = 'No Brand Name'\n\t\tself.route = 'No Route'\n\t\tself.precautions = 'No Precautions'\n\t\tself.how_supplied = 'No supplied'\n\t\tself.contraindications = 'No contraindications'\n\t\tself.description = 'No description'\n\t\tself.indications_usage = 'No indications usage'\n\t\tself.patientInfo = 'No patient info'\n\t\tself.warnings = 'No warnings'\n\t\tself.mechanism = 'No mechanism'\n\t\tself.pharmacology = 'No pharmacology'\n\t\tself.boxedWarning = 'No warning'\n\t\tself.pharmacokinetics = 'No pharmacokinetics'\n\t\tself.pregnancy = 'No preggers'\n\t\tself.labor_delivery = 'No labor delivery'\n\t\tself.fertility = 'No fertility'\n\t\tself.adverse_reactions = 'No adverse'\n\n\t\tself.dosage_admin = []\n\t\tself.adverse_reactions_table = []\n\t\tself.drug_interactions = []\n\t\tself.pharmacokinetics_table = []\n\t\tself.manufacturers = []\n\t\tself.routes = []\n\n\t\tif 'product_ndc' in drug_fda:\n\t\t\tself.product_ndc = drug_fda['product_ndc'][0]\n\t\tif 'rxcui' in drug_fda:\n\t\t\tself.rxcui = drug_fda['rxcui'][0]\n\t\tif 'spl_id' in drug_fda:\n\t\t\tself.spl_id = drug_fda['spl_id'][0]\n\t\tif 'spl_set_id' in drug_fda:\n\t\t\tself.spl_set_id = drug_fda['spl_set_id'][0]\n\n\n\t\tif 'generic_name' in drug_fda or 'brand_name' in drug_fda:\n\t\t\tif 'generic_name' in drug_fda:\n\t\t\t\tgeneric_string = drug_monograph['generic_name'][0]\n\t\t\t\tself.generic_name = generic_string\n\t\t\telse: \n\t\t\t\tself.generic_name = 'No Generic Name'\n\n\t\t\tif 'brand_name' in drug_fda:\n\t\t\t\tself.brand_name = 
drug_fda['brand_name'][0]\n\n\t\t\tif 'pharm_class_epc' in drug_fda:\n\t\t\t\tself.pharm_class = drug_fda['pharm_class_epc'][0]\n\t\t\tif 'pharm_class_pe' in drug_fda:\n\t\t\t\tself.pharm_class_pe = drug_fda['pharm_class_pe'][0]\t\n\t\t\tif 'manufacturer_name' in drug_fda:\n\t\t\t\tself.manufacturer_name = drug_fda['manufacturer_name'][0]\t\n\t\t\tif 'product_type' in drug_fda:\n\t\t\t\tself.product_type = drug_fda['product_type'][0]\n\t\t\tif 'indications_and_usage' in drug_keys:\n\t\t\t\tself.indications_usage = drug_info['indications_and_usage'][0]\n\t\t\tif 'contraindications' in drug_keys:\n\t\t\t\tself.contraindications = drug_info['contraindications'][0]\n\t\t\tif 'precautions' in drug_keys:\n\t\t\t\tself.precautions = drug_info['precautions'][0]\n\t\t\tif 'description' in drug_keys:\n\t\t\t\tself.description = drug_info['description'][0]\n\t\t\tif 'how_supplied' in drug_keys:\n\t\t\t\tself.how_supplied = drug_info['how_supplied'][0]\n\n\n\t\t\tif 'information_for_patients' in drug_keys:\n\t\t\t\tself.patientInfo = drug_info['information_for_patients'][0]\n\t\t\tif 'warnings' in drug_keys:\n\t\t\t\tself.warnings = drug_info['warnings'][0]\n\t\t\tif 'pregnancy' in drug_keys:\n\t\t\t\tself.pregnancy = drug_info['pregnancy'][0]\n\t\t\tif 'labor_and_delivery' in drug_keys:\n\t\t\t\tself.labor_delivery = drug_info['labor_and_delivery'][0]\n\t\t\tif 'carcinogenesis_and_mutagenesis_and_impairment_of_fertility' in drug_keys:\n\t\t\t\tself.fertility = drug_info['carcinogenesis_and_mutagenesis_and_impairment_of_fertility'][0]\n\t\t\tif 'boxed_warning' in drug_keys:\n\t\t\t\tself.boxedWarning = drug_info['boxed_warning'][0]\t\n\t\t\tif 'route' in drug_fda:\n\t\t\t\tself.route = drug_fda['route'][0]\n\n\t\t\tif 'package_label_principal_display_panel' in drug_keys:\n\t\t\t\tself.package_label_display = drug_info['package_label_principal_display_panel'][0]\n\t\t\tif 'mechanism_of_action' in drug_keys:\n\t\t\t\tself.mechanism = drug_info['mechanism_of_action'][0]\n\t\t\tif 'clinical_pharmacology' in drug_keys:\n\t\t\t\tself.pharmacology = drug_info['clinical_pharmacology'][0]\n\t\t\tif 'pharmacokinetics' in drug_keys:\n\t\t\t\tself.pharmacokinetics = drug_info['pharmacokinetics'][0]\n\n\t\t\tif 'adverse_reactions' in drug_keys:\n\t\t\t\tself.adverse_reactions = drug_info['adverse_reactions'][0]\n\n\t\t\t## Arrays\n\t\t\t##-----------------------------------\n\t\t\tif 'dosage_and_administration_table' in drug_keys:\n\t\t\t\tself.dosage_admin = drug_info['dosage_and_administration_table']\n\t\t\tif 'adverse_reactions_table' in drug_keys:\n\t\t\t\tself.adverse_reactions_table = drug_info['adverse_reactions_table']\n\t\t\tif 'drug_interactions' in drug_keys:\n\t\t\t\tself.drug_interactions = drug_info['drug_interactions']\n\t\t\tif 'pharmacokinetic_table' in drug_keys:\n\t\t\t\tself.pharmacokinetics_table = drug_info['pharmacokinetics_table']\n\ndef checkMultipleArrayVal(drug_dic):\n\tdict_keys = drug_dic.keys()\n\tfor key in dict_keys:\n\t\tif type(drug_dic[key]) is list:\n\t\t\tif len(drug_dic[key]) > 1:\n\t\t\t\tprint('List contains multiple items')\n\ndef regexTableData(parse_text):\n\tfor text_parse in parse_text:\n\t\ttext_clean_1 = re.sub(r'<[A-Z,a-z,0-9\\s]+\\/>','####\\n',text_parse)\n\t\ttext_clean_2 = re.sub(r'<[A-Z,a-z,0-9\\s]+ \\= [A-Z,a-z,0-9\\\"]+>','####\\n',text_clean_1)\n\t\ttext_clean_3 = re.sub(r'<[A-Z,a-z,0-9\\s\\=]+>','####\\n',text_clean_2)\n\n\t\treturn text_clean_3\n\ndef write_med_mono(outfile, drug_mono, 
product):\n\toutfile.write('--------------------------------------------------------------\\n')\n\toutfile.write('******' + drug_mono.boxedWarning+'\\n')\n\toutfile.write('Drug ID: ' + drug_mono.id+ ', NDC: ' + drug_mono.product_ndc + '\\n')\n\toutfile.write(drug_mono.generic_name + '(' + drug_mono.brand_name + ')' '\\n' + 'Pharm class: ' + drug_mono.pharm_class + '\\n')\n\toutfile.write(product.productLabelerName + '(' + drug_mono.manufacturer_name + ')\\n')\n\toutfile.write(product.productDosageFormName + '\\nActive: ' + product.productActiveNumeratorStrength + '(' + product.productActiveIngredientUnit + ')\\n' + 'Route: ' + product.productRouteName + '\\n')\n\toutfile.write(drug_mono.how_supplied + '\\n')\n\toutfile.write(drug_mono.description + '\\n')\n\n\tindications = drug_mono.indications_usage.split('.')\n\tfor indication in indications:\n\t\toutfile.write(indication+'**')\n\toutfile.write(drug_mono.indications_usage + '\\n')\n\tprecautions = drug_mono.precautions.split(',')\n\tfor precaution in precautions:\n\t\toutfile.write(precaution+'**')\n\n\toutfile.write(drug_mono.precautions+'\\n')\n\toutfile.write(drug_mono.contraindications+'\\n')\n\toutfile.write(drug_mono.warnings+'\\n')\n\toutfile.write(drug_mono.patientInfo+'\\n')\n\toutfile.write(drug_mono.pregnancy+'\\n')\n\toutfile.write(drug_mono.labor_delivery+'\\n')\n\t\n\tif len(drug_mono.adverse_reactions) == 0:\n\t\toutfile.write('ADVERSE: 0\\n')\n\telse:\n\t\tfor reaction in drug_mono.adverse_reactions:\n\t\t\treaction_clean = re.sub(r'<[A-Z,a-z,0-9\\s]+\\/>','####\\n',reaction)\n\t\t\treaction_clean2 = re.sub(r'<[A-Z,a-z,0-9\\s\\\"]+ \\= [A-Z,a-z,0-9\\\"\\/]*>','####\\n',reaction_clean)\n\t\t\treaction_clean3 = re.sub(r'<[A-Z,a-z,0-9\\s\\=]+>', '####\\n', reaction)\n\t\t\treaction_clean4 = re.sub(r'<\\/[A-Z,a-z,0-9]>','****\\n',reaction_clean3)\n\t\t\treaction_clean5 = re.sub(r'<[A-Z,a-z,0-9\\s]+[A-Z,a-z,0-9]+\\=\\\"[A-Z,a-z,0-9]+\\\">','#*#*#*',reaction_clean4)\n\t\t\toutfile.write('ADVERSE:\\n ' + soup.prettify())\n\n\t\t\tsoup = BeautifulSoup(reaction,'xml')\n\t\t\ttable_tags = soup.find('table')\n\t\t\ttable_caption = table_tags.find('caption')\n\t\t\tif table_caption != None:\n\t\t\t\tprint (table_caption[0])\n\t\t\ttable_rows = table_tags.find_all('tr')\n\t\t\tprint('rows: ' + str(len(table_rows)))\n\n\t\t\tfor tableRow in table_rows:\n\t\t\t\ttableItem = tableRow.find_all('td')\n\t\t\t\tfor element in tableItem:\n\t\t\t\t\tif len(element) > 0:\n\t\t\t\t\t\titem = element.contents[0]\n\t\t\t\t\t\t##print (item)\n\n\t\t\ttable_items = table_row.find_all('td')\n\t\t\tprint('table items: ' + str(len(table_items)))\n\n\tif len(drug_mono.dosage_admin) == 0:\n\t\toutfile.write('DOSE ADMIN: 0\\n')\n\telse:\n\t\tfor doseAdmin in drug_mono.dosage_admin:\n\t\t\tcleanDose = regexTableData(doseAdmin)\n\t\t\t##outfile.write('DOSE: \\n' + cleanDose+'\\n')\n\n\tif len(drug_mono.drug_interactions) == 0:\n\t\toutfile.write('INTERACTIONS : 0 \\n')\n\telse:\n\t\tfor interactions in drug_mono.drug_interactions:\n\t\t\tre.sub('<[a-z, A-Z, 0-9]+>', '!!\\n!!', interactions)\n\t\t\toutfile.write('INTERACTIONS: \\n' +interactions+'\\n')\n\t\t\ndef read_product_data():\n\twith open('product.txt',encoding='ISO-8859-1') as csvDataFile:\n\t\tcsvReader = csv.reader(csvDataFile,delimiter='\\t')\n\t\trowCount = 0\n\t\tfor row in csvReader:\n\t\t\trowCount = rowCount + 1\n\t\t\tif len(row) < 16:\n\t\t\t\tprint('Less than 16')\n\t\t\t\tprint(str(rowCount))\n\t\t\telif len(row) < 15:\n\t\t\t\tprint('Less than 
15')\n\t\t\t\tprint(str(rowCount))\n\t\t\tproduct_info = ProductSummary(row)\n\t\t\tif product_info.isValid == 'YES':\n\t\t\t\tproduct_data_array.append(product_info)\n\ndef write_html_detail_monograph(generic_med_name, monograph, toc_file):\n\tif len(monograph.generic_name) < 60:\n\t\t\tgeneric_name_init = re.sub(r'\\s+','',monograph.generic_name)\n\t\t\tgeneric_name = re.sub(r'\\/','',generic_name_init)\n\t\t\tdrug_filename = generic_name + '.html'\n\t\t\ttoc_file.write('

<a href=\"drugList/'+drug_filename+'\">Full monograph</a><br>\\n')\n\n\t\t\tdrug_file = open('./drugList/'+drug_filename,'w')\n\t\t\tif monograph.boxedWarning != 'No warning':\n\t\t\t\tdrug_file.write('

<h1>BOXED WARNING</h1>\\n<p>' + monograph.boxedWarning)\n\t\t\t\n\t\t\tdrugName = '

<h1>' + generic_med_name + ' (' + monograph.brand_name + ')</h1>'\n\t\t\tdrugIDinfo = '

<p>SPL'+monograph.spl_id+'<br>\\nNDC:'+monograph.product_ndc+'<br>\\nRXCUI'+monograph.rxcui+'</p>'\n\t\t\tdrugPharma = '

<h3>' + monograph.pharm_class + ' -- ' + monograph.pharm_class_pe +'</h3>\\n'\n\t\t\tdrug_file.write('

<h3>Route: ' + monograph.route + '</h3>')\n\t\t\tdrug_file.write(drugName)\n\t\t\tdrug_file.write(drugIDinfo)\n\t\t\tdrug_file.write(drugPharma)\n\t\t\tdrug_file.write('

<h2>How supplied</h2>\\n' + monograph.how_supplied)\n\t\t\tdrug_file.write('

<h2>Indications and Usage</h2>\\n' + monograph.indications_usage)\n\t\t\tdrug_file.write('

<h2>Description</h2>\\n' + monograph.description)\n\t\t\tdrug_file.write('

<h2>Contraindications</h2>\\n' + monograph.contraindications)\n\t\t\tdrug_file.write('

<h2>Precautions</h2>\\n' + monograph.precautions)\n\t\t\tdrug_file.write('

<h2>Warnings</h2>\\n' + monograph.warnings)\n\n\t\t\tdrug_file.write('

<h2>Dosage Admin</h2>')\n\t\t\tif len(monograph.dosage_admin) > 0:\n\t\t\t\tfor dose in monograph.dosage_admin:\n\t\t\t\t\tdrug_file.write(dose+'
<br>\\n')\n\n\t\t\tdrug_file.write('

<h2>Adverse Reactions</h2>')\n\t\t\tdrug_file.write(monograph.adverse_reactions)\n\t\t\tif len(monograph.adverse_reactions) > 0:\n\n\n\t\t\t\tgastrointestinal_re = re.compile(r'Gastrointestinal (:)*', re.UNICODE)\n\n\t\t\t\tfor adverse in monograph.adverse_reactions_table:\n\t\t\t\t\tmatch = re.search('Gastrointestinal (:)*',adverse)\n\t\t\t\t\tif match != None:\n\t\t\t\t\t\tprint (generic_name + ' ' + str(match) + \"-->\" + adverse)\n\t\t\t\t\tgastrointestinal_re.sub(r'

<h3>Gastrointestinal:</h3>',adverse)\n\t\t\t\t\tre.sub(r'Central Nervous System:','

<h3>Central Nervous System:</h3>',adverse)\n\t\t\t\t\tre.sub(r'Cardiovascular:','

<h3>Cardiovascular:</h3>',adverse)\n\t\t\t\t\tre.sub(r'Skin:','

<h3>Skin:</h3>',adverse)\n\t\t\t\t\tsubpoints = re.split(r'(\\d+\\.\\d+)',adverse)\n\t\t\t\t\tif subpoints != None:\n\t\t\t\t\t\tdrug_file.write('

<br>\\n<ol>\\n')\n\t\t\t\t\t\tfor point in subpoints:\n\t\t\t\t\t\t\tdrug_file.write('
<li>' + point + '</li>')\n\t\t\t\t\t\tdrug_file.write('
<h3>Gastrointestinal:</h3>',adverse)\n\t\t\t\t\t\tre.sub(r'Central Nervous System:','

<h3>Central Nervous System:</h3>',adverse)\n\t\t\t\t\t\tre.sub(r'Cardiovascular:','

<h3>Cardiovascular:</h3>',adverse)\n\t\t\t\t\t\tre.sub(r'Skin:','

<h3>Skin:</h3>',adverse)\n\t\t\t\t\t\tdrug_file.write(adverse +'
<br>\\n')\n\n\t\t\tdrug_file.write('

<h2>Drug Interactions</h2>')\n\t\t\tif len(monograph.drug_interactions) > 0:\n\t\t\t\tfor interaction in monograph.drug_interactions:\n\t\t\t\t\tsubpoints = re.split(r'(\\d+\\.\\d+\\s)',interaction)\n\t\t\t\t\tif subpoints != None:\n\t\t\t\t\t\tdrug_file.write('

<br>\\n<ul>\\n')\n\t\t\t\t\t\tfor point in subpoints:\n\t\t\t\t\t\t\tdrug_file.write('
<li>' + point + '</li>')\n\t\t\t\t\telse:\n\t\t\t\t\t\tdrug_file.write(interaction+'
<br>\\n')\n\t\t\tdrug_file.close()\n\t\t\t\t\t\ndef write_html(drug_monograph_dict):\n\n\tkeys = drug_monograph_dict.keys()\n\ttable_contents_file = open('monographs-toc.html','w')\n\ttable_contents_file.write(html_template_string)\n\n\n\tfor key in keys:\n\t\tdrug_array = drug_monograph_dict[key]\n\t\ttable_contents_file.write('

<h2>' + key + '</h2>\\n')\n\t\ttable_contents_file.write('

<br>-------------------------------------------------<br>\\n')\n\t\tmonograph = drug_monograph_dict[key][0]\n\t\ttable_contents_file.write('

<h3>' + monograph.pharm_class + '</h3>
    \\n')\n\t\twrite_html_detail_monograph(key, monograph, table_contents_file)\n\n\t\tif monograph.pharm_class_pe in med_by_pharma:\n\t\t\tmonographs_by_pharma_array = med_by_pharma[monograph.pharm_class]\n\t\t\tmonographs_by_pharma_array.append(monograph)\n\t\telse:\n\t\t\tmonographs_by_pharma_array = []\n\t\t\tmonographs_by_pharma_array.append(monograph)\n\t\t\tmed_by_pharma[monograph.pharm_class] = monographs_by_pharma_array\n\n\ttable_contents_file.write('\\n')\n\ttable_contents_file.close()\n\ndef write_html_by_pharma_class(medication_by_pharmaceutical_class_dict):\n\tpharmaMedOutfile = open('MonographsByPharma.html','w')\n\tpharmaMedOutfile.write(html_template_string)\n\n\tmed_by_pharma_keys = medication_by_pharmaceutical_class_dict.keys()\n\n\tfor pharma_key in med_by_pharma_keys:\n\t\tmed_array = med_by_pharma[pharma_key]\n\t\tpharmaMedOutfile.write('

<h2>'+ pharma_key + '</h2>\\n')\n\t\tfor med in med_array:\n\t\t\tpharmaMedOutfile.write('

<p>' + med.generic_name + ' (' + med.brand_name + ')</p>
    \\n')\n\n\tpharmaMedOutfile.write('\\n')\n\tpharmaMedOutfile.close()\n\ndef create_monographs():\n\toutfile= open('monographs.txt','w')\n\toutfile2 = open('monographs2.txt','w')\n\toutfile3 = open('monographs3.txt','w')\n\toutfile4 = open('monographs4.txt','w')\n\toutfile5 = open('monographs5.txt','w')\n\toutfile6 = open('monographs6.txt','w')\n\n\tfor drugSource in drugFiles:\n\t\twith open(drugSource) as json_file:\n\t\t\tprint ('OPEN FILE')\n\t\t\tdata = json.load(json_file)\n\t\t\tcounter = 0;\n\t\t\t\n\t\t\tfor display_name in data['results']:\n\t\t\t\tdrug_mono = DrugMonograph(display_name)\n\t\t\t\tgeneric = drug_mono.generic_name\n\t\t\t\tcounter = counter + 1\n\t\t\t\tif counter > 3000 and counter < 6000:\n\t\t\t\t\toutfile = outfile2\n\t\t\t\telif counter > 5999 and counter < 9000:\n\t\t\t\t\toutfile = outfile3\n\t\t\t\telif counter > 8999 and counter < 12000:\n\t\t\t\t\toutfile = outfile4\n\t\t\t\telif counter > 11999 and counter < 15000:\n\t\t\t\t\toutfile = outfile5\n\t\t\t\telif counter > 15000:\n\t\t\t\t\toutfile = outfile6\n\t\t\t\t\n\t\t\t\tif drug_mono.product_type == 'HUMAN PRESCRIPTION DRUG':\n\n\t\t\t\t\tdrug_keys = display_name.keys()\n\t\t\t\t\tfor drug_key in drug_keys:\n\t\t\t\t\t\tif drug_key in unique_keys:\n\t\t\t\t\t\t\tdrug_key_count = int(unique_keys[drug_key])\n\t\t\t\t\t\t\tdrug_key_count = drug_key_count + 1\n\t\t\t\t\t\t\tunique_keys[drug_key] = str(drug_key_count)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tunique_keys[drug_key] = 1\n\n\n\t\t\t\t\tif generic in drug_mono_dict:\n\t\t\t\t\t\tdrug_array = drug_mono_dict[generic]\n\t\t\t\t\t\tdrug_array.append(drug_mono)\n\t\t\t\t\t\tfor product in product_data_array:\n\t\t\t\t\t\t\tif product.companyID == drug_mono_id:\n\t\t\t\t\t\t\t\tdrug_mono.productID= product.productID\n\t\t\t\t\telse:\n\t\t\t\t\t\tcounter = counter + 1\n\t\t\t\t\t\tdrug_array = []\n\t\t\t\t\t\tdrug_array.append(drug_mono)\n\t\t\t\t\t\tdrug_mono_dict[generic] = drug_array\n\t\t\t\t\t\tif drug_mono.pharm_class =='No Pharma' and drug_mono.pharm_class_pe == 'No Pharma PE':\n\t\t\t\t\t\t\tdrug_mono_id = drug_mono.id\n\t\t\t\t\t\t\tfor product in product_data_array:\n\t\t\t\t\t\t\t\tif product.companyID == drug_mono_id:\n\t\t\t\t\t\t\t\t\tdrug_mono.productID= product.productID\n\t\t\t\t\t\t\t\t\tdrug_mono.pharm_class = product.productPharmClasses\n\t\t\t\t\t\t\t\t\t##write_med_mono(outfile,drug_mono, product)\n\t\t\t\t\t\t\t\t\tdrug_mono_array.append(drug_mono)\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\n\n\n\t\t\twrite_html(drug_mono_dict)\n\t\t\t\n\toutfile.close()\n\toutfile2.close()\n\toutfile3.close()\n\toutfile4.close()\n\toutfile5.close()\n\toutfile6.close()\n\n\t##write_html_by_pharma_class(med_by_pharma)\n\nread_product_data()\ncreate_monographs()\t\n\n\n\n\t##for product in product_data_array:\n\t##\tndc = product.productNDC\n\t##\tndc_list = ndc.split('-')\n\t##\tndc_company = ndc_list[0]\n\t##\tif ndc_company in prod_dic_by_company:\n\t##\t\tcompany_drug_array = prod_dic_by_company[ndc_company]\n\t##\t\tcompany_drug_array.append(product)\n\t##\telse:\n\t##\t\tnew_array = []\n\t##\t\tnew_array.append(product)\n\t##\t\tprod_dic_by_company[ndc_company] = new_array\n\t\n\t##for key in company_keys:\n\t##\tprint(key)\n\t##\tproductInfoList = prod_dic_by_company[key]\n\t\t##for product in productInfoList:\n\t\t##\tprint (product.productProprietary + ', ' + product.productLabelerName + ', '+ product.productDosageFormName + ', ' + 
product.productRouteName)\n\t\t\n##spl_unclassified_section\n##spl_product_data_elements\n##set_id\n##overdosage\n##laboratory_tests\n##effective_time\n##clinical_studies\n##geriatric_use\n##nursing_mothers\n##do_not_use\n##active_ingredient\n##inactive_ingredient\n##keep_out_of_reach_of_children\n\n\n##id: \t\t\t\t\t\t33004\t\tadded\n##set_id: \t\t\t\t\t33004\n##version: \t\t\t\t\t33004\n##effective_time: \t\t\t\t33004\n##openfda: \t\t\t\t\t33004\n##spl_product_data_elements: \t\t33004\n\n##package_label_principal_display_panel: \t32971\t\tadded\n##adverse_reactions: \t\t\t30060\t\tadded\n##description: \t\t\t\t30376\t\tadded\n##dosage_and_administration: \t\t30772\t\tadded\n##indications_and_usage: \t\t\t30878\t\tadded\n##information_for_patients: \t\t\t21655\t\tadded\n##clinical_pharmacology: \t\t\t29625\n##contraindications: \t\t\t\t29997\t\tadded\n##how_supplied: \t\t\t\t29644\n\n##overdosage: \t\t\t\t27385\n##pregnancy: \t\t\t\t23114\n##pediatric_use: \t\t\t\t23293\n##drug_interactions: \t\t\t\t22222\n##nursing_mothers: \t\t\t\t21985\n##carcinogenesis_and_mutagenesis_and_impairment_of_fertility: 21224\n##geriatric_use: \t\t\t\t19256\n##precautions: \t\t\t\t19614\n##warnings:\t\t\t\t\t18855\n##spl_unclassified_section: \t\t\t16868\n##pharmacokinetics: \t\t\t\t15952\n##adverse_reactions_table: \t\t\t14267\n##mechanism_of_action: \t\t\t12164\n##general_precautions: \t\t\t11385\n##boxed_warning: \t\t\t\t11231\n##use_in_specific_populations: \t\t10744\n##nonclinical_toxicology: \t\t\t10128\n##dosage_forms_and_strengths: \t\t10256\n##clinical_pharmacology_table: \t\t10056\n##warnings_and_cautions: \t\t\t10801\n##clinical_studies_table: \t\t\t8497\n##pharmacodynamics: \t\t\t9317\n##storage_and_handling: \t\t\t8699\n##dosage_and_administration_table: \t8620\n##teratogenic_effects: \t\t\t8084\n##laboratory_tests: \t\t\t\t6942\n##drug_abuse_and_dependence: \t\t6601\n##labor_and_delivery: \t\t\t6645\n##spl_medguide: \t\t\t\t6598\n##recent_major_changes: \t\t\t5524\n##references: \t\t\t\t5677\n##how_supplied_table:\t\t\t5206\n##pharmacokinetics_table: \t\t\t5355\n##spl_patient_package_insert: \t\t3919\n##drug_and_or_laboratory_test_interactions: 3823\n##animal_pharmacology_and_or_toxicology: 3969\n##nonteratogenic_effects: \t\t\t3208\n##abuse: \t\t\t\t\t2335\n##spl_medguide_table: \t\t\t2852\n##controlled_substance: \t\t\t2581\n##warnings_and_cautions_table: \t\t2190\n##drug_interactions_table: \t\t\t2147\n##dependence: \t\t\t\t2174\n##warnings_table: \t\t\t\t2098\n##microbiology: \t\t\t\t2051\n##spl_unclassified_section_table: \t\t1562\n##recent_major_changes_table: \t\t1468\n##description_table: \t\t\t\t1496\n##inactive_ingredient: \t\t\t1149\n##spl_patient_package_insert_table: \t1107\n##microbiology_table: \t\t\t887\n##pharmacodynamics_table: \t\t858\n##precautions_table: \t\t\t\t797\n##indications_and_usage_table:\t\t725\n##instructions_for_use: \t\t\t609\n##use_in_specific_populations_table: \t600\n##active_ingredient: 551\n##\tkeep_out_of_reach_of_children: \t503\n##\tinformation_for_patients_table: \t455\n##\tpediatric_use_table: \t\t\t413\n##\tdosage_forms_and_strengths_table: 380\n##\tpatient_medication_information: \t366\n##\tquestions: \t\t\t\t363\n##\tpurpose: \t\t\t\t314\n##\tpregnancy_or_breast_feeding: \t258\n##\tinstructions_for_use_table: \t\t234\n##\tgeneral_precautions_table: \t\t172\n##\tgeriatric_use_table: \t\t\t161\n##\tother_safety_information: \t\t123\n##\tstop_use: \t\t\t\t123\n##\tdo_not_use: \t\t\t\t120\n##\tnonclinical_toxicology_table: \t117\n##\trisks: 
\t\t\t\t\t102\n\n##\tpackage_label_principal_display_panel_table: 96\n##\tcontraindications_table: 95\n##\tcarcinogenesis_and_mutagenesis_and_impairment_of_fertility_table: 85\n##\twhen_using: 82\n##\task_doctor: 71\n##\toverdosage_table: 70\n##\tstorage_and_handling_table: 64\n##\tpatient_medication_information_table: 63\n##\tpharmacogenomics: 57\n##\tsafe_handling_warning: 47\n##\tboxed_warning_table: 41\n##\tmechanism_of_action_table: 35\n##\tlaboratory_tests_table: 27\n##\tcomponents: 25\n##\treferences_table: 23\n##\tdisposal_and_waste_handling: 19\n##\tinformation_for_owners_or_caregivers: 19\n##\tdrug_abuse_and_dependence_table: 18\n##\thealth_care_provider_letter: 17\n##\tactive_ingredient_table: 17\n##\tpregnancy_table: 16\n##\task_doctor_or_pharmacist: 15\n##\ttroubleshooting: 14\n##\tpurpose_table: 12\n##\tuser_safety_warnings: 12\n##\thealth_care_provider_letter_table: 12\n##\tdrug_and_or_laboratory_test_interactions_table: 12\n##\tnonteratogenic_effects_table: 10\n##\tabuse_table: 8\n##\tveterinary_indications: 8\n##\tinactive_ingredient_table: 8\n##\tanimal_pharmacology_and_or_toxicology_table: 7\n##\tstatement_of_identity: 6\n##\tspl_indexing_data_elements: 3\n##\tinformation_for_owners_or_caregivers_table: 3\n##\tcleaning: 2\n##\twhen_using_table: 2\n##\tintended_use_of_the_device: 2\n##\tteratogenic_effects_table: 1\n##\tstatement_of_identity_table: 1\n##\tsafe_handling_warning_table: 1\n##\thealth_claim: 1\n##\trisks_table: 1\n##\tsummary_of_safety_and_effectiveness: 1\n##\tother_safety_information_table: 1\n##\tcalibration_instructions: 1\n##\tdependence_table: 1\n##\tenvironmental_warning: 1\n\n\n\n\n\n\n\n\t\t\t\t\n\t\n","sub_path":"fda2.py","file_name":"fda2.py","file_ext":"py","file_size_in_byte":23083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"364460524","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 4 15:34:35 2020\n\n@author: pi\n\n\"\"\"\n\nimport pdb\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport findiff as fd\n\nimport smbus\nimport spidev\nimport time\nimport sys\nimport board\nimport busio\nimport RPi.GPIO as GPIO\n\nfrom datetime import timedelta,datetime\nfrom scipy.signal import lfilter, firwin, freqz\nfrom scipy import zeros, signal, random\nfrom digitalio import Direction, Pull\nfrom adafruit_mcp230xx.mcp23017 import MCP23017\nfrom CustomizedLibrary import adafruit_vl6180x\nfrom CustomizedLibrary.ErrorClassDefinition import PeristalsisThresholdError,YesNoError,SpeedError,SpeedMismatchError\n\n# Clear shell\ndef cls():\n print('\\n'*50)\n#clear Console \ncls()\n\nplt.style.use('dark_background')\nplt.rcParams['figure.figsize']=(4.0,2.5 )\nplt.rcParams['figure.dpi']=300\nplt.rcParams['grid.linewidth']=0.5\n\n\nclass RoSE_actuation_protocol():\n \n def __init__(self,RPi_Cobbler_Pin_No=18,\n SPI_args=((0,0),5000000,0,False),\n I2C_args=(1,0x65),\n UseIOExpander=False,\n UseADC=False\n ):\n # GPIO setup\n self.RPi_Cobbler_Pin_No= RPi_Cobbler_Pin_No\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(RPi_Cobbler_Pin_No,GPIO.OUT)\n \n # SPI setup\n a,b,c,d=SPI_args\n a1,a2=a\n self.spi = spidev.SpiDev()\n self.spi.open(a1,a2)\n self.spi.max_speed_hz=b\n self.spi.mode=c\n #self.spi.cshigh=d\n\n # Flag setup\n self.Flag_UseIOExpander=None\n self.Flag_UseADC=None\n \n \n if UseIOExpander is True:\n self.Flag_UseIOExpander=True\n #Call IOExpander initialisation and TOF address change function\n 
self.InitializeIOExpanderAndChangeTOFAddress((0x40,0x41,0x42,0x43,0x44,\\\n 0x45,0x46,0x47,0x48,0x49))\n else:\n self.Flag_UseIOExpander=False\n\n if UseADC is True:\n self.Flag_UseADC=True\n \n #I2C setup for ADC\n # Define Smbus Configuration for ADC MAXIM 11605\n e,f=I2C_args\n ADC_BUS=e\n self.ADC_ADDRESS=f #7 bit address (will be left shifted to add the read write bit)\n #SEtup SMBus access\n self.ADCbus = smbus.SMBus(ADC_BUS) # 0 = /dev/i2c-0 (port I2C0), 1 = /dev/i2c-1 (port I2C1)\n else:\n self.Flag_UseADC=False\n\n def PeristalsisFileRead(self,Peristalsis_filepath='PeristalsisData/40mmat20mmps_Dips.csv' ):\n # Assign the spreadsheet filename to string filename\n #Load the spreadsheet\n df=pd.read_csv(Peristalsis_filepath)\n #convert dataframe array to int64 array\n df=np.array(df)\n #flip the matrix about the columns\n dfFlipped=np.flip(df,1)\n self.dfFlipped=dfFlipped\n \n return dfFlipped\n #size_df=self.df.shape\n\n # Define a function to command the ADC MAX11605\n def GeneratePressureDataFromADC(self,NoOfADC=12):\n\n if self.Flag_UseADC is True:\n \n #With RPi GPIO pin reset the IOExpander\n #GPIO.output(self.RPi_Cobbler_Pin_No,GPIO.LOW)\n\n \n #This reads a single byte from a device, from a designated register.\n #The register is specified through the Comm byte.\n #S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P\n #List comprehension to generate Data from all the ADCs at once\n\n #Setup Byte\n #REG=1, SEL2=1,SEL1=SEL0=0,CLK=0 (internal),BIP=0,RSTbar=1,X=0\n## SETUP_BYTE=1<<7|4<<4|2\n## self.ADCbus.write_byte_data(self.ADC_ADDRESS,SETUP_BYTE)\n \n #Configuration byte\n #REG=0, SCAN0=SCAN1=1,CS0to 3=ADC select, SGL=1\n## i2c_adc = busio.I2C(board.SCL, board.SDA)\n## adafruit_vl6180x.VL6180X(i2c_adc,\n## address=self.ADC_ADDRESS\n## )\n## self._device = i2c_device.I2CDevice(i2c_adc, self.ADC_ADDRESS)\n\n pressure_kpa_array=np.array([self.ADCbus.read_byte_data(self.ADC_ADDRESS,3<<5|i<<1|1) for i in range(NoOfADC)])\n## self.ADCbus.write_byte_data(self.ADC_ADDRESS,SETUP_BYTE)\n \n return pressure_kpa_array\n \n elif self.Flag_UseADC is None:\n print(\"Please initialize the ADC when calling the constructor\")\n \n def InitializeIOExpanderAndChangeTOFAddress(self,\n *VL6180X_NEW_I2C_ADDR):\n #pdb.set_trace()\n \n \n #With RPi GPIO pin reset the IOExpander\n GPIO.output(self.RPi_Cobbler_Pin_No,GPIO.LOW)\n time.sleep(1)\n GPIO.output(self.RPi_Cobbler_Pin_No,GPIO.HIGH)\n\n '''\n # Initialize the MCP23017 chip on the bonnet\n # Optionally change the address of the device if you set any of the A0, A1, A2\n # pins. 
Specify the new address with a keyword parameter:\n #mcp = MCP23017(i2c, address=0x21) # MCP23017 w/ A0 set\n '''\n time.sleep(1)\n \n # Initialize the I2C bus:\n i2c = busio.I2C(board.SCL, board.SDA)\n mcp = MCP23017(i2c)\n No_MCP23017_pins=12\n port_ab_pins = [mcp.get_pin(pin) for pin in range(0, No_MCP23017_pins)]\n \n \n \n # Set all the port A pins to output\n for pin in port_ab_pins:\n pin.direction = Direction.OUTPUT\n \n \n # If needed, define new addresses for the i2c as a tuple and Create sensor instance.\n VL6180X_NEW_I2C_ADDR=VL6180X_NEW_I2C_ADDR[0]\n \n Num_of_TOF=len(VL6180X_NEW_I2C_ADDR)\n port_ab_pins[0].value = False\n port_ab_pins[1].value = False\n port_ab_pins[2].value = False\n port_ab_pins[3].value = False\n port_ab_pins[4].value = False\n port_ab_pins[5].value = False \n port_ab_pins[6].value = False\n port_ab_pins[7].value = False\n port_ab_pins[8].value = False\n port_ab_pins[9].value = False\n\n \n TOF_sensor=[]\n\n \n for ii in range (0,Num_of_TOF):\n port_ab_pins[ii].value = True\n TOF_sensor.append(adafruit_vl6180x.VL6180X(i2c,\n new_address=VL6180X_NEW_I2C_ADDR[ii]\n )\n )\n time.sleep(0.5)\n self.TOF_sensor=TOF_sensor\n#==============================================================================\n # Define a function to generate timestamp \n#============================================================================== \n\n def GenerateTimestamp(self,t1):\n \n t=datetime.time(datetime.now())\n t2 = timedelta(minutes = t.minute, seconds = t.second, microseconds=t.microsecond)\n t3 = t2 - t1\n return t3.total_seconds()\n#==============================================================================\n # Define a function to command the TOF VL 6180x \n#==============================================================================\n def GenerateDisplacementDataFromTOF(self,t1): \n return np.array([self.GenerateTimestamp(t1),\n self.TOF_sensor[0].range,\n #self.TOF_sensor[0].range,self.TOF_sensor[0].range,self.TOF_sensor[0].range,self.TOF_sensor[0].range,\n self.TOF_sensor[1].range,\n #self.TOF_sensor[1].range,self.TOF_sensor[1].range,self.TOF_sensor[1].range,self.TOF_sensor[1].range,\n self.TOF_sensor[2].range,\n #self.TOF_sensor[2].range,self.TOF_sensor[2].range,self.TOF_sensor[2].range,self.TOF_sensor[2].range,\n self.TOF_sensor[3].range,\n #self.TOF_sensor[3].range,self.TOF_sensor[3].range,self.TOF_sensor[3].range,self.TOF_sensor[3].range,\n self.TOF_sensor[4].range,\n #self.TOF_sensor[4].range,self.TOF_sensor[4].range,self.TOF_sensor[4].range,self.TOF_sensor[4].range,\n self.TOF_sensor[5].range,\n #self.TOF_sensor[5].range,self.TOF_sensor[5].range,self.TOF_sensor[5].range,self.TOF_sensor[5].range,\n self.TOF_sensor[6].range,\n #self.TOF_sensor[6].range,self.TOF_sensor[6].range,self.TOF_sensor[6].range,self.TOF_sensor[6].range,\n self.TOF_sensor[7].range,\n #self.TOF_sensor[7].range,self.TOF_sensor[7].range,self.TOF_sensor[7].range,self.TOF_sensor[7].range,\n self.TOF_sensor[8].range,\n #self.TOF_sensor[8].range,self.TOF_sensor[8].range,self.TOF_sensor[8].range,self.TOF_sensor[8].range,\n self.TOF_sensor[9].range])\n #self.TOF_sensor[9].range,self.TOF_sensor[9].range,self.TOF_sensor[9].range,self.TOF_sensor[9].range]\n \n\n \n def mergeDACadd2DataAndSend(self,dfMatrix,rowIndex,threshold,ScalingFact=0,size=12,Remove=40):\n #Loop through the columns of jth row of df\n \n #raise ValueError \n data12BitArray=np.zeros([size,1])\n temp=dfMatrix[rowIndex,:]\n if any(dfMatrix[rowIndex,:]-Remove)<0:\n temp[(dfMatrix[rowIndex,:]-Remove)<0]=ScalingFact*0+threshold\n 
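# Any channel that would fall below the removal offset is clamped to the
# baseline threshold instead of being scaled negative.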
else:\n temp=ScalingFact*((dfMatrix[rowIndex,:]-Remove))+threshold\n #==============================================================================\n # # If any data value is greater than cut_off then we are passing (0x00ff & 296)=40 to DAC\n #==============================================================================\n cut_off=250\n #converting dac address and data to unsigned 8 bit\n data8Bit=0x00ff & 296 #=40\n DAC_DATA=int(data8Bit)\n #==============================================================================\n # #Write the serial data to the ith dac\n #==============================================================================\n #resp=[self.spi.writebytes([ii,DAC_DATA]) for ii in range(0,size) if dfMatrix[rowIndex,ii]] \n if any(dfMatrix[rowIndex,:]>cut_off) is True:\n resp=[self.spi.writebytes([ii,DAC_DATA]) for ii in range(0,size) if dfMatrix[rowIndex,ii]] \n else:\n data8Bit=(0x00ff & temp.astype(np.uint8))\n \n #converting dac address and data to unsigned 8 bit\n DAC_DATA=data8Bit.astype(np.uint8)\n data12BitArray=np.array([0,1,2,3,4,5,6,7,8,9,10,11],dtype=np.int16)\n #Merge the address of Dac with the 8 bit serial data to form 12 bit \n data12BitArray=data12BitArray<<8|data8Bit\n #Write the serial data to the ith dac\n resp = self.spi.writebytes([int(0),int(DAC_DATA[0,0])])\n resp = self.spi.writebytes([int(1),int(DAC_DATA[0,1])])\n resp = self.spi.writebytes([int(2),int(DAC_DATA[0,2])])\n resp = self.spi.writebytes([int(3),int(DAC_DATA[0,3])]) \n resp = self.spi.writebytes([int(4),int(DAC_DATA[0,4])])\n resp = self.spi.writebytes([int(5),int(DAC_DATA[0,5])])\n resp = self.spi.writebytes([int(6),int(DAC_DATA[0,6])])\n resp = self.spi.writebytes([int(7),int(DAC_DATA[0,7])])\n resp = self.spi.writebytes([int(8),int(DAC_DATA[0,8])])\n resp = self.spi.writebytes([int(9),int(DAC_DATA[0,9])])\n resp = self.spi.writebytes([int(10),int(DAC_DATA[0,10])])\n resp = self.spi.writebytes([int(11),int(DAC_DATA[0,11])])\n #pdb.set_trace()\n return DAC_DATA\n#==============================================================================\n# \n#==============================================================================\n def StringCheck(self):\n\n while True:\n try:\n startActuation =input(\"Do you want to start actuation? (y/n) \")\n if startActuation!='y':\n raise YesNoError('Input expects y or n')\n break\n except YesNoError:\n print('Input expects y or n, please try again...') \n return startActuation\n#==============================================================================\n# \n#==============================================================================\n def SpeedCheck(self,filename):\n\n while True:\n try:\n SpeedOfWave=int(input(\"Enter integer value of wave speed in mm/s (20,30,40): \"))\n SpeedList=[20,30,40]\n if SpeedOfWave in SpeedList:\n tempString=str(SpeedOfWave)\n \n if tempString in filename:\n index=SpeedList.index(SpeedOfWave)\n NumSamplesList=[10,6.5,3.0]\n SamplingTime=15/(SpeedOfWave*NumSamplesList[index])\n break\n else:\n raise SpeedMismatchError(\"The wave speed entered and specified in the peristalsis data file name does not match. \")\n \n else:\n raise SpeedError('Input expects 20mm/s, 30mm/s or 40mm/s')\n except (ValueError ,SpeedError):\n print('Input expects integer, and 20mm/s, 30mm/s or 40mm/s')\n except SpeedMismatchError:\n print('The wave speed entered and specified in the peristalsis data file name does not match. 
Please try again...')\n \n SpeedOfWaveChecked=SpeedOfWave\n return SpeedOfWaveChecked, SamplingTime\n\n#==============================================================================\n# \n#==============================================================================\n @staticmethod\n def Data_Saving(SavingData, filename,ScalingFactor,DataFrom='TOF'):\n\n if DataFrom=='TOF':\n filenameForSaving=filename[16:]\n \n ColumnName=('time (ms)',\n 'TOF1 mean displacement (mm)',\n 'TOF2 mean displacement (mm)',\n ## 'Sample 1_3 (mm)',\n ## 'Sample 1_4 (mm)',\n ## 'Sample 1_5 (mm)',\n ## 'Sample 2_1 (mm)',\n ## 'Sample 2_2 (mm)',\n ## 'Sample 2_3 (mm)',\n ## 'Sample 2_4 (mm)',\n ## 'Sample 2_5 (mm)'\n )\n \n dfTOF=pd.DataFrame(SavingData,columns=ColumnName)\n filename2=\"CsvData18_03_2020/QSR_2Layer_\"+DataFrom+\"_test_\"+str(ScalingFactor)+\"_\"+\\\n filenameForSaving[0:4]+\"_\"+filenameForSaving[6:12]+\\\n \".csv\"\n dfTOF.to_csv(filename2)\n\n elif DataFrom=='OnlineFilter':\n\n filenameForSaving=filename[16:]\n \n ColumnName=('time (ms)',\n 'OnlineFilter1 displacement (mm)',\n 'OnlineFilter2 displacement (mm)',\n )\n \n dfOnlineFilter=pd.DataFrame(SavingData,columns=ColumnName)\n filename2=\"CsvData18_06_2020/QSR_2Layer_\"+DataFrom+\"_test_PDMS_sealing\"+str(ScalingFactor)+\"_\"+\\\n filenameForSaving[0:4]+\"_\"+filenameForSaving[6:12]\n dfOnlineFilter.to_csv(filename2)\n\n elif DataFrom=='TOFADCandPer':\n\n filenameForSaving=filename[16:]\n dfTOFADCandPer=pd.DataFrame(SavingData)\n filename2=\"CsvData29_09_2020/RoSEv2pt0_\"+DataFrom+\"_test_\"+str(ScalingFactor)+\"_\"+\\\n filenameForSaving[0:]\n #+filenameForSaving[6:12]+\\\n dfTOFADCandPer.to_csv(filename2)\n\n @staticmethod\n def TOF_Plottting(fig, PlottingData,PlottingDataFiltered,NumOfDataPts=[],defSize=4):\n\n if NumOfDataPts==[]:\n NumOfDataPts=PlottingData.shape[0]\n \n ax1 = fig.add_subplot(611)\n\n InPoint=200\n\n ax1.plot(\n #PlottingData[InPoint:NumOfDataPts,0],\n PlottingData[InPoint:NumOfDataPts,1],\n color='red',\n alpha=1,\n linewidth=1.5,\n linestyle='--',\n label='TOF1',\n #marker='o',\n #ms=1,\n #mec='white'\n )\n ax1.plot(\n #PlottingDataFiltered[InPoint:NumOfDataPts,0],\n PlottingDataFiltered[InPoint:NumOfDataPts,1],\n color='blue',\n alpha=1,\n linewidth=1.5,\n #label='TOF1_filtered',\n linestyle='-',\n #marker='o',\n #ms=1,\n #mec='white'\n )\n\n ax2 = fig.add_subplot(612)\n\n ax2.plot(\n #PlottingData[InPoint:NumOfDataPts,0],\n PlottingData[InPoint:NumOfDataPts,2],\n color='red',\n alpha=1,\n label='TOF2',\n #marker='*',\n #ms=1,\n #mec='white'\n )\n \n ax2.plot(\n #PlottingDataFiltered[InPoint:NumOfDataPts,0],\n PlottingDataFiltered[InPoint:NumOfDataPts,2],\n color='blue',\n alpha=1,\n #label='TOF2_filtered',\n #marker='o',\n #ms=1,\n #mec='white'\n )\n ax3 = fig.add_subplot(613)\n \n ax3.plot(\n #PlottingData[InPoint:NumOfDataPts,0],\n PlottingData[InPoint:NumOfDataPts,3],\n color='red',\n alpha=1,\n label='TOF2',\n #marker='*',\n #ms=1,\n #mec='white'\n )\n \n ax3.plot(\n #PlottingDataFiltered[InPoint:NumOfDataPts,0],\n PlottingDataFiltered[InPoint:NumOfDataPts,3],\n color='blue',\n alpha=1,\n #label='TOF2_filtered',\n #marker='o',\n #ms=1,\n #mec='white'\n )\n ax4 = fig.add_subplot(614)\n\n ax4.plot(\n #PlottingData[InPoint:NumOfDataPts,0],\n PlottingData[InPoint:NumOfDataPts,4],\n color='red',\n alpha=1,\n label='TOF2',\n #marker='*',\n #ms=1,\n #mec='white'\n )\n \n ax4.plot(\n #PlottingDataFiltered[InPoint:NumOfDataPts,0],\n PlottingDataFiltered[InPoint:NumOfDataPts,4],\n color='blue',\n alpha=1,\n 
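# The solid blue trace is the online-filtered signal overlaid on the raw (red) TOF data.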
#label='TOF2_filtered',\n #marker='o',\n #ms=1,\n #mec='white'\n )\n ax5 = fig.add_subplot(615)\n\n ax5.plot(\n #PlottingData[InPoint:NumOfDataPts,0],\n PlottingData[InPoint:NumOfDataPts,5],\n color='red',\n alpha=1,\n label='TOF2',\n #marker='*',\n #ms=1,\n #mec='white'\n )\n \n ax5.plot(\n #PlottingDataFiltered[InPoint:NumOfDataPts,0],\n PlottingDataFiltered[InPoint:NumOfDataPts,5],\n color='blue',\n alpha=1,\n #label='TOF2_filtered',\n #marker='o',\n #ms=1,\n #mec='white'\n )\n ax6 = fig.add_subplot(616)\n\n ax6.plot(\n #PlottingData[InPoint:NumOfDataPts,0],\n PlottingData[InPoint:NumOfDataPts,6],\n color='red',\n alpha=1,\n label='TOF2',\n #marker='*',\n #ms=1,\n #mec='white'\n )\n \n ax6.plot(\n #PlottingDataFiltered[InPoint:NumOfDataPts,0],\n PlottingDataFiltered[InPoint:NumOfDataPts,6],\n color='blue',\n alpha=1,\n #label='TOF2_filtered',\n #marker='o',\n #ms=1,\n #mec='white'\n )\n \n ax1.set_ylabel('Displacement (mm)',fontsize=defSize)\n ax1.set_xlabel('Samples',fontsize=defSize)\n ax2.set_ylabel('Displacement (mm)',fontsize=defSize)\n ax2.set_xlabel('Samples',fontsize=defSize)\n \n\n## ax1.set_ylim([27,34])\n## ax2.set_ylim([40,45])\n\n ax1.legend(loc='upper right',fontsize=defSize)\n handles, labels = ax1.get_legend_handles_labels()\n\n ax2.legend(loc='upper right',fontsize=defSize)\n handles, labels = ax2.get_legend_handles_labels()\n \n plt.show()\n\n \n \n @staticmethod\n def Filter_RealTime_design(numtaps=100,cutoff=0.005):\n # taps=100 worked good for Peristalsis_Staircase_60_100_130_20mmps.csv\n\n b = signal.firwin(numtaps, cutoff)\n z = signal.lfilter_zi(b, 1)\n \n return b,z\n\n @staticmethod\n def Filter_RealTime_apply(actual_data,b,z,size_data=[],n=10):\n\n\n if n==1:\n #pdb.set_trace()\n return actual_data, z\n else:\n if size_data==[]:\n filtered_data = zeros((1,\n actual_data.shape[0]))\n filtered_data[0,0],z1 = signal.lfilter(b, 1, [actual_data[0]], zi=z[0])\n filtered_data[0,1],z2 = signal.lfilter(b, 1, [actual_data[1]], zi=z[1])\n filtered_data[0,2],z3 = signal.lfilter(b, 1, [actual_data[2]], zi=z[2])\n filtered_data[0,3],z4 = signal.lfilter(b, 1, [actual_data[3]], zi=z[3])\n filtered_data[0,4],z5 = signal.lfilter(b, 1, [actual_data[4]], zi=z[4])\n filtered_data[0,5],z6 = signal.lfilter(b, 1, [actual_data[5]], zi=z[5])\n filtered_data[0,6],z7 = signal.lfilter(b, 1, [actual_data[6]], zi=z[6])\n filtered_data[0,7],z8 = signal.lfilter(b, 1, [actual_data[7]], zi=z[7])\n filtered_data[0,8],z9 = signal.lfilter(b, 1, [actual_data[8]], zi=z[8])\n filtered_data[0,9],z10 = signal.lfilter(b, 1, [actual_data[9]], zi=z[9])\n #print('\\n filtering',filtered_data[0,0])\n\n filtered_data, z=RoSE_actuation_protocol.Filter_RealTime_apply(filtered_data[0,:],\n b,\n (z1,z2,z3,z4,z5,z6,z7,z8,z9,z10),\n size_data=[],\n n=n-1)\n return filtered_data, z\n \n \n#==============================================================================\n# Main Program \n#==============================================================================\n#Main function\nif __name__ == '__main__':\n \n try:\n #Apply the Baseline Pressure\n while True:\n try:\n temp =int(input(\"Enter the baseline pressure for the ESR \"))\n \n if temp>120:\n raise PeristalsisThresholdError\n break\n \n except ValueError:\n print(\"Oops! Only integer values are accepted. 
Try again...\")\n except PeristalsisThresholdError: \n print('Please enter a value below 120..Please try again..')\n \n QSR_Two_layer=RoSE_actuation_protocol(UseIOExpander=True,UseADC=True)\n Peristalsis_filepath='PeristalsisData/40mmat20mmps_Dips.csv'#Peristalsis_Staircase_50_10_130_20mmps.csv'\n dfFlipped=QSR_Two_layer.PeristalsisFileRead(Peristalsis_filepath)\n \n print('\\n')\n size_df=dfFlipped.shape\n BaseLinePress=temp*np.ones((1,size_df[1]),dtype=int)\n ZeroArray=np.zeros((1,size_df[1]),dtype=int)\n QSR_Two_layer.mergeDACadd2DataAndSend(dfFlipped,0,BaseLinePress)\n print('Please wait for 4 seconds...')\n print('\\n')\n time.sleep(0)\n\n ## Checking whether user wants to do the actuation or not\n startActuation=QSR_Two_layer.StringCheck()\n print('\\n')\n ## Checking correct wave speed and it is integer data type or not? \n SpeedOfWaveChecked, SamplingTime=QSR_Two_layer.SpeedCheck(Peristalsis_filepath)\n print('\\n')\n \n ScalingFact =float(input(\"What is the scaling factor you would like? (Enter between 0-1.5): \"))\n j=0;\n numOfCyc=0;\n \n Adc2dArray=np.zeros(size_df[1])\n TOF2dArray=np.zeros(7)\n\n t1 = timedelta(minutes = 0, seconds = 0, microseconds=0)\n \n if QSR_Two_layer.Flag_UseIOExpander is True and QSR_Two_layer.Flag_UseADC is False:\n \n #TOF2dArray=np.zeros((1,QSR_Two_layer.GenerateDisplacementDataFromTOF(t1).shape[0]))\n TOF2dArray=np.array([[],[],[]]).T\n range_mm_array=QSR_Two_layer.GenerateDisplacementDataFromTOF(t1)\n\n #Design real-time filter\n b,z=RoSE_actuation_protocol.Filter_RealTime_design()\n z=(z,z)\n TOF2dArray_mean_filtered_stacked=np.array([[],[],[]]).T\n\n elif QSR_Two_layer.Flag_UseADC is True and QSR_Two_layer.Flag_UseIOExpander is False:\n ADC2dArray=np.array([[],[],[],[],[],[],[],[],[],[],[],[]]).T\n\n elif QSR_Two_layer.Flag_UseADC is True and QSR_Two_layer.Flag_UseIOExpander is True:\n TOFADCPer2dArray=np.array([[],\\\n [],[],[],[],[],[],[],[],[],[],\\\n [],[],[],[],[],[],[],[],[],[],\\\n [],[],[],[],[],[],[],[],[],[],[],[],\\\n [],[],[],[],[],[],[],[],[],[],[],[]]).T\n range_mm_array=QSR_Two_layer.GenerateDisplacementDataFromTOF(t1)\n\n #Design real-time filter\n b,z=RoSE_actuation_protocol.Filter_RealTime_design()\n z=(z,z,z,z,z,z,z,z,z,z)\n TOF2dArray_mean_filtered_stacked=np.array([[],[],[],[],[],[],[],[],[],[],[]]).T\n \n starttime=time.time()\n #==============================================================================\n # Looping starts\n #==============================================================================\n while True:\n \n # create the hex version of df\n## dfHex=[hex(df[j,x]) for x in range(12)]\n\n dac_data_array=QSR_Two_layer.mergeDACadd2DataAndSend(dfFlipped,j,BaseLinePress,ScalingFact,size_df[1],40)\n\n if QSR_Two_layer.Flag_UseIOExpander is True and QSR_Two_layer.Flag_UseADC is False:\n \n range_mm_array=QSR_Two_layer.GenerateDisplacementDataFromTOF(t1)\n range_mm_array_mean=np.array([range_mm_array[0],\n range_mm_array[1:6].mean(axis=0),\n range_mm_array[7:].mean(axis=0)])\n \n TOF2dArray=np.concatenate((TOF2dArray,range_mm_array_mean[np.newaxis,:]))\n\n range_mm_array_filtered, z= RoSE_actuation_protocol.Filter_RealTime_apply(range_mm_array_mean[1:],\n b,z)\n print(range_mm_array_filtered.shape)\n range_mm_array_filtered=np.concatenate((np.array([range_mm_array[0]]),\n range_mm_array_filtered[0,:]))\n TOF2dArray_mean_filtered_stacked=np.vstack((TOF2dArray_mean_filtered_stacked,range_mm_array_filtered\n ))\n\n elif QSR_Two_layer.Flag_UseADC is True and QSR_Two_layer.Flag_UseIOExpander is False:\n 
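# ADC-only configuration: read all twelve pressure channels and append the
# sample as a new row of the running log array.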
pressure_kpa_array=QSR_Two_layer.GeneratePressureDataFromADC(NoOfADC=12)\n ADC2dArray=np.concatenate((ADC2dArray,pressure_kpa_array[np.newaxis,:]))\n\n elif QSR_Two_layer.Flag_UseADC is True and QSR_Two_layer.Flag_UseIOExpander is True:\n \n t_tof_1 = time.time()\n range_mm_array=QSR_Two_layer.GenerateDisplacementDataFromTOF(t1)\n print('Time elapsed for TOF rading ',time.time()-t_tof_1)\n \n pressure_kpa_array=QSR_Two_layer.GeneratePressureDataFromADC(NoOfADC=12)\n \n range_mm_array_mean=np.array([range_mm_array[0],\n range_mm_array[1: 6].mean(axis=0),\n range_mm_array[6: 11].mean(axis=0),\n range_mm_array[11:16].mean(axis=0),\n range_mm_array[16:21].mean(axis=0),\n range_mm_array[21:26].mean(axis=0),\n range_mm_array[26:31].mean(axis=0),\n range_mm_array[31:36].mean(axis=0),\n range_mm_array[36:41].mean(axis=0),\n range_mm_array[41:46].mean(axis=0),\n range_mm_array[46:51].mean(axis=0)])\n t_fil1=time.time()\n range_mm_array_filtered, z= RoSE_actuation_protocol.Filter_RealTime_apply(range_mm_array_mean[1:],\n b,z)\n print('Time elapsed for filteration ',time.time()-t_fil1)\n \n range_mm_array_filtered=range_mm_array_filtered[...,np.newaxis].T\n range_mm_array_filtered=np.concatenate((np.array([range_mm_array[0]]),\n range_mm_array_filtered[0]))\n \n range_pressure_array=np.hstack((np.hstack((range_mm_array_mean,\n range_mm_array_filtered[1:])),\n pressure_kpa_array))\n range_pressure_peristalsis_array=np.hstack((range_pressure_array,dac_data_array[0,:]))\n TOFADCPer2dArray=np.concatenate((TOFADCPer2dArray,range_pressure_peristalsis_array[np.newaxis,:]))\n\n \n QSR_Two_layer.GenerateTimestamp(t1)\n \n #time.sleep(SamplingTime- ((time.time() - starttime) % SamplingTime))\n time.sleep(0.01)\n j+=1\n #if the row index of df reaches the end then go back to the starting point\n if j==dfFlipped.shape[0]: \n numOfCyc+=1\n j=0\n print('\\n')\n print('End of peristalsis cycle number: {0}'.format(numOfCyc))\n print('\\n'*4)\n print('2.5s wait before initiating TOF reading...')\n #time.sleep(2.5 - ((time.time() - starttime) % 1))\n time.sleep(2.5)\n \n except KeyboardInterrupt:\n\n QSR_Two_layer_clear=RoSE_actuation_protocol(UseIOExpander=False)\n ClearDAC=np.zeros((1,12),dtype=int)\n QSR_Two_layer_clear.mergeDACadd2DataAndSend(ClearDAC,0,ClearDAC,0,12,0)\n\n # Generate a csv file from the TOF 2d data\n if QSR_Two_layer.Flag_UseIOExpander is True and QSR_Two_layer.Flag_UseADC is False:\n\n\n #Plot figure \n Figure1= plt.figure()\n RoSE_actuation_protocol.TOF_Plottting(Figure1, TOF2dArray,TOF2dArray_mean_filtered_stacked)\n\n #Save Data\n TOF2dArray[:,0]=TOF2dArray[:,0]-TOF2dArray[0,0]\n TOF2dArray_mean_filtered_stacked[:,0]=TOF2dArray_mean_filtered_stacked[:,0]-\\\n TOF2dArray_mean_filtered_stacked[0,0]\n \n\n RoSE_actuation_protocol.Data_Saving(TOF2dArray,\n filename=Peristalsis_filepath,\n ScalingFactor=ScalingFact,\n DataFrom='TOF'\n )\n \n RoSE_actuation_protocol.Data_Saving(TOF2dArray_mean_filtered_stacked,\n filename=Peristalsis_filepath,\n ScalingFactor=ScalingFact,\n DataFrom='OnlineFilter'\n )\n elif QSR_Two_layer.Flag_UseADC is True and QSR_Two_layer.Flag_UseIOExpander is False:\n\n RoSE_actuation_protocol.Data_Saving(TOF2dArray,\n filename=Peristalsis_filepath,\n ScalingFactor=ScalingFact,\n DataFrom='TOFandADC'\n )\n \n elif QSR_Two_layer.Flag_UseADC is True and QSR_Two_layer.Flag_UseIOExpander is True:\n\n #Plot figure \n Figure1= plt.figure()\n \n RoSE_actuation_protocol.TOF_Plottting(Figure1, 
TOFADCPer2dArray[:,[0,1,2,3,4,5,6,7,8,9,10]],TOFADCPer2dArray[:,[0,7,8,9,10,11,12,13,14,15,16]])\n \n RoSE_actuation_protocol.Data_Saving(TOFADCPer2dArray,\n filename=Peristalsis_filepath,\n ScalingFactor=ScalingFact,\n DataFrom='TOFADCandPer'\n )\n \n print('Bye! Bye! Dipu')\n except ValueError as ValErr:\n print(ValErr)\n print('Error...Check I2C Connection')\n except OSError as OSErr:\n print('% s occurred due to ADC not found at 65H' %OSErr)\n except IndexError as IndexErr:\n print(IndexErr) \n finally:\n QSR_Two_layer_clear=RoSE_actuation_protocol(UseIOExpander=False)\n ClearDAC=np.zeros((1,12),dtype=int)\n QSR_Two_layer_clear.mergeDACadd2DataAndSend(ClearDAC,0,ClearDAC,0,12,0)\n print('All the best for next run')\n\n i2c = busio.I2C(board.SCL, board.SDA)\n\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(18,GPIO.OUT) \n #With RPi GPIO pin reset the IOExpander\n GPIO.output(18,GPIO.LOW)\n time.sleep(1)\n GPIO.output(18,GPIO.HIGH)\n\n '''\n # Initialize the MCP23017 chip on the bonnet\n # Optionally change the address of the device if you set any of the A0, A1, A2\n # pins. Specify the new address with a keyword parameter:\n #mcp = MCP23017(i2c, address=0x21) # MCP23017 w/ A0 set\n '''\n time.sleep(1)\n i2c = busio.I2C(board.SCL, board.SDA)\n mcp = MCP23017(i2c)\n No_MCP23017_pins=12\n port_ab_pins = [mcp.get_pin(pin) for pin in range(0, No_MCP23017_pins)]\n\n\n # Set all the port A pins to output\n for pin in port_ab_pins:\n pin.direction = Direction.OUTPUT\n\n port_ab_pins[0].value = False \n port_ab_pins[1].value = False\n port_ab_pins[2].value = False\n port_ab_pins[3].value = False\n port_ab_pins[4].value = False\n port_ab_pins[5].value = False \n port_ab_pins[6].value = False\n port_ab_pins[7].value = False\n port_ab_pins[8].value = False\n port_ab_pins[9].value = False\n \n port_ab_pins[0].value = True\n port_ab_pins[1].value = True\n port_ab_pins[2].value = True\n port_ab_pins[3].value = True\n port_ab_pins[4].value = True\n port_ab_pins[5].value = True \n port_ab_pins[6].value = True\n port_ab_pins[7].value = True\n port_ab_pins[8].value = True\n port_ab_pins[9].value = True\n \nelse:\n print(\"This module has been called from other program\")\n \n","sub_path":"RoSE_IOEXpander_TOF_ten_OOPs.py","file_name":"RoSE_IOEXpander_TOF_ten_OOPs.py","file_ext":"py","file_size_in_byte":36505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"191088198","text":"__author__ = 'Ziad'\n\nclass EvaluationFunctions:\n\n __dbManager = None\n __retweetSampleSize = 25\n\n # constructor\n def __init__(self, dbManager):\n super().__init__()\n self.__dbManager = dbManager\n\n def eval_network_relation(self, parentId, childId):\n parent_identity = self.__dbManager.get_twitter_identity_by_id(parentId)\n child_identity = self.__dbManager.get_twitter_identity_by_id(childId)\n parent_retweets = self.__dbManager.get_retweet_stats_by_id(parentId)\n throughput = self.get_through_put(parent_identity, parent_retweets)\n eval = self.evaluate(parent_identity, child_identity, throughput)\n return {'eval': eval, 'throughput': throughput}\n\n def get_through_put(self, parent_identity, parent_retweets):\n retweet_counter = 0\n local_sample = 0\n for tweet in parent_retweets:\n retweet_counter += tweet.retweetsCounter\n local_sample += 1\n if retweet_counter == 0:\n return 0\n normalized_status_count = parent_identity.statusesCount * (local_sample/self.__retweetSampleSize)\n throughput = retweet_counter/normalized_status_count\n return throughput\n\n def evaluate(self, 
parent_identity, child_identity, throughput):\n return parent_identity.followersCount >= 250 and child_identity.followersCount >= 250 and throughput > 0.01\n\n def compute_node_score(self, relation_count, in_degree, out_degree, avg_through_put):\n if relation_count == 0:\n return 0\n return (in_degree/out_degree)*avg_through_put*relation_count\n\n def normalize(self, min, max, value_min, value_max, value):\n return min + (value-value_min)*(max-min)/(value_max-value_min)\n","sub_path":"Backend/FashionNetworkBackend/Utility/EvaluationFunctions.py","file_name":"EvaluationFunctions.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"148710763","text":"import os\nimport glob\nimport string\nimport random\nfrom asyncio import sleep\nfrom collections import\tnamedtuple\nfrom PIL import Image\n\nimport aiofiles\nfrom eyed3 import id3\nimport youtube_dl\n\nfrom var import var\n\ndef randstr(length=10):\n\treturn ''.join(random.sample(string.ascii_letters, length))\n\n\ndef get_cached_result(s):\n\tcached = var.temp_results.get(s)\n\tif cached and cached.relevant():\n\t\treturn cached\n\telif cached and not cached.relevant():\n\t\tdel var.temp_results[s]\n\n\nasync def delete_cached_results():\n\twhile True:\n\t\tfor key in var.temp_results.keys():\n\t\t\tif not var.temp_results[key].relevant():\n\t\t\t\tdel var.temp_results[key]\n\t\tawait sleep(60)\n\n\ndef _get_yt_id(link):\n\tif 'youtu.be' in link:\n\t\tyt_id = link.split('/')[-1]\n\telse:\n\t\tyt_id = link.split('&')[0].split('v=')[1]\n\treturn f'https://youtube.com/watch?v={yt_id}'\n\t\n\n\ndef clearlink(text):\n\tfor i in text.split():\n\t\tif 'http://' in i or 'https://' in i:\n\t\t\tif 'music.youtube.com' in i:\n\t\t\t\treturn i.split('&')[0]\n\t\t\telif 'youtu.be' in i or 'youtube.com' in i:\n\t\t\t\treturn _get_yt_id(i)\n\t\t\treturn i\n\n\ndef split_string(text, chars_per_string):\n\tresult = []\n\twords = text.split(' ')\n\tstring = ''\n\tfor i, word in enumerate(words):\n\t\tif (len(string + word) > chars_per_string):\n\t\t\tresult.append(string)\n\t\t\tstring = ''\n\t\tstring += word + ' '\n\t\tif i == len(words) - 1:\n\t\t\tresult.append(string)\n\t\t\tstring = ''\n\treturn result\n\n\ndef resize_image(fn, size):\n\tin_file = Image.open(fn)\n\tout_fn = fn.strip('.jpg') + '_smallthumb.jpg'\n\tout_file = in_file.resize(size)\n\tout_file.save(out_fn)\n\treturn out_fn\n\n\nasync def get_thumb(url, fn):\n\torig_fn = await download(url, fn, 'jpg')\n\tresized_fn = resize_image(orig_fn, (90, 90))\n\tos.remove(orig_fn)\n\treturn resized_fn\n\n\nasync def download(link, filename, ext):\n\treq = await var.session.get(link)\n\tfn = f'{filename}.{ext}'\n\tasync with aiofiles.open(fn, 'wb') as f:\n\t\tawait f.write(await req.read())\n\treturn fn\n\n\nGeniusTags = namedtuple(\n\t'GeniusTags',\n\t('title', 'artist', 'album', 'album_artist', 'release_date', 'pic', 'thumb'))\n\n\ndef genius_get_tags(track):\n\treturn GeniusTags(\n\t\ttitle=track.title_with_featured,\n\t\tartist=track.primary_artist.name,\n\t\talbum=track.album.name \\\n\t\t\tif track.album else track.title,\n\t\talbum_artist=track.album.artist.name \\\n\t\t\tif track.album else track.primary_artist.name,\n\t\trelease_date=track.release_date,\n\t\tpic=track.album.cover_art_url \\\n\t\t\tif track.album else track.song_art_image_url,\n\t\tthumb=track.song_art_image_thumbnail_url)\n\nasync def addtags(fn, tags):\n\tfile_tag = id3.Tag()\n\tfile_tag.parse(fn)\n\tfile_tag.artist = 
tags.artist\n\tfile_tag.album = tags.album\n\tfile_tag.album_artist = tags.album_artist\n\tfile_tag.original_release_date = tags.release_date\n\tfile_tag.title = tags.title\n\tpic_fn = await download(tags.pic, fn, 'jpg')\n\tasync with aiofiles.open(pic_fn, 'rb') as pic_file:\n\t\tfile_tag.images.set(\n\t\t\ttype_=3, img_data=await pic_file.read(), mime_type=\"image/jpeg\")\n\tfile_tag.save()\n\nasync def addtags_ytdl(fn, tags):\n\tfile_tag = id3.Tag()\n\tfile_tag.parse(fn)\n\tfile_tag.artist = tags['artist']\n\tfile_tag.album = tags['track']\n\tfile_tag.album_artist = tags['artist']\n\tfile_tag.title = tags['track']\n\tpic_fn = await download(tags['cover'], fn, 'jpg')\n\tasync with aiofiles.open(pic_fn, 'rb') as pic_file:\n\t\tfile_tag.images.set(\n\t\t\ttype_=3, img_data=await pic_file.read(), mime_type=\"image/jpeg\")\n\tfile_tag.save()\n\n\ndef ytdl_download(url, filename):\n\tydl_opts = {\n\t\t'format': 'bestaudio/best',\n\t\t'postprocessors': [{\n\t\t\t'key': 'FFmpegExtractAudio',\n\t\t\t'preferredcodec': 'mp3',\n\t\t\t'preferredquality': '192',\n\t\t}],\n\t\t'restrictfilenames': True,\n\t\t'forcejson': True,\n\t\t'outtmpl': '%s,%%(title)s.%%(ext)s' % filename,\n\t}\n\twith youtube_dl.YoutubeDL(ydl_opts) as ytdl:\n\t\tinfo = ytdl.extract_info(url)\n\tcover = info.get('thumbnail')\n\tif info.get('track') and info.get('artist'):\n\t\treturn {'track': info['track'], 'artist': info['artist'], 'cover': cover}\n\telse:\n\t\ttitle = info.get('title')\n\t\tuploader = info.get('uploader')\n\t\tfor c in '-\\u2013\\u2014\\u2E3A\\uFE58':\n\t\t\tif f' {c} ' in title and len(title.split(f' {c} ')) == 2:\n\t\t\t\ttrack = title.split(f'{c} ')[1]\n\t\t\t\tartist = title.split(f' {c}')[0]\n\t\t\t\treturn {'track': track, 'artist': artist, 'cover': cover}\n\t\treturn {'track': title, 'artist': uploader, 'cover': cover}","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"557749589","text":"import os\nfrom PIL import Image\nimport numpy as np\nimport random\nimport sys\n#from tqdm import * #pip3 install tqdm\n\nclass VOT2016_Data_Provider():\n def __init__(self,pathofvot2016, cfg):\n self.cfg = cfg\n pathofinput = pathofvot2016 + '/images'\n pathofgroundtruth = pathofvot2016 + '/groundtruths'\n self.datanamelist = os.listdir(pathofgroundtruth)\n # self.datanamelist = ['nature']\n self.datanamesize = len(self.datanamelist)\n assert(self.datanamesize > 0)\n self.datalength = []\n self.gtedge = []\n self.inputdata = []\n self.gtdata = []\n self.nxs = []\n self.nys = []\n self.maxsteps = -1\n self.minsteps = 99999999\n self.channals = cfg['channels']\n self.n_class = 2\n self.batch_size = cfg['batch_size']\n self.steps = cfg['max_step']\n for idata in range(self.datanamesize):\n input_pic_dir = pathofinput + '/' + self.datanamelist[idata]\n input_gt_dir = pathofgroundtruth + '/' + self.datanamelist[idata]\n piclist = os.listdir(input_pic_dir)\n for inamen in range(len(piclist) - 1, -1,-1):\n iname = piclist[inamen]\n if (os.path.splitext(iname)[1] != '.jpg' and os.path.splitext(iname)[1] != '.png'):\n piclist.remove(iname)\n piclist = sorted(piclist)\n gtlist = os.listdir(input_gt_dir)\n for inamen in range(len(gtlist) - 1, -1,-1):\n iname = gtlist[inamen]\n if (os.path.splitext(iname)[1] != '.jpg' and os.path.splitext(iname)[1] != '.png'):\n gtlist.remove(iname)\n gtlist = sorted(gtlist)\n assert(len(gtlist) == len(piclist))\n datalength = len(gtlist)\n if datalength > 
self.maxsteps: self.maxsteps = datalength\n if datalength < self.minsteps: self.minsteps = datalength\n self.datalength.append(datalength)\n for inamen in range(datalength):\n piclist[inamen] = input_pic_dir + '/' + piclist[inamen]\n gtlist[inamen] = input_gt_dir + '/' + gtlist[inamen]\n self.inputdata.append(piclist)\n self.gtdata.append(gtlist)\n self.gtedge.append(None)\n im1 = Image.open(piclist[0])\n im1_np = np.array(im1)\n self.nxs.append(len(im1_np))\n self.nys.append(len(im1_np[0]))\n assert(len(im1_np[0][0]) == self.channals)\n self.dataidx = 0\n self.nowdata = None\n self.batchidx = 0\n #self.bagdata, self.baglabel = self.get_data(8)\n\n def random_batch_init(self):\n self.batches = []\n for dataidx in range(self.datanamesize):\n start = 0\n while (True):\n end = start + self.batch_size*self.steps - 1\n if end >= self.datalength[dataidx]:\n break\n abatch = (dataidx, start)\n self.batches.append(abatch)\n start = end + 1\n self.batch_nums = len(self.batches)\n print(str(self.batch_nums) + ' batches ready')\n\n def get_images(self, dataidx, start, steps, jump=1):\n nx, ny = self.cfg['max_size_x'], self.cfg['max_size_y']\n inputdata = np.zeros((steps, nx, ny, self.channals), dtype=np.float32)\n gtdata = np.zeros((steps, nx, ny), dtype = np.bool)\n inputnamelist = self.inputdata[dataidx]\n gtnamelist = self.gtdata[dataidx]\n if start + steps*jump >= len(inputnamelist):\n return None, None\n for istep in range(steps):#tqdm(range(steps)):\n im_ipt = Image.open(inputnamelist[start + istep*jump])\n im_ipt = im_ipt.resize((nx, ny))\n inputdata[istep] = np.array(im_ipt)\n im_gt = Image.open(gtnamelist[start + istep*jump])\n im_gt = im_gt.resize((nx, ny))\n gtdata[istep] = np.array(im_gt)\n if self.cfg['norm_input']:\n if self.cfg['norm_input_minus']:\n inputdata = (inputdata * 2 - 255) / 255\n else:\n inputdata = (inputdata / 255.0)\n gtdata = gtdata.astype(np.float)\n return (inputdata, gtdata)\n\n def get_a_random_batch(self, jump=1):\n inputdata, gtdata = None, None\n while inputdata is None:\n batchidx = random.randint(0, self.batch_nums-1)\n # batchidx=0\n dataidx, start = self.batches[batchidx]\n inputdata, gtdataonehot = self.get_images(dataidx, start, self.batch_size * self.steps, jump=jump)\n nsteps, nx, ny, band = inputdata.shape\n inputdata = inputdata.reshape((self.batch_size, self.steps, nx, ny, self.channals))\n gtdata = gtdataonehot.reshape((self.batch_size, self.steps, nx, ny))\n datatuple = (inputdata, gtdata)\n return datatuple \n\n def get_data(self, dataidx):\n assert (0 <= dataidx and dataidx < self.datanamesize)\n datname = self.datanamelist[dataidx]\n inputnamelist = self.inputdata[dataidx]\n gtnamelist = self.gtdata[dataidx]\n steps = self.datalength[dataidx]\n nx = self.nxs[dataidx]\n ny = self.nys[dataidx]\n channals = self.channals\n inputdata = np.zeros((steps, nx, ny, channals), dtype=np.float32)\n gtdata = np.zeros((steps, nx, ny), dtype = np.bool)\n print('loading data ' ,datname, '...')\n for istep in range(steps):#tqdm(range(steps)):\n im_ipt = Image.open(inputnamelist[istep])\n inputdata[istep] = np.array(im_ipt)\n im_gt = Image.open(gtnamelist[istep])\n gtdata[istep] = np.array(im_gt)\n gtdata = gtdata.astype(np.int32)\n gtdata = gtdata.reshape((steps*nx*ny))\n gtdataonehot = np.zeros((steps*nx*ny, 2), dtype=np.float32)\n gtdataonehot[np.arange(steps*nx*ny), gtdata] = 1\n gtdataonehot = gtdataonehot.reshape((steps,nx,ny,2))\n return (inputdata, gtdataonehot)\n\n def get_data_one_batch(self, dataidx):\n inputdata, gtdataonehot = self.get_data(dataidx)\n 
inputdata = inputdata.reshape([1] + list(np.shape(inputdata)))\n gtdataonehot = gtdataonehot.reshape([1] + list(np.shape(gtdataonehot)))\n # inputdata.dim:(batch_size = 1, steps, nx, ny, channals)\n # gtdata.dim:(batch_size = 1, steps, nx, ny, nclass)\n return (inputdata, gtdataonehot)\n\n def get_one_data_with_maxstep(self, max_step):\n inputdata, gtdataonehot = self.get_data(self.dataidx)\n iptshp = list(np.shape(inputdata)) # iptdata is in shape[sheps, nx, ny, channels]\n gtshp = list(np.shape(gtdataonehot))\n steps = iptshp[0]\n if steps <= max_step:\n inputdata = inputdata.reshape([1] + list(np.shape(inputdata)))\n gtdataonehot = gtdataonehot.reshape([1] + list(np.shape(gtdataonehot)))\n return (inputdata, gtdataonehot)\n else:\n batch_size = steps // max_step\n inputdata = (inputdata[:batch_size * max_step,:,:,:]).reshape([batch_size, max_step, iptshp[1], iptshp[2],iptshp[3]])\n gtdataonehot = (gtdataonehot[:batch_size * max_step,:,:,:]).reshape([batch_size, max_step, gtshp[1], gtshp[2], gtshp[3]])\n return (inputdata, gtdataonehot)\n\n def subsampling(self, datatuple, max_size):\n max_nx, max_ny = max_size\n inputdata, gtdata = datatuple\n batch_size,steps,nx,ny,channels = inputdata.shape\n timex = (nx + max_nx - 1) // max_nx\n timey = (ny + max_ny - 1) // max_ny\n time = timex if timex > timey else timey\n return (inputdata[:,:,::time,::time,:], gtdata[:,:,::time,::time])\n \n def get_one_data_with_maxstep_next_batch_t(self, batch_size, max_step, max_size = None, edge = 0):\n if self.nowdata is None:\n self.nowdata = self.get_one_data_with_maxstep(max_step)\n inputdata, gtdataonehot = self.nowdata\n batches = len(inputdata)\n if self.batchidx + batch_size >= batches:\n returndata = (inputdata[self.batchidx:batches], gtdataonehot[self.batchidx:batches])\n else:\n returndata = (inputdata[self.batchidx:self.batchidx + batch_size], gtdataonehot[self.batchidx:self.batchidx + batch_size])\n \n self.batchidx = self.batchidx + batch_size\n if self.batchidx >= batches:\n self.batchidx = 0\n self.dataidx = (self.dataidx + 1) % self.datanamesize\n self.nowdata = None\n \n if max_size is not None:\n returndata = self.subsampling(returndata, max_size)\n \n gtdata = returndata[1]\n _,__,nx,ny,___ = gtdata.shape\n sumcenter = np.sum(gtdata[:,:,edge:nx-edge,edge:ny-edge,1:2])\n sumedge = np.sum(gtdata[:,:,:,:,1:2]) - sumcenter\n #print(gtdata)\n if sumcenter > 0 and sumedge == 0:\n return returndata\n else:\n #print('sumcenter:',sumcenter)\n #print('sumedge:',sumedge)\n #return self.get_one_data_with_maxstep_next_batch(batch_size, max_step, max_size, edge)\n return None\n\n def get_one_data_with_maxstep_next_batch(self, batch_size, max_step, max_size = None, edge = 0, centershape = None):\n rd = self.get_one_data_with_maxstep_next_batch_t(batch_size,max_step,max_size,edge)\n while rd is None:\n rd = self.get_one_data_with_maxstep_next_batch_t(batch_size,max_step,max_size,edge)\n return rd[0], rd[1]#, self.get_mark(rd, edge, centershape)\n\n def __call__(self, batch_size = 1):\n return self.bagdata, self.baglabel\n\ndef printlen():\n dptest = VOT2016_Data_Provider('/home/cjl/data/vot2016')\n for i in range (20):\n iptdata, gtdataonehot = dptest.get_data(i)\n nx = len(iptdata[0])\n ny = len(iptdata[0][0])\n print(nx, ' ',ny)\n\ndef test_maxstep():\n dptest = VOT2016_Data_Provider('/home/cjl/data/vot2016')\n dptest.dataidx = 9\n iptdata, gtdataonehot = dptest.get_one_data_with_maxstep_next_batch(10, 8, max_size = (30,30))\n print(np.shape(iptdata))\n print(np.shape(gtdataonehot))\n #iptdata, 
gtdataonehot = dptest.get_one_data_with_maxstep_next_batch(10, 8)\n    print(np.shape(iptdata))\n    print(np.shape(gtdataonehot))\n    #iptdata, gtdataonehot = dptest.get_one_data_with_maxstep_next_batch(10, 8)\n    print(np.shape(iptdata))\n    print(np.shape(gtdataonehot))\n\ndef test_resize():\n    import sys\n    sys.path.append('/home/cjl/tf_runet')\n    from config import cfg\n    pro_path = '/home/cjl/tf_runet'\n    data_path = pro_path + '/data/vot2016'\n    data_provider = VOT2016_Data_Provider(data_path, cfg)\n    data_provider.random_batch_init()\n    data_provider.dataidx = 10\n    iptdata, gtdata = data_provider.get_a_random_batch(jump=20)\n\nif __name__ == '__main__':\n    #printlen()\n    #test_maxstep()\n    test_resize()\n    \n","sub_path":"data/vot2016.py","file_name":"vot2016.py","file_ext":"py","file_size_in_byte":10852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"302366739","text":"import unittest\nimport logging\nimport remclient\nimport time\nfrom testdir import *\n\n\nclass T16(unittest.TestCase):\n    \"\"\"Test for queue/level lifetimes functionality\"\"\"\n\n    def setUp(self):\n        self.connector = Config.Get().server1.connector\n        self.connector2 = Config.Get().server2.connector\n\n    def testCustomQueueSuccessLifetime(self):\n        timestamp = time.time()\n        pck = self.connector.Packet('test_successfull_lifetime-%d' % int(timestamp))\n        pck.AddJob('true')\n        self.connector.Queue('test_lifetime').AddPacket(pck)\n        pckInfo = self.connector.PacketInfo(pck.id)\n        WaitForExecution(pckInfo, [\"SUCCESSFULL\"])\n\n        self.connector.Queue('test_lifetime').SetSuccessLifeTime(1)\n        time.sleep(1)\n        self.connector.proxy.forget_old_items()\n\n        self.assertRaises(pckInfo.update)\n\n    def testCustomQueueErroredLifetime(self):\n        timestamp = time.time()\n        pck = self.connector.Packet('test_error_lifetime-%d' % int(timestamp))\n        pck.AddJob('false', tries=1)\n        self.connector.Queue('test_lifetime').AddPacket(pck)\n        pckInfo = self.connector.PacketInfo(pck.id)\n        WaitForExecution(pckInfo, [\"ERROR\"])\n\n        self.connector.Queue('test_lifetime').SetErroredLifeTime(1)\n        time.sleep(1)\n        self.connector.proxy.forget_old_items()\n\n        self.assertRaises(pckInfo.update)\n\n    def testCustomQueueSuspendedLifetime(self):\n        timestamp = time.time()\n        queue = self.connector.Queue('test_lifetime_%s' % timestamp)\n\n        pck = self.connector.Packet('test_suspended_lifetime-%d' % int(timestamp),\n                                    wait_tags=['no_such_tag_%s' % timestamp])\n        pck.AddJob('true')\n        queue.AddPacket(pck)\n\n        pckInfo = self.connector.PacketInfo(pck.id)\n\n        WaitForExecution(pckInfo, [\"SUSPENDED\"])\n\n        queue.SetSuspendedLifeTime(1)\n        time.sleep(1)\n        self.connector.proxy.forget_old_items()\n\n        WaitForExecution(pckInfo, [\"ERROR\"])\n","sub_path":"tests/test_16.py","file_name":"test_16.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"608109336","text":"# ----------\n# User Instructions:\n#\n# Create a function compute_value which returns\n# a grid of values. The value of a cell is the minimum\n# number of moves required to get from the cell to the goal.\n#\n# If a cell is a wall or it is impossible to reach the goal from a cell,\n# assign that cell a value of 99.\n# ----------\nimport collections\n\ngrid = [[0, 1, 0, 0, 0, 0],\n        [0, 1, 0, 0, 0, 0],\n        [0, 1, 0, 0, 0, 0],\n        [0, 1, 0, 0, 0, 0],\n        [0, 0, 0, 0, 1, 0]]\ngoal = [len(grid) - 1, len(grid[0]) - 1]\ncost = 1  # the cost associated with moving from a cell to an adjacent one\n\ndelta = [[-1, 0],  # go up\n         [0, -1],  # go left\n         [1, 0],  # go down\n         [0, 1]]  # go right\n\ndelta_name = ['^', '<', 'v', '>']\n\n\nclass Point(object):\n    visit_grid = [[False for col_items in range(len(grid[0]))] for row_items in range(len(grid))]\n\n    def __init__(self, row, col):\n        self.row = row\n        self.col = col\n        self.value = None\n\n    def is_visited(self):\n        return self.visit_grid[self.row][self.col]\n\n    def visit(self):\n        self.visit_grid[self.row][self.col] = True\n        return self\n\n    def new_neighbors(self):\n        neighbors = []\n        for dir in delta:\n            row = self.row + dir[0]\n            col = self.col + dir[1]\n            if row < 0 or col < 0:\n                continue\n            if row > len(grid) - 1 or col > len(grid[0]) - 1:\n                continue\n            if grid[row][col] == 1:\n                continue\n            if self.visit_grid[row][col]:\n                continue\n            neighbors.append(Point(row, col))\n        return neighbors\n\n\n\ndef compute_value(grid, goal, cost):\n    # Create a grid of 99s (what sebastian said we should use, but not sure why)\n    value = [[99 for col_items in range(len(grid[0]))] for row_items in range(len(grid))]\n    goal_point = Point(row=goal[0], col=goal[1])\n    goal_point.value = 0\n\n    # Breadth first search from goal\n    # add goal to the queue as the first item\n    todo = collections.deque()\n    todo.append(goal_point)\n\n    # while queue not empty\n    while len(todo) > 0:\n\n        # pop the first item off the queue as point\n        point = todo.popleft().visit()\n\n        # set value of point in the grid\n        value[point.row][point.col] = point.value\n\n        # get every neighbor of our point and for each one\n        for next_pt in point.new_neighbors():\n            # set the value of the neighbor to be point.value + cost\n            next_pt.value = point.value + cost\n            # add neighbor to the queue\n            todo.append(next_pt)\n\n    # return the value grid\n    return value\n\nprint(compute_value(grid, goal, cost))\n","sub_path":"udacity/ai_for_robotics/4_17_value_program.py","file_name":"4_17_value_program.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"448935853","text":"from threading import Thread\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import LaserScan\nimport random\n\nclass NavigateMap(Thread):\n\tdef __init__(self,botNum):\n\t\t# initialize\n\t\tThread.__init__(self)\n\t\tself.botNum = botNum\n\t\t\n\tdef run(self):\n\t\t#bot motion\n\t\trospy.loginfo(\"bot\" + self.botNum + 'started')\n\t\tvelTopic = '/robot' + self.botNum + '/cmd_vel'\n \n\t\tself.cmd_vel = rospy.Publisher(velTopic, Twist, queue_size=10)\n \n\t\tr = rospy.Rate(10)\n\n\t\tmove_cmd = Twist()\n\t\tmove_cmd.linear.x = 0.2\t\t#linear velocity along x\n\n\t\twhile not rospy.is_shutdown():\n\t\t\tmove_cmd.angular.z = random.randint(-2,2)\n\t\t\tself.cmd_vel.publish(move_cmd)\n\t\t\tr.sleep()\n \n\tdef shutdown(self):\n\t\t# stop bot\n\t\trospy.loginfo(\"Stop Bot\")\n\t\tself.cmd_vel.publish(Twist())\n\t\trospy.sleep(1)\n\t\t\ndef main():\n\ttry:\n\t\trospy.init_node('Navigate', anonymous=False, disable_signals = True)\n\t\t\n\t\tbots = []\n\t\t\n\t\tfor botNum 
in range(3):\n\t\t\tnewBot = NavigateMap(str(botNum))\n\t\t\tbots.append(newBot)\n\t\t\n\t\tfor botNum in range(3):\n\t\t\tbots[botNum].start()\n\t\t\n\t\tfor botNum in range(3):\n\t\t\tbots[botNum].join()\n\t\t\n\texcept:\n\t\trospy.loginfo(\"Node terminated.\")\n\t\t\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"scripts/sim.py","file_name":"sim.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"297364422","text":"from tkinter import *\r\nimport tkinter.messagebox as tkmb\r\nimport pygame\r\nimport random\r\nfrom os import path\r\nimport pkg_resources.py2_warn\r\ntry:\r\n import pkg_resources.py2_warn\r\nexcept ImportError:\r\n pass\r\n\r\nimg_dir = path.join(path.dirname(__file__),'sprites')\r\n\r\nWIDTH = 480\r\nHEIGHT = 600\r\nFPS = 50\r\n\r\nWHITE = (255,255,255)\r\nBLACK = (0,0,0)\r\nRED = (255,0,0)\r\nGREEN = (0,255,0)\r\nBLUE = (0,0,255)\r\nYELLOW = (255,255,0)\r\n\r\npygame.init()\r\npygame.mixer.init()\r\nscreen = pygame.display.set_mode((WIDTH,HEIGHT))\r\npygame.display.set_caption(\"Shooting Game\")\r\nclock = pygame.time.Clock()\r\n\r\nfont_name = pygame.font.match_font('arial')\r\ndef draw_text(surf,text,size,x,y):\r\n font = pygame.font.Font(font_name,size)\r\n text_surface = font.render(text,True,RED)\r\n text_rect = text_surface.get_rect()\r\n text_rect.midtop = (x,y)\r\n surf.blit(text_surface,text_rect)\r\n\r\ndef newmob():\r\n m = Mob()\r\n all_sprites.add(m)\r\n mobs.add(m)\r\n\r\ndef draw_shield_bar(surf,x,y,pct):\r\n if pct<0:\r\n pct = 0\r\n BAR_LENGTH = 100\r\n BAR_HEIGHT = 10\r\n fill = (pct/100)*BAR_LENGTH\r\n outline_rect = pygame.Rect(x,y,BAR_LENGTH,BAR_HEIGHT)\r\n fill_rect = pygame.Rect(x,y,fill,BAR_HEIGHT)\r\n pygame.draw.rect(surf,GREEN,fill_rect)\r\n pygame.draw.rect(surf,WHITE,outline_rect,2)\r\n\r\ndef draw_lives(surf,x,y,lives,img):\r\n for i in range(lives):\r\n img_rect = img.get_rect()\r\n img_rect.x = x + 30 * i\r\n img_rect.y = y\r\n surf.blit(img,img_rect)\r\n \r\n\r\nclass Player(pygame.sprite.Sprite):\r\n def __init__(self):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = pygame.transform.scale(player_img,(50,38))\r\n self.image.set_colorkey(WHITE)\r\n self.rect = self.image.get_rect()\r\n self.radius = 20 \r\n self.rect.centerx = WIDTH/2\r\n self.rect.bottom = HEIGHT - 10\r\n self.speedx = 0\r\n self.speedy = 0\r\n self.shield = 100\r\n self.shoot_delay = 10\r\n self.last_shot = pygame.time.get_ticks()\r\n self.lives = 10000\r\n self.hidden = False\r\n self.hide_timer = pygame.time.get_ticks()\r\n\r\n def update(self):\r\n if self.hidden and pygame.time.get_ticks() - self.hide_timer > 1000:\r\n self.hidden = False\r\n self.rect.centerx = WIDTH / 2\r\n self.rect.bottom = HEIGHT - 10\r\n self.speedx = 0\r\n self.speedy = 0\r\n keystate = pygame.key.get_pressed()\r\n if keystate[pygame.K_LEFT] or keystate[pygame.K_a]:\r\n self.speedx = -15\r\n if keystate[pygame.K_RIGHT] or keystate[pygame.K_d]:\r\n self.speedx = 15\r\n if keystate[pygame.K_UP] or keystate[pygame.K_w]:\r\n self.speedy = -15\r\n if keystate[pygame.K_DOWN] or keystate[pygame.K_s]:\r\n self.speedy = 1\r\n if keystate[pygame.K_SPACE]:\r\n self.shoot()\r\n self.rect.x += self.speedx\r\n self.rect.y += self.speedy\r\n if self.rect.right > WIDTH:\r\n self.rect.right = WIDTH\r\n if self.rect.left < 0:\r\n self.rect.left = 0\r\n if self.rect.top < 0:\r\n self.rect.top = 0\r\n if self.rect.bottom > HEIGHT:\r\n self.rect.bottom = HEIGHT\r\n\r\n def shoot(self):\r\n now = 
pygame.time.get_ticks()\r\n if now - self.last_shot > self.shoot_delay:\r\n self.last_shot = now\r\n bullet = Bullet(self.rect.centerx,self.rect.top)\r\n all_sprites.add(bullet)\r\n bullets.add(bullet)\r\n\r\n def hide(self):\r\n self.hidden = True\r\n self.hide_timer = pygame.time.get_ticks()\r\n self.rect.center = (WIDTH/2,HEIGHT+200)\r\n\r\nclass Mob(pygame.sprite.Sprite):\r\n def __init__(self):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image_orig = random.choice(meteor_images)\r\n \r\n self.image_orig.set_colorkey(WHITE)\r\n self.image = self.image_orig.copy()\r\n self.rect = self.image.get_rect()\r\n self.radius = int(self.rect.width*0.8/2)\r\n self.rect.x = random.randrange(WIDTH - self.rect.width)\r\n self.rect.y = random.randrange(-100,100)\r\n self.speedy = random.randrange(1,8)\r\n self.speedx = random.randrange(-3,3)\r\n self.rot = 0\r\n self.rot_speed = random.randrange(-8,8)\r\n self.last_update = pygame.time.get_ticks()\r\n\r\n def rotate(self):\r\n now = pygame.time.get_ticks()\r\n if now - self.last_update > 50:\r\n self.last_update = now\r\n self.rot = (self.rot + self.rot_speed) % 360\r\n new_image = pygame.transform.rotate(self.image_orig,self.rot_speed)\r\n old_center = self.rect.center\r\n self.image = new_image\r\n self.rect = self.image.get_rect()\r\n self.rect.center = old_center\r\n \r\n def update(self):\r\n self.rotate()\r\n self.rect.x += self.speedx\r\n self.rect.y += self.speedy\r\n if self.rect.top > HEIGHT + 10 or self.rect.left < -25 or self.rect.right > WIDTH + 20:\r\n self.rect.x = random.randrange(WIDTH - self.rect.width)\r\n self.rect.y = random.randrange(-100,-40)\r\n self.speedy = random.randrange(1,8)\r\n\r\nclass Bullet(pygame.sprite.Sprite):\r\n def __init__(self,x,y):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = bullet_img\r\n self.image.set_colorkey(WHITE)\r\n self.rect = self.image.get_rect()\r\n self.rect.bottom = y\r\n self.rect.centerx = x\r\n self.speedy = -10\r\n\r\n def update(self):\r\n self.rect.y += self.speedy\r\n if self.rect.bottom < 0:\r\n self.kill()\r\n\r\nclass Pow(pygame.sprite.Sprite):\r\n def __init__(self,center):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.type = random.choice(['shield','gun'])\r\n self.image = powerup_images[self.type]\r\n self.image.set_colorkey(WHITE)\r\n self.rect = self.image.get_rect()\r\n self.rect = center\r\n self.speedy = 5\r\n\r\n def update(self):\r\n self.rect.y += self.speedy\r\n if self.rect.top > HEIGHT:\r\n self.kill()\r\n\r\nclass Explosion(pygame.sprite.Sprite):\r\n def __init__(self,center,size):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.size = size\r\n self.image = explosion_anim[self.size][0]\r\n self.rect = self.image.get_rect()\r\n self.rect.center = center\r\n self.frame = 0\r\n self.last_update = pygame.time.get_ticks()\r\n self.frame_rate = 75\r\n\r\n def update(self):\r\n now = pygame.time.get_ticks()\r\n if now - self.last_update > self.frame_rate:\r\n self.last_update = now\r\n self.frame += 1\r\n if self.frame == len(explosion_anim[self.size]):\r\n self.kill()\r\n else:\r\n center = self.rect.center\r\n self.image = explosion_anim[self.size][self.frame]\r\n self.rect = self.image.get_rect()\r\n self.rect.center = center\r\n\r\n\r\nbackground = pygame.image.load(path.join(img_dir,'Background.png')).convert()\r\nbackground_rect = background.get_rect()\r\nplayer_img = pygame.image.load(path.join(img_dir,'Spaceship.png')).convert()\r\nplayer_mini_img = pygame.transform.scale(player_img,(25,19))\r\nplayer_mini_img.set_colorkey(WHITE)\r\nbullet_img = 
pygame.image.load(path.join(img_dir,'Bullet.png')).convert()\r\nmeteor_images = []\r\nmeteor_list = ['Meteor.png','Meteor1.png','Meteor2.png']\r\nfor img in meteor_list:\r\n meteor_images.append(pygame.image.load(path.join(img_dir,img)).convert())\r\nexplosion_anim = {}\r\nexplosion_anim['lg'] = []\r\nexplosion_anim['sm'] = []\r\nexplosion_anim['player'] = []\r\n\r\nfor i in range(9):\r\n filename = 'Explosion{}.png'.format(i)\r\n img = pygame.image.load(path.join(img_dir,filename)).convert()\r\n img.set_colorkey(WHITE)\r\n img_lg = pygame.transform.scale(img,(75,75))\r\n explosion_anim['lg'].append(img_lg)\r\n img_sm = pygame.transform.scale(img,(32,32))\r\n explosion_anim['sm'].append(img_sm)\r\n filename = 'Explosion{}.png'.format(i)\r\n img = pygame.image.load(path.join(img_dir,filename)).convert()\r\n img.set_colorkey(WHITE)\r\n explosion_anim['player'].append(img)\r\n\r\npowerup_images = {}\r\npowerup_images['shield'] = pygame.image.load(path.join(img_dir,'Shield.png')).convert()\r\npowerup_images['gun'] = pygame.image.load(path.join(img_dir,'Bolt.png')).convert()\r\n\r\n#shoot_sound = pygame.mixer.Sound(path.join(snd_dir,'pew.wav'))\r\n#expl_sound = []\r\n#for snd in ['expl3.wav', 'expl6.wav']:\r\n #expl_sounds.append(pygame.mixer.Sound(path.join(snd_dir,snd)))\r\n#player_die_sound = pygame.mixer.Sound(path.join(snd_dir,'rumble.ogg'))\r\n#pygame.mixer.music.load(path.join(snd_dir,'tgfcoder-FrozenJam-Seamlessloop.ogg'))\r\n#pygame.mixer.music.set_volume(0.4)\r\n\r\nall_sprites = pygame.sprite.Group()\r\nmobs = pygame.sprite.Group()\r\nbullets = pygame.sprite.Group()\r\npowerups = pygame.sprite.Group()\r\nplayer = Player()\r\nall_sprites.add(player)\r\n\r\nfor i in range(20):\r\n newmob()\r\n \r\nscore = 0\r\n#pygame.mixer.music.play(loops=-1)\r\n\r\nrunning = True\r\nwhile running: \r\n clock.tick(FPS)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n\r\n all_sprites.update()\r\n\r\n hits = pygame.sprite.groupcollide(mobs,bullets,True,True)\r\n for hit in hits:\r\n score += 50 - hit.radius\r\n print(score)\r\n #sound\r\n expl = Explosion(hit.rect.center,'lg')\r\n all_sprites.add(expl)\r\n while random.random() > 1:\r\n pow = Pow(hit.rect.center)\r\n all_sprites.add(pow)\r\n powerups.add(pow)\r\n newmob()\r\n \r\n hits = pygame.sprite.spritecollide(player, mobs, True,pygame.sprite.collide_circle)\r\n for hit in hits:\r\n player.shield -= hit.radius*2\r\n expl = Explosion(hit.rect.center,'sm')\r\n all_sprites.add(expl)\r\n newmob()\r\n if player.shield <= 0:\r\n #player_die_sound.play()\r\n death_explosion = Explosion(player.rect.center,'player')\r\n all_sprites.add(death_explosion)\r\n player.hide()\r\n player.lives -= 1\r\n player.shield = 100\r\n\r\n hits = pygame.sprite.spritecollide(player, powerups, True)\r\n for hit in hits:\r\n if hit.type == 'shield':\r\n player.shield += 20\r\n if player.shield >= 100:\r\n player.shield = 100 \r\n\r\n if player.lives == 0 and not death_explosion.alive():\r\n running = False\r\n\r\n screen.fill(BLACK)\r\n screen.blit(background,background_rect)\r\n all_sprites.draw(screen)\r\n draw_text(screen,str(score),18,WIDTH/2,50)\r\n draw_shield_bar(screen,5,5,player.shield)\r\n draw_lives(screen,WIDTH-100,5,player.lives,player_mini_img)\r\n pygame.display.flip()\r\n\r\n\r\npygame.quit()\r\n\r\nprint(\"FINAL SCORE : \",score)\r\ninput('Press any key to exit...')\r\n","sub_path":"Space 
Chaos/Initial/game/SpaceChaos_mod(inf).py","file_name":"SpaceChaos_mod(inf).py","file_ext":"py","file_size_in_byte":12615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"465564156","text":"from django.shortcuts import render\nfrom django.conf import settings\nfrom .models import Currencies\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render,redirect\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Comments\nfrom .forms import CommentForm\n\nmydict={\n \"Bitcoin\":1,\n \"Ethereum\":2,\n \"Ripple\":3,\n \"Bitcoin Cash\":4,\n \"Litecoin\":5,\n \"Cardano\":6,\n \"NEO\":7,\n \"Stellar\":8,\n \"EOS\":9,\n }\n\n\ndef curr1(request,*args,**kwargs):\n # print(mydict[kwargs])\n # print(mydict[kwargs])\n shit=kwargs['currencywa']\n print(shit)\n currency_objectwa=Currencies.objects.get(id=mydict[shit])\n\n\n commentsinstance = Comments.objects.filter(post=currency_objectwa)\n\n inititss={\n \"user\":request.user,\n \"post\":currency_objectwa,\n }\n myformofcomments=CommentForm(request.POST or None,initial=inititss)\n if myformofcomments.is_valid():\n #userwa=myformofcomments.cleaned_data.get('user')\n #postwa=myformofcomments.cleaned_data.get('post')\n commentdata=myformofcomments.cleaned_data.get(\"content\")\n\n createdcomment=Comments.objects.create(\n user=request.user,\n post=currency_objectwa,\n content=commentdata\n\n )\n if createdcomment:\n return HttpResponseRedirect(\"/currency/\"+shit)\n\n\n else:\n myformofcomments = CommentForm()\n\n\n context = {\n # \"object_list\": queryset,\n \"comments\" : commentsinstance,\n \"kwargs\": kwargs,\n \"myformofcomments\": myformofcomments,\n }\n return render(request, \"currency.html\",context)\n","sub_path":"src/currency/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"381262568","text":"from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.conf import settings\n\nfrom .models import Comment\nfrom .forms import CommentForm\n# Create your views here.\n\nLOGIN_URL = settings.LOGIN_URL\n\n\ndef comment_thread(request, id=None):\n\ttry:\n\t\tobj = Comment.objects.get(id=id)\n\texcept:\n\t\traise Http404\n\t\n\tinitial_data = {\n\t\t'content_type': obj.content_type,\n\t\t'object_id': obj.object_id\n\t}\n\n\tform = CommentForm(request.POST or None, initial=initial_data)\n\n\tif request.POST:\n\t\tif not request.user.is_authenticated:\n\t\t\treturn redirect('/login/?next=/comments/{id}'.format(id=id))\n\t\tif form.is_valid():\n\t\t\tc_type = form.cleaned_data.get(\"content_type\")\n\t\t\tcontent_type = ContentType.objects.get(model=c_type)\n\t\t\tobject_id = form.cleaned_data.get(\"object_id\")\n\t\t\tcontent = form.cleaned_data.get(\"content\")\n\t\t\tparent_obj = None\n\n\t\t\ttry:\n\t\t\t\tparent_id = int(request.POST.get('parent_id'))\n\t\t\texcept:\n\t\t\t\tparent_id = None\n\n\t\t\tif parent_id:\n\t\t\t\tparent_qs = Comment.objects.filter(id=parent_id)\n\t\t\t\tif parent_qs.exists():\n\t\t\t\t\tparent_obj = 
parent_qs.first()\n\n\n\t\t\tnew_comment, created = Comment.objects.get_or_create(\n\t\t\t\t\t\t\t\tuser=request.user,\n\t\t\t\t\t\t\t\tcontent_type=content_type,\n\t\t\t\t\t\t\t\tobject_id=object_id,\n\t\t\t\t\t\t\t\tcontent=content,\n\t\t\t\t\t\t\t\tparent=parent_obj,\n\t\t\t\t\t\t\t)\n\t\t\treturn HttpResponseRedirect(new_comment.parent.get_absolute_url())\n\n\tcontext = {\n\t\t'comment': obj,\n\t\t'form': form,\n\t}\n\treturn render(request, 'comment_thread.html', context)\n\n@login_required\ndef comment_delete(request, id=None):\n\ttry:\n\t\tobj = Comment.objects.get(id=id)\n\texcept:\n\t\traise Http404\n\n\tif obj.user != request.user:\n\t\traise PermissionDenied\n\n\tif request.method == 'POST':\n\t\tif not obj.is_parent:\n\t\t\tparent_obj_url = obj.parent.get_absolute_url()\n\t\telse:\n\t\t\tparent_obj_url = obj.content_object.get_absolute_url()\n\t\tobj.delete()\n\t\tmessages.success(request,'Comment Deleted.')\n\t\treturn HttpResponseRedirect(parent_obj_url)\n\n\tcontext = {\n\t\t'comment': obj,\n\t}\n\treturn render(request, 'comment_delete.html', context)","sub_path":"comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"364632919","text":"#Import dependencies \nfrom splinter import Browser\nfrom bs4 import BeautifulSoup as bs\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport requests\nimport pandas as pd\nimport time \n\ndef scrape_info():\n # Set up Splinter\n executable_path = {'executable_path': ChromeDriverManager().install()}\n browser = Browser('chrome', **executable_path, headless=False)\n\n##############################################################################\n #Mars News \n #Prepare everything for Mars News\n # Visit redplanetscience.com\n # URL of page to be scraped\n url = \"https://redplanetscience.com/\"\n browser.visit(url)\n\n # Scrape page into Soup\n html = browser.html\n soup = bs(html, \"html.parser\")\n\n\n #Get the whole list news titles and of teasers \n title_results = soup.find_all('div', class_='content_title')\n teaser_results = soup.find_all('div', class_='article_teaser_body')\n \n #Only get first news_title and teaser \n news_title = title_results[0].text.strip()\n news_p= teaser_results[0].text.strip()\n\n time.sleep(5)\n\n ###############################################\n\n #Featured Mars image\n #Find the image url for the current Featured Mars Image\n jpl_url = 'https://spaceimages-mars.com/'\n browser.visit(jpl_url)\n\n #Scrape page into soup \n html = browser.html\n soup = bs(html, 'html.parser')\n\n #Get featured image\n image_path = soup.find_all('img', class_=\"headerimage fade-in\")[0][\"src\"]\n featured_image = jpl_url + image_path\n\n time.sleep(5)\n\n ##############################################\n #Mars characteritics table \n t_url = \"https://galaxyfacts-mars.com/\"\n \n #Read the tables \n tables = pd.read_html(t_url)\n\n #Get the table with the Mars, Earth Comparison \n me_df = tables[0]\n me_df.columns = ['Mars-Earth Comparison', 'Mars', 'Earth']\n \n #Clean table and pass it to html \n mars_earth_table = me_df.iloc[1:5, :]\n html_table = mars_earth_table.to_html()\n html_table.replace('\\n', '')\n\n time.sleep(5)\n \n ###########################################################\n \n #Mars hemispheres\n # URL of page to be scraped\n url = \"https://marshemispheres.com/\"\n browser.visit(url)\n\n #Scrape page into soup \n html = browser.html\n soup = bs(html, 
'html.parser')\n \n #Create our empty list\n hemispheres_image_url = []\n\n items = soup.find_all('div', class_='item')\n \n #Loop through selected items\n for i in items:\n \n #Find \"Hemisphere\" title. Remember they are in h3 headers\n title = i.h3.text\n \n #Go inside to gthe hemisphere page to get image URL:\n link_url = i.find('a')['href']\n full_url= url + link_url\n browser.visit(full_url)\n img_url = browser.find_by_text('Sample')['href']\n #Save both the image url string for the full resolution hemisphere image, and the Hemisphere title containing the hemisphere name. Use a Python dictionary to store the data using the keys img_url and title.\n hemispheres_dict = {'Title': title,\n 'Image_URL': img_url}\n #Append the dictionary with the image url string and the hemisphere title to a list. This list will contain one dictionary for each hemisphere.\n hemispheres_image_url.append(hemispheres_dict)\n\n ################################\n\n \n \n#Store everything in a dictionary \n mars_data ={'news_title': news_title,\n 'news_p': news_p,\n 'image_url': featured_image,\n 'html_table': html_table,\n 'hemisphere_images': hemispheres_image_url\n }\n \n print(\"Ready!\")\n \n # Close the browser after scraping\n browser.quit()\n\n return mars_data\n \n","sub_path":"Mission_to_Mars/mars_scrape.py","file_name":"mars_scrape.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"61743796","text":"import math\nimport numpy as np\nimport sys\nimport csv\nimport os\n\n\ndef distance_checker(xyz1, xyz2):\n \"\"\" Returns distance between 2 threedimensional points\"\"\"\n return math.sqrt((xyz1[0] - xyz2[0])**2 + (xyz1[1] - xyz2[1])**2 +\n (xyz1[2] - xyz2[2])**2)\n\n\ndef normaliser(vec):\n \"\"\"Normalises a vector\"\"\"\n norm = np.linalg.norm(vec)\n for i in range(len(vec)):\n vec[i] = vec[i] / norm\n return vec\n\n\ndef angle_checker(vec1, vec2):\n \"\"\"\n Calculates angle in radians between two vectors. Note: this is an absolute\n angle.\n \"\"\"\n vec1 = normaliser(vec1)\n vec2 = normaliser(vec2)\n angle = np.arccos(np.clip(np.dot(vec1, vec2), -1, 1))\n return angle\n\n\ndef rotation_matrix(axis, theta):\n \"\"\"\n Return the rotation matrix associated with counterclockwise rotation about\n the given axis by theta radians. Taken from\n https://stackoverflow.com/questions/6802577/rotation-of-3d-vector\n \"\"\"\n axis = np.asarray(axis)\n axis = axis / math.sqrt(np.dot(axis, axis))\n a = math.cos(theta / 2.0)\n b, c, d = -axis * math.sin(theta / 2.0)\n aa, bb, cc, dd = a * a, b * b, c * c, d * d\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],\n [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],\n [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])\n\n\ndef check_bond_len(dict, el_a, el_b):\n \"\"\" Make sure all elements are in bond_len_dict, and return the value\"\"\"\n if el_a in dict:\n if el_b in dict[el_a]:\n return dict[el_a][el_b]\n print()\n print(el_a + \" and \" + el_b + \" bond length currently unsupported. 
Add value to the csv file.\")\n sys.exit()\n\n\ndef csv2dict(filename):\n \"\"\"\n Transforms the bond_lengths.csv to a dict\n \"\"\"\n dis_dict = {}\n with open(filename) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n el_a = row[\"Element Name\"]\n dis_dict[el_a] = {}\n for entry in row:\n if entry != \"Element Name\":\n dis_dict[el_a][entry] = float(row[entry])\n csvfile.close()\n return dis_dict\n\n\ndef bond_checker(atom, dict, bond_dict):\n \"\"\"Check for all atoms in bonding range\"\"\"\n bound = []\n for item, values in dict.items():\n bond_range = check_bond_len(bond_dict, atom[0], values[\"element\"]) + 0.2\n if distance_checker(atom[1:], values[\"coor\"]) <= bond_range:\n bound.append(item)\n return bound\n\n\ndef closest_atom(dict, coor):\n \"Given a dict and coordinates returns the closest atom\"\n min_dis = math.inf\n for atom, values in dict.items():\n dis = distance_checker(coor, values[\"coor\"])\n if dis < min_dis and dis > 0.01:\n min_dis = dis\n min_id = atom\n return min_id\n\n\ndef print_lig():\n \"\"\" Prints available ligands \"\"\"\n lig_list = os.listdir(\"../Ligands\")\n print()\n for ligs in lig_list:\n # Skip folders\n if ligs[-4:] == \".xyz\":\n print(ligs[:-4])\n print()\n\n\ndef file2dict(file, dict, start_id):\n \"\"\"\n Builds simple dict out of .xyz file, containing just id, elements and\n coordinates\n \"\"\"\n id = start_id\n line_number = 0\n file.seek(0)\n for line in file:\n if line_number == 0:\n n_atoms = int(float(line.strip()))\n if line_number >= 2 and line_number < n_atoms + 2:\n values_list = line.split()\n for i in range(1, 4):\n values_list[i] = float(values_list[i])\n dict[id] = {\n \"coor\": values_list[1:],\n \"element\": values_list[0]\n }\n id += 1\n line_number += 1\n return dict\n\n\ndef dict2file(dict, filename, foldername):\n \"\"\"\n Takes an atom dict and writes it to an .xyz file in foldername in\n /Created_QD with filename as name for the file\n \"\"\"\n if foldername:\n if not os.path.exists(\"../Created_QD/\" + foldername):\n os.makedirs(\"../Created_QD/\" + foldername)\n file = open(\"../Created_QD/\" + foldername + \"/\" + filename + \".xyz\", \"w\")\n else:\n file = open(\"../Created_QD/\" + filename + \".xyz\", \"w\")\n file.write(\" \\n\\n\")\n for atom, values in dict.items():\n file.write(values['element'] + \"\\t\" + str(values['coor'][0]) + \"\\t\\t\" +\n str(values['coor'][1]) + \"\\t\\t\" + str(values['coor'][2]) + \"\\n\")\n file.seek(0)\n file.write(str(len(dict)))\n file.close()\n print(\"\\nQuantum Dot created :)\")\n\n\ndef base_atom(dict):\n \"\"\" Finds atoms at the origin in a dict, returns its id\"\"\"\n for atom, values in dict.items():\n xyz = values[\"coor\"]\n if xyz[0] == xyz[1] == xyz[2] == 0:\n return atom\n\n\ndef y2true(text):\n \"\"\"Converts strings y and n to boolean\"\"\"\n while True:\n if text == 'y':\n return True\n elif text == 'n':\n return False\n else:\n text = input(\"Wrong input, try again: \")\n","sub_path":"scripts-TimoKoster/scripts/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"4873480","text":"#!/usr/bin/env python\n\n\"\"\"\n作业一:使用 Python+redis 实现高并发的计数器功能\n需求描述:\n在社交类网站中,经常需要对文章、视频等元素进行计数统计功能,热点文章和视频多为高并发请求,因此采用 redis 做为文章阅读、视频播放的计数器。\n请实现以下函数:\n\n复制代码\ncounter()\ndef counter(video_id: int):\n ...\n return count_number\n函数说明:\n\ncounter 函数为统计视频播放次数的函数,每调用一次,播放次数 +1\n参数 video_id 每个视频的 id,全局唯一\n基于 redis 
实现自增操作,确保线程安全\n期望执行结果:\nconuter(1001) # 返回 1\nconuter(1001) # 返回 2\nconuter(1002) # 返回 1\nconuter(1001) # 返回 3\nconuter(1002) # 返回 2\n\n遗留问题:\n1,用户体验不好,存储64万条数据不同线程池的 max_workers 的性能如下:\nmax_workers=100,总耗时:30m14.907s\n2,数据量太大,容易把 redis 搞挂\n3,基于 Django+ redis 实现,使用 redis 作为 Django 后端可参考如下代码:\nhttps://django-redis-chs.readthedocs.io/zh_CN/latest/\n\"\"\"\nimport os\nimport threading\nimport redis\nimport random\nfrom queue import Queue\nfrom dbconfig import read_db_config\nfrom record_log import RecordLog\nfrom concurrent.futures import ThreadPoolExecutor, as_completed, wait, FIRST_COMPLETED\n\n\nclass RedisBase:\n def __init__(self):\n self.base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n self.log = RecordLog().logger\n config = read_db_config()\n self.client = redis.Redis(**config)\n\n def hash_add(self, key, value, name='video'):\n \"\"\"\n 增加或者修改 value\n :param key:\n :param value:\n :param name:\n :return:\n \"\"\"\n self.client.hset(name, key, value)\n\n def hash_get(self, key, name='video'):\n \"\"\"\n 获取 value\n :param key:\n :param name:\n :return:\n \"\"\"\n value = self.client.hget(name, key)\n if value:\n return value.decode()\n self.log.error(f\"get key:{key} from {name} failed, pls check it out.\")\n\n def hash_getall(self, name):\n \"\"\"\n 返回数据库 哈希表中指定 name 的所有字段和值\n :param name:\n :return:\n \"\"\"\n return self.client.hgetall(name)\n\n def hash_print_all(self, name):\n \"\"\"\n 打印数据库 哈希表中指定 name 的所有字段和值\n :param name:\n :return:\n \"\"\"\n result = self.client.hgetall(name)\n for key, value in result.items():\n self.log.info(f\"{key.decode()}:{value.decode()}\")\n\n def hash_del(self, key, name='video'):\n \"\"\"\n 删除\n :param key:\n :param name:\n :return:\n \"\"\"\n self.client.hdel(name, key)\n\n def hash_member_set(self, value_dic, name='video'):\n \"\"\"\n 批量添加\n :param value_dic:\n :param name:\n :return:\n \"\"\"\n self.client.hmset(name, value_dic)\n\n def init_redis_by_keys(self, key_list, value, name='video'):\n \"\"\"\n 按 key 列表,批量初始化redis,value值为 0\n :param key_list:\n :param value:\n :param name:\n :return:\n \"\"\"\n for key in key_list:\n if not self.client.hget(name, key):\n self.hash_add(key, value, name=name)\n\n\nclass VideoCounter(RedisBase):\n def __init__(self, v_num):\n super(VideoCounter).__init__()\n self.video_num = v_num\n self.group_name = \"documentary\"\n self.video_id_list = [f'video_{i}' for i in range(self.video_num)]\n\n def init_data(self):\n \"\"\"\n 初始化所有的视频播放量,默认为 0\n :return:\n \"\"\"\n self.init_redis_by_keys(self.video_id_list, 0, self.group_name)\n\n def counter(self, video_id):\n \"\"\"\n 视频播放计数器\n :param video_id: 视频id\n :return:\n \"\"\"\n if video_id not in self.video_id_list:\n self.video_id_list.append(video_id)\n self.hash_add(video_id, 0, name=self.group_name)\n else:\n views = self.hash_get(video_id, name=self.group_name)\n if views:\n views = int(views) + 1\n self.hash_add(video_id, views, name=self.group_name)\n return views\n else:\n return False\n\n\ndef get_video_id(total_video_num, total_views):\n \"\"\"\n 获取总随机 video_id\n :param total_video_num:\n :param total_views:\n :return:\n \"\"\"\n for _ in range(total_views):\n yield f\"video_{random.randint(0, total_video_num-1)}\" # -1 是为了确保随机数是在 0 - total_video_num,不包含 total_video_num\n\n\ndef main():\n total_views = 1000000 # 网站视频总播放量\n total_video_num = 10 # 网站总视频数\n\n # 初始化数据到redis\n count = VideoCounter(total_video_num)\n count.init_data()\n\n # 批量线程通过map提交\n executor = ThreadPoolExecutor(max_workers=10)\n for result in 
executor.map(count.counter, get_video_id(total_video_num, total_views)):\n        pass\n\n    # 打印各视频播放量\n    count.hash_print_all(count.group_name)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"week05/homework/homework1/main_counter_redis.py","file_name":"main_counter_redis.py","file_ext":"py","file_size_in_byte":5308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"611868531","text":"import traceback\nfrom django.core.management.base import BaseCommand\nfrom ppatrigger.models import Project\nfrom travisclient import get_repo\n\n# For updating existing projects with new additions\n# * get project description field from Travis CI\n# * set rustci_token (implicit trigger)\nclass Command(BaseCommand):\n    args = ''\n    help = 'Fetch/update project descriptions for all projects'\n\n    def handle(self, *args, **options):\n\n        for project in Project.objects.filter(deleted = False):\n\n            repo = get_repo(project.username, project.repository)\n\n            if(repo and 'description' in repo and\n                    repo['description']):\n                project.description = repo['description']\n                \n                self.stdout.write(str(project) + ': ' +\\\n                        project.description)\n            else:\n                self.stdout.write('No description found: ' + str(project))\n\n            project.save()\n","sub_path":"tpt/ppatrigger/management/commands/updateprojects.py","file_name":"updateprojects.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"165068594","text":"import random as rand\n\nprint(\"*** Welcome to the Guessing Game by Rohith Boppey! ***\\n\\n\\n\")\nch = int(input(\"Do you want to start the game: \"))\nwhile(ch != 0):\n    number = rand.randrange(1, 10000)\n    ou = 0\n    n = 0\n    while(ou != number):\n        ou = int(input(\"Your number is: \"))\n        if(ou < number):\n            print(\"Number greater than\", ou, \"\\n\")\n        else:\n            print(\"Number less than\", ou, \"\\n\")\n    print(\"\\nDamn! You found the number!\\n\")\n    ch = int(input(\"Do you want to continue: \"))\nprint(\"\\nThank you for playing with me!\\n\\n\")\n    ","sub_path":"Guess.py","file_name":"Guess.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"51662645","text":"import io\nimport typing\n\nfrom blspy import G1Element\n\nfrom .as_python import as_python\nfrom .CLVMObject import CLVMObject, SExpType\n\nfrom .EvalError import EvalError\n\nfrom .casts import (\n    int_from_bytes,\n    int_to_bytes,\n)\nfrom .serialize import sexp_to_stream\n\n\nCastableType = typing.Union[\n    \"SExp\",\n    CLVMObject,\n    bytes,\n    int,\n    None,\n    SExpType,\n    G1Element,\n    typing.Tuple[typing.Any, typing.Any],\n]\n\nNULL = b\"\"\n\n\nclass SExp(CLVMObject):\n    true: \"SExp\"\n    false: \"SExp\"\n    __null__: \"SExp\"\n\n    @classmethod\n    def _to_sexp_type(\n        class_,\n        v: CastableType,\n    ) -> SExpType:\n        if isinstance(v, tuple):\n            assert len(v) == 2\n            left, right = v\n            if type(left) != CLVMObject:\n                left = CLVMObject(class_._to_sexp_type(left))\n            if type(right) != CLVMObject:\n                right = CLVMObject(class_._to_sexp_type(right))\n            return (left, right)\n        if isinstance(v, CLVMObject):\n            return v.pair or v.atom\n        if isinstance(v, bytes):\n            return v\n\n        if isinstance(v, int):\n            return int_to_bytes(v)\n        if isinstance(v, G1Element):\n            return bytes(v)\n        if v is None:\n            return NULL\n        if v == []:\n            return NULL\n\n        if hasattr(v, \"__iter__\"):\n            pair: SExpType = NULL\n            for _ in reversed(v):\n                pair = (\n                    class_.to(_),\n                    class_.to(pair),\n                )\n            return pair\n\n        raise ValueError(\"can't cast to %s: %s\" % (class_, v))\n\n    def as_pair(self):\n        pair = self.pair\n        if pair is None:\n            return pair\n        return (self.to(pair[0]), self.to(pair[1]))\n\n    def as_atom(self):\n        return self.atom\n\n    def listp(self):\n        return self.pair is not None\n\n    def nullp(self):\n        return self.atom == b\"\"\n\n    def as_int(self):\n        return int_from_bytes(self.atom)\n\n    def as_bin(self):\n        f = io.BytesIO()\n        sexp_to_stream(self, f)\n        return f.getvalue()\n\n    @classmethod\n    def to(class_, v: CastableType):\n        if isinstance(v, class_):\n            return v\n        v1 = class_._to_sexp_type(v)\n        return class_(v1)\n\n    def cons(self, right: \"CLVMObject\"):\n        s = (self, right)\n        return self.to(s)\n\n    def first(self):\n        pair = self.pair\n        if pair:\n            return self.to(pair[0])\n        raise EvalError(\"first of non-cons\", self)\n\n    def rest(self):\n        pair = self.pair\n        if pair:\n            return self.to(pair[1])\n        raise EvalError(\"rest of non-cons\", self)\n\n    @classmethod\n    def null(class_):\n        return class_.__null__\n\n    def as_iter(self):\n        v = self\n        while not v.nullp():\n            yield v.first()\n            v = v.rest()\n\n    def __eq__(self, other: CastableType):\n        try:\n            other = self.to(other)\n        except ValueError:\n            return False\n        to_compare_stack = [(self, other)]\n        while to_compare_stack:\n            s1, s2 = to_compare_stack.pop()\n            p1 = s1.as_pair()\n            if p1:\n                p2 = s2.as_pair()\n                if p2:\n                    to_compare_stack.append((p1[0], p2[0]))\n                    to_compare_stack.append((p1[1], p2[1]))\n                else:\n                    return False\n            elif s2.as_pair() or s1.as_atom() != s2.as_atom():\n                return False\n        return True\n\n    def list_len(self):\n        v = self\n        size = 0\n        while v.listp():\n            size += 1\n            v = v.rest()\n        return size\n\n    def as_python(self):\n        return as_python(self)\n\n    def __str__(self):\n        return self.as_bin().hex()\n\n    def __repr__(self):\n        return \"%s(%s)\" % (self.__class__.__name__, str(self))\n\n\nSExp.false = SExp.__null__ = SExp(b\"\")\nSExp.true = 
SExp(b\"\\1\")\n","sub_path":"clvm/SExp.py","file_name":"SExp.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"49039954","text":"# logger.py\n#\n# simple file logger\n\nfrom time import localtime, strftime\nfrom simtime import simtime\nfrom utility import *\nfrom datetime import datetime\nimport os\n\nlogfile = None\n\nLOG_PATH = './log/'\nLOG_FILENAME = 'pycontroller'\nLOG_EXT = '.log'\n\nclass logger(object) :\n\n the_logger = None\n traces = set()\n\n def __init__(self, filename=None, console=False, trace=None) :\n if filename is None :\n filename = f'{LOG_PATH}{LOG_FILENAME}{LOG_EXT}'\n if os.path.exists(filename):\n creation_time = os.path.getctime(filename)\n suffix = datetime.utcfromtimestamp(creation_time).strftime(\"%Y%m%dT%H%M%S\")\n ff = filename.split('.')\n if len(ff) > 1 and '/' not in ff[-1]:\n new_filename = '.'.join(ff[:-1]) + f\"_{suffix}.{ff[-1]}\"\n else:\n new_filename = f\"{filename}_{suffix}\"\n os.rename(filename, new_filename)\n try :\n self.logfile = open(filename, 'w')\n except IOError :\n print(\"failed to open log file\")\n self.console = console\n self.logfile.write(\"%s opening log '%s'\\n\" % (logger._get_time(), filename))\n for t in trace or [] :\n self._add_trace(t)\n logger.the_logger = self\n\n def _log(self, obj, message, *args) :\n if self.logfile :\n text = message % args\n try :\n extra = obj._log_extra()\n except AttributeError :\n extra = ''\n msg = \"%8s %20s.%-15s %s %s\" % \\\n (simtime.time(), obj.my_class, obj.name, text, extra)\n self.logfile.write(msg+'\\n')\n self.logfile.flush()\n if self.console :\n print(msg)\n\n def _trace(self, tr, obj, message, *args) :\n for t in logger.traces :\n if tr.startswith(t) :\n self._log(obj, message, *args)\n break\n\n def _add_trace(self, t) :\n if t :\n logger.traces.add(t)\n\n @staticmethod\n def log(obj, message, *args) :\n logger.the_logger._log(obj, message, *args)\n\n @staticmethod\n def trace(tr, obj, message, *args) :\n logger.the_logger._trace(tr, obj, message, *args)\n\n @staticmethod\n def _get_time() :\n return get_time_of_day()\n\n @staticmethod\n def add_trace(t) :\n logger.the_logger._add_trace(t)\n\ndef log(obj, message, *args) :\n logger.the_logger._log(obj, message, *args)\n\ndef trace(tr, obj, message, *args) :\n logger.the_logger._trace(tr, obj, message, *args)\n","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"46913468","text":"# -*- coding: utf-8 -*-\n\nimport random\nimport string\n\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\n\nfrom juntagrico.dao.activityareadao import ActivityAreaDao\nfrom juntagrico.dao.depotdao import DepotDao\nfrom juntagrico.dao.extrasubscriptiontypedao import ExtraSubscriptionTypeDao\nfrom juntagrico.dao.memberdao import MemberDao\nfrom juntagrico.dao.jobdao import JobDao\nfrom juntagrico.dao.subscriptionsizedao import SubscriptionSizeDao\nfrom juntagrico.decorators import primary_member_of_subscription\nfrom juntagrico.forms import *\nfrom juntagrico.models import *\nfrom juntagrico.views import get_menu_dict\nfrom juntagrico.util import temporal\n\n\ndef password_generator(size=8, chars=string.ascii_uppercase + string.digits): return ''.join(\n 
random.choice(chars) for x in range(size))\n\n\n@login_required\ndef subscription(request):\n \"\"\"\n Details for an subscription of a member\n \"\"\"\n renderdict = get_menu_dict(request)\n\n if request.user.member.subscription is not None:\n current_extrasubscriptions = request.user.member.subscription.extra_subscriptions.all()\n future_extrasubscriptions = request.user.member.subscription.future_extra_subscriptions.filter(active=False)\n extrasubscriptions_changed = set(current_extrasubscriptions) != set(future_extrasubscriptions)\n\n if request.user.member.subscription:\n renderdict.update({\n 'extrasubscriptions': current_extrasubscriptions,\n 'future_extrasubscriptions': future_extrasubscriptions,\n 'extrasubscriptions_changed': extrasubscriptions_changed,\n 'subscriptionmembers': request.user.member.subscription.recipients().exclude(\n email=request.user.member.email),\n 'primary': request.user.member.subscription.primary_member.email == request.user.member.email,\n 'next_extra_subscription_date': Subscription.next_extra_change_date(),\n 'next_size_date': Subscription.next_size_change_date()\n })\n renderdict.update({\n 'member': request.user.member,\n 'shares': request.user.member.share_set.count(),\n 'shares_unpaid': request.user.member.share_set.filter(paid_date=None).count(),\n 'menu': {'subscriptionnnement': 'active'},\n })\n return render(request, \"subscription.html\", renderdict)\n\n\n@primary_member_of_subscription\ndef subscription_change(request):\n \"\"\"\n change an subscription\n \"\"\"\n month = timezone.now().month\n renderdict = get_menu_dict(request)\n renderdict.update({\n 'member': request.user.member,\n 'change_size': month <= Config.business_year_cancelation_month(),\n 'next_cancel_date': temporal.next_cancelation_date(),\n 'has_extra_subscriptions': ExtraSubscriptionCategoryDao.all_categories_ordered().count() > 0,\n 'next_extra_subscription_date': Subscription.next_extra_change_date(),\n 'next_business_year': temporal.start_of_next_business_year()\n })\n return render(request, \"subscription_change.html\", renderdict)\n\n\n@primary_member_of_subscription\ndef depot_change(request):\n \"\"\"\n change a depot\n \"\"\"\n saved = False\n if request.method == \"POST\":\n request.user.member.subscription.future_depot = get_object_or_404(Depot, id=int(request.POST.get(\"depot\")))\n request.user.member.subscription.save()\n saved = True\n renderdict = get_menu_dict(request)\n renderdict.update({\n 'saved': saved,\n 'member': request.user.member,\n \"depots\": DepotDao.all_depots()\n })\n return render(request, \"depot_change.html\", renderdict)\n\n\n@primary_member_of_subscription\ndef size_change(request):\n \"\"\"\n change the size of an subscription\n \"\"\"\n saved = False\n if request.method == \"POST\" and int(time.strftime(\"%m\")) <= Config.business_year_cancelation_month() and int(request.POST.get(\"subscription\")) > 0:\n request.user.member.subscription.future_size = int(request.POST.get(\"subscription\"))\n request.user.member.subscription.save()\n saved = True\n renderdict = get_menu_dict(request)\n renderdict.update({\n 'saved': saved,\n 'next_cancel_date': temporal.next_cancelation_date(),\n 'size': request.user.member.subscription.future_size,\n 'subscription_sizes': SubscriptionSizeDao.all_sizes_ordered()\n })\n return render(request, \"size_change.html\", renderdict)\n\n\n@primary_member_of_subscription\ndef extra_change(request):\n \"\"\"\n change an extra subscription\n \"\"\"\n saved = False\n if request.method == \"POST\":\n for 
extra_subscription in ExtraSubscriptionType.objects.all():\n existing = request.user.member.subscription.extra_subscription_set.filter(type__id=extra_subscription.id)\n if request.POST.get(\"subscription\" + str(extra_subscription.id)) == str(extra_subscription.id):\n if existing.count()==0:\n future_extra_subscription = Extrasubscription.objects.create(main_subscription=request.user.member.subscription,type=extra_subscription)\n future_extra_subscription.active = False\n future_extra_subscription.save()\n else:\n has_active=False\n index=0\n while not has_active and index < existing.count():\n existing_extra_subscription = existing[index]\n if existing_extra_subscription.canceled==True and existing_extra_subscription.active==True:\n existing_extra_subscription.canceled=False;\n existing_extra_subscription.save();\n has_active=True\n index += 1\n elif existing.count() > 0:\n for existing_extra_subscription in existing:\n if existing_extra_subscription.canceled==False and existing_extra_subscription.active==True:\n existing_extra_subscription.canceled=True;\n existing_extra_subscription.save();\n elif existing_extra_subscription.deactivation_date is None and existing_extra_subscription.active==False:\n existing_extra_subscription.delete();\n request.user.member.subscription.save()\n saved = True\n \n subscriptions = []\n for subscription in ExtraSubscriptionTypeDao.all_extra_types():\n if request.user.member.subscription.future_extra_subscriptions.filter(type__id=subscription.id).count() > 0:\n subscriptions.append({\n 'id': subscription.id,\n 'name': subscription.name,\n 'selected': True\n })\n else:\n subscriptions.append({\n 'id': subscription.id,\n 'name': subscription.name\n })\n renderdict = get_menu_dict(request)\n renderdict.update({\n 'saved': saved,\n 'member': request.user.member,\n \"extras\": subscriptions\n })\n return render(request, \"extra_change.html\", renderdict)\n\n\ndef signup(request):\n \"\"\"\n Become a member of juntagrico\n \"\"\"\n success = False\n agberror = False\n agbchecked = False\n userexists = False\n if request.method == 'POST':\n agbchecked = request.POST.get(\"agb\") == \"on\"\n\n memberform = MemberProfileForm(request.POST)\n if not agbchecked:\n agberror = True\n else:\n if memberform.is_valid():\n # check if user already exists\n if User.objects.filter(email=memberform.cleaned_data['email']).__len__() > 0:\n userexists = True\n else:\n # set all fields of user\n # email is also username... 
we do not use it\n password = password_generator()\n member = Member(**memberform.cleaned_data)\n request.session['main_member'] = member\n return redirect(\"/my/create/subscrition\")\n else:\n memberform = MemberProfileForm()\n\n renderdict = {\n 'memberform': memberform,\n 'success': success,\n 'agberror': agberror,\n 'agbchecked': agbchecked,\n 'userexists': userexists,\n 'menu': {'join': 'active'},\n }\n return render(request, \"signup.html\", renderdict)\n\n\n\ndef welcome(request):\n \"\"\"\n welcome\n \"\"\"\n renderdict= {\n 'no_subscription': request.session['main_member'].subscription is None\n }\n\n return render(request, \"welcome.html\", renderdict)\n\n\ndef confirm(request, hash):\n \"\"\"\n Confirm from a user that has been added as a co_subscriptionnnent\n \"\"\"\n\n for member in MemberDao.all_members():\n if hash == hashlib.sha1((member.email + str(member.id)).encode('utf8')).hexdigest():\n member.confirmed = True\n member.save()\n\n return redirect(\"/my/home\")\n\n\n\ndef createsubscription(request):\n \"\"\"\n create a subscription\n \"\"\"\n if request.user.is_authenticated():\n member = request.user.member\n else:\n member = request.session.get('main_member')\n if member is None:\n return redirect(\"http://\"+Config.server_url())\n \n shareerror = False\n subscriptionform = SubscriptionForm()\n session_subscription = request.session.get('create_subscription')\n co_members = request.session.get('create_co_members', [])\n co_members_shares = request.session.get('create_co_members_shares', [])\n member_shares = request.session.get('create_member_shares', [])\n\n selectedsubscription = \"none\"\n selected_depot = None\n existing_member_shares = 0\n if member.pk is not None:\n existing_member_shares = member.share_set.all().count()\n shares = existing_member_shares\n\n if session_subscription is not None:\n selectedsubscription = next(\n iter(SubscriptionSizeDao.sizes_by_size(session_subscription.size).values_list('name', flat=True) or []),\n 'none')\n selected_depot = session_subscription.depot\n\n co_member_shares = len(co_members_shares)\n if request.method == \"POST\":\n shares += int(request.POST.get(\"shares\"))\n selectedsubscription = request.POST.get(\"subscription\")\n subscriptionform = SubscriptionForm(request.POST)\n\n shares += co_member_shares\n min_num_shares = next(\n iter(SubscriptionSizeDao.sizes_by_name(selectedsubscription).values_list('shares', flat=True) or []), 1)\n if shares < min_num_shares or not subscriptionform.is_valid():\n shareerror = shares < min_num_shares\n else:\n size = next(\n iter(SubscriptionSizeDao.sizes_by_name(selectedsubscription).values_list('size', flat=True) or []),\n 0)\n\n if size > 0:\n session_subscription = Subscription(**subscriptionform.cleaned_data)\n session_subscription.depot = DepotDao.depot_by_id(request.POST.get(\"depot\"))\n session_subscription.primary_member = member\n session_subscription.size = size\n\n if len(member_shares) < int(request.POST.get(\"shares\")):\n toadd = int(request.POST.get(\"shares\")) - len(member_shares)\n for num in range(0, toadd):\n member_shares.append(Share(member=member, paid_date=None))\n elif len(member_shares) > int(request.POST.get(\"shares\")):\n toremove = len(member_shares) - int(request.POST.get(\"shares\"))\n for num in range(0, toremove):\n member_shares.pop()\n\n if request.POST.get(\"add_member\"):\n request.session['create_subscription'] = session_subscription\n request.session['create_member_shares'] = member_shares\n return redirect(\"/my/cosubmember/0\")\n else:\n 
password = None\n if member.pk is None:\n member.save()\n password = password_generator()\n member.user.set_password(password)\n member.user.save()\n if session_subscription is not None:\n session_subscription.save()\n member.subscription_id = session_subscription.id\n member.save()\n send_welcome_mail(member.email, password, hashlib.sha1((member.email + str(\n member.id)).encode('utf8')).hexdigest())\n for co_member in co_members:\n co_member.subscription_id = session_subscription.id\n co_member.save()\n pw = None\n if co_member.confirmed is False:\n pw = password_generator()\n co_member.user.set_password(pw)\n co_member.user.save()\n send_been_added_to_subscription(co_member.email, pw, member.get_name(), shares,\n hashlib.sha1((co_member.email + str(\n co_member.id)).encode('utf8')).hexdigest())\n for share in member_shares + co_members_shares:\n if share.id is None:\n if share.member.email == member.email:\n share.member = member\n else:\n share.member = list((co_member for co_member in co_members if co_member.email == share.member.email))[0]\n share.save()\n send_share_created_mail(share)\n request.session['create_subscription'] = None\n request.session['create_co_members'] = []\n request.session['create_co_members_shares'] = []\n request.session['create_member_shares'] = []\n if request.user.is_authenticated():\n return redirect(\"/my/home\")\n else:\n return redirect(\"/my/welcome\")\n\n renderdict = {\n 'co_member_shares': co_member_shares,\n 'existing_member_shares': existing_member_shares,\n 'member': member,\n 'subscription_sizes': SubscriptionSizeDao.all_sizes_ordered(),\n 'depots': DepotDao.all_depots(),\n 'selected_depot': selected_depot,\n 'selected_subscription': selectedsubscription,\n 'shareerror': shareerror,\n 'co_members': co_members,\n 'subscriptionform': subscriptionform\n }\n return render(request, \"createsubscription.html\", renderdict)\n\n\n\ndef add_member(request, subscription_id):\n shareerror = False\n shares = 1\n memberexists = False\n memberblocked = False\n if request.method == 'POST':\n memberform = MemberProfileForm(request.POST)\n try:\n shares = int(request.POST.get(\"shares\"))\n shareerror = shares < 0\n except (TypeError, ValueError):\n shareerror = True\n member = next(iter(MemberDao.members_by_email(request.POST.get('email')) or []), None)\n if member is not None:\n memberexists = True\n shares = 0\n if member.subscription is not None:\n memberblocked = True\n\n if (memberform.is_valid() and shareerror is False) or (memberexists is True and memberblocked is False):\n tmp_shares = []\n pw = None\n if memberexists is False:\n for num in range(0, shares):\n member = Member(**memberform.cleaned_data)\n tmp_shares.append(Share(member=member, paid_date=None))\n else:\n for share in member.share_set.all():\n tmp_shares.append(share)\n if request.GET.get(\"return\"):\n member.subscription_id = subscription_id\n member.save()\n send_been_added_to_subscription(member.email, pw, request.user.member.get_name(), shares, hashlib.sha1((\n memberform.cleaned_data['email'] + str(subscription_id)).encode('utf8')).hexdigest())\n if memberexists is False:\n for share in tmp_shares:\n share.save()\n send_share_created_mail(share)\n return redirect(request.GET.get(\"return\"))\n else:\n co_members_shares = request.session.get('create_co_members_shares', [])\n co_members_shares += tmp_shares\n request.session['create_co_members_shares'] = co_members_shares\n co_members = request.session.get('create_co_members', [])\n co_members.append(member)\n request.session['create_co_members'] = co_members\n return 
redirect('/my/create/subscrition')\n else:\n if request.user.is_authenticated():\n member = request.user.member\n else:\n member = request.session.get('main_member')\n if member is None:\n return redirect(\"http://\"+Config.server_url())\n initial = {\"addr_street\": member.addr_street,\n \"addr_zipcode\": member.addr_zipcode,\n \"addr_location\": member.addr_location,\n \"phone\": member.phone,\n }\n memberform = MemberProfileForm(initial=initial)\n renderdict = {\n 'shares': shares,\n 'memberexists': memberexists,\n 'memberblocked': memberblocked,\n 'shareerror': shareerror,\n 'memberform': memberform,\n \"member\": member,\n \"depots\": DepotDao.all_depots(),\n \"cancelUrl\": request.GET.get(\"return\") if request.GET.get(\"return\") else '/my/create/subscrition'\n }\n return render(request, \"add_member.html\", renderdict)\n\n\ndef cancel_create_subscription(request):\n request.session['main_member'] = None\n request.session['create_subscription'] = None\n request.session['create_co_members'] = []\n request.session['create_co_members_shares'] = []\n request.session['create_member_shares'] = []\n if request.user.is_authenticated():\n return redirect('/my/subscription')\n else:\n return redirect(\"http://\"+Config.server_url())\n \n","sub_path":"juntagrico/views_subscription.py","file_name":"views_subscription.py","file_ext":"py","file_size_in_byte":18800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"528301470","text":"__author__ = 'larrath'\n\nfrom SymbolTable import ScopeChain\n\nglobal label_count\nglobal marker\n\ndef expect_label(xml_data, desired_value):\n if (xml_data.nodeName.strip() != desired_value):\n raise Exception('Label should be ' + str(desired_value) + '; instead got ' + xml_data.nodeName)\n\ndef expect_values(xml_data, desired_values):\n if (str(xml_data.firstChild.nodeValue).strip() not in desired_values):\n raise Exception('Label should be in ' + str(desired_values) + '; instead got ' + xml_data.firstChild.nodeValue) \n\ndef set_marker(tag=''):\n global label_count\n global marker\n if (tag == '' or tag is None):\n marker = ''\n else:\n marker = str(tag) + str(label_count)\n label_count += 1\n\ndef get_marker():\n global marker\n if (len(marker) > 0):\n old_mark = marker\n marker = ''\n return old_mark + '\\n'\n\n return ''\n\n\ndef encode(xml_data):\n expect_label(xml_data, 'class')\n global label_count\n label_count = 0\n set_marker(None)\n\n # parsing new class\n scope = ScopeChain()\n\n result = compileClassDeclaration(xml_data, scope)\n return result\n\ndef compileClassDeclaration(xml_data, scope):\n class_data = list(xml_data.childNodes)\n class_name = str(class_data[1].firstChild.nodeValue).strip()\n scope.setClassName(class_name)\n buf = ''\n\n class_data = class_data[3:] # discard previously handled elements\n\n # Scan all function declarations into scope before compilation begins\n func_declarations = xml_data\n for func_root in func_declarations.childNodes:\n if (func_root.nodeName == 'subroutineDec'):\n func_data = func_root.childNodes\n func_visibility = str(func_data[0].firstChild.nodeValue).strip()\n func_name = str(func_data[2].firstChild.nodeValue).strip()\n scope.defineFunction(func_name, func_visibility, class_name)\n\n\n while (class_data):\n operation = class_data[0].tagName\n if (operation == 'subroutineDec'):\n buf += compileClassBody(class_data[0], scope)\n elif (operation == 'classVarDec'):\n compileClassVarDeclaration(class_data[0], scope)\n elif (operation == 'symbol' and 
class_data[0].firstChild.nodeValue.strip() == '}'):\n pass\n else:\n raise Exception('Unknown operation in class body')\n\n class_data.pop(0)\n\n return buf\n\ndef compileClassBody(xml_data, scope):\n expect_label(xml_data, 'subroutineDec')\n\n buf = ''\n\n while (xml_data.hasChildNodes()):\n current_top = xml_data.childNodes[0]\n command = str(current_top.firstChild.nodeValue).strip()\n if (command in ['constructor', 'function', 'method']):\n buf += compileFunctionDeclaration(current_top.parentNode, scope)\n\n return buf\n\ndef compileFunctionDeclaration(xml_data, scope):\n expect_values(xml_data.firstChild, ['constructor', 'function', 'method'])\n \n func_data = list(xml_data.childNodes)\n buf = ''\n \n function_declare_mode = func_data[0].firstChild.nodeValue.strip()\n function_return_type = func_data[1].firstChild.nodeValue.strip()\n function_name = func_data[2].firstChild.nodeValue.strip()\n arg_count = compileFunctionArguments(func_data[4], scope, function_declare_mode)\n function_body, field_count = compileFunctionBody(func_data[6], scope, function_declare_mode)\n\n scope.defineFunction(function_name, function_declare_mode, scope.getClassName())\n\n buf += 'function ' + str(scope.getClassName()) + '.' + function_name + ' ' + str(field_count) + '\\n'\n\n if (function_declare_mode == 'constructor'):\n buf += 'push constant ' + str(scope.varCount('field')) + '\\n' + 'call Memory.alloc 1\\n' + 'pop pointer 0\\n'\n elif (function_declare_mode == 'method'):\n buf += 'push argument 0\\n' + 'pop pointer 0\\n'\n\n elif (function_declare_mode == 'function'):\n pass\n else:\n raise NotImplementedError\n\n buf += function_body\n # destroy the nodes we compiled\n\n xml_data.parentNode.removeChild(xml_data)\n xml_data.unlink()\n return buf\n\ndef compileFunctionArguments(parameterList, scope, mode):\n scope.pushNewScope()\n expect_label(parameterList, 'parameterList')\n\n if (mode == 'method'):\n scope.define('placeholder', 'NoType', 'argument')\n\n params = list(parameterList.childNodes)\n count = 0\n if (len(params) == 1 and params[0].nodeValue == '\\n'):\n return count\n\n while (params):\n count += 1\n\n var_type = params.pop(0).firstChild.nodeValue.strip()\n var_name = params.pop(0).firstChild.nodeValue.strip()\n scope.define(var_name, var_type, 'argument')\n\n if (params): # discard delimiter, if one exists (last entry has none)\n params.pop(0)\n\n\n # destroy the nodes we compiled\n parameterList.parentNode.removeChild(parameterList)\n parameterList.unlink()\n return count\n\ndef compileFunctionBody(body, scope, mode='function'):\n data = list(body.childNodes)\n data = data[1:-1] # remove first and last elements ( '{' and '}' )\n statements = ''\n args = 0\n while (data):\n action = data[0].tagName\n if (action == 'varDec'):\n args += compileVarDeclaration(data[0], scope, mode)\n elif (action == 'statements'):\n statements = str(compileStatements(data[0], scope, mode))\n else:\n raise Exception('Unknown operation in function body')\n data.pop(0)\n\n return statements, str(args)\n \ndef compileVarDeclaration(declaration, scope, mode='function'):\n expect_label(declaration, 'varDec')\n\n var_count = 0\n data = list(declaration.childNodes)\n\n declaration_kind = data.pop(0).firstChild.nodeValue.strip()\n var_type = data.pop(0).firstChild.nodeValue.strip()\n\n while (data):\n var_name = data.pop(0).firstChild.nodeValue.strip()\n scope.define(var_name, var_type, declaration_kind)\n var_count += 1\n data.pop(0) # discard delimiter\n\n # Variable declarations do not generate code. 
It's enough to update the scope with data.\n return var_count\n\ndef compileClassVarDeclaration(declaration, scope, mode='function'):\n expect_label(declaration, 'classVarDec')\n\n var_count = 0\n data = list(declaration.childNodes)\n\n declaration_kind = data.pop(0).firstChild.nodeValue.strip()\n var_type = data.pop(0).firstChild.nodeValue.strip()\n\n while (data):\n var_name = data.pop(0).firstChild.nodeValue.strip()\n scope.define(var_name, var_type, declaration_kind)\n var_count += 1\n data.pop(0) # discard delimiter\n\n # Variable declarations do not generate code. It's enough to update the scope with data.\n return var_count\n\ndef compileStatements(xml_data, scope, mode='function'):\n expect_label(xml_data, 'statements')\n data = list(xml_data.childNodes)\n buf = ''\n \n for statement in data:\n statement_type = statement.tagName\n\n if (statement_type == 'letStatement'):\n buf += compileLetStatement(statement, scope, mode)\n elif (statement_type == 'ifStatement'):\n buf += compileIfStatement(statement, scope, mode)\n elif (statement_type == 'whileStatement'):\n buf += compileWhileStatement(statement, scope, mode)\n elif (statement_type == 'doStatement'):\n buf += compileDoStatement(statement, scope, mode)\n elif (statement_type == 'returnStatement'):\n buf += compileReturnStatement(statement, scope, mode)\n elif (statement_type == 'symbol' and str(statement.firstChild.nodeValue) == '}'):\n return buf\n else:\n raise Exception('Unknown statement type')\n return buf\n\n\ndef compileLetStatement(xml_data, scope, mode):\n expect_label(xml_data, 'letStatement')\n\n data = list(xml_data.childNodes)\n var_name = str(data[1].firstChild.nodeValue).strip()\n assign_to_array = (str(data[2].firstChild.nodeValue).strip() == '[')\n\n var_full_name = str(scope.kindOf(var_name)) + ' ' + str(scope.indexOf(var_name))\n buf = ''\n\n if (assign_to_array):\n rvalue_exp = data[6]\n arr_exp = data[3]\n # seek to position in array\n buf += compileExpression(arr_exp, scope, mode)\n buf += 'push ' + var_full_name + '\\n' + 'add\\n'\n\n # obtain value from rvalue expression\n buf += compileExpression(rvalue_exp, scope, mode)\n\n # assign value into position in array\n buf += 'pop temp 0\\n' + 'pop pointer 1\\n' + 'push temp 0\\n' + 'pop that 0\\n'\n else:\n rvalue_exp = data[3]\n buf += compileExpression(rvalue_exp, scope, mode)\n buf += 'pop ' + var_full_name + '\\n'\n\n buf += get_marker()\n\n # remove nodes for garbage collection\n xml_data.parentNode.removeChild(xml_data)\n xml_data.unlink()\n return buf\n\ndef compileIfStatement(xml_data, scope, mode):\n expect_label(xml_data, 'ifStatement')\n data = list(xml_data.childNodes)\n\n cond_exp = data[2]\n buf = ''\n statement_body = data[5]\n\n if (len(data) == 11): # If statement with Else statement\n else_statement = data[9]\n # Evaluate condition\n buf += compileExpression(cond_exp, scope, mode)\n\n # Jump instruction if condition is true\n set_marker('IF-TRUE')\n true_label = str(get_marker())\n buf += 'if-goto ' + true_label\n\n # Jump instruction if false\n set_marker('IF-FALSE')\n false_label = str(get_marker())\n buf += 'goto ' + false_label\n\n # If block body\n buf += 'label ' + true_label\n\n empty_if_body = (statement_body.firstChild.nodeValue == '\\n')\n if (not empty_if_body):\n buf += compileStatements(statement_body, scope, mode)\n\n # Jump to termination of If block body (skip Else instructions)\n set_marker('IF-EXIT')\n exit_label = str(get_marker())\n buf += 'goto ' + exit_label\n\n # Else block body\n buf += 'label ' + false_label\n 
empty_else_body = (else_statement.firstChild.nodeValue == '\\n')\n if (not empty_else_body):\n buf += compileStatements(else_statement, scope, mode)\n\n # Termination of If body\n buf += 'label ' + exit_label\n\n elif (len(data) == 7): # If statement without an Else statement\n # Evaluate condition\n buf += compileExpression(cond_exp, scope, mode)\n\n # Jump instruction if condition is true\n set_marker('IF-TRUE')\n true_label = str(get_marker())\n buf += 'if-goto ' + true_label\n\n # Jump instruction if false\n set_marker('IF-FALSE')\n false_label = str(get_marker())\n buf += 'goto ' + false_label\n\n # If block body\n buf += 'label ' + true_label\n empty_if_body = (statement_body.firstChild.nodeValue == '\\n')\n if (not empty_if_body):\n buf += compileStatements(statement_body, scope, mode)\n\n # Termination of If body\n buf += 'label ' + false_label\n\n else:\n print('====bad node count in if statement====')\n raise NotImplementedError\n\n # remove nodes for garbage collection\n xml_data.parentNode.removeChild(xml_data)\n xml_data.unlink()\n return buf\n\ndef compileWhileStatement(xml_data, scope, mode):\n expect_label(xml_data, 'whileStatement')\n\n buf = ''\n data = list(xml_data.childNodes)\n\n cond_exp = data[2]\n statement_body = data[5]\n empty_body = False\n\n if (str(statement_body.firstChild.nodeValue).strip() == '}' or (statement_body.firstChild.nodeValue) == '\\n'):\n empty_body = True\n\n # Set label for condition evaluation\n set_marker('WHILE_COND')\n cond_marker = get_marker()\n buf += 'label ' + cond_marker\n buf += compileExpression(cond_exp, scope, mode)\n buf += 'not\\n'\n\n # Set loop exit point and create jump instruction\n set_marker('WHILE_END')\n end_marker = get_marker()\n buf += 'if-goto ' + end_marker\n\n # Perform iteration\n if (not empty_body):\n buf += compileStatements(statement_body, scope)\n buf += 'goto ' + cond_marker\n\n # Set finish point\n buf += 'label ' + end_marker\n\n # remove nodes for garbage collection\n xml_data.parentNode.removeChild(xml_data)\n xml_data.unlink()\n return buf\n\ndef compileDoStatement(xml_data, scope, mode):\n expect_label(xml_data, 'doStatement')\n data = list(xml_data.childNodes)\n buf = ''\n pushed = False\n params_count = 0\n\n if (data[2].firstChild.nodeValue.strip() == '.'): # do Class.Function( params ) or do Obj.Method( params )\n class_name, called_from_object = objectOrClass(data[1], scope)\n\n if (called_from_object):\n obj_name = str(data[1].firstChild.nodeValue).strip()\n params_count += 1\n buf += 'push ' + str(scope.kindOf(obj_name)) + ' ' + str(scope.indexOf(obj_name)) + '\\n'\n\n function_name = str(class_name) + '.' + str(data[3].firstChild.nodeValue.strip())\n params = data[5]\n function_type = scope.getFunctionData(str(data[3].firstChild.nodeValue.strip()), class_name)\n\n else: # do func ( params )\n function_name = scope.getClassName() + '.' 
+ str(data[1].firstChild.nodeValue).strip()\n params = data[3]\n function_type = scope.getFunctionData(str(data[1].firstChild.nodeValue).strip(), scope.getClassName())\n if (function_type is not None and function_type[1] == 'method'):\n buf += 'push pointer 0\\n'\n params_count += 1\n pushed = True\n\n for arg in params.childNodes:\n if (str(arg.nodeValue) == '\\n'):\n break\n if (str(arg.firstChild.nodeValue).strip() == ','):\n continue\n buf += str(compileExpression(arg, scope, mode))\n params_count += 1\n\n if (not pushed and function_type is not None and function_type[1] == 'method'):\n buf += 'push pointer 0\\n'\n params_count += 1\n\n buf += 'call ' + function_name + ' ' + str(params_count) + '\\n' + 'pop temp 0\\n'\n\n return buf\n\n\ndef compileReturnStatement(xml_data, scope, mode):\n expect_label(xml_data, 'returnStatement')\n data = list(xml_data.childNodes)\n buf = ''\n \n if (data[1].tagName == 'expression'):\n buf += compileExpression(data[1], scope, mode)\n else:\n buf += 'push constant 0\\n'\n\n return buf + 'return\\n'\n\ndef compileExpression(xml_data, scope, mode='function'):\n expect_label(xml_data, 'expression')\n\n terms = list(xml_data.childNodes)\n\n if (len(terms) == 1): # expression is a constant, a nested expression, unary operation or function call\n return str(extractTerm(terms[0], scope, mode))\n\n elif (len(terms) == 3): # expression is (term op term)\n left_term = extractTerm(terms[0], scope, mode)\n operation = handleBinaryOpSymbol(str(terms[1].firstChild.nodeValue).strip(), terms[1], scope)\n right_term = extractTerm(terms[2], scope, mode)\n return str(left_term) + str(right_term) + str(operation)\n\n elif (len(terms) % 2 == 1): # term (op term)* with multiple repititions\n parsing_term = True\n\n term_list = []\n op_list = []\n buf = ''\n\n while (len(terms) > 0):\n if (parsing_term):\n node = terms.pop(0)\n term_list.append(extractTerm(node, scope, mode))\n parsing_term = False\n else:\n node = terms.pop(0)\n op_list.append(handleBinaryOpSymbol(str(node.firstChild.nodeValue).strip(), None, scope))\n parsing_term = True\n\n buf += term_list.pop(0)\n for (term, op) in zip(term_list, op_list):\n buf += term\n buf += op\n\n return buf\n\n\n raise Exception('Cannot recognize expression')\n\n\ndef extractTerm(root, scope, mode='function'):\n expect_label(root, 'term')\n term = root.firstChild\n label = term.nodeName\n\n if (label == 'keyword'): # generate constant value value\n return keywordConstant(str(term.firstChild.nodeValue).strip())\n\n elif (label == 'integerConstant'): # constant number\n return 'push constant ' + str(term.firstChild.nodeValue).strip() + '\\n'\n\n elif (label == 'stringConstant'): # generate string\n return stringConstant(str(term.firstChild.nodeValue)[1:-1])\n\n elif (label == 'identifier'): # load variable from scope\n siblings = list(term.parentNode.childNodes)\n buf = ''\n param_count = 0\n if (len(siblings) > 4):\n if (str(siblings[1].firstChild.nodeValue).strip() == '.'): # Class.Function(expressionList);\n class_name, is_method = objectOrClass(siblings[0], scope)\n func_name = str(siblings[2].firstChild.nodeValue).strip()\n\n if (is_method):\n obj_name = str(siblings[0].firstChild.nodeValue).strip()\n param_count += 1\n buf += 'push ' + str(scope.kindOf(obj_name)) + ' ' + str(scope.indexOf(obj_name)) + '\\n'\n\n for param in siblings[4].childNodes:\n if (param.nodeValue == '\\n'): # empty parameter list\n break\n if (param.nodeName == 'symbol' and str(param.firstChild.nodeValue).strip() == ','):\n continue\n param_count += 1\n 
buf += str(compileExpression(param, scope, mode))\n\n buf += 'call ' + str(class_name) + '.' + str(func_name) + ' ' + str(param_count) + '\\n'\n return buf\n\n elif (term.nextSibling is not None and term.nextSibling.nodeName == 'symbol'):\n if (str(term.nextSibling.firstChild.nodeValue).strip() == '['): # array access\n arr_name = str(term.firstChild.nodeValue).strip()\n buf += compileExpression(term.nextSibling.nextSibling, scope)\n buf += 'push ' + str(scope.kindOf(arr_name)) + ' ' + str(scope.indexOf(arr_name)) + '\\n'\n buf += 'add\\n'\n buf += 'pop pointer 1\\n' + 'push that 0\\n'\n return buf\n\n elif (str(term.nextSibling.firstChild.nodeValue).strip() == '('): # implicit function invocation\n func_data = scope.getFunctionData(str(term.firstChild.nodeValue).strip(), scope.getClassName())\n func_name = func_data[0]\n func_type = func_data[1]\n\n if (func_type == 'method'):\n buf += 'push pointer 0\\n'\n param_count += 1\n\n params = term.nextSibling.nextSibling\n for arg in params.childNodes:\n if (arg.nodeValue == '\\n'): # empty function call\n break\n if (arg.nodeName == 'symbol'):\n continue\n buf += compileExpression(arg, scope, mode)\n param_count += 1\n\n buf += 'call ' + str(scope.getClassName()) + '.' + str(func_name) + ' ' + str(param_count) + '\\n'\n return buf\n\n elif (str(term.nextSibling.firstChild.nodeValue).strip() == '.'): # explicit function invocation\n nodes = term.parentNode.childNodes\n\n class_name = str(nodes[0].firstChild.nodeValue).strip()\n func_name = str(nodes[2].firstChild.nodeValue).strip()\n\n if (scope.indexOf(class_name) is not None):\n param_count += 1\n class_name = scope.kindOf(class_name)\n\n func_data = scope.getFunctionData(func_name, class_name)\n func_type = func_data[1]\n\n if (func_type == 'method'):\n buf += 'push pointer 0\\n'\n param_count += 1\n\n params = term.nextSibling.nextSibling\n for arg in params.childNodes:\n if (arg.nodeName == 'symbol'):\n continue\n buf += compileExpression(arg, scope, mode)\n param_count += 1\n\n buf += 'call ' + str(class_name) + '.' 
+ str(func_name) + ' ' + str(param_count) + '\\n'\n return buf\n\n else: # assuming it's a variable\n var_name = str(term.firstChild.nodeValue).strip()\n return 'push ' + str(scope.kindOf(var_name)) + ' ' + str(scope.indexOf(var_name)) + '\\n'\n\n raise NotImplementedError\n\n elif (label == 'symbol'): # parentheses around expression or unary operation\n if (str(term.firstChild.nodeValue).strip() in ['-','~']):\n return extractTerm(term.nextSibling, scope, mode) + handleUnaryOpSymbol(str(term.firstChild.nodeValue).strip())\n elif (str(term.firstChild.nodeValue).strip() == '('):\n return compileExpression(term.nextSibling, scope, mode)\n else:\n raise NotImplementedError\n\n\n raise Exception('Unknown term')\n\ndef handleBinaryOpSymbol(sym, node, scope):\n if (sym == '('):\n return compileExpression(node.nextSibling, scope)\n elif (sym == '+'):\n return 'add\\n'\n elif (sym == '-'):\n return 'sub\\n'\n elif (sym == '*'):\n return 'call Math.multiply 2\\n'\n elif (sym == '/'):\n return 'call Math.divide 2\\n'\n elif (sym == '&'):\n return 'and\\n'\n elif (sym == '|'):\n return 'or\\n'\n elif (sym == '='):\n return 'eq\\n'\n elif (sym == '<'):\n return 'lt\\n'\n elif (sym == '>'):\n return 'gt\\n'\n\n raise NotImplementedError\n\ndef handleUnaryOpSymbol(sym):\n if (sym == '-'):\n return 'neg\\n'\n elif (sym == '~'):\n return 'not\\n'\n\n raise NotImplementedError\n\ndef keywordConstant(keyword):\n if (keyword in ['null', 'false']):\n return 'push constant 0\\n'\n elif (keyword == 'true'):\n return 'push constant 0\\nnot\\n'\n elif (keyword == 'this'):\n return 'push pointer 0\\n'\n\n raise NotImplementedError\n\ndef stringConstant(string):\n buf = ''\n str_len = len(string)\n buf += 'push constant '+ str(str_len) + '\\ncall String.new 1\\n'\n for c in list(string):\n buf += 'push constant ' + str(ord(c)) + '\\ncall String.appendChar 2\\n'\n\n return buf\n\ndef objectOrClass(obj_node, scope):\n node_name = str(obj_node.firstChild.nodeValue).strip()\n if (scope.indexOf(node_name) is not None):\n return scope.typeOf(node_name), True\n else:\n return node_name, False\n","sub_path":"CodeWriter.py","file_name":"CodeWriter.py","file_ext":"py","file_size_in_byte":22363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"13079027","text":"import sqlite3\nd = (\n ('VIZ', 10000),\n ('Centr', 20000),\n ('Uralmash', 15000),\n ('Elmash', 14000),\n ('Sortirovka', 7000)\n)\n\ncon = sqlite3.connect(\"cat.db\")\ncur = con.cursor()\ncur.execute(\"DROP TABLE IF EXISTS distr\")\ncur.execute(\"CREATE TABLE distr (id INTEGER PRIMARY KEY , name TEXT, population INT)\")\ncur.executemany(\"INSERT INTO distr(name, population) VALUES (?,?)\", d)\n#except sqlite3.DatabaseError:\n# print (\"Error:\")\n#else:\n# print (\"Query executed successfully\")\ncon.commit()\n\nwith con: \n # cur = con.cursor() \n cur.execute(\"SELECT * FROM distr\")\n rows = cur.fetchall()\n \n for row in rows:\n print (row)\n \ncur.close()\ncon.close()\n","sub_path":"sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"536728598","text":"# Building a calculator\r\n\r\nimport re\r\n\r\nprint(\"Our Magical Calculator\")\r\nprint(\" S H A D I\\n\")\r\nprint(\"Type 'quit' to exit\\n\")\r\n\r\nprevious = 0\r\nrun = True\r\n\r\ndef performMath():\r\n global run\r\n global previous\r\n equation = \"\"\r\n if previous == 0:\r\n equation = input(\"Enter equation: \")\r\n else:\r\n 
equation = input(str(previous))\r\n if equation == 'quit':\r\n print(\"Goodbye, human.\")\r\n run=False\r\n else:\r\n equation = re.sub('[a-zA-Z,.:()\" \"]','',equation)\r\n if previous == 0:\r\n previous = eval(equation)\r\n else:\r\n previous = eval(str(previous) + equation)\r\n\r\nwhile run:\r\n performMath()\r\n\r\n\r\n# Our Magical Calculator\r\n# S H A D I\r\n# Type 'quit' to exit\r\n\r\n# Enter equation: 5+1\r\n# 6*3\r\n# 18+11\r\n# 29-23\r\n# 6*12\r\n# 72%12\r\n# Enter equation: quit\r\n# Goodbye, human.","sub_path":"Python Projects/Build Calculator/04_Step.py","file_name":"04_Step.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"601546350","text":"import turtle, random, math\n\nscreen = turtle.Screen()\nscreen.bgcolor(\"black\")\nscreen.delay(0)\nscreen.register_shape('paddle',((-30,-10), (-30,10), (30,10), (30,-10)))\n\nsprite = turtle.Turtle()\nsprite.penup()\nsprite.speed(0)\nsprite.color(\"white\")\nsprite.ht()\npaddle1 = sprite.clone()\npaddle1.shape(\"paddle\")\npaddle1.st()\npaddle2 = paddle1.clone()\npaddle1.goto(160, 0)\npaddle2.goto(-160, 0)\n\npuck = sprite.clone()\npuck.shape(\"circle\")\npuck.goto(0,0)\npuck.seth(0)\npuck.st()\nspeed = 5\nedge = 200\ndx = random.uniform(0.5,1)*speed\ndy = ((speed**2)-(dx**2))**0.5\ndef update():\n global dx, dy\n x = puck.xcor()\n y = puck.ycor()\n if(math.fabs(x) > edge):\n dx = -dx\n elif(math.fabs(y) > edge):\n dy = -dy\n\n puck.goto(x+dx,y+dy)\n screen.ontimer(update, 10)\n\nupdate()\ndef check_collided(paddle):\n if(math.fabs(paddle.xcor() - puck.xcor()) < 15 and math.fabs(paddle.ycor() - puck.ycor()) < 35):\n return True\n\n return False\n\ndef update():\n global dx, dy\n x = puck.xcor()\n y = puck.ycor()\n if(math.fabs(x) > edge or check_collided(paddle1) or check_collided(paddle2)):\n dx = -dx\n elif(math.fabs(y) > edge):\n dy = -dy\n\n puck.goto(x+dx,y+dy)\n screen.ontimer(update, 10)\ndef right_up():\n if(edge-paddle1.ycor() > 30):\n paddle1.goto(paddle1.xcor(), paddle1.ycor()+20)\n\ndef right_down():\n if(edge+paddle1.ycor() > 30):\n paddle1.goto(paddle1.xcor(), paddle1.ycor()-20)\n\ndef left_up():\n if(edge-paddle2.ycor() > 30):\n paddle2.goto(paddle2.xcor(), paddle2.ycor()+20)\n\ndef left_down():\n if(edge+paddle2.ycor() > 30):\n paddle2.goto(paddle2.xcor(), paddle2.ycor()-20)\n\nscreen.onkey(right_up, \"Up\")\nscreen.onkey(right_down, \"Down\")\nscreen.onkey(left_up, \"w\")\nscreen.onkey(left_down, \"s\")\nscreen.listen()\nscreen.mainloop()\n","sub_path":"Pong.py","file_name":"Pong.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"255845816","text":"from scipy.io.wavfile import read\nfrom scipy.signal import stft\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\nrcParams.update({'figure.autolayout': True})\n\nrate, audio = read(\"../lyd/piano-C4.wav\")\nrateT, audioT = read(\"../lyd/trumpet-C4.wav\")\n\nfourier = np.fft.fft(audio)\nfourier = np.abs(fourier)\n\nfreq = np.fft.fftfreq(len(fourier), d=1/rate)\n\nplt.figure()\nplt.plot(freq[5:4000], fourier[5:4000])\nplt.xlabel(\"Frequency [Hz]\")\nplt.ylabel(\"Amplitude\")\nplt.savefig(\"C4freq.pdf\")\n\ntime = np.linspace(0, len(audio)/rate, len(audio))\nplt.figure()\nplt.plot(time, audio, 'r')\nplt.xlabel(\"Time [s]\")\nplt.savefig(\"C4audio.pdf\")\n\n# window=['rectangular','hanning','hamming','blackman']\n\ndef 
stftplot(window,windowlength,overlap,inputsignal,samplerate, plot=True):\n \"\"\"\n GENERAL\n ----------\n This function takes an input signal and plots a spectrogram of the windowed signal.\n The function uses the function stft from scipy.signal.\n \n \n Parameters\n ----------\n window : TYPE str\n DESCRIPTION: Specifies the applied window function. \n If window function is not specified, default: Hanning window\n \n windowlength : TYPE int\n DESCRIPTION: Length of each frame\n Defaults to: 256\n \n overlap : TYPE: float, int\n DESCRIPTION: Percentwise overlap from frame to frame\n Default: 0.5\n \n inputsignal : TYPE list, tuple, array\n DESCRIPTION: The signal which is analysed\n \n samplerate : TYPE float, int\n DESCRIPTION: samples per second\n \n plot :TYPE bool\n DESCRIPTION: When True, a spectrogram of the windowed \n inputsignal is plotted. \n Default: True\n\n Returns\n -------\n f : TYPE Array of float 64\n DESCRIPTION: Array of sample frequencies.\n \n t : TYPE Array of float 64\n DESCRIPTION: Array of segment times.\n \n Zxx : TYPE Array of complex64\n DESCRIPTION: STFT of windowed signal \n \"\"\"\n \n f,t,Zxx = stft(inputsignal, fs=samplerate, window=window, nfft=windowlength ,nperseg=windowlength, noverlap=windowlength*overlap) \n \n if plot==True: \n plt.figure()\n plt.pcolormesh(t, f, np.abs(Zxx), vmin=0,)\n plt.colorbar()\n plt.title(f'STFT Magnitude, window: {window}')\n plt.ylabel('Frequency [Hz]')\n plt.xlabel('Time [sec]')\n plt.savefig(f'spectrogramC4{window}.jpg', dpi=150)\n return f, t, Zxx\n\noverlap= 0.5\nf,t,Zxx=stftplot('hanning',200,overlap,audio,rate)\n#Zxx= np.reshape(Zxx, (len(Zxx[0]),len(Zxx)))\n\n#print((time[-1])/(1/rate*200*(1-overlap))) #This is how the number of frames is computed\n","sub_path":"figur/FFTPAFIG.py","file_name":"FFTPAFIG.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"210313477","text":"from __future__ import absolute_import, division, print_function\n\nimport argparse\nimport logging\nimport os\nimport time\nimport traceback\nfrom pathlib import Path\n\nimport absl\nimport lib\nimport numpy as np\nimport tensorflow as tf\nfrom absl import flags\nfrom lib.attack_l0 import ClassConstrainedProximalL0Attack\nfrom lib.attack_l1 import (ClassConstrainedL1Attack,\n ClassConstrainedProximalL1Attack)\nfrom lib.attack_l2 import (ClassConstrainedL2Attack,\n ClassConstrainedProximalL2Attack)\nfrom lib.attack_li import ClassConstrainedProximalLiAttack\nfrom lib.attack_utils import AttackOptimizationLoop\nfrom lib.tf_utils import (MetricsDictionary, l0_metric, l0_pixel_metric,\n l1_metric, l2_metric, li_metric, make_input_pipeline)\nfrom lib.utils import (format_float, import_klass_annotations_as_flags,\n log_metrics, register_experiment_flags, reset_metrics,\n setup_experiment)\nfrom tensorboard.plugins.hparams import api as hp\n\nfrom config import test_thresholds\nfrom data import load_cifar10\nfrom models import MadryCNNTf\nfrom utils import load_madry\n\n# general experiment parameters\nregister_experiment_flags(working_dir=\"../results/cifar10/test_lp\")\nflags.DEFINE_string(\"attack\", None,\n \"choice of the attack ('l0', 'l1', 'l2', 'l2g', 'li')\")\nflags.DEFINE_bool(\"attack_save\", False, \"True if save results of the attack\")\nflags.DEFINE_string(\"load_from\", None, \"path to load checkpoint from\")\n# test parameters\nflags.DEFINE_integer(\"num_batches\", -1, \"number of batches to corrupt\")\nflags.DEFINE_integer(\"batch_size\", 100, \"batch 
size\")\nflags.DEFINE_integer(\"validation_size\", 10000, \"validation size\")\n\n# attack parameters\nimport_klass_annotations_as_flags(AttackOptimizationLoop, \"attack_loop_\")\n\nFLAGS = flags.FLAGS\n\nlp_attacks = {\n \"l0\": (\"l0\", ClassConstrainedProximalL0Attack),\n \"l1\": (\"l1\", ClassConstrainedProximalL1Attack),\n \"l1g\": (\"l1\", ClassConstrainedL1Attack),\n \"l2\": (\"l2\", ClassConstrainedProximalL2Attack),\n \"l2g\": (\"l2\", ClassConstrainedL2Attack),\n \"li\": (\"li\", ClassConstrainedProximalLiAttack),\n}\n\n\ndef import_flags(attack):\n assert attack in lp_attacks\n attack_klass = lp_attacks[attack][1]\n import_klass_annotations_as_flags(attack_klass, \"attack_\")\n\n\ndef main(unused_args):\n assert len(unused_args) == 1, unused_args\n norm, attack_klass = lp_attacks[FLAGS.attack]\n\n assert FLAGS.load_from is not None\n setup_experiment(\n f\"madry_{norm}_test\",\n [\n __file__,\n lib.attack_lp.__file__,\n getattr(lib, f\"attack_{norm}\").__file__,\n lib.attack_utils.__file__,\n lib.utils.__file__,\n ],\n )\n\n # data\n _, _, test_ds = load_cifar10(FLAGS.validation_size,\n data_format=\"NHWC\",\n seed=FLAGS.data_seed)\n test_ds = tf.data.Dataset.from_tensor_slices(test_ds)\n test_ds = make_input_pipeline(test_ds,\n shuffle=False,\n batch_size=FLAGS.batch_size)\n\n # models\n num_classes = 10\n model_type = Path(FLAGS.load_from).stem.split(\"_\")[-1]\n classifier = MadryCNNTf(model_type=model_type)\n\n def test_classifier(x, **kwargs):\n return classifier(x, training=False, **kwargs)\n\n # load classifier\n X_shape = tf.TensorShape([FLAGS.batch_size, 32, 32, 3])\n y_shape = tf.TensorShape([FLAGS.batch_size, num_classes])\n classifier(tf.zeros(X_shape))\n load_madry(FLAGS.load_from,\n classifier.trainable_variables,\n model_type=model_type)\n\n # attacks\n attack_loop_kwargs = {\n kwarg.replace(\"attack_loop_\", \"\"): getattr(FLAGS, kwarg)\n for kwarg in dir(FLAGS) if kwarg.startswith(\"attack_loop_\")\n }\n attack_kwargs = {\n kwarg.replace(\"attack_\", \"\"): getattr(FLAGS, kwarg)\n for kwarg in dir(FLAGS)\n if kwarg.startswith(\"attack_\") and not kwarg.startswith(\"attack_loop_\")\n and kwarg not in [\"attack_save\"]\n }\n alp = attack_klass(lambda x: test_classifier(x)[\"logits\"], **attack_kwargs)\n alp.build([X_shape, y_shape])\n allp = AttackOptimizationLoop(alp, **attack_loop_kwargs)\n\n # test metrics\n test_metrics = MetricsDictionary()\n\n @tf.function\n def test_step(image, label):\n outs = test_classifier(image)\n is_corr = outs[\"pred\"] == label\n\n label_onehot = tf.one_hot(label, num_classes)\n image_adv = allp.run_loop(image, label_onehot)\n\n outs_adv = test_classifier(image_adv)\n is_adv = outs_adv[\"pred\"] != label\n\n # metrics\n nll_loss = tf.keras.metrics.sparse_categorical_crossentropy(\n label, outs[\"logits\"])\n acc_fn = tf.keras.metrics.sparse_categorical_accuracy\n acc = acc_fn(label, outs[\"logits\"])\n acc_adv = acc_fn(label, outs_adv[\"logits\"])\n\n # accumulate metrics\n test_metrics[\"nll_loss\"](nll_loss)\n test_metrics[\"acc\"](acc)\n test_metrics[\"conf\"](outs[\"conf\"])\n test_metrics[f\"acc_{norm}\"](acc_adv)\n test_metrics[f\"conf_{norm}\"](outs_adv[\"conf\"])\n\n # measure norm\n r = image - image_adv\n lp = alp.lp_metric(r)\n l0 = l0_metric(r)\n l0p = l0_pixel_metric(r)\n l1 = l1_metric(r)\n l2 = l2_metric(r)\n li = li_metric(r)\n test_metrics[\"l0\"](l0)\n test_metrics[\"l0p\"](l0p)\n test_metrics[\"l1\"](l1)\n test_metrics[\"l2\"](l2)\n test_metrics[\"li\"](li)\n # exclude incorrectly classified\n 
test_metrics[\"l0_corr\"](l0[tf.logical_and(is_corr, is_adv)])\n test_metrics[\"l0p_corr\"](l0p[tf.logical_and(is_corr, is_adv)])\n test_metrics[\"l1_corr\"](l1[tf.logical_and(is_corr, is_adv)])\n test_metrics[\"l2_corr\"](l2[tf.logical_and(is_corr, is_adv)])\n test_metrics[\"li_corr\"](li[tf.logical_and(is_corr, is_adv)])\n\n # robust accuracy at threshold\n for threshold in test_thresholds[norm]:\n is_adv_at_th = tf.logical_and(lp <= threshold, is_adv)\n test_metrics[f\"acc_{norm}_%s\" %\n format_float(threshold, 4)](~is_adv_at_th)\n test_metrics[\"success_rate\"](is_adv[is_corr])\n\n return image_adv\n\n # reset metrics\n reset_metrics(test_metrics)\n start_time = time.time()\n try:\n is_completed = False\n X_adv = []\n for batch_index, (image, label) in enumerate(test_ds, 1):\n X_adv_b = test_step(image, label)\n X_adv.append(X_adv_b)\n log_metrics(\n test_metrics,\n \"Batch results [{}, {:.2f}s]:\".format(batch_index,\n time.time() -\n start_time),\n )\n if FLAGS.num_batches != -1 and batch_index >= FLAGS.num_batches:\n is_completed = True\n break\n else:\n is_completed = True\n X_adv = np.concatenate(X_adv, axis=0)\n if is_completed:\n if FLAGS.attack_save:\n np.save(\n Path(FLAGS.working_dir) / \"attack.npy\",\n X_adv.reshape(X_adv.shape[0], -1))\n # hyperparameter tuning\n with tf.summary.create_file_writer(FLAGS.working_dir).as_default():\n # hyperparameters\n hp_param_names = [\n kwarg for kwarg in dir(FLAGS)\n if kwarg.startswith(\"attack_\")\n ]\n hp_metric_names = [f\"final_{norm}\", f\"final_{norm}_corr\"]\n hp_params = [\n hp.HParam(hp_param_name)\n for hp_param_name in hp_param_names\n ]\n hp_metrics = [\n hp.Metric(hp_metric_name)\n for hp_metric_name in hp_metric_names\n ]\n hp.hparams_config(hparams=hp_params, metrics=hp_metrics)\n hp.hparams({\n hp_param_name: getattr(FLAGS, hp_param_name)\n for hp_param_name in hp_param_names\n })\n final_lp = test_metrics[f\"{norm}\"].result()\n tf.summary.scalar(f\"final_{norm}\", final_lp, step=1)\n final_lp_corr = test_metrics[f\"{norm}_corr\"].result()\n tf.summary.scalar(f\"final_{norm}_corr\", final_lp_corr, step=1)\n tf.summary.flush()\n except KeyboardInterrupt as e:\n logging.info(\"Stopping after {}\".format(batch_index))\n except Exception:\n traceback.print_exc()\n finally:\n log_metrics(\n test_metrics,\n \"Test results [{:.2f}s, {}]:\".format(time.time() - start_time,\n batch_index),\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--attack\", default=None, type=str)\n args, _ = parser.parse_known_args()\n import_flags(args.attack)\n absl.app.run(main)\n","sub_path":"cifar10/test_our_attack.py","file_name":"test_our_attack.py","file_ext":"py","file_size_in_byte":9103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"441983601","text":"\"\"\"tests video averaging\"\"\"\nimport argparse\nimport cv2\nimport scipy.misc\nimport pickle\nimport h5py\nimport numpy as np\n\n#construct the argumentpase and parse th arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-v\", \"--video\", required=True,\n help=\"Path to input video file\")\nap.add_argument(\"-o\", \"--output\", required=True,\n help=\"path to output file\")\nap.add_argument(\"-ow\", \"--output_width\", required=False, \n help=\"width of output\")\nap.add_argument(\"-oh\", \"--output_height\",required=False,\n help=\"height of output\")\nargs = vars(ap.parse_args())\n\n#initialize the Red, green and blue averages\n(rAvg, gAvg, bAvg) = (None, None, 
None)\n#initialize the total number of frames read from the file\ntotal_frames = 0\n\n#load the saved impact frames dict\nwith open('./data/frames/frames_vid1.pkl', 'rb') as f:\n frames = pickle.load(f)\n frames = sorted(list(frames.values()))\n#use a counter to keep track of which impact frame is being processed\nimpact_frame_idx = 0\n\n#open a pointer to the video file\nprint(\"[INFO] opening video file pointer...\")\nstream = cv2.VideoCapture(args[\"video\"])\nnum_frames = stream.get(cv2.CAP_PROP_FRAME_COUNT)\n\n#open a pointer to h5 file for writing\nh5f = h5py.File(args[\"output\"], \"w\")\n\nwidth=227\nheight=227\ntry:\n if args[\"output_width\"] != None:\n width = int(args[\"output_width\"])\n if args[\"output_height\"] != None:\n height = int(args[\"output_height\"])\nexcept ValueError:\n print(\"[ERROR]please enter an integer for output_height and output_width\")\ndset = h5f.create_dataset(name=args[\"video\"][:-4]+\"_smaller\", shape=(int(num_frames // 5), height, width, 3), dtype=np.uint8)\nprint(\"[INFO] computing frame averages, this may take a while...\")\n#current index being processed\ncurrent_idx = impact_frame_idx\noutput_frame = 0\n#loop over the frames from the video file stream\nwhile True:\n if total_frames == num_frames:\n break\n #grab the frame from the file stream\n (grabbed, frame) = stream.read()\n #if a frame was not grabbed, we have reached the end of the file\n if grabbed is False:\n break\n\n #split the frame into its respective channels\n (B, G, R) = cv2.split(frame.astype(\"float\"))\n\n #if the frame averages are none, initialize them\n if rAvg is None:\n rAvg = R\n gAvg = G\n bAvg = B\n #otherwise compute the weighted average between the history of frames and current frame\n else:\n rAvg = ((total_frames * rAvg) + (1 * R)) / (total_frames + 1.0)\n gAvg = ((total_frames * gAvg) + (1 * G)) / (total_frames + 1.0)\n bAvg = ((total_frames * bAvg) + (1 * B)) / (total_frames + 1.0)\n\n #increment the total number of frames read thus far\n total_frames += 1\n\n #for every 5 frames, get the averaged output for the FCN\n try:\n if total_frames == frames[impact_frame_idx] - 30:\n #increment the impact frame counter, this should only be done the first time\n print(\"start of skipped frames at {}\".format(total_frames))\n current_idx = impact_frame_idx\n impact_frame_idx += 1\n #check if the frame is within +-30 of the previous skipped frame\n elif (frames[current_idx] - 30) < total_frames <= (frames[current_idx] + 30):\n print(\"still skipping {}\".format(total_frames))\n\n elif total_frames % 5 == 0:\n # print(\"averaging 5 frames\")\n #merge the RGB averages together\n avg = cv2.merge([bAvg, gAvg, rAvg]).astype(np.uint8)\n avg = cv2.resize(avg, (width, height))\n\n #write averaged frames to output video\n dset[output_frame,:] = avg\n output_frame += 1\n #clear the averages\n (rAvg, gAvg, bAvg) = (None, None, None)\n except IndexError:\n avg = cv2.merge([bAvg, gAvg, rAvg]).astype(np.uint8)\n # avg = cv2.resize(avg, (width, height))\n avg = cv2.resize(avg, (width, height))\n dset[output_frame] = avg\n break\n#close the file pointer\nstream.release()\nh5f.close()\n","sub_path":"filtered_video_data.py","file_name":"filtered_video_data.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"105723014","text":"#!/usr/bin/env python3\n#\n# Copyright (c) 2016-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in 
the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nimport os\nimport pathlib\n\nfrom .lib import overlay, repobase, testcase\n\n\nclass FsckTest(testcase.EdenRepoTest):\n def populate_repo(self) -> None:\n self.repo.write_file(\"README.md\", \"tbd\\n\")\n self.repo.write_file(\"proj/src/main.c\", \"int main() { return 0; }\\n\")\n self.repo.write_file(\"proj/src/lib.c\", \"void foo() {}\\n\")\n self.repo.write_file(\"proj/src/include/lib.h\", \"#pragma once\\nvoid foo();\\n\")\n self.repo.write_file(\n \"proj/test/test.sh\", \"#!/bin/bash\\necho test\\n\", mode=0o755\n )\n self.repo.write_file(\"doc/foo.txt\", \"foo\\n\")\n self.repo.write_file(\"doc/bar.txt\", \"bar\\n\")\n self.repo.symlink(\"proj/doc\", \"../doc\")\n self.repo.commit(\"Initial commit.\")\n\n def create_repo(self, name: str) -> repobase.Repository:\n return self.create_hg_repo(\"main\")\n\n def setup_eden_test(self) -> None:\n super().setup_eden_test()\n self.overlay = overlay.OverlayStore(self.eden, self.mount_path)\n\n def test_fsck_no_issues(self) -> None:\n output = self.eden.run_cmd(\"fsck\", self.mount)\n self.assertIn(\"No issues found\", output)\n\n def test_fsck_empty_overlay_file(self) -> None:\n overlay_path = self.overlay.materialize_file(pathlib.Path(\"doc/foo.txt\"))\n self.eden.run_cmd(\"unmount\", self.mount)\n\n # Truncate the file to 0 length\n with overlay_path.open(\"wb\"):\n pass\n\n self.eden.run_cmd(\"mount\", self.mount)\n\n cmd_result = self.eden.run_unchecked(\"fsck\", self.mount)\n self.assertEqual(1, cmd_result.returncode)\n","sub_path":"eden/integration/fsck_test.py","file_name":"fsck_test.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"398390997","text":"# Author: Vedant Singhania\n# Machine Learning | Spring 2020\n# University of Colorado Denver\n# Final Project\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import MinMaxScaler\nimport numpy as np\nimport cv2\nimport os\nimport h5py\n\ntrain_path = \"dataset/images/set1_train\"\n\n# HSV Histogram\ndef HSV_histogram(image, mask=None):\n # smoothing image - very important\n image = cv2.bilateralFilter(image, 7, sigmaSpace=75, sigmaColor=75)\n # image converted from RGB to HSV\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n # image resized\n image = cv2.resize(image, tuple((500, 500)))\n histogram = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])\n cv2.normalize(histogram, histogram)\n return histogram.flatten()\n # histogram gets returned as feature vector\n\n# empty lists to hold feature vectors and labels\nglobal_features = []\nlabels = []\n# Classes\ntrain_labels = ['Green', 'Midripe', 'Overripe', 'Yellowish_Green']\n\n# loop over images\nimages_in_class = 0\nprefix = ''\nfor training_name in train_labels:\n dir = os.path.join(train_path, training_name)\n current_label = training_name\n if(training_name == \"Green\"):\n images_in_class = 100\n prefix = \"g\"\n if (training_name == \"Midripe\"):\n images_in_class = 84\n prefix = \"m\"\n if (training_name == \"Overripe\"):\n images_in_class = 29\n prefix = \"v\"\n if (training_name == \"Yellowish_Green\"):\n images_in_class = 44\n prefix = \"y\"\n # check images in each folder\n for x in range(1, images_in_class+1):\n file = dir + '/' + str(prefix) + str(x).zfill(3) + \".jpg\"\n #print(file)\n #img = 
cv2.imread(\"dataset/images/set1_train/Green/g001.jpg\")\n # Read in images\n img = cv2.imread(file)\n fv_histogram = HSV_histogram(img)\n\n global_feature = fv_histogram\n labels.append(current_label)\n global_features.append(global_feature)\n\n print(\"****Folder pre-processing complete: \".format(current_label))\n\nprint(\"**** Feature Extraction Complete.****\")\n\n# label class variables\ntargetNames = np.unique(labels)\nle = LabelEncoder()\ntarget = le.fit_transform(labels)\n\n# scale the extracted HSV features\nscaler = MinMaxScaler(feature_range=(0, 1))\nrescaled_features = scaler.fit_transform(global_features)\n#print(rescaled_features)\n\n# hsv features and labels stored in h5 instead of csv files\nhsv_features_h5 = h5py.File('output/data.h5', 'w')\nhsv_features_h5.create_dataset('dataset_1', data=np.array(rescaled_features))\nhsv_labels_h5 = h5py.File('output/labels.h5', 'w')\nhsv_labels_h5.create_dataset('dataset_1', data=np.array(target))\nhsv_features_h5.close()\nhsv_labels_h5.close()\n\nprint(\"***Training Complete***\")\n","sub_path":"feature_extraction.py","file_name":"feature_extraction.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"240096390","text":"\"\"\"\nClasses related to tidal turbine farms in Thetis.\n\"\"\"\nfrom firedrake import *\nfrom .log import *\nfrom .callback import DiagnosticCallback\nfrom .optimisation import DiagnosticOptimisationCallback\nimport numpy\n\n\nclass TurbineFarm(object):\n \"\"\"\n Evaluates power output, costs and profit of tidal turbine farm\n\n Cost is simply the number of turbines evaluated as the integral of the\n turbine density.\n\n Power output is calculated as the amount of energy\n taken out of the flow by the turbine drag term. This is in general\n an over-estimate of the 'usefully extractable' energy. Furthermore no\n density is included, so that assuming a density of 1000 kg/m^3 and\n all further quantities in SI, the power is measured in kW.\n\n Profit is calculated as:\n\n Profit = Power - break_even_wattage * Cost\n\n With the above assumptions, break_even_wattage should be specified in\n kW and can be interpreted as the average power per turbine required\n to 'break even', i.e. 
Profit=0.\n\n Power output and profit are time-integrated (simple first order) and\n time-averages are available as average_power and average_profit.\n \"\"\"\n def __init__(self, farm_options, subdomain_id, u, v, dt):\n \"\"\"\n :arg farm_options: a :class:`TidalTurbineFarmOptions` object that define the farm and the turbines used\n :arg int subdomain_id: the farm is restricted to this subdomain\n :arg u,v: the depth-averaged velocity field\n :arg float dt: used for time-integration.\"\"\"\n turbine_density = farm_options.turbine_density\n C_T = farm_options.turbine_options.thrust_coefficient\n A_T = pi*(farm_options.turbine_options.diameter/2.)**2\n C_D = C_T*A_T/2.*turbine_density\n self.power_integral = C_D * (u*u + v*v)**1.5 * dx(subdomain_id)\n # cost integral is n/o turbines = \\int turbine_density\n self.cost = assemble(turbine_density * dx(subdomain_id))\n self.break_even_wattage = farm_options.break_even_wattage\n self.dt = dt\n\n # time-integrated quantities:\n self.integrated_power = 0.\n self.average_power = 0.\n self.average_profit = 0.\n self.time_period = 0.\n\n def evaluate_timestep(self):\n \"\"\"Perform time integration and return current power and time-averaged power and profit.\"\"\"\n self.time_period = self.time_period + self.dt\n current_power = assemble(self.power_integral)\n self.integrated_power = self.integrated_power + current_power * self.dt\n self.average_power = self.integrated_power / self.time_period\n self.average_profit = self.average_power - self.break_even_wattage * self.cost\n return current_power, self.average_power, self.average_profit\n\n\nclass TurbineFunctionalCallback(DiagnosticCallback):\n \"\"\"\n :class:`.DiagnosticCallback` that evaluates the performance of each tidal turbine farm.\"\"\"\n\n name = 'turbine' # this name will be used in the hdf5 file\n variable_names = ['current_power', 'average_power', 'average_profit']\n\n def __init__(self, solver_obj, **kwargs):\n \"\"\"\n :arg solver_obj: a :class:`.FlowSolver2d` object containing the tidal_turbine_farms\n :arg kwargs: see :class:`DiagnosticCallback`\"\"\"\n nfarms = len(solver_obj.options.tidal_turbine_farms)\n super().__init__(solver_obj, array_dim=nfarms, **kwargs)\n\n solver_obj.create_equations()\n # TODO: was u, eta = split(solution)\n u, v, eta = solver_obj.fields.solution_2d\n dt = solver_obj.options.timestep\n\n self.farms = [TurbineFarm(farm_options, subdomain_id, u, v, dt) for subdomain_id, farm_options in solver_obj.options.tidal_turbine_farms.items()]\n \"\"\"The sum of the number of turbines in all farms\"\"\"\n self.cost = sum(farm.cost for farm in self.farms)\n if self.append_to_log:\n print_output('Number of turbines = {}'.format(self.cost))\n\n def __call__(self):\n return numpy.transpose([farm.evaluate_timestep() for farm in self.farms])\n\n def message_str(self, current_power, average_power, average_profit):\n return 'Current power, average power and profit for each farm: {}, {}, {}'.format(current_power, average_power, average_profit)\n\n @property\n def average_profit(self):\n \"\"\"The sum of the time-averaged profit output of all farms\"\"\"\n return sum(farm.average_profit for farm in self.farms)\n\n @property\n def average_power(self):\n \"\"\"The sum of the time-averaged power output of all farms\"\"\"\n return sum(farm.average_power for farm in self.farms)\n\n @property\n def integrated_power(self):\n \"\"\"The sum of the time-integrated power output of all farms\"\"\"\n return sum(farm.integrated_power for farm in self.farms)\n\n\nclass 
TurbineOptimisationCallback(DiagnosticOptimisationCallback):\n \"\"\"\n :class:`DiagnosticOptimisationCallback` that evaluates the performance of each tidal turbine farm during an optimisation.\n\n See the :py:mod:`optimisation` module for more info about the use of OptimisationCallbacks.\"\"\"\n name = 'farm_optimisation'\n variable_names = ['cost', 'average_power', 'average_profit']\n\n def __init__(self, solver_obj, turbine_functional_callback, **kwargs):\n \"\"\"\n :arg solver_obj: a :class:`.FlowSolver2d` object\n :arg turbine_functional_callback: a :class:`.TurbineFunctionalCallback` used in the forward model\n :args kwargs: see :class:`.DiagnosticOptimisationCallback`\"\"\"\n self.tfc = turbine_functional_callback\n super().__init__(solver_obj, **kwargs)\n\n def compute_values(self, *args):\n costs = [farm.cost.block_variable.saved_output for farm in self.tfc.farms]\n powers = [farm.average_power.block_variable.saved_output for farm in self.tfc.farms]\n profits = [farm.average_profit.block_variable.saved_output for farm in self.tfc.farms]\n return costs, powers, profits\n\n def message_str(self, cost, average_power, average_profit):\n return 'Costs, average power and profit for each farm: {}, {}, {}'.format(cost, average_power, average_profit)\n","sub_path":"thetis/turbines.py","file_name":"turbines.py","file_ext":"py","file_size_in_byte":6129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"333593574","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 7 09:53:15 2021\n\n@author: giaco\n\"\"\"\n\nimport tenpy\nimport copy\nimport sys\nimport numpy as np\nimport numpy.linalg as alg\nimport matplotlib.pyplot as plt\nfrom tenpy import models\nfrom tenpy.networks.site import SpinSite\nfrom tenpy.networks.site import FermionSite\nfrom tenpy.networks.site import BosonSite\nfrom tenpy.models.model import CouplingModel\nfrom tenpy.models.model import CouplingMPOModel\nfrom tenpy.models.spins import SpinModel\nfrom tenpy.algorithms import dmrg\nfrom tenpy.networks.mps import MPS\nfrom tenpy.models.lattice import Lattice\nfrom tenpy.tools.params import get_parameter\nimport tenpy.linalg.np_conserved as npc\nfrom tenpy.networks.mpo import MPO, MPOEnvironment\nimport tenpy.linalg.charges as charges\nfrom tenpy.models.lattice import Chain\nfrom scipy.linalg import expm\nfrom tenpy.models.fermions_spinless import FermionModel\nfrom tenpy.algorithms.tebd import Engine\nimport pickle\n\n\n\n\ndef sites(L):\n FSite=FermionSite('N', filling=0.5)\n\n sites=[]\n \n for i in range(L):\n sites.append(FSite)\n return sites\n\ndef product_state(L):\n ps=[]\n for i in range(int(L/2)):\n ps.append('empty')\n ps.append('full')\n return ps\nJ=1\ndt=0.05\nL=8\nV=0\nmu=0\nsteps=40\nsites=sites(L)\nps=product_state(L)\npsi=MPS.from_product_state(sites, ps)\npsi2=MPS.from_product_state(sites, ps)\nmodel_params={'bc_MPS':'finite', 'bc_x':'open', 'explicit_plus_hc':True, 'lattice':'Chain', 'J':J, 'conserve':'N', 'V':V, 'mu':mu, 'L':L}\nFC=tenpy.models.fermions_spinless.FermionChain(model_params)\nprint(FC.calc_H_bond()[0])\nverbose=True\ntrunc_param={'svd_min': 0.00000000000001, 'verbose': verbose, 'keys':'sorted'}\noptions={\n 'compression_method': 'SVD',\n 'trunc_param': trunc_param,\n 'keys':'sorted',\n 'verbose': verbose \n }\ntebd_params = {\n 'order': 2,\n 'delta_tau_list': [0.1, 0.01, 0.001, 1.e-4, 1.e-5],\n 'N_steps': 20,\n 'max_error_E': 1.e-8,\n 'trunc_params': {\n 'chi_max': 120,\n 'svd_min': 1.e-10\n },\n 'verbose': verbose,\n 
}\n\nID='GS_J_'+str(J)+'V_'+str(V)+'L_'+str(L)\n\n\"\"\"#Generate with IMTE\neng = Engine(psi, FC, tebd_params)\neng.run_GS()\n\"\"\"\ndmrg_params = {\n 'mixer': True, # setting this to True is essential for the 1-site algorithm to work.\n 'max_E_err': 1.e-18,\n 'trunc_params': {\n 'chi_max': 120,\n 'svd_min': 1.e-12\n },\n 'verbose': verbose,\n 'combine': False,\n # specifies single-site\n }\ninfo = dmrg.run(psi, FC, dmrg_params)\n\n\n\n\nplt.plot(psi.expectation_value('N'))\nplt.plot()\n\nwith open(ID+'DMRG.pkl', 'wb') as f:\n pickle.dump(psi, f)\n \n\n\n \n","sub_path":"Coherent_state/Fermion_GS_generator.py","file_name":"Fermion_GS_generator.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"209430534","text":"\"\"\"\n@name: Modules/House/Lighting/lighting.py\n@author: D. Brian Kimmel\n@contact: D.BrianKimmel@gmail.com\n@copyright: (c) 2010-2019 by D. Brian Kimmel\n@note: Created on Apr 2, 2010\n@license: MIT License\n@summary: Handle the home lighting system automation.\n\nPyHouse.House.Lighting.\n Buttons\n Controllers\n Lights\n Outlets\n\"\"\"\n\n__updated__ = '2019-09-05'\n__version_info__ = (19, 9, 1)\n__version__ = '.'.join(map(str, __version_info__))\n\n# Import system type stuff\n\n# Import PyHouse files\nfrom Modules.Core.Utilities import config_tools\nfrom Modules.House.Lighting.buttons import API as buttonsApi\nfrom Modules.House.Lighting.controllers import API as controllersApi, MqttActions as controllerMqtt\nfrom Modules.House.Lighting.lights import API as lightsApi, MqttActions as lightMqtt\nfrom Modules.House.Lighting.outlets import API as outletsApi\n\nfrom Modules.Core.Utilities.debug_tools import PrettyFormatAny\n\nfrom Modules.Core import logging_pyh as Logger\nLOG = Logger.getLogger('PyHouse.Lighting ')\n\nCONFIG_FILE_NAME = 'lighting.yaml'\n\n\nclass LightingInformation:\n \"\"\"\n ==> PyHouse.House.Lighting.xxx as in the def below\n \"\"\"\n\n def __init__(self):\n self.Buttons = None # ==> ButtonInformation()\n self.Controllers = None # ==> ControllerInformation()\n self.Lights = None # ==> LightInformation()\n self.Outlets = None # ==> OutletInformation\n\n\nclass ScheduleLightingInformation:\n \"\"\" This is the lighting specific part.\n \"\"\"\n\n def __init__(self):\n self.Type = 'Light'\n self.Brightness = 0\n self.Name = None # Light name\n self.Rate = 0\n self.Duration = None\n self.Room = None # Room Name\n\n\nclass MqttActions:\n \"\"\"\n \"\"\"\n\n def __init__(self, p_pyhouse_obj):\n self.m_pyhouse_obj = p_pyhouse_obj\n\n def decode(self, p_topic: list, p_message, p_logmsg) -> str:\n \"\"\"\n --> pyhouse//lighting//xxx\n \"\"\"\n p_logmsg += '\\tLighting: {}\\n'.format(self.m_pyhouse_obj.House.Name)\n # LOG.debug('MqttLightingDispatch Topic:{}'.format(p_topic))\n if p_topic[0] == 'button':\n pass\n # p_logmsg += but\n elif p_topic[0] == 'controller':\n p_logmsg += controllerMqtt(self.m_pyhouse_obj).decode(p_topic[1:], p_message)\n elif p_topic[0] == 'light':\n p_logmsg += lightMqtt(self.m_pyhouse_obj).decode(p_topic[1:], p_message)\n else:\n p_logmsg += '\\tUnknown Lighting sub-topic {}'.format(p_message)\n LOG.warn('Unknown Lighting Topic: {}'.format(p_topic[0]))\n return p_logmsg\n\n\nclass Config:\n \"\"\"\n \"\"\"\n\n def _update_lighting_from_yaml(self, _p_pyhouse_obj, p_node_yaml):\n \"\"\"\n \"\"\"\n l_lighting = {}\n try:\n l_yaml = p_node_yaml['Lighting']\n except:\n LOG.error('The \"Lighting\" tag is missing in the \"lighting.yaml\" file!')\n 
return None\n for l_key, l_val in l_yaml.items():\n LOG.debug('\\n\\tKey: {}\\n\\tValue: {}'.format(l_key, PrettyFormatAny.form(l_val, 'Lighting.Update', 190)))\n return l_lighting # For testing.\n\n def load_yaml_config(self, p_pyhouse_obj):\n \"\"\" Read the lighting.yaml file.\n It contains lighting data for the house.\n \"\"\"\n l_node = config_tools.Yaml(p_pyhouse_obj).read_yaml(CONFIG_FILE_NAME)\n return l_node # for testing purposes\n\n# ----------\n\n def _copy_to_yaml(self, p_pyhouse_obj):\n \"\"\" Create or Update the yaml information.\n The information in the YamlTree is updated to be the same as the running pyhouse_obj info.\n\n The running info is a dict and the yaml is a list!\n\n @return: the updated yaml ready information.\n \"\"\"\n try:\n l_node = p_pyhouse_obj._Config.YamlTree[CONFIG_FILE_NAME]\n l_config = l_node.Yaml['Lighting']\n except:\n l_node = config_tools.Yaml(p_pyhouse_obj).create_yaml_node('Lighting')\n l_config = l_node.Yaml['Lighting']\n LOG.debug(PrettyFormatAny.form(p_pyhouse_obj.House, 'PyHouseObj', 190))\n l_working = p_pyhouse_obj.House.Lighting.Lights\n for l_key in [l_attr for l_attr in dir(l_working) if not l_attr.startswith('_') and not callable(getattr(l_working, l_attr))]:\n l_val = getattr(l_working, l_key)\n setattr(l_config, l_key, l_val)\n p_pyhouse_obj._Config.YamlTree[CONFIG_FILE_NAME].Yaml['Lighting'] = l_config\n l_ret = {'Lighting': l_config}\n return l_ret\n\n def save_yaml_config(self, _p_pyhouse_obj):\n \"\"\"\n \"\"\"\n LOG.info('Saving Config - Version:{}'.format(__version__))\n # config_tools.Yaml(p_pyhouse_obj).write_yaml(l_config, CONFIG_FILE_NAME, addnew=True)\n # return l_config\n\n\nclass API:\n \"\"\" Handles all the components of the lighting sub-system.\n \"\"\"\n\n m_pyhouse_obj = None\n m_buttons = None\n m_controllers = None\n m_lights = None\n m_outlets = None\n\n def __init__(self, p_pyhouse_obj):\n LOG.info(\"Initialing - Version:{}\".format(__version__))\n p_pyhouse_obj.House.Lighting = LightingInformation()\n self.m_pyhouse_obj = p_pyhouse_obj\n #\n self.m_buttons = buttonsApi(p_pyhouse_obj)\n self.m_controllers = controllersApi(p_pyhouse_obj)\n self.m_lights = lightsApi(p_pyhouse_obj)\n self.m_outlets = outletsApi(p_pyhouse_obj)\n LOG.info(\"Initialized - Version:{}\".format(__version__))\n\n def LoadConfig(self):\n \"\"\" Load the Lighting xml info.\n \"\"\"\n LOG.info('Loading all Lighting config files.')\n Config().load_yaml_config(self.m_pyhouse_obj)\n self.m_buttons.LoadConfig()\n self.m_controllers.LoadConfig()\n self.m_lights.LoadConfig()\n self.m_outlets.LoadConfig()\n LOG.info('Loaded Lighting config files.')\n\n def Start(self):\n \"\"\" Allow loading of sub modules and drivers.\n \"\"\"\n LOG.info(\"Started.\")\n\n def SaveConfig(self):\n \"\"\" Save the Lighting section.\n It will contain several sub-sections\n \"\"\"\n LOG.info('SaveConfig')\n Config().save_yaml_config(self.m_pyhouse_obj)\n self.m_buttons.SaveConfig()\n self.m_controllers.SaveConfig()\n self.m_lights.SaveConfig()\n self.m_outlets.SaveConfig()\n LOG.info(\"Saved Lighting Config.\")\n return\n\n def Stop(self):\n \"\"\" Allow cleanup of all drivers.\n \"\"\"\n LOG.info(\"Stopping all lighting families.\")\n # self.m_pyhouse_obj._APIs.House.FamilyAPI.stop_lighting_families(self.m_pyhouse_obj)\n LOG.info(\"Stopped.\")\n\n def AbstractControlLight(self, p_device_obj, p_controller_obj, p_control):\n \"\"\"\n Insteon specific version of control light\n All that Insteon can control is Brightness and Fade Rate.\n\n @param p_controller_obj: 
optional ==> ControllerInformation\n @param p_device_obj: the device being controlled\n @param p_control: the idealized light control params\n \"\"\"\n if self.m_plm == None:\n LOG.info('No PLM was defined - Quitting.')\n return\n # l_api = FamUtil._get_family_device_api(self.m_pyhouse_obj, p_device_obj)\n self.m_plm.AbstractControlLight(p_device_obj, p_controller_obj, p_control)\n\n# ## END DBK\n","sub_path":"Project/src/Modules/House/Lighting/lighting.py","file_name":"lighting.py","file_ext":"py","file_size_in_byte":7464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"219606480","text":"import uuid\n\nimport floto.api\nimport floto.decider\nfrom floto.specs import ActivityTask, DeciderSpec\n\n#################################################\n## Run with: ##\n## shell 1: python examples/decider.py ##\n## shell 2: python examples/activity_worker.py ##\n#################################################\n\n# Register workflow_type if does not yet exist\nworkflow_type = floto.api.WorkflowType(domain='floto_test', name='my_workflow_type', version='v1')\n# swf.register_workflow_type(workflow_type)\n\n# Define the activity tasks and their dependencies\nactivity_task_2 = ActivityTask(name='activity2', version='v1')\nactivity_task_1 = ActivityTask(name='activity1', version='v1', requires=[activity_task_2],\n input={'task_input': '4'})\n# activity_task_1 = ActivityTask(name='activity1', version='v1')\n\nrs = floto.specs.retry_strategy.InstantRetry(retries=5)\nactivity_task_fail = ActivityTask(name='activity_fails_3', version='v1', retry_strategy=rs)\n\n# Create a decider spec\n# activity_tasks = [activity_task_fail]\nactivity_tasks = [activity_task_1, activity_task_2]\ndecider_spec = DeciderSpec(domain='floto_test',\n activity_tasks=activity_tasks,\n task_list='your_decider_task_list',\n activity_task_list='floto_activities')\n\ntask_list = str(uuid.uuid4())\ndecider = floto.decider.Decider(decider_spec=decider_spec)\n\nprint(decider_spec.to_json())\n# Start workflow execution\nswf = floto.api.Swf()\n# swf.start_workflow_execution(domain='floto_test',\n# workflow_type_name=workflow_type.name,\n# workflow_type_version=workflow_type.version,\n# task_list=task_list,\n# input={'foo':'bar'})\n\ndecider.run()\ntry:\n # Decider runs in separate process and terminates when all activity tasks have finished\n decider.run()\nexcept Exception as e:\n print(e)\n swf.terminate_workflow_execution(domain='floto_test', workflow_id='my_workflow_type_v1')\n","sub_path":"examples/decider.py","file_name":"decider.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"212955092","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 11 23:49:01 2019\n\n@author: Abbas\n\"\"\"\n# =============================================================================\n# import\n# =============================================================================\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn.gaussian_process import GaussianProcessRegressor as GPR\nfrom sklearn.gaussian_process.kernels import RBF, WhiteKernel as WK,\\\nExpSineSquared as ESS, RationalQuadratic as RQ, Matern as M\n\n# =============================================================================\n# help\n# =============================================================================\ndef help_GP():\n '''\n In the case of Gaussian process regression, \n the 
system response is assumed to be a realization from a Gaussian process.\n    In the following figures, overlaid colored lines represent five realizations of the Gaussian process.\n    \n    Ref: Probabilistic Machine Learning for Civil Engineers, J. Goulet, 2018.\n    The code has been written by Abbas SheikhMohammadZadeh and modified by M.R. Moeini.\n    \n    Instruction:\n    Write your data (observations) as a vector (for x and y).\n    \n    '''\n\n# =============================================================================\n# Write your data here (observations)\n# =============================================================================\nX_obs = np.array(np.array([90.27, 6.21, 2.23, 35.54, 82.88, 29.54, 38.65, 93.32, 97.29]), ndmin=2).T\nf_obs = np.array([10.03, 6.43, 6.10, 5.89, 7.98, 5.99, 6.10, 10.67, 11.03])\n\n\n# =============================================================================\n# Function to show the summary of the fit\n# =============================================================================\ndef summary(gp):\n    optimized = gp.optimizer is not None\n    if not optimized:\n        s1 = \"Fitted Kernel(not optimized)\\n\\n%s\" % gp.kernel_\n    else:\n        s1 = \"Fitted Kernel(Optimized)\\n\\n%s\" % gp.kernel_\n    s2 = \"\\n\\nlog marginal likelihood: %.5f\" % gp.log_marginal_likelihood(gp.kernel_.theta)\n    print(s1 + s2 + '\\n')\n    \n\n# =============================================================================\n# Gaussian process regression\n# =============================================================================\n# Specify a kernel\nkernel = 1 * RBF(1, (1e-2, 1e2))\ngp = GPR(kernel=kernel, alpha = 0, n_restarts_optimizer=9)\n\n# Fit to data & optimize hyperparameters w.r.t. maximizing marginal likelihood\ngp.fit(X_obs, f_obs)\nsummary(gp)\n\n# Make a prediction on several test points\nX_test = np.array(np.linspace(0, 100, 100), ndmin = 2).T\nf_mean, f_var = gp.predict(X_test, return_std=True)\n\n# Create a figure\nfig_noise_free = plt.figure(figsize = (20,12))\nplt.rcParams.update({'font.size': 20})\n\n# Mark the observations\nplt.plot(X_obs, f_obs, 'ro', label='observations')\n\n# Draw a 
mean function and 95% confidence interval\nplt.plot(X_test, f_mean, 'b-', label='mean function')\nupper_bound = f_mean + 1.96 * f_var\nlower_bound = f_mean - 1.96 * f_var\nplt.fill_between(X_test.ravel(), lower_bound, upper_bound, color = 'b', alpha = 0.1,\n label='95% confidence interval')\n\n# Draw samples from the posterior and plot\nX_samples = np.array(np.linspace(0, 100, 100), ndmin = 2).T\nseed = np.random.randint(10) # random seed\nplt.plot(X_samples, gp.sample_y(X_samples, n_samples = 10, random_state = seed), ':')\n\n# Plot\nplt.xlabel('$x$', fontsize=20)\nplt.ylabel('$y$', fontsize=20)\nplt.xlim(X_test.min(), X_test.max())\nplt.legend(loc='upper left')\nplt.title('A GP posterior with Noisy observations(RBF Kernel)')\nplt.show()","sub_path":"GPR_python.py","file_name":"GPR_python.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"267860451","text":"def main():\n originalPic = makePicture( pickAFile() )\n show( originalPic )\n picReduced = reducedPic ( originalPic )\n show( picReduced )\n \ndef reducedPic( originalPic ):\n pic = duplicatePicture( originalPic )\n width = getWidth( pic )\n height = getHeight( pic )\n if width%2 == 0:\n widthR = width/2\n else:\n widthR = ( width/2 )+1\n \n if height%2 == 0:\n heightR = height/2\n else:\n heightR = ( height/2 )+1\n reducedPic = makeEmptyPicture( widthR, heightR )\n \n for x in range( 0, width ):\n for y in range( 0, height ):\n px = getPixel( pic, x, y )\n color = getColor( px )\n \n if x%2 == 0 and y%2 == 0:\n setColor( getPixelAt( reducedPic, x/2, y/2 ), color ) \n return reducedPic","sub_path":"JYTHON/Budhathoki_question_10.py","file_name":"Budhathoki_question_10.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"152805166","text":"import numpy as np\nimport sys\nsys.path.append('../fairness_metrics')\n\nfrom base_metric import BaseMetric\nimport warnings\n\ndef warning_on_one_line(message, category, filename, lineno, file=None, line=None):\n return ' %s:%s: %s:%s' % (filename, lineno, category.__name__, message)\n\nclass FairnessMetrics(BaseMetric):\n\n def __init__(self,positive_label_name=None):\n self.name = 'Fairness Metrics Object'\n self.positive_label_name = positive_label_name\n warnings.formatwarning = warning_on_one_line\n if self.positive_label_name == None:\n warnings.warn('No positive label name specified. 
Running the Generalized Entropy Index or Theil Index methods will not work.')\n\n    def statistical_parity_diff(self,predictions, priviliged_group, unpriviliged_group):\n\n        '''\n        Statistical parity denotes that an equal proportion of each \n        protected category in question (e.g., race) is \n        positively classified in the case of a binary classification (e.g., gender classification).\n\n\n        E[d(X)|g(X)] = E[d(X)]\n\n        Here we calculate the difference in statistical parity:\n        P(Y_hat=1 | group = unprivileged) - P(Y_hat = 1 | group = privileged)\n        ===========================================================\n        Args: \n            predictions: predictions Y_hat of random variable Y\n            priviliged_group : indices of individuals in the privileged group\n            unpriviliged_group : indices of individuals in the unprivileged group\n            \n\n        Returns:\n            Statistical parity difference between the privileged and unprivileged groups \n            (works ONLY with binary classification at this point)\n        '''\n        priviliged_predictions = predictions[priviliged_group]\n        unpriviliged_predictions = predictions[unpriviliged_group]\n\n        return self.num_group_instances(unpriviliged_predictions,1)/len(unpriviliged_group) - self.num_group_instances(priviliged_predictions,1)/len(priviliged_group)\n\n    def disparate_impact(self,predictions,priviliged_group,unpriviliged_group):\n        '''\n        Disparate Impact is the ratio of predictions for a \"positive\" outcome \n        in a binary classification task between members of protected and unprotected groups, respectively.\n\n        \\frac{Pr(\\hat{Y} = 1 | D = \\text{unprivileged})}\n        {Pr(\\hat{Y} = 1 | D = \\text{privileged})}\n        ==================================================================================================\n        Args:\n            privileged_group_idx (numpy array): Index of subarray corresponding to `privileged_groups`\n            unprivileged_group_idx (numpy array): Index of subarray corresponding to `unprivileged_groups`\n            predictions (numpy array): predictions Y_hat of random variable Y\n\n        Returns:\n            Disparate impact ratio between the privileged and unprivileged groups \n            (works ONLY with binary classification at this point)\n\n        '''\n\n        priviliged_predictions = predictions[priviliged_group]\n        unpriviliged_predictions = predictions[unpriviliged_group]\n\n        return (self.num_group_instances(priviliged_predictions,1)/len(priviliged_group))/(self.num_group_instances(unpriviliged_predictions,1)/len(unpriviliged_group))\n\n\n\n    def avg_odds_diff(self,ground_truth, predictions,unpriviliged_group_idx,priviliged_group_idx, groups = None):\n        \"\"\"\n        The average odds denote the average of the differences in FPR and TPR for the unprivileged and privileged groups:\n        \n        \\tfrac{1}{2}\\left[(FPR_{D = \\text{unprivileged}} - FPR_{D = \\text{privileged}})\n        + (TPR_{D = \\text{unprivileged}} - TPR_{D = \\text{privileged}})\\right]\n        ======================================================================\n        Args:\n            predictions: predictions Y_hat of random variable Y\n            priviliged_group : indices of individuals in the privileged group\n            unpriviliged_group : indices of individuals in the unprivileged group\n            \n\n        Returns:\n            Average odds difference between the privileged and unprivileged groups \n            (works ONLY with binary classification at this point)\n\n        \"\"\"\n        results_priviliged = self.performance_measures(ground_truth,predictions,group_idx= priviliged_group_idx,group_membership=True)\n        results_unpriviliged = self.performance_measures(ground_truth,predictions,group_idx= unpriviliged_group_idx,group_membership=True)\n        FPR_unpriviliged, TPR_unpriviliged = results_unpriviliged['FPR'],results_unpriviliged['TPR']\n        FPR_priviliged, TPR_priviliged = results_priviliged['FPR'],results_priviliged['TPR']\n\n        return 0.5*(FPR_unpriviliged - FPR_priviliged) + 0.5*(TPR_unpriviliged-TPR_priviliged)\n\n    def equal_opportunity_diff(self,ground_truth, predictions,unpriviliged_group_idx,priviliged_group_idx):\n        \"\"\"Return the difference in true positive rates, :math:`TPR = TP/P`, between the\n        unprivileged and privileged groups, optionally conditioned on protected\n        attributes.\n\n        Adapted from IBM360 Research.\n\n        Args:\n            privileged_group_idx (numpy array): Index of subarray corresponding to `privileged_groups`\n            unprivileged_group_idx (numpy array): Index of subarray corresponding to `unprivileged_groups`\n            predictions (numpy array): predictions Y_hat of random variable Y\n\n        Returns:\n            Equal opportunity difference between the privileged and unprivileged groups \n            (works ONLY with binary classification at this point)\n        \"\"\"\n        \n        return self.performance_measures(ground_truth,predictions,group_idx= unpriviliged_group_idx,group_membership=True)['TPR'] \\\n        - self.performance_measures(ground_truth,predictions,group_idx= priviliged_group_idx,group_membership=True)['TPR']\n\n    def predictive_equality_diff(self,ground_truth, predictions,unpriviliged_group_idx,priviliged_group_idx):\n        \"\"\"\n        We define predictive equality as the situation when the accuracy \n        of decisions is equal across race groups, as measured by the false positive rate (FPR). \n        Drawing the analogy of gender classification, across all race groups, \n        the ratio of men incorrectly predicted to be women is the same.\n\n        More formally,\n\n        E[d(X)|Y=0, g(X)] = E[d(X), Y=0]\n\n        Args:\n            privileged_group_idx (numpy array): Index of subarray corresponding to `privileged_groups`\n            unprivileged_group_idx (numpy array): Index of subarray corresponding to `unprivileged_groups`\n            predictions (numpy array): predictions Y_hat of random variable Y\n\n        Returns:\n            Predictive equality difference between the privileged and unprivileged groups \n            (works ONLY with binary classification at this point)\n        \"\"\"\n        \n        return self.performance_measures(ground_truth,predictions,group_idx= unpriviliged_group_idx,group_membership=True)['FPR'] \\\n        - self.performance_measures(ground_truth,predictions,group_idx= priviliged_group_idx,group_membership=True)['FPR']\n    \n    def FNR_diff(self,ground_truth, predictions,unpriviliged_group_idx,priviliged_group_idx):\n        '''\n        The equality of the false negative rates across groups is an important fairness metric as well.\n        \n        In practice, this metric is implemented as a difference between the metric value for the protected\n\n        and unprotected groups.\n\n        E[d(X)=0\\mid Y=1, g(X)] = E[d(X)=0, Y=1]\n        ===================================================\n\n        Args:\n            privileged_group_idx (numpy array): Index of subarray corresponding to `privileged_groups`\n            unprivileged_group_idx (numpy array): Index of subarray corresponding to `unprivileged_groups`\n            predictions (numpy array): predictions Y_hat of random variable Y\n\n        Returns:\n            False negative rate difference between the privileged and unprivileged groups \n            (works ONLY with binary classification at this point)\n\n\n        '''\n        return self.performance_measures(ground_truth,predictions,group_idx= unpriviliged_group_idx,group_membership=True)['FNR'] \\\n        - self.performance_measures(ground_truth,predictions,group_idx= priviliged_group_idx,group_membership=True)['FNR']\n\n    def generalized_entropy_index(self,true_labels, predicted_labels, alpha=2):\n        r\"\"\"Generalized entropy index is proposed as a 
unified individual and\n group fairness measure in [3]_. With :math:`b_i = \\hat{y}_i - y_i + 1`:\n\n >>>>>> Method adapted from IBM360 AI research group. <<<<<<<\n\n .. math::\n\n \\mathcal{E}(\\alpha) = \\begin{cases}\n \\frac{1}{n \\alpha (\\alpha-1)}\\sum_{i=1}^n\\left[\\left(\\frac{b_i}{\\mu}\\right)^\\alpha - 1\\right],& \\alpha \\ne 0, 1,\\\\\n \\frac{1}{n}\\sum_{i=1}^n\\frac{b_{i}}{\\mu}\\ln\\frac{b_{i}}{\\mu},& \\alpha=1,\\\\\n -\\frac{1}{n}\\sum_{i=1}^n\\ln\\frac{b_{i}}{\\mu},& \\alpha=0.\n \\end{cases}\n\n Args:\n alpha (int): Parameter that regulates the weight given to distances\n between values at different parts of the distribution.\n \n true_labels (numpy array): ground truth of random variable Y\n predicted_labels (numpy array): predictions Y_hat of random variable Y\n\n Returns:\n General Entropy Index of the CLassifier\n (works ONLY with binary classification at this point)\n\n References:\n .. [3] T. Speicher, H. Heidari, N. Grgic-Hlaca, K. P. Gummadi, A. Singla, A. Weller, and M. B. Zafar,\n \"A Unified Approach to Quantifying Algorithmic Unfairness: Measuring Individual and Group Unfairness via Inequality Indices,\"\n ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 2018.\n \"\"\"\n y_pred = predicted_labels\n y_true = true_labels\n y_pred = (y_pred == self.positive_label_name).astype(\n np.float64)\n y_true = (y_true == self.positive_label_name).astype(np.float64)\n b = 1 + y_pred - y_true\n\n if alpha == 1:\n # moving the b inside the log allows for 0 values\n return np.mean(np.log((b / np.mean(b))**b) / np.mean(b))\n elif alpha == 0:\n return -np.mean(np.log(b / np.mean(b)) / np.mean(b))\n else:\n return np.mean((b / np.mean(b))**alpha - 1) / (alpha * (alpha - 1))\n\n def theil_index(self,true_labels,predicted_labels):\n '''\n The Theil index is the generalized entropy index with $\\alpha = 1$. 
\n See Generalized Entropy index.\n \n Args:\n true_labels (numpy array): ground truth of random variable Y\n predicted_labels (numpy array): predictions Y_hat of random variable Y\n\n Returns:\n Theil Index of the CLassifier\n (works ONLY with binary classification at this point)\n '''\n \n return self.generalized_entropy_index(true_labels = true_labels, predicted_labels = predicted_labels, alpha = 1)\n","sub_path":"FairML/fairness_metrics/fairness_metrics.py","file_name":"fairness_metrics.py","file_ext":"py","file_size_in_byte":11156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"24643332","text":"import random\nimport os\nimport numpy as np\nfrom PIL import Image\n\ntrain_path = './convert/'\ntest_path = './test/'\n\ndata_size = 1000\nc_size = 128\nimgs = list(filter(lambda x: x != 'Thumbs.db', os.listdir(train_path)))\nrandom.shuffle(imgs)\nimgs = imgs[:data_size]\ntest_dir = list(filter(lambda x: x != 'Thumbs.db', os.listdir(test_path)))\n\n\ndef get_test():\n datas = []\n for d in test_dir:\n im = Image.open(test_path + d)\n datas.append(np.array(im))\n\n datas = np.array(datas)\n\n labels = []\n\n for d in test_dir:\n tmp = [0, 0]\n if d.split('.')[0] == 'cat':\n tmp[0] = 1\n else:\n tmp[1] = 1\n\n labels.append(tmp)\n\n labels = np.array(labels)\n\n return (datas, labels)\n\n\ndef get_img(page, size):\n t = []\n\n for d in imgs[page * size:(page + 1) * size]:\n im = Image.open(train_path + d)\n t.append(np.array(im))\n\n t = np.array(t)\n\n return t\n\n\n# cat : 0\n# dog : 1\ndef get_label(page, size):\n t = []\n\n for d in imgs[page * size:(page + 1) * size]:\n tmp = [0, 0]\n if d.split('.')[0] == 'cat':\n tmp[0] = 1\n else:\n tmp[1] = 1\n\n t.append(tmp)\n\n t = np.array(t)\n return t\n\n\ndef get_batch(page, size):\n return (get_img(page, size), get_label(page, size))\n","sub_path":"ch2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"588206285","text":"from django.forms import DateInput\nfrom django import forms\nfrom .models import Event, Approval\n\nclass EventForm(forms.ModelForm):\n class Meta:\n model = Event\n # datetime-local is a HTML5 input type, format to make date time show on fields\n widgets = {\n 'date': DateInput(attrs={'type': 'datetime-local'}, format='%Y-%m-%dT%H:%M'),\n # 'end_time': DateInput(attrs={'type': 'datetime-local'}, format='%Y-%m-%dT%H:%M'),\n }\n fields = '__all__'\n exclude = ['user','calendar','color','approved']\n\n def __init__(self, *args, **kwargs):\n super(EventForm, self).__init__(*args, **kwargs)\n # input_formats parses HTML5 datetime-local input to datetime field\n self.fields['start_time'].input_formats = ('%Y-%m-%dT%H:%M',)\n # self.fields['end_time'].input_formats = ('%Y-%m-%dT%H:%M',)\n\nclass ApprovalForm(forms.ModelForm):\n def __init__(self, *args, **kwargs): \n super(ApprovalForm, self).__init__(*args, **kwargs)\n self.fields['comment'].widget.attrs['rows'] = 2\n class Meta:\n model = Approval\n fields = ['comment', 'month']\n \n\n\n","sub_path":"HR_tasks/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"475164495","text":"# -*- coding: utf-8 -*-\n\"\"\"This file is a BtkLpkia spider created on top of the ATSSpider\nscrapy crawl btklpkia -a url=\"http://btk.lpkia.ac.id/job\" -a mining_job_id=999 -a iteration=1 -a 
extract=1\nsample url:\n http://btk.lpkia.ac.id/job\n\"\"\"\nfrom re import compile\nfrom urlparse import urljoin\n\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, HtmlFormatter, ConvertDateString, NormalizedJoin\n\n\nclass BtkLpkia(ATSSpider):\n\n name = \"btklpkia\"\n ref_re = compile(\"job/(\\d+)/\")\n\n months_map = {\n \"juni\": \"june\",\n \"januari\": \"january\",\n \"oktober\": \"october\",\n \"juli\": \"july\",\n \"mei\": \"may\",\n \"november\": \"november\",\n \"september\": \"september\",\n \"maret\": \"march\",\n \"februari\": \"february\",\n \"agustus\": \"august\",\n \"april\": \"april\",\n \"desember\": \"december\",\n }\n\n def parse(self, response):\n sel = Selector(response)\n jobs = sel.xpath(\"//div[@class='BTK_Joblist']\")\n for job in jobs:\n job_link = job.xpath(\".//a[@class='title']/@href\").extract()\n if job_link:\n job_url = urljoin(response.url, job_link[0])\n meta = {\n 'title': job.xpath(\n \".//a[@class='title']/text()\"\n ).extract(),\n 'date': job.xpath(\n \".//span/b[contains(text(),'Posted :')]/following-sibling::text()\"\n ).extract(),\n 'location': job.xpath(\n \".//span[b[contains(text(),'Lokasi :')]]/text()\"\n ).extract(),\n 'jobtype': job.xpath(\n \".//span[b[contains(text(),'Tipe Pekerjaan :')]]/span/text()\"\n ).extract(),\n 'expiration_date': job.xpath(\n \".//span[contains(text(),'Batas Pengumpulan Lamaran :')]/following-sibling::text()\"\n ).extract(),\n }\n yield Request(\n url=job_url, meta=meta, callback=self.parse_job_callback()\n )\n\n next_page = job.xpath(\n \"//a[contains(text(),'Selanjutnya')]/@href\"\n ).extract()\n if next_page:\n yield Request(\n url=urljoin(response.url, next_page[0]),\n callback=self.parse\n )\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n loader.add_value('url', response.url)\n loader.add_value('title', response.meta['title'])\n loader.add_value('location', response.meta['location'])\n loader.add_value('jobtype', response.meta['jobtype'])\n loader.add_value(\n 'date', response.meta['date'], self.format_date,\n NormalizedJoin(\" \"), ConvertDateString(\"%d %B %Y\")\n )\n loader.add_value(\n 'expiration_date', response.meta['expiration_date'],\n self.format_date, NormalizedJoin(\" \"),\n ConvertDateString(\"%d %B %Y\")\n )\n loader.add_xpath(\n 'description',\n \"//div[@class='btk-tabContent']/div[@class='row-fluid' and not(.//strong[contains(text(),'Kualifikasi :')])]\",\n HtmlFormatter()\n )\n loader.add_xpath(\n 'qualifications',\n \"//div[@class='btk-tabContent']/div[@class='row-fluid' and .//strong[contains(text(),'Kualifikasi :')]]\",\n HtmlFormatter()\n )\n loader.add_value(\n 'referencenumber', response.url, Prefix(\"%s-\" % self.name),\n re=self.ref_re\n )\n yield loader.load_item()\n\n def format_date(self, date_list=[]):\n if date_list:\n date_split = date_list[0].strip().split(\" \")\n if len(date_split) >= 3:\n month = date_split[1].lower()\n if month in self.months_map:\n date_split[1] = self.months_map[month]\n return date_split\n","sub_path":"brightcorp/brightcorp/spiders/btklpkia.py","file_name":"btklpkia.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"581092269","text":"#\n# netlab read command\n#\n# Read network topology, add default settings, and dump the results\n#\nimport 
typing\nimport sys\nimport argparse\n\nfrom . import common_parse_args, topology_parse_args\nfrom .. import read_topology,common\nfrom ..augment.main import transform_setup\n\n#\n# CLI parser for 'netlab read' command\n#\ndef read_topology_parse(args: typing.List[str]) -> argparse.Namespace:\n parser = argparse.ArgumentParser(\n parents=[ common_parse_args(),topology_parse_args() ],\n prog=\"netlab read\",\n description='Read network topology, add default settings, and dump the results')\n\n parser.add_argument(dest='topology', action='store', help='Topology file')\n parser.add_argument(\n '-o','--output',\n dest='output',\n type=argparse.FileType('w'),\n default=sys.stdout,\n action='store',\n help='Output file')\n return parser.parse_args(args)\n\ndef run(cli_args: typing.List[str]) -> None:\n args = read_topology_parse(cli_args)\n common.set_logging_flags(args)\n topology = read_topology.load(args.topology,args.defaults,\"package:topology-defaults.yml\")\n read_topology.add_cli_args(topology,args)\n common.exit_on_error()\n\n transform_setup(topology)\n args.output.write(topology.to_yaml())\n","sub_path":"netsim/cli/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"124676035","text":"\"\"\"Performs face alignment and stores face thumbnails in the output directory.\"\"\"\n\n# MIT License\n# \n# Copyright (c) 2016 David Sandberg\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom scipy import misc\nimport sys\nimport os\nimport argparse\nimport random\nimport align_dlib # @UnresolvedImport\nimport dlib\nimport facenet\nimport cv2\n\nimport mxnet as mx\nimport pdb\nfrom lightened_moon import lightened_moon_feature\nimport numpy as np\nimport face_recognition\nimport json\n\n#two tasks to do\n\n#COMPLETE 1) given a video, output JSON of all the features for each face found\n #probably also need to assign each face an ID, and keep recognition throughout the frames\n\n#IN PROGRESS 2) given a video and a JSON containing that information, produce a frame-by-frame visualization of the faces/attributes detected\n\ndef main(args):\n\n #MOON feature extractor; not sure how to make this a modular component\n symbol = lightened_moon_feature(num_classes=40, use_fuse=True)\n\n #the detector passed in from the command line; requires files in facenet/data/\n detector = align_dlib.AlignDlib(os.path.expanduser(args.dlib_face_predictor))\n\n landmarkIndices = align_dlib.AlignDlib.OUTER_EYES_AND_NOSE\n\n if args.landmarkIndices is not None:\n landmarkIndices = args.landmarkIndices\n\n video = cv2.VideoCapture(args.input_video)\n\n devs = mx.cpu()\n\n #begin to iterate over the frames and process them\n ret, frame = video.read()\n\n #a list of dictionaries containing face_output for each frame\n total_output = []\n\n #maps encoding matrix to id number\n known_faces_dict = dict()\n known_faces_encoding = []\n id_count = 0\n\n while ret is True:\n face_boxes = detector.getAllFaceBoundingBoxes(frame)\n\n id_attr, known_faces_dict, known_faces_encoding, id_count = processFrame(args, frame, known_faces_dict, known_faces_encoding, id_count, symbol, detector, landmarkIndices, devs, face_boxes)\n\n total_output.append(id_attr)\n\n ret, frame = video.read()\n\n #==========CONVERT TO JSON FILE===============\n print('done processing; converting to json')\n #print(total_output)\n #ith element represents the ith frame\n frame_num = 0\n json_output = '{\\r\\n\"video\":\\r\\n{\\r\\n\"frames\":\\r\\n[\\r\\n'\n for frame_info in total_output:\n #begin the num-faces entry\n json_output += '{\\r\\n\"num\": '+str(frame_num)+',\\r\\n'\n if len(frame_info.keys()) == 0:\n #if this still isnt valid, try doing \"faces\": 0 and closing the field\n # remove last occurrence of comma\n k = json_output.rfind(',')\n json_output = json_output[:k] + json_output[k+1:]\n json_output += '},\\r\\n' #close the num-faces entry; no faces field\n frame_num += 1\n continue\n json_output += '\"faces\":\\r\\n[\\r\\n'\n # process the face information in frame_info in a loop\n for face in frame_info.keys():\n #get actual content, which is a list\n #content shouldnt ever be empty, because there exists a key\n #TODO may be a bug bc of this assumption\n content = frame_info[face]\n pid = content[0]\n\n #check if content is length > 1\n #there may be an individual with 0 yes-attributes\n if len(content) == 3:\n #attributes will contain the topleft,bottomright coordinates,\n #followed by the attributes themselves\n attributes = content[1:len(content)-1]\n attributes.extend(['Negatives'])\n d = {pid:attributes} #looks like 0:[]\n json_output += 
json.dumps(d)+',\\r\\n'\n continue\n\n #attributes will contain the topleft, bottomright coordinates\n #followed by the attributes themselves\n attributes = content[1:len(content)-1]\n d = {pid:attributes}\n #now we have the proper split\n json_output += json.dumps(d)+',\\r\\n'\n #outside of loop\n # remove last occurrence of comma\n k = json_output.rfind(',')\n json_output = json_output[:k] + json_output[k+1:]\n json_output += ']\\r\\n' #close the faces array\n json_output += '},\\r\\n' #close the num-faces entry\n frame_num += 1\n # remove last occurrence of comma\n k = json_output.rfind(',')\n json_output = json_output[:k] + json_output[k+1:]\n json_output += '\\r\\n]\\r\\n}\\r\\n}'\n\n d = json.loads(json_output)\n json_output = json.dumps(d, indent=4, separators=(',', ': '))\n\n #write out to file\n print('done converting to json; writing to file')\n f = open('output.json', 'wb')\n f.write(json_output)\n f.close()\n print('done!')\n\ndef processFrame(args, frame, known_faces_dict, known_faces_encoding, id_count, symbol, detector, landmarkIndices, devs, face_boxes):\n\n if len(face_boxes) == 0:\n print('cannot find faces')\n\n #list where first entry is id, 2nd and 3rd entries are bounding box coordiantes, \n #and rest are attributes for that id\n #key is the face number, but that is not too relevant\n id_attr = dict()\n\n face_num = 0\n for box in face_boxes:\n\n #=========CROP FACE==========\n pad = [0.25, 0.25, 0.25, 0.25]\n left = int(max(0, box.left() - box.width()*float(pad[0])))\n top = int(max(0, box.top() - box.height()*float(pad[1])))\n right = int(min(frame.shape[1], box.right() + box.width()*float(pad[2])))\n bottom = int(min(frame.shape[0], box.bottom()+box.height()*float(pad[3])))\n\n cropped_face = frame[top:bottom, left:right]\n\n #align if specified\n if args.align == 1:\n scale = float(args.face_size) / args.image_size\n cropped_face = detector.align(args.image_size, cropped_face, landmarkIndices=landmarkIndices, skipMulti=False, scale=scale)\n\n if cropped_face is None:\n print('failed to align! 
will not save this face\\'s attributes or ID')\n continue\n\n #array of two points: top left, bottom right\n bounding_box_coordinates = [(top,left), (bottom,right)]\n\n #=========DRAW BOUNDING BOX=========\n '''\n pad = [0.25, 0.25, 0.25, 0.25]\n left = int(max(0, box.left() - box.width()*float(pad[0])))\n top = int(max(0, box.top() - box.height()*float(pad[1])))\n right = int(min(img.shape[1], box.right() + box.width()*float(pad[2])))\n bottom = int(min(img.shape[0], box.bottom()+box.height()*float(pad[3])))\n\n cv2.rectangle(img, (left, top), (right, bottom), (0,255,0),3) \n '''\n #========EXTRACT ATTRIBUTES=======\n # crop face area and resize as feature input\n gray = cv2.cvtColor(cropped_face, cv2.COLOR_BGR2GRAY)\n gray = cv2.resize(gray, (128, 128))/255.0\n temp_img = np.expand_dims(np.expand_dims(gray, axis=0), axis=0)\n # get pred\n _, arg_params, aux_params = mx.model.load_checkpoint(args.model_load_prefix, args.model_load_epoch)\n arg_params['data'] = mx.nd.array(temp_img, devs)\n exector = symbol.bind(devs, arg_params ,args_grad=None, grad_req=\"null\", aux_states=aux_params)\n exector.forward(is_train=False)\n exector.outputs[0].wait_to_read()\n output = exector.outputs[0].asnumpy()\n text = [\"5_o_Clock_Shadow\",\"Arched_Eyebrows\",\"Attractive\",\"Bags_Under_Eyes\",\"Bald\", \"Bangs\",\"Big_Lips\",\"Big_Nose\",\n \"Black_Hair\",\"Blond_Hair\",\"Blurry\",\"Brown_Hair\",\"Bushy_Eyebrows\",\"Chubby\",\"Double_Chin\",\"Eyeglasses\",\"Goatee\",\n \"Gray_Hair\", \"Heavy_Makeup\",\"High_Cheekbones\",\"Male\",\"Mouth_Slightly_Open\",\"Mustache\",\"Narrow_Eyes\",\"No_Beard\",\n \"Oval_Face\",\"Pale_Skin\",\"Pointy_Nose\",\"Receding_Hairline\",\"Rosy_Cheeks\",\"Sideburns\",\"Smiling\",\"Straight_Hair\",\n \"Wavy_Hair\",\"Wearing_Earrings\",\"Wearing_Hat\",\"Wearing_Lipstick\",\"Wearing_Necklace\",\"Wearing_Necktie\",\"Young\"]\n #pred = np.ones(40)\n\n attr_list = []\n i = 0\n for num in output[0]:\n if num > 0:\n attr_list.append(text[i])\n i+=1\n #=========WRITE ATTRIBUTES W/ YES NEXT TO BOUNDING BOX============\n '''\n yes_attributes = []\n index = 0\n\n for num in output[0]:\n if num > 0:\n yes_attributes.append(text[index])\n index+=1\n\n pad = 20\n for attr in yes_attributes:\n cv2.putText(img, attr, (right, top), cv2.FONT_HERSHEY_SIMPLEX, 0.25, (0,0,255), 2)\n top = top + pad\n '''\n #========WRITE ATTRIBUTES AND ID TO DICT===========\n face_enc = face_recognition.face_encodings(cropped_face)\n\n if face_enc is None:\n #print('didnt catch this face')\n continue\n\n if len(face_enc) == 0:\n #print('didnt catch this face with face_recognition')\n continue\n\n #print('got the encoding; flattening and using first element as hash index')\n face_enc = face_enc[0]\n #print(face_enc.shape)\n face_enc_hashable = face_enc.flatten()[0]\n #print(face_enc_hashable[0])\n\n if len(known_faces_encoding) == 0:\n #print('first known face!')\n known_faces_dict[face_enc_hashable] = id_count\n known_faces_encoding = [face_enc]\n id_l = [id_count]\n id_l.extend(bounding_box_coordinates)\n id_l.extend(attr_list)\n\n #save into the dictionary; id_l format is [id, (topleft), (bottomright), attr...]\n id_attr[face_num] = id_l\n id_count += 1\n #print('added to dict of known faces')\n continue\n\n #print('comparing with list of known faces')\n compare_results = face_recognition.compare_faces(known_faces_encoding, face_enc)\n\n index = 0\n identifier = None\n #print(compare_results)\n #print('done comparisons on known faces; looking for a match')\n while index < len(compare_results):\n result = 
compare_results[index]\n if result:\n identifier = known_faces_encoding[index]\n break\n index += 1\n\n if identifier is None:\n #print('no match; adding this face to the list with new id')\n #add to dict and known encodings\n known_faces_encoding = np.append(known_faces_encoding, [face_enc], axis=0)\n known_faces_dict[face_enc_hashable] = id_count\n id_l = [id_count]\n id_l.extend(bounding_box_coordinates)\n id_l.extend(attr_list)\n #print('This should have at least one element: ' + str(id_l))\n id_attr[face_num] = id_l\n id_count += 1\n #print(known_faces_dict)\n #cv2.imshow('cropped face', cropped_face)\n #cv2.waitKey(0)\n else:\n #print('we have a match! getting id from match')\n #get the encoding that was True via the index and add to json dict\n similar_encoding = known_faces_encoding[index]\n similar_encoding_hash = similar_encoding.flatten()[0]\n projected_id = known_faces_dict[similar_encoding_hash]\n id_l = [projected_id]\n id_l.extend(bounding_box_coordinates)\n id_l.extend(attr_list)\n #print('This should have at least one element: ' + str(id_l))\n id_attr[face_num] = id_l\n\n face_num += 1\n\n #after running on sample.mp4, should ideally have only 2 entries\n #print(id_attr)\n\n return id_attr, known_faces_dict, known_faces_encoding, id_count\n\n#currently, the following supporting files from mxnet-face and facenet are needed:\n# ../data/shape_predictor_68_face_landmarks.dat from facenet (downloadable from repo)\n# lightened_moon.py from mxnet-face (should be in attribute)\n# lightened_moon folder containing lightened_moon_fuse from model folder in mxnet-face\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('input_video', type=str, help='Target image.')\n #parser.add_argument('cropped_img', type=str, help='Cropped face')\n parser.add_argument('--dlib_face_predictor', type=str,\n help='File containing the dlib face predictor.', default='../data/shape_predictor_68_face_landmarks.dat')\n\n # custom options added by me\n parser.add_argument('--align', type=int,\n help='Indicate whether faces should be aligned for feature extraction, default is 0. 0=No, 1=Yes. 
If yes, specify --image_size and --face_size if needed.', default=0)\n parser.add_argument('--image_size', type=int,\n help='Image size (height, width) in pixels.', default=110)\n parser.add_argument('--face_size', type=int,\n help='Size of the face thumbnail (height, width) in pixels.', default=96)\n parser.add_argument('--landmarkIndices', type=list,\n help='specify your own landmark indices as a list of three integers', default=align_dlib.AlignDlib.OUTER_EYES_AND_NOSE)\n # parser.add_argument('--use_center_crop', \n # help='Use the center crop of the original image after scaling the image using prealigned_scale.', action='store_true')\n # parser.add_argument('--prealigned_dir', type=str,\n # help='Replace image with a pre-aligned version when face detection fails.', default='')\n # parser.add_argument('--prealigned_scale', type=float,\n # help='The amount of scaling to apply to prealigned images before taking the center crop.', default=0.87)\n\n # parser.add_argument('--size', type=int, default=128,\n # help='the image size of lfw aligned image, only support squre size')\n # #parser.add_argument('--opencv', type=str, default='~/Desktop/mxnet-face/model/opencv/cascade.xml',\n # # help='the opencv model path')\n # parser.add_argument('--pad', type=float, nargs='+',\n # help=\"pad (left,top,right,bottom) for face detection region\")\n parser.add_argument('--model-load-prefix', type=str, default='lightened_moon/lightened_moon_fuse',\n help='the prefix of the model to load')\n parser.add_argument('--model-load-epoch', type=int, default=82,\n help='load the model on an epoch using the model-load-prefix')\n return parser.parse_args(argv)\n\nif __name__ == '__main__':\n main(parse_arguments(sys.argv[1:]))\n","sub_path":"facenet/src/align/detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":15481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"566499670","text":"from pages.home import *\nfrom pages.search_page import *\nfrom pages.weer_rotterdam import *\nfrom selenium import webdriver\nimport pytest\nfrom time import sleep\n\n\n\n@pytest.fixture\ndef browser():\n '''Browser Chrome'''\n driver = webdriver.Chrome()\n yield driver\n driver.quit()\n\n\ncity = 'Rotterdam'\n\n\nclass TestSearch:\n def test_search(self, browser):\n home_page = HomePage(browser)\n search_page = SearchPage(browser)\n rotterdam_page = WeerRotterdamPage(browser)\n home_page.load()\n home_page.search()\n sleep(3)\n search_page.accept_cookie()\n sleep(3)\n search_page.enter_city(city)\n sleep(3)\n search_page.first_city()\n sleep(3)\n city_actuall = rotterdam_page.rotterdam_check()\n\n assert city == city_actuall, 'Error: can not find Rotterdam'\n\n\n\n","sub_path":"tests/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"390135339","text":"#!/usr/bin/env python\n\nfrom DisplacedSUSY.Configuration.systematicsDefinitions import *\n\nexternal_systematics_directory = 'DisplacedSUSY/Configuration/data/'\n\n\n# for the ABCD method and the signal region\nregion_names = {\n 'A' : 'Preselection_100um_SS',\n 'B' : 'Preselection_100um',\n 'C' : 'Preselection_100um_AntiIso_SS',\n 'D' : 'Preselection_100um_AntiIso',\n 'signal' : 'Preselection_100um',\n 'signal_antiIso' : 'Preselection_100um_AntiIso'\n}\n\n# these are the distributinos that will be fit in region A to extract the QCD yield\ndistributions_to_fit = [\n {'name' : 
'electronAbsD0BeamspotM', \n #'lowerLimit' : 0.02, #you can define upper or lower limits here.\n #'upperLimit' : 0.05\n },\n {'name' : 'muonAbsD0BeamspotM' }, \n {'name' : 'electronEta' },\n {'name' : 'muonEta' },\n {'name' : 'electronMetMt' },\n {'name' : 'muonMetMt' }\n]\n\n# other contributions in region A that will be held constant in the fitting\nfitting_backgrounds = [\n# 'EWK_WNjets',\n# 'Top'\n\n #'Background'\n 'WNjets',\n 'Diboson',\n 'SingleTop',\n 'TTbar',\n 'DY',\n]\n\n# other contributions in regions C & D that should be subtracted off\nimpurities = [\n# 'EWK_WNjets',\n# 'Top'\n #'Background'\n 'WNjets',\n 'Diboson',\n 'SingleTop',\n 'TTbar',\n 'DY',\n]\n\n# the data dataset to be used when constructing the data-driven QCD sample\ndata_dataset = 'MuEG_22Jan2013'\n\n\n\n","sub_path":"BackgroundStudies/test/qcdABCDConfig.py","file_name":"qcdABCDConfig.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"389708125","text":"import asyncio\n\n\ndef load_tracked():\n\timport json\n\twith open('tracked', 'r') as f:\n\t\ttry:\n\t\t\treturn json.load(f)\n\t\texcept ValueError:\n\t\t\treturn [[\"::\", 46546, 0, 0]]\n\n\n@asyncio.coroutine\ndef client(host, port, *args):\n\treader, writer = yield from asyncio.open_connection(host, port)\n\n\twriter.write(\"On port {}\\n\".format(port).encode('ascii'))\n\twriter.write(b'1\\n')\n\twriter.write(b'2\\n')\n\twriter.write(b'3\\n')\n\n\twhile True:\n\t\tline = yield from reader.readline()\n\t\tprint(\"Received:\", line)\n\t\tif line == b'3\\n' or not line:\n\t\t\tbreak\n\twriter.close()\n\n\ndef spawner():\n\twhile not load_tracked():\n\t\tyield from asyncio.sleep(5.0)\n\n\tcoros = []\n\tfor server in load_tracked():\n\t\tcoros.append(asyncio.Task(client(*server)))\n\n\tyield from asyncio.gather(*coros)\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(spawner())\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"21774150","text":"\"\"\"\nch3 Neural Networks\n\n# perceptron\n- two input values (x1, x2)\n- output value y\n compute a = x1 * w1 + x2 * w2 + b \n y = 1, if a > threshold\n y = 0, if a < threshold\n- activation function\neach neuron of a neural network has a function that converts the weighted sum of its input signals into the output value\n(e.g. a > threshold)\n\nKinds of activation functions\n1. step function\n2. sigmoid function - has the advantage of being differentiable\n3. ReLU function - training gets faster and computation is cheap; very simple to implement\n4. tanh function - one of the hyperbolic functions; can be obtained by transforming the sigmoid function\n\"\"\"\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\n\n# 1. step function\ndef step_function(x):\n \"\"\"\n numpy operates element by element.\n evaluates >0 for each element of the list.\n :param\n :return\n \"\"\"\n y = x > 0\n return y.astype(np.int)\n # avoid for loops in functions that take an np.array ~ the loop behaviour is already built in.\n # applying a comparison operator to an np.array yields a bool array with the comparison applied to every element\n # type conversion - astype converts the type\n\n\ndef step_function(x):\n result = [1 if x_i > 0 else 0\n for x_i in x]\n return result\n\n\ndef step_function(x):\n result = []\n for x_i in x:\n if x_i > 0:\n result.append(1)\n else:\n result.append(0)\n return np.array(result)\n\n\n# 2. sigmoid function\ndef sigmoid(x):\n \"\"\"sigmoid = 1 / (1 + exp(-x))\"\"\"\n \"\"\"comparison of the exponential function across packages\"\"\"\n # return 1 / (1 + math.exp(-x))\n return 1 / (1 + np.exp(-x))\n # properties of np.array\n # x can be of several types (Number, ndarray, Iterable (list or tuple))\n # avoid for loops in functions that take an np.array ~ the loop behaviour is already built in.\n\n\n# 3. ReLU function\ndef relu(x):\n \"\"\"ReLU (Rectified Linear Unit)\n y = x, if x > 0\n y = 0, otherwise\n # important! it is linear in the sense that it returns the value of x unchanged\n \"\"\"\n # to rectify -> in an electric circuit, block the negative flow. (blocks x < 0 and outputs 0)\n return np.maximum(0, x)\n # compares 0 and x and finds the max.\n\n\ndef relu(x):\n result = []\n for x_i in x:\n if x_i > 0:\n result.append(x_i)\n else:\n result.append(0)\n return np.array(result)\n\n\ndef relu(x):\n return [x_i if x_i > 0 else 0 for x_i in x]\n\n\ndef tanh(x):\n y = 2 * x\n return 2 * sigmoid(y) - 1\n\n\nif __name__ == '__main__':\n x = np.arange(-3, 4) # like python range(start, end), but returns an array.\n print('x =', x)\n # for x_i in x:\n # print(step_function(x_i), end=' ')\n print('y = ', step_function(x)) # pass the array itself and get the return value as an array too\n\n # 2.\n print('sigmoid = ', sigmoid(x)) # the call itself is the for loop!\n # because many sigmoids are combined in the computation, ch01_review.ndarray is more convenient\n\n # for x_i in x:\n # print(sigmoid(x_i), end=' ') # math.exp does not accept a list as its parameter~\n\n # 2. graph\n # draw the step function and the sigmoid function on a single plot\n x = np.arange(-10, 10, 0.01) # [-10, -9.99, ..]\n y1 = step_function(x)\n y2 = sigmoid(x)\n y3 = tanh(x)\n plt.plot(x, y1, label='Step Function')\n plt.plot(x, y2, label='Sigmoid Function')\n plt.plot(x, y3, label='Hyperbolic Tangent Function')\n plt.legend()\n plt.show()\n\n # 3. ReLU\n x = np.arange(-3, 4)\n relus = relu(x) # the array itself goes in as the parameter\n print('ReLU = ', relus)\n plt.plot(x, relus)\n plt.title('ReLU')\n plt.show()\n\n # 4. tanh\n x = np.arange(-3, 4)\n tanh = tanh(x)\n print('Hyperbolic Tangent : ', tanh)\n","sub_path":"lab_dl/ch03/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"42124580","text":"import pytest\n\nfrom tools.get_driver import GetDriver\nimport page\nfrom page.page_in import PageIn\nfrom log.get_logger import GetLogger\nfrom tools.read_yaml import read_yaml\n\nlog=GetLogger.get_logger()\n\n\nclass TestMpArticle:\n # 1. Initialization\n def setup(self):\n # 1. get the driver\n driver = GetDriver.get_web_driver('chrome', page.mp_url)\n # 2. get the unified entry class PageIn object; two page objects are needed, so PageIn is instantiated separately\n self.page_in = PageIn(driver)\n # 3. get the PageMpLogin object and call the successful-login dependency method\n self.page_in.page_get_PageMpLogin().page_mp_login_success()\n # 4. get the PageMpArticle page object\n self.article = self.page_in.page_get_PageMpArticle()\n\n # 2. Teardown (log out)\n def teardown(self):\n # close the driver\n GetDriver.quit_web_driver()\n\n # 3. Test the publish-article method\n @pytest.mark.parametrize(\"data\",read_yaml(\"mp_article.yaml\"))\n def test_mp_article(self,data):\n title=data['title']\n content=data['content']\n expect=data['expect']\n # call the publish-article method\n self.article.page_mp_article(title, content)\n # assertion\n try:\n assert expect in self.article.page_get_info()\n except Exception as e:\n log.error(\"Assertion failed, error message: {}\".format(e))\n self.article.base_get_img()\n raise\n","sub_path":"scripts/test02_mp_article.py","file_name":"test02_mp_article.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"3109960","text":"import json\nimport os\nimport sys\nimport traceback\nfrom os import path\n\nimport construct\n\nfrom mgz.parsed_game import ParsedGame\n\n\ndef parse(file_name: str):\n try:\n with open(file_name, 'rb') as handle:\n game = ParsedGame(handle)\n return game.parse_full()\n except Exception:\n print(\"Error parsing\")\n traceback.print_exc(file=sys.stderr)\n return None\n\n\ndef get_base_name(file_name: str) -> str:\n return '.'.join(file_name.split('.')[:-1])\n\n\ndef find_files(path_name: str):\n yield from (path.join(directory, file_name) for directory, _, file_names in os.walk(path_name) for file_name in\n file_names)\n\n\ndef find_mgz_files(path_name: str):\n yield from (file_name for file_name in find_files(path_name) if file_name.endswith('.mgz'))\n\n\ndef parse_in_directory(path_name: str):\n construct.setglobalstringencoding('latin1')\n files = find_mgz_files(path_name)\n for file in files:\n base = get_base_name(file)\n json_file = '{0}.full.json'.format(base)\n if not path.exists(json_file):\n print(\"parsing \" + file)\n parsed = parse(file)\n if parsed:\n try:\n json_string = json.dumps(parsed)\n with open(json_file, \"w\") as out:\n out.write(json_string)\n except Exception:\n print(\"Error writing\")\n traceback.print_exc(file=sys.stderr)\n print(\"Finished\")\n\n\nif __name__ == \"__main__\":\n p = sys.argv[1]\n parse_in_directory(p)\n","sub_path":"rec.py","file_name":"rec.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"140383768","text":"# -*- coding: utf-8 -*-\n# Copyright (C) 2016-2018 by Brendt Wohlberg \n# Cristina Garcia-Cardona \n# All rights reserved. BSD 3-clause License.\n# This file is part of the SPORCO package. Details of the copyright\n# and user license can be found in the 'LICENSE.txt' file distributed\n# with the package.\n\n\"\"\"Classes for FISTA algorithm for the Convolutional BPDN problem\"\"\"\n\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport copy\nimport numpy as np\n\nimport benchmarks.other.sporco.cnvrep as cr\nimport benchmarks.other.sporco.linalg as sl\nfrom benchmarks.other.sporco.util import u\n\nfrom benchmarks.other.sporco.fista import fista\n\n__author__ = \"\"\"Cristina Garcia-Cardona \"\"\"\n\n\n\nclass ConvBPDN(fista.FISTADFT):\n r\"\"\"\n Base class for FISTA algorithm for the Convolutional BPDN (CBPDN)\n :cite:`garcia-2018-convolutional1` problem.\n\n |\n\n .. inheritance-diagram:: ConvBPDN\n :parts: 2\n\n |\n\n The generic problem form is\n\n .. 
math::\n \\mathrm{argmin}_\\mathbf{x} \\;\n f( \\{ \\mathbf{x}_m \\} ) + \\lambda g( \\{ \\mathbf{x}_m \\} )\n\n where :math:`f = (1/2) \\left\\| \\sum_m \\mathbf{d}_m * \\mathbf{x}_m -\n \\mathbf{s} \\right\\|_2^2`, and :math:`g(\\cdot)` is a penalty\n term or the indicator function of a constraint; with input\n image :math:`\\mathbf{s}`, dictionary filters :math:`\\mathbf{d}_m`,\n and coefficient maps :math:`\\mathbf{x}_m`. It is solved via the\n FISTA formulation\n\n Proximal step\n\n .. math::\n \\mathbf{x}_k = \\mathrm{prox}_{t_k}(g) (\\mathbf{y}_k - 1/L \\nabla\n f(\\mathbf{y}_k) ) \\;\\;.\n\n Combination step\n\n .. math::\n \\mathbf{y}_{k+1} = \\mathbf{x}_k + \\left( \\frac{t_k - 1}{t_{k+1}}\n \\right) (\\mathbf{x}_k - \\mathbf{x}_{k-1}) \\;\\;,\n\n with :math:`t_{k+1} = \\frac{1 + \\sqrt{1 + 4 t_k^2}}{2}`.\n\n\n After termination of the :meth:`solve` method, attribute\n :attr:`itstat` is a list of tuples representing statistics of each\n iteration. The fields of the named tuple ``IterationStats`` are:\n\n ``Iter`` : Iteration number\n\n ``ObjFun`` : Objective function value\n\n ``DFid`` : Value of data fidelity term :math:`(1/2) \\| \\sum_m\n \\mathbf{d}_m * \\mathbf{x}_m - \\mathbf{s} \\|_2^2`\n\n ``RegL1`` : Value of regularisation term :math:`\\sum_m \\|\n \\mathbf{x}_m \\|_1`\n\n ``Rsdl`` : Residual\n\n ``L`` : Inverse of gradient step parameter\n\n ``Time`` : Cumulative run time\n \"\"\"\n\n\n class Options(fista.FISTADFT.Options):\n r\"\"\"ConvBPDN algorithm options\n\n Options include all of those defined in\n :class:`.fista.FISTADFT.Options`, together with\n additional options:\n\n ``NonNegCoef`` : Flag indicating whether to force solution to\n be non-negative.\n\n ``NoBndryCross`` : Flag indicating whether all solution\n coefficients corresponding to filters crossing the image\n boundary should be forced to zero.\n\n ``L1Weight`` : An array of weights for the :math:`\\ell_1`\n norm. The array shape must be such that the array is\n compatible for multiplication with the X/Y variables. If this\n option is defined, the regularization term is :math:`\\lambda\n \\sum_m \\| \\mathbf{w}_m \\odot \\mathbf{x}_m \\|_1` where\n :math:`\\mathbf{w}_m` denotes slices of the weighting array on\n the filter index axis.\n\n \"\"\"\n\n defaults = copy.deepcopy(fista.FISTADFT.Options.defaults)\n defaults.update({'NonNegCoef': False, 'NoBndryCross': False})\n defaults.update({'L1Weight': 1.0})\n defaults.update({'L': 500.0})\n\n\n def __init__(self, opt=None):\n \"\"\"\n Parameters\n ----------\n opt : dict or None, optional (default None)\n ConvBPDN algorithm options\n \"\"\"\n\n if opt is None:\n opt = {}\n fista.FISTADFT.Options.__init__(self, opt)\n\n\n\n def __setitem__(self, key, value):\n \"\"\"Set options.\"\"\"\n\n fista.FISTADFT.Options.__setitem__(self, key, value)\n\n\n itstat_fields_objfn = ('ObjFun', 'DFid', 'RegL1')\n hdrtxt_objfn = ('Fnc', 'DFid', u('Regℓ1'))\n hdrval_objfun = {'Fnc': 'ObjFun', 'DFid': 'DFid', u('Regℓ1'): 'RegL1'}\n\n\n\n def __init__(self, D, S, lmbda=None, opt=None, dimK=None, dimN=2):\n \"\"\"\n This class supports an arbitrary number of spatial dimensions,\n `dimN`, with a default of 2. The input dictionary `D` is either\n `dimN` + 1 dimensional, in which case each spatial component\n (image in the default case) is assumed to consist of a single\n channel, or `dimN` + 2 dimensional, in which case the final\n dimension is assumed to contain the channels (e.g. colour\n channels in the case of images). 
The input signal set `S` is\n either `dimN` dimensional (no channels, only one signal),\n `dimN` + 1 dimensional (either multiple channels or multiple\n signals), or `dimN` + 2 dimensional (multiple channels and\n multiple signals). Determination of problem dimensions is\n handled by :class:`.cnvrep.CSC_ConvRepIndexing`.\n\n\n |\n\n **Call graph**\n\n .. image:: ../_static/jonga/fista_cbpdn_init.svg\n :width: 20%\n :target: ../_static/jonga/fista_cbpdn_init.svg\n\n |\n\n\n Parameters\n ----------\n D : array_like\n Dictionary array\n S : array_like\n Signal array\n lmbda : float\n Regularisation parameter\n opt : :class:`ConvBPDN.Options` object\n Algorithm options\n dimK : 0, 1, or None, optional (default None)\n Number of dimensions in input signal corresponding to multiple\n independent signals\n dimN : int, optional (default 2)\n Number of spatial/temporal dimensions\n \"\"\"\n\n # Set default options if none specified\n if opt is None:\n opt = ConvBPDN.Options()\n\n # Infer problem dimensions and set relevant attributes of self\n if not hasattr(self, 'cri'):\n self.cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)\n\n # Set dtype attribute based on S.dtype and opt['DataType']\n self.set_dtype(opt, S.dtype)\n\n # Set default lambda value if not specified\n if lmbda is None:\n cri = cr.CSC_ConvRepIndexing(D, S, dimK=dimK, dimN=dimN)\n Df = sl.rfftn(D.reshape(cri.shpD), cri.Nv, axes=cri.axisN)\n Sf = sl.rfftn(S.reshape(cri.shpS), axes=cri.axisN)\n b = np.conj(Df) * Sf\n lmbda = 0.1 * abs(b).max()\n\n # Set l1 term scaling and weight array\n self.lmbda = self.dtype.type(lmbda)\n self.wl1 = np.asarray(opt['L1Weight'], dtype=self.dtype)\n\n # Call parent class __init__\n self.Xf = None\n xshape = self.cri.shpX\n super(ConvBPDN, self).__init__(xshape, S.dtype, opt)\n\n # Reshape D and S to standard layout\n self.D = np.asarray(D.reshape(self.cri.shpD), dtype=self.dtype)\n self.S = np.asarray(S.reshape(self.cri.shpS), dtype=self.dtype)\n\n # Compute signal in DFT domain\n self.Sf = sl.rfftn(self.S, None, self.cri.axisN)\n\n # Create byte aligned arrays for FFT calls\n self.Y = self.X.copy()\n self.X = sl.pyfftw_empty_aligned(self.Y.shape, dtype=self.dtype)\n self.X[:] = self.Y\n\n # Initialise auxiliary variable Vf: Create byte aligned arrays\n # for FFT calls\n self.Vf = sl.pyfftw_rfftn_empty_aligned(self.X.shape, self.cri.axisN,\n self.dtype)\n\n\n self.Xf = sl.rfftn(self.X, None, self.cri.axisN)\n self.Yf = self.Xf.copy()\n self.store_prev()\n self.Yfprv = self.Yf.copy() + 1e5\n\n self.setdict()\n\n # Initialization needed for back tracking (if selected)\n self.postinitialization_backtracking_DFT()\n\n\n\n def setdict(self, D=None):\n \"\"\"Set dictionary array.\"\"\"\n\n if D is not None:\n self.D = np.asarray(D, dtype=self.dtype)\n self.Df = sl.rfftn(self.D, self.cri.Nv, self.cri.axisN)\n\n\n\n def getcoef(self):\n \"\"\"Get final coefficient array.\"\"\"\n\n return self.X\n\n\n\n def eval_grad(self):\n \"\"\"Compute gradient in Fourier domain.\"\"\"\n\n # Compute D X - S\n Ryf = self.eval_Rf(self.Yf)\n # Compute D^H Ryf\n gradf = np.conj(self.Df) * Ryf\n\n # Multiple channel signal, multiple channel dictionary\n if self.cri.Cd > 1:\n gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)\n\n return gradf\n\n\n\n def eval_Rf(self, Vf):\n \"\"\"Evaluate smooth term in Vf.\"\"\"\n\n return sl.inner(self.Df, Vf, axis=self.cri.axisM) - self.Sf\n\n\n\n def eval_proxop(self, V):\n \"\"\"Compute proximal operator of :math:`g`.\"\"\"\n\n return sl.shrink1(V, (self.lmbda / self.L) * 
self.wl1)\n\n\n\n def rsdl(self):\n \"\"\"Compute fixed point residual in Fourier domain.\"\"\"\n\n diff = self.Xf - self.Yfprv\n return sl.rfl2norm2(diff, self.X.shape, axis=self.cri.axisN)\n\n\n\n def eval_objfn(self):\n \"\"\"Compute components of objective function as well as total\n contribution to objective function.\n \"\"\"\n\n dfd = self.obfn_dfd()\n reg = self.obfn_reg()\n obj = dfd + reg[0]\n return (obj, dfd) + reg[1:]\n\n\n\n def obfn_dfd(self):\n r\"\"\"Compute data fidelity term :math:`(1/2) \\| \\sum_m\n \\mathbf{d}_m * \\mathbf{x}_m - \\mathbf{s} \\|_2^2`.\n This function takes into account the unnormalised DFT scaling,\n i.e. given that the variables are the DFT of multi-dimensional\n arrays computed via :func:`rfftn`, this returns the data fidelity\n term in the original (spatial) domain.\n \"\"\"\n\n Ef = self.eval_Rf(self.Xf)\n return sl.rfl2norm2(Ef, self.S.shape, axis=self.cri.axisN) / 2.0\n\n\n\n def obfn_reg(self):\n \"\"\"Compute regularisation term and contribution to objective\n function.\n \"\"\"\n\n rl1 = np.linalg.norm((self.wl1 * self.X).ravel(), 1)\n return (self.lmbda * rl1, rl1)\n\n\n\n def obfn_f(self, Xf=None):\n r\"\"\"Compute data fidelity term :math:`(1/2) \\| \\sum_m\n \\mathbf{d}_m * \\mathbf{x}_m - \\mathbf{s} \\|_2^2`\n This is used for backtracking. Since the backtracking is\n computed in the DFT, it is important to preserve the\n DFT scaling.\n \"\"\"\n\n if Xf is None:\n Xf = self.Xf\n\n Rf = self.eval_Rf(Xf)\n return 0.5 * np.linalg.norm(Rf.flatten(), 2)**2\n\n\n\n def reconstruct(self, X=None):\n \"\"\"Reconstruct representation.\"\"\"\n\n if X is None:\n X = self.X\n Xf = sl.rfftn(X, None, self.cri.axisN)\n Sf = np.sum(self.Df * Xf, axis=self.cri.axisM)\n return sl.irfftn(Sf, self.cri.Nv, self.cri.axisN)\n\n\n\n\n\nclass ConvBPDNMask(ConvBPDN):\n r\"\"\"\n FISTA algorithm for Convolutional BPDN with a spatial mask.\n\n |\n\n .. inheritance-diagram:: ConvBPDNMask\n :parts: 2\n\n |\n\n Solve the optimisation problem\n\n .. math::\n \\mathrm{argmin}_\\mathbf{x} \\;\n (1/2) \\left\\| W \\left(\\sum_m \\mathbf{d}_m * \\mathbf{x}_m -\n \\mathbf{s}\\right) \\right\\|_2^2 + \\lambda \\sum_m\n \\| \\mathbf{x}_m \\|_1 \\;\\;,\n\n where :math:`W` is a mask array.\n\n See :class:`ConvBPDN` for interface details.\n \"\"\"\n\n\n def __init__(self, D, S, lmbda, W=None, opt=None, dimK=None, dimN=2):\n \"\"\"\n\n |\n\n\n Parameters\n ----------\n D : array_like\n Dictionary matrix\n S : array_like\n Signal vector or matrix\n lmbda : float\n Regularisation parameter\n W : array_like\n Mask array. 
The array shape must be such that the array is\n compatible for multiplication with input array S (see\n :func:`.cnvrep.mskWshape` for more details).\n opt : :class:`ConvBPDNMask.Options` object\n Algorithm options\n dimK : 0, 1, or None, optional (default None)\n Number of dimensions in input signal corresponding to multiple\n independent signals\n dimN : int, optional (default 2)\n Number of spatial dimensions\n \"\"\"\n\n super(ConvBPDNMask, self).__init__(D, S, lmbda, opt, dimK=dimK,\n dimN=dimN)\n\n if W is None:\n W = np.array([1.0], dtype=self.dtype)\n self.W = np.asarray(W.reshape(cr.mskWshape(W, self.cri)),\n dtype=self.dtype)\n\n # Create byte aligned arrays for FFT calls\n self.WRy = sl.pyfftw_empty_aligned(self.S.shape, dtype=self.dtype)\n self.Ryf = sl.pyfftw_rfftn_empty_aligned(self.S.shape, self.cri.axisN,\n self.dtype)\n\n\n\n def eval_grad(self):\n \"\"\"Compute gradient in Fourier domain.\"\"\"\n\n # Compute D X - S\n self.Ryf[:] = self.eval_Rf(self.Yf)\n\n # Map to spatial domain to multiply by mask\n Ry = sl.irfftn(self.Ryf, self.cri.Nv, self.cri.axisN)\n # Multiply by mask\n self.WRy[:] = (self.W**2) * Ry\n # Map back to frequency domain\n WRyf = sl.rfftn(self.WRy, self.cri.Nv, self.cri.axisN)\n\n gradf = np.conj(self.Df) * WRyf\n\n # Multiple channel signal, multiple channel dictionary\n if self.cri.Cd > 1:\n gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)\n\n return gradf\n\n\n\n def obfn_dfd(self):\n r\"\"\"Compute data fidelity term :math:`(1/2) \\| W (\\sum_m\n \\mathbf{d}_m * \\mathbf{x}_{m} - \\mathbf{s}) \\|_2^2`\n \"\"\"\n\n Ef = self.eval_Rf(self.Xf)\n E = sl.irfftn(Ef, self.cri.Nv, self.cri.axisN)\n\n return (np.linalg.norm(self.W * E)**2) / 2.0\n\n\n\n def obfn_f(self, Xf=None):\n r\"\"\"Compute data fidelity term :math:`(1/2) \\| W (\\sum_m\n \\mathbf{d}_m * \\mathbf{x}_{m} - \\mathbf{s}) \\|_2^2`.\n This is used for backtracking. 
Since the backtracking is\n computed in the DFT, it is important to preserve the\n DFT scaling.\n \"\"\"\n\n if Xf is None:\n Xf = self.Xf\n\n Rf = self.eval_Rf(Xf)\n R = sl.irfftn(Rf, self.cri.Nv, self.cri.axisN)\n WRf = sl.rfftn(self.W * R, self.cri.Nv, self.cri.axisN)\n\n return 0.5 * np.linalg.norm(WRf.flatten(), 2)**2\n","sub_path":"benchmarks/other/sporco/fista/cbpdn.py","file_name":"cbpdn.py","file_ext":"py","file_size_in_byte":14485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"582658236","text":"from flask import Flask, request\nfrom flask_restful import Resource, Api\n\nfrom flask_jwt import JWT, jwt_required\n\nfrom security import authenticate, identity #importing methods from security.py\n\napp = Flask(__name__)\napp.secret_key = 'mysecretkey' #add security passphrase\napi = Api(app)\n\njwt = JWT(app,authenticate,identity) #introduces a new endpoint as /auth\n\nstores = [] #empty store list initialized in memory\nclass Store(Resource):\n\n\t@jwt_required()\n\tdef get(self,name):\n\t\tfor store in stores:\n\t\t\tif store['name']== name:\n\t\t\t\treturn{\"store\":store}, 200\n\t\treturn{\"message\": \"{} not found\".format(name)},404\n\n\tdef post(self,name):\n\t\tdata = request.get_json()\n\t\tfor store in stores:\n\t\t\tif store['name'] == name:\n\t\t\t\treturn{\"message\":\"{} already exists\".format(name)}\n\t\tstore = {'name': name, 'address': data['address'], 'employees': data[\"employees\"]}\n\t\tstores.append(store)\n\t\treturn{\"message\":\"successfully created store {}\".format(name)}, 201\n\n\tdef put(self,name):\n\t\tdata = request.get_json()\n\t\tfor store in stores:\n\t\t\tif store['name'] == name:\n\t\t\t\tstore.update(data)\n\t\t\t\treturn{\"message\":\"{} already exists so updating..\".format(name)}\n\t\tstore = {'name': name, 'address': data['address'], 'employees': data[\"employees\"]}\n\t\tstores.append(store)\n\t\treturn{\"message\":\"successfully created store {}\".format(name)}, 201\n\n\tdef delete(self,name):\n\t\tglobal stores\n\t\tstores = [store for store in stores if store['name'] != name]\n\t\treturn{\"message\":\"store {} deleted\".format(name)}, 200\n\n\nclass StoreList(Resource):\n\tdef get(self):\n\t\treturn {'stores' : stores}\n\n\napi.add_resource(Store,\"/store/<string:name>/\")\napi.add_resource(StoreList,\"/stores\")\n\napp.run(port=5000)","sub_path":"flaskrestfulapp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"505236158","text":"import numpy as np\nfrom scipy.linalg import expm\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nfrom scipy.stats import bernoulli\nimport random\n\ntrans_mat = np.zeros((4, 10))\ntrans_mat[0, 1] = trans_mat[1, 3] = trans_mat[2, 4] = trans_mat[3, 8] = 1\n\n\ndef int2bin(n, count=24):\n return \"\".join([str((n >> y) & 1) for y in range(count-1, -1, -1)])\n\n\ndef loc(x):\n \"\"\"\n turn a string of 0/1 into its decimal number\n :param x: string of 0/1\n :return: Decimal number\n \"\"\"\n return int(x, 2)\n\n\ndef dagger(x):\n return x.T.conj()\n\n\ndef state_init(N, site):\n \"\"\"\n :param N: length of state\n :param site: bit string of the site\n :type site: str\n :return: state\n \"\"\"\n init_state = np.zeros(2**N)\n init_state[loc(site)] = 1\n return init_state\n\n\ndef W_state(N, log = False):\n\n coe = 1/pow(N, 0.5)\n state = np.zeros(2**N, dtype = 'complex')\n for i in range(N):\n state[2**i] = coe\n if log:\n f = open('w_psi'+str(N)+'.txt', mode='w')\n for i in range(2**N):\n f.write(str(state[i].real)+' '+str(state[i].imag)+'\\n')\n f.close()\n return state\n\n\ndef state_save(state, path=None):\n N = int(np.log2(len(state)))\n path = path if path else str(N)+' qubits_state'+'.txt'\n f = open(path, mode='w')\n for i in range(2 ** N):\n f.write(str(state[i].real) + ' ' + str(state[i].imag) + '\\n')\n f.close()\n print(path)\n\n\ndef amp_save(state, path=None):\n N = int(np.log2(len(state)))\n path = path if path else str(N)+' qubits_state_amp'+'.txt'\n f = open(path, mode='w')\n for i in range(2 ** N):\n f.write(str(np.abs(state[i])) + ' ' + str(0.0000) + '\\n')\n f.close()\n print(path)\n\n\ndef sparse_check(x):\n tmp = x.flatten(order='C')\n nonzero = 0\n for i in range(len(tmp)):\n if tmp[i] != 0:\n nonzero += 1\n return nonzero, nonzero/len(tmp)\n\n\ndef unitary_check(x):\n threshold = 1e-10\n distance = np.linalg.norm(np.dot(dagger(x),x)-np.eye(len(x)))\n if distance < threshold:\n return True\n else:\n return False\n\n\ndef site(x, N, i):\n # value of bit i (1-indexed from the most significant of N bits) of x\n return x >> (N-i) & 1\n\n\ndef set_bit_val(x, i, N, val):\n # set bit i (1-indexed from the most significant of N bits) of x to val\n mask = 1 << (N-i)\n return np.where(val, x | mask, x & ~mask)\n\n\ndef fastmul(m,n, gate, state):\n N = int(np.log2(len(state)))\n index = [2*site(i,N,m)+site(i,N,n) for i in range(2**N)]\n gate = gate.T\n tmat = gate[:, index]\n v = np.arange(2**N).reshape(1,2**N).repeat(4,0)\n for i in range(4):\n p = site(i, 2, 1)\n q = site(i, 2, 2)\n v[i, :] = set_bit_val(v[i, :], m, N, p)\n v[i, :] = set_bit_val(v[i, :], n, N, q)\n v = state[v]\n tmat *= v\n res = tmat.sum(0)\n return res\n\n\n\n\n\ndef swap(U, J, t, Delta=0):\n H = np.array([[U, -np.sqrt(2)*J, 0, 0, 0, 0, 0, 0, 0,0],\n [-np.sqrt(2)*J, Delta, -np.sqrt(2)*J, 0, 0, 0, 0, 0, 0,0],\n [0, -np.sqrt(2)*J, U+2*Delta, 0, 0, 0, 0, 0, 0,0],\n [0, 0, 0, Delta, 0, -J, -J, 0, 0, 0],\n [0, 0, 0, 0, Delta, -J, -J, 0, 0, 0],\n [0, 0, 0, -J, -J, U, 0, 0, 0, 0],\n [0, 0, 0, -J, -J, 0, U+2*Delta, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, U, -np.sqrt(2)*J, 0],\n [0, 0, 0, 0, 0, 0, 0, -np.sqrt(2)*J, Delta, -np.sqrt(2)*J],\n [0, 0, 0, 0, 0, 0, 0, 0, -np.sqrt(2)*J, U+2*Delta]])\n Evolution = expm(H * 2*np.pi*t*-1j)\n swap = np.dot(trans_mat, Evolution)\n swap = np.dot(swap, trans_mat.T)\n swap /= 1j\n return swap\n\n\ndef sto(t, Delta):\n \"\"\"\n only for 01/10 base\n :param t:\n :param Delta:\n :return:\n \"\"\"\n phase = np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 2*Delta, 0],\n [0, 0, 0, 0]])\n Evolution = expm(phase * 2*np.pi*t*-1j)\n return Evolution\n\n\ndef noise(x):\n mu, sigma = x, x/25.76\n np.random.seed()\n return np.random.normal(mu, sigma, 1)[0]\n\n\ndef swap_noise(U, J, t, Delta = 0):\n # U, J, t, Delta = noise(U), noise(J), noise(t), Delta = noise(Delta)\n return swap(noise(U), noise(J), noise(t), noise(Delta))\n\n\ndef sto_noise(t, Delta):\n return sto(noise(t), noise(Delta))\n\n\ndef NumberOf1(n):\n count = 0\n while n&0xffffffff != 0:\n count += 1\n n = n & (n-1)\n return count\n\n\ndef phase_shift(n):\n phase = 0\n for _ in range(n):\n phase += noise(2*np.pi)\n return phase\n\n\ndef dephase(n):\n numbers = []\n for i in range(2 ** n):\n numbers.append(NumberOf1(i))\n numbers = np.array(numbers)\n dephase_mat = np.diag([np.exp(phase_shift(i)*-1j) for i in numbers])\n return dephase_mat\n\n\ndef density_mat(x):\n n = len(x)\n sam_shape = len(x.shape)\n if sam_shape == 1:\n dim = len(x)\n state = x.reshape(1, dim)\n state /= np.linalg.norm(state)\n den_mat = np.dot(dagger(state), state)\n else:\n dim = len(x[0])\n den_mat = np.zeros((dim, dim))\n for i in range(n):\n state = x[i].reshape(1, dim)\n state /= np.linalg.norm(state)\n if not i:\n den_mat = np.dot(dagger(state), state)\n else:\n den_mat += 
np.dot(dagger(state), state)\n den_mat /= n\n return den_mat\n\n\ndef fidelity_vec(x, y):\n return (np.dot(x.conj(), y)*np.dot(y.conj(), x)/np.linalg.norm(x)**2/np.linalg.norm(y)**2).real\n\n\ndef fidelity_essemble(x,y):\n n = len(y)\n fidelity = 0\n for i in range(n):\n fidelity += fidelity_vec(x, y[i])\n return fidelity/n\n\n\ndef purity(x):\n return (np.trace(np.dot(x, x))).real\n\n\ndef distribution(x):\n x /= np.linalg.norm(x, 2)\n prob = np.zeros(x.size)\n for i in range(len(x)):\n prob[i] = np.abs(x[i])**2\n return prob\n\n\ndef dis2state(x):\n state = np.array([np.sqrt(x[i]) for i in range(len(x))])\n return state\n\n\ndef sample(x, n=1):\n N = int(np.log2(len(x)))\n res = stats.rv_discrete(values=(range(len(distribution(x))), distribution(x))).rvs(size=n)\n if n == 1:\n return res[0]\n else:\n dis = sample_distribution(res, N)\n kl = KL(distribution(x), dis)\n print(kl)\n return res, dis\n\n\ndef sample_distribution(sample, N):\n n = len(sample)\n sample_dis = np.zeros(2**N)\n for i in sample:\n sample_dis[i] += 1\n sample_dis /= n\n return sample_dis\n\n\ndef essemble_distribution(x):\n n = len(x)\n dis = np.array([x[i][i] for i in range(n)])\n return dis.real\n\n\ndef KL(p, q):\n divergence = 0\n for i in range(len(p)):\n if p[i] and q[i]:\n divergence += p[i]*np.log(p[i]/q[i])\n return divergence\n\n\ndef KL_new(P,Q):\n N = len(P)\n epsilon = 0.01/N\n P = P + epsilon\n Q = Q + epsilon\n divergence = np.sum(P*np.log(P/Q))\n return divergence\n\n\ndef sample_plot(dis, N, M, KL=None):\n x = [int2bin(i,N) for i in range(len(dis))]\n plt.bar(x, dis)\n plt.ylim(0,1)\n for x, y in enumerate(dis):\n plt.text(x, y+0.02, '%s' %y, ha='center')\n if not KL:\n plt.title('{} qubits with {} measurements'.format(N, M))\n else:\n plt.title('{} qubits with {} measurements\\n KL = {}'.format(N, M, KL))\n plt.ylabel('Probility')\n plt.show()\n\n\ndef trans_base(bases, x):\n Z2Z = np.eye(2)\n Z2X = 1 / np.sqrt(2) * np.array([[1, 1], [1, -1]])\n Z2Y = 1 / np.sqrt(2) * np.array([[1, -1j], [1, 1j]])\n decode = {'Z': Z2Z, 'X': Z2X, 'Y': Z2Y, 'z': Z2Z, 'x': Z2X,'y': Z2Y}\n tmp_mat = decode[bases[0]]\n for i in range(1,len(bases)):\n tmp_mat = np.kron(tmp_mat, decode[bases[i]])\n return np.dot(tmp_mat, x)\n\n\ndef sample_bases(bases, state, M):\n N = int(np.log2(len(state)))\n f1 = open(str(N)+' qubits_measurement.txt', mode='w')\n f2 = open(str(N)+' qubits_measurement_bases.txt', mode='w')\n f3 = open(str(N)+' qubits_measurement_bases_set.txt', mode='w')\n for i in bases:\n measure = sample(trans_base(i, state), M)\n for j in measure:\n tmp = int2bin(j, N)\n for k in tmp:\n f1.write(k+' ')\n f1.write('\\n')\n for k in i:\n f2.write(k+' ')\n f2.write('\\n')\n for j in i:\n f3.write(j+' ')\n f3.write('\\n')\n f1.close()\n f2.close()\n f3.close()\n\n\ndef Z_sample(state, M, error=0):\n N = int(np.log2(len(state)))\n f1 = open(str(N)+' qubits_measurement_z.txt', mode='w')\n measure = sample(state, M)\n dis = sample_distribution(measure, N)\n kl = KL(dis, distribution(state))\n print(kl)\n if error:\n measure = sample_error(measure, N, error)\n dis = sample_distribution(measure, N)\n kl = KL(dis, distribution(state))\n print(kl)\n for j in measure:\n tmp = int2bin(j, N)\n for k in tmp:\n f1.write(k+' ')\n f1.write('\\n')\n f1.close()\n\n\ndef sample_error(samples, n, error):\n size = len(samples)\n flip = bernoulli.rvs(n * error, size=size)\n ker = [2 ** i for i in range(n)]\n count = 0\n for i in range(size):\n if flip[i]:\n count += 1\n flip[i] = random.choice(ker)\n print(count, count / size)\n sample_new 
= np.array([samples[i] ^ flip[i] for i in range(size)])\n return sample_new\n\n\ndef sample_save(samples, N, path=None):\n path = path if path else str(N)+' qubits_measurement_z.txt'\n f1 = open(path, mode='w')\n for j in samples:\n tmp = int2bin(j, N)\n for k in tmp:\n f1.write(k+' ')\n f1.write('\\n')\n f1.close()\n print(path)","sub_path":"qusource/qusource.py","file_name":"qusource.py","file_ext":"py","file_size_in_byte":9833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"478733762","text":"import argparse\nimport json\nfrom slurmpy import Slurm\nimport os\nimport glob\nimport sys\n\n#read the samplesheet\ndef read_samplesheet(samplesheet,project_dir):\n\tsamples={}\n\tfirst=True\n\tsample_col=0\n\tfor line in open(samplesheet):\n\t\tif \" \" in line:\n\t\t\tcontent=line.strip().split()\n\t\telif \",\" in line:\n\t\t\tcontent=line.strip().split(\",\")\n\t\telse:\n\t\t\tcontent=[line.strip()]\n\n\t\tif first:\n\t\t\tfirst=False\n\t\t\tfor i in range(0,len(content)):\n\t\t\t\tif content[i] == \"SampleID\":\n\t\t\t\t\tsample_col=i\n\t\t\tcontinue\n\n\t\tsample_name=content[ sample_col ]\n\n\n\t\tif sample_name in samples:\n\t\t\tcontinue\n\n\t\tse=True\n\t\tfor file in glob.glob(\"{}/*{}*/*.fastq.gz\".format(project_dir,sample_name)):\n\t\t\tif \"_R2\" in file and \"fastq\" in file:\n\t\t\t\tse=False\n\n\t\tif se:\n\t\t\tfastq=[ \"<( zcat {}/*{}*/*_R1*fastq.gz )\".format(project_dir,sample_name) ]\n\t\telse:\n\t\t\tfastq=[ \"<( zcat {}/*{}*/*_R1*fastq.gz )\".format(project_dir,sample_name), \"<( zcat {}/{}/*_R2*fastq.gz )\".format(project_dir,sample_name) ]\n\n\t\tsamples[sample_name]={\"fastq\":fastq,\"se\":se}\n\n\treturn(samples)\n\ndef check_config(config,args):\n\tif not os.path.isfile(config[\"reference\"]):\n\t\tprint(\"error: cannot find reference genome fasta, check your config file\")\n\t\tprint(config[\"reference\"])\n\t\tquit()\n\n\tif not os.path.isfile(config[\"wisecondorx\"][\"blacklist\"]):\n\t\tprint(\"error: cannot find the blacklist bed file, check your config file\")\n\t\tprint(config[\"wisecondorx\"][\"blacklist\"])\n\t\tquit()\n\n\tif not os.path.isfile(config[\"singularity\"]):\n\t\tprint(\"error: cannot find the singularity collection, check your config file\")\n\t\tprint(config[\"singularity\"])\n\t\tquit()\n\n\tif not os.path.isfile(config[\"wisecondorx\"][\"refpreface\"]) and (not args.mkref and not args.skip_preface):\n\t\tprint(\"error: cannot find the preface wisecondorX reference file, check your config file or use the --skip_preface option\")\n\t\tprint(\"remember to build the wisecondorX reference using the mkref option\")\n\t\tprint(config[\"wisecondorx\"][\"refpreface\"])\n\t\tquit()\n\n\tif not os.path.isfile(config[\"wisecondorx\"][\"reftest\"]) and (not args.mkref):\n\t\tprint(\"error: cannot find the aneuploidy test wisecondorX reference file, check your config file\")\n\t\tprint(\"remember to build the wisecondorX reference using the mkref option\")\n\t\tprint(config[\"wisecondorx\"][\"reftest\"])\n\t\tquit()\n\n#create a command for running bwa and wisecondorX convert (single end)\ndef align_and_convert_single_end(config,fastq,args,sample):\n\n\tout_prefix=\"{}/{}/{}\".format(args.out,sample,sample)\n\taln=\"singularity exec {} bwa aln -n 0 -k 0 {} {} > {}.sai\".format(config[\"singularity\"],config[\"reference\"],fastq,out_prefix)\n\tsamse=\"singularity exec {} bwa samse -n -1 {} {}.sai {} | singularity exec {} bamsormadup inputformat=sam threads=16 SO=coordinate outputformat=bam 
tmpfile={}/{} indexfilename={}.bam.bai > {}.bam\".format(config[\"singularity\"],config[\"reference\"],out_prefix,fastq,config[\"singularity\"],config[\"align\"][\"tmpdir\"],sample,out_prefix,out_prefix)\n\tconvert=\"singularity exec {} WisecondorX convert {}.bam {}.bam.wcx.npz\".format(config[\"singularity\"],out_prefix,out_prefix)\n\trun_bwa=\"\\n\".join([aln,samse,convert])\n\n\treturn(run_bwa)\n\n#create a command for running bwa and wisecondorX convert (paired end)\ndef align_and_convert_paired_end(config,fastq,args,sample):\n\n\tout_prefix=\"{}/{}/{}\".format(args.out,sample,sample)\n\taln_R1=\"singularity exec {} bwa aln -n 0 -k 0 {} {} > {}_R1.sai\".format(config[\"singularity\"],config[\"reference\"],fastq[0],out_prefix)\n\taln_R2=\"singularity exec {} bwa aln -n 0 -k 0 {} {} > {}_R2.sai\".format(config[\"singularity\"],config[\"reference\"],fastq[1],out_prefix)\n\tsampe=\"singularity exec {} bwa sampe -n -1 {} {}_R1.sai {}_R2.sai {} {} | singularity exec {} bamsormadup inputformat=sam threads=16 SO=coordinate outputformat=bam tmpfile={}/{} indexfilename={}.bam.bai > {}.bam\".format(config[\"singularity\"],config[\"reference\"],out_prefix,out_prefix,fastq[0],fastq[1],config[\"singularity\"],config[\"align\"][\"tmpdir\"],sample,out_prefix,out_prefix)\n\tconvert=\"singularity exec {} WisecondorX convert {}.bam {}.bam.wcx.npz\".format(config[\"singularity\"],out_prefix,out_prefix)\n\trun_bwa=\"\\n\".join([aln_R1,aln_R2,sampe,convert])\n\n\treturn(run_bwa)\n\n#generate wisecondorX reference files\ndef mkref(config,args):\n\twcx_mkref=\"singularity exec {} WisecondorX newref {}/**/*.wcx.npz {}.test.npz --nipt --binsize {}\".format(config[\"singularity\"],args.out,args.out.rstrip(\"/\"),config[\"wisecondorx\"][\"testbinsize\"] )\n\twcx_mkrefpreface=\"singularity exec {} WisecondorX newref {}/**/*.wcx.npz {}.preface.npz --nipt --binsize {}\".format(config[\"singularity\"],args.out,args.out.rstrip(\"/\"),config[\"wisecondorx\"][\"prefacebinsize\"])\n\treturn(\"\\n\".join([wcx_mkref,wcx_mkrefpreface]))\n\n#perform the wisecondorx test\ndef wisecondorx_test(config,args,sample):\n\n\tout_prefix=\"{}/{}/{}\".format(args.out,sample,sample)\n\twcx_test=\"singularity exec {} WisecondorX --loglevel info predict {}.bam.wcx.npz {} {}.WCXpredict --plot --bed --blacklist {} --zscore {}\".format(config[\"singularity\"],out_prefix,config[\"wisecondorx\"][\"reftest\"],out_prefix,config[\"wisecondorx\"][\"blacklist\"],config[\"wisecondorx\"][\"zscore\"])\n\twcx_preface=\"singularity exec {} WisecondorX --loglevel info predict {}.bam.wcx.npz {} {}.WCXpredict.preface --plot --bed --blacklist {}\".format(config[\"singularity\"],out_prefix,config[\"wisecondorx\"][\"refpreface\"],out_prefix,config[\"wisecondorx\"][\"blacklist\"])\n\tgender=\"singularity exec {} WisecondorX gender {}.bam.wcx.npz {} > {}.wcx.npz.gender.txt\".format(config[\"singularity\"],out_prefix,config[\"wisecondorx\"][\"reftest\"],out_prefix)\n\treturn(\"\\n\".join([wcx_test,gender,wcx_preface]))\n\n#fetal fraction estimation using tiddit and AMYCNE\ndef amycne_ffy(config,args,sample):\n\tout_prefix=\"{}/{}/{}\".format(args.out,sample,sample)\n\tpath_gc_tab=\"{}/{}/{}.gc.tab\".format(args.out,sample,sample)\n\ttiddit=\"singularity exec {} python /bin/TIDDIT.py --cov --bam {}.bam -z {} -o {}.tiddit\".format(config[\"singularity\"],out_prefix,config[\"tiddit\"][\"binsize\"],out_prefix)\n\tgc_tab=\"singularity exec FluFFyPipe_0.0.sif python /bin/AMYCNE/Generate_GC_tab.py --fa {} --size {} --n_mask > 
{}\".format(config[\"reference\"],config[\"tiddit\"][\"binsize\"],path_gc_tab)\n\tamycne=\"singularity exec {} python /bin/AMYCNE/AMYCNE.py --ff --coverage {}.tiddit.tab --gc {} --Q {} > {}.tiddit.AMYCNE.tab\".format(config[\"singularity\"],out_prefix,path_gc_tab,config[\"amycne\"][\"minq\"],out_prefix)\n\treturn(\"\\n\".join([tiddit,gc_tab,amycne]))\n\n#fetal fraction estimation using Preface\ndef preface(config,args,sample):\n\tout_prefix=\"{}/{}/{}\".format(args.out,sample,sample)\n\tpreface=\"singularity exec {} Rscript /bin/PREFACE-0.1.1/PREFACE.R predict --infile {}.WCXpredict.preface_bins.bed --model {}/model.RData > {}_bins.bed.PREFACE.txt\".format(config[\"singularity\"],out_prefix,config[\"preface\"][\"model_dir\"],out_prefix)\n\treturn(preface)\n\n#collect QC stats using picard tools\ndef picard_qc(config,args,sample):\n\tout_prefix=\"{}/{}/{}\".format(args.out,sample,sample)\n\tpicard_gc=\"singularity exec {} picard CollectGcBiasMetrics I={}.bam O={}_gc_bias_metrics.txt CHART={}_gc_bias_metrics.pdf S={}.gc.summary.tab R={} {}\".format(config[\"singularity\"],out_prefix,out_prefix,out_prefix,out_prefix,config[\"reference\"],config[\"picard\"][\"javasettings\"])\n\tpicard_insert=\"singularity exec {} picard CollectInsertSizeMetrics I={}.bam O={}_insert_size_metrics.txt H={}_insert_size_histogram.pdf M=0.5 {}\".format(config[\"singularity\"],out_prefix,out_prefix,out_prefix,config[\"picard\"][\"javasettings\"])\n\tpicard_complexity=\"singularity exec {} picard EstimateLibraryComplexity I={}.bam O={}_complex_metrics.txt {}\".format(config[\"singularity\"],out_prefix,out_prefix,config[\"picard\"][\"javasettings\"])\n\treturn(\"\\n\".join([picard_gc,picard_insert,picard_complexity]))\n\n#construct Preface model\ndef preface_model(config,args):\n\tpreface=\"singularity exec {} Rscript /bin/PREFACE-0.1.1/PREFACE.R train --config {}.PREFACE.config.tab --outdir {} {}\".format(config[\"singularity\"],args.out.rstrip(\"/\"),config[\"preface\"][\"model_dir\"],config[\"preface\"][\"modelsettings\"])\n\treturn(preface)\n\n#generate a csv summary\ndef summarise(config,args):\n\tsummary=\"singularity exec {} python /bin/FluFFyPipe/scripts/generate_csv.py --folder {} --samplesheet {} --Zscore {} --minCNV {} > {}/{}.summary.csv\".format(config[\"singularity\"],args.out,args.sample,config[\"summary\"][\"zscore\"],config[\"summary\"][\"mincnv\"],args.out,args.out.strip(\"/\").split(\"/\")[-1])\n\treturn(summary)\t\n\nparser = argparse.ArgumentParser(\"\"\"fluffypipe.py --sample --project --out --config config.json\"\"\")\nparser.add_argument('--project' ,type=str, help=\"input project folder\", required=True)\nparser.add_argument('--out' ,required=True,type=str, help=\"output folder\")\nparser.add_argument('--config' ,required=True,type=str, help=\"json config file\")\nparser.add_argument('--sample', type=str,required=True, help=\"path to samplesheet\")\nparser.add_argument('--mkref', help=\"generate wisecondorX reference\",action=\"store_true\")\nparser.add_argument('--mkmodel', help=\"generate PREFACE model\",action=\"store_true\")\nparser.add_argument('--version', help=\"print version\",action=\"store_true\")\nparser.add_argument('--skip_preface', help=\"Skip Preface fetal fraction prediction\",action=\"store_true\")\nargs= parser.parse_args()\n\nversion=\"0.0.0\"\nif args.version:\n\tprint (\"FluFFYPipe version {}\".format(version))\n\tquit()\n\nif not os.path.isdir(args.out):\n\tos.system( \"mkdir {}\".format(args.out) )\n\n#copy config to output directory\nos.system(\"cp {} 
{}\".format(args.config,args.out))\n#write version and command line to output directory\nf=open(\"{}/cmd.txt\".format(args.out),\"w\")\nf.write(\"fluffypipe-{}\\n\".format(version))\nf.write(\" \".join(sys.argv))\nf.close()\n\n\nwith open(args.config) as f:\n\tconfig = json.load(f)\n\ncheck_config(config,args)\n\nsamples=read_samplesheet(args.sample,args.project)\n\nif args.mkref:\n\tjobids=[]\n\tfor sample in samples:\n\t\tfastq=samples[sample]\n\t\trun_bwa=align_and_convert(config,fastq,args,sample)\n\t\tbwa = Slurm(\"bwaAln-{}\".format(sample),{\"account\": config[\"slurm\"][\"account\"], \"partition\": \"node\",\"time\":config[\"slurm\"][\"time\"] },log_dir=\"{}/logs\".format(args.out),scripts_dir=\"{}/scripts\".format(args.out))\n\t\tjobids.append(bwa.run(run_bwa))\n\n\twcxmkref = Slurm(\"wcxmkref\",{\"account\": config[\"slurm\"][\"account\"], \"partition\": \"node\",\"time\":config[\"slurm\"][\"time\"] },log_dir=\"{}/logs\".format(args.out),scripts_dir=\"{}/scripts\".format(args.out))\n\twcxmkref.run( mkref(config,args),depends_on=jobids )\n\nelif args.mkmodel:\n\tf=open(\"{}.PREFACE.config.tab\".format(args.out.rstrip(\"/\")), \"w\" )\n\tf.write(\"ID\\tfilepath\\tgender\\tFF\\n\")\n\tfor sample in samples:\n\t\tfor line in open(\"{}/{}/{}.AMYCNE.tab\".format(args.out,sample,sample) ):\n\t\t\tif \"medA\" in line:\n\t\t\t\tcontinue\n\t\t\tcontent=line.strip().split()\n\t\t\tif female in line:\n\t\t\t\tff=\"NA\"\n\t\t\t\tgender=\"F\"\n\t\t\telse:\n\t\t\t\tgender=\"M\"\n\t\t\t\tff=float(content[-2])*100\n\n\t\t\tout.append(\"{}\\t{}/{}/{}.WCXpredict.preface_bins.bed\\t{}\\t\".format(sample,args.out,sample,sample,gender,ff))\n\n\tf.write(\"\\n\".join(out))\n\tf.close()\n\n\tif config[\"preface\"][\"model_dir\"] == \"\":\n\t\tprint (\"error: the model_dir parameter is not set, check your config file\")\n\t\tquit()\n\n\trun_model=preface_model(config,args,sample)\n\nelse:\n\tjobids=[]\n\tfor sample in samples:\n\t\tos.system( \"mkdir {}/{}\".format(args.out,sample) )\n\n\t\tfastq=samples[sample]\n\t\tif fastq[\"se\"]:\n\t\t\trun_bwa=align_and_convert_single_end(config,fastq[\"fastq\"][0],args,sample)\n\t\telse:\n\t\t\trun_bwa=align_and_convert_paired_end(config,fastq[\"fastq\"],args,sample)\n\n\t\tbwa = Slurm(\"bwaAln-{}\".format(sample),{\"account\": config[\"slurm\"][\"account\"], \"partition\": \"node\",\"time\":config[\"slurm\"][\"time\"] },log_dir=\"{}/logs\".format(args.out),scripts_dir=\"{}/scripts\".format(args.out))\n\t\talign_jobid=bwa.run(run_bwa)\n\n\t\trun_ffy=amycne_ffy(config,args,sample)\n\t\tffy = Slurm(\"amycne-{}\".format(sample),{\"account\": config[\"slurm\"][\"account\"], \"partition\": \"core\",\"time\":config[\"slurm\"][\"time\"] },log_dir=\"{}/logs\".format(args.out),scripts_dir=\"{}/scripts\".format(args.out))\n\t\tjobids.append(ffy.run( run_ffy,depends_on=[align_jobid] ))\n\n\t\trun_picard=picard_qc(config,args,sample)\n\t\tpicard = Slurm(\"picard_qc-{}\".format(sample),{\"account\": config[\"slurm\"][\"account\"], \"partition\": \"core\",\"time\":config[\"slurm\"][\"time\"] },log_dir=\"{}/logs\".format(args.out),scripts_dir=\"{}/scripts\".format(args.out))\n\t\tjobids.append(picard.run( run_picard,depends_on=[align_jobid] ))\n\n\t\trun_wcx=wisecondorx_test(config,args,sample)\n\t\twcx_test = Slurm(\"wcx-{}\".format(sample),{\"account\": config[\"slurm\"][\"account\"], \"partition\": \"core\",\"time\":config[\"slurm\"][\"time\"] },log_dir=\"{}/logs\".format(args.out),scripts_dir=\"{}/scripts\".format(args.out))\n\t\tjobids.append(wcx_test.run( 
run_wcx,depends_on=[align_jobid] ))\n\t\twcx_test_jobid=jobids[-1]\n\n\t\tif not args.skip_preface:\n\t\t\trun_preface=preface(config,args,sample)\n\t\t\tpreface_predict = Slurm(\"preface_predict-{}\".format(sample),{\"account\": config[\"slurm\"][\"account\"], \"partition\": \"core\",\"time\":config[\"slurm\"][\"time\"] },log_dir=\"{}/logs\".format(args.out),scripts_dir=\"{}/scripts\".format(args.out))\n\t\t\tjobids.append(preface_predict.run(run_preface,depends_on=[wcx_test_jobid]))\n\n\trun_summarise=summarise(config,args)\n\tsummarise_batch = Slurm(\"summarise_batch-{}\".format( args.project.strip(\"/\").split(\"/\")[-1] ),{\"account\": config[\"slurm\"][\"account\"], \"partition\": \"core\",\"time\":config[\"slurm\"][\"time\"] },log_dir=\"{}/logs\".format(args.out),scripts_dir=\"{}/scripts\".format(args.out))\n\tsummarise_batch.run(run_summarise,depends_on=jobids )\n","sub_path":"fluffypipe.py","file_name":"fluffypipe.py","file_ext":"py","file_size_in_byte":13429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"324600609","text":"# -*- coding: utf-8 -*-\n\nfrom flask import request, current_app\nfrom flask_restplus import Namespace, Resource, abort\nfrom .. import auth\nfrom ..serializers.groups import group_container, group_post_model, group_model, group_patch_model, group_full_model, \\\n group_supply_model, group_full_model_with_seed\nfrom app.models import Project, Group, Student, Campus\nfrom utils.iota import generate_seed, make_transfer\n\nns = Namespace('groups', description='Groups related operation')\n\n\n# ================================================================================================\n# ENDPOINTS\n# ================================================================================================\n#\n# API groups endpoints\n#\n# ================================================================================================\n\n\n@ns.route('/')\nclass GroupCollection(Resource):\n decorators = [auth.login_required]\n\n @ns.marshal_with(group_container)\n def get(self):\n \"\"\"\n Return groups\n \"\"\"\n return {'groups': [gr for gr in Group.objects]}\n\n @ns.marshal_with(group_full_model_with_seed)\n @ns.expect(group_post_model)\n def post(self):\n \"\"\"\n Add group\n \"\"\"\n data = request.json\n\n p = Project.objects.get_or_404(id=data['project'])\n if Group.objects(project=p, name=data['name']).count() > 0:\n abort(400, error='Name already exist')\n\n gr = Group(\n project=p,\n name=data['name'],\n seed=generate_seed()\n )\n\n d = gr.deposit_address\n gr.save()\n\n return gr\n\n\n@ns.route('/')\nclass GroupItem(Resource):\n decorators = [auth.login_required]\n\n @ns.marshal_with(group_full_model_with_seed)\n def get(self, id):\n \"\"\"\n Return group\n \"\"\"\n gr = Group.objects.get_or_404(id=id)\n\n return gr\n\n @ns.response(204, 'Group successfully patched')\n @ns.expect(group_patch_model)\n def patch(self, id):\n \"\"\"\n Patch group\n \"\"\"\n data = request.json\n if len(data) == 0:\n abort(400, error='No data')\n\n gr = Group.objects.get_or_404(id=id)\n\n gs = Group.objects(project=gr.project, name=data['name']).first()\n\n if gs is not None and gs.id != gr.id:\n abort(400, error='Name already exist')\n\n gr.name = data['name']\n gr.save()\n\n return 'Group successfully patched', 204\n\n @ns.response(204, 'Group successfully deleted')\n def delete(self, id):\n \"\"\"\n Delete group\n \"\"\"\n gr = Group.objects.get_or_404(id=id)\n\n gr.delete()\n\n return 'Group successfully deleted', 
204\n\n\n@ns.route('//supply')\n@ns.response(404, 'Group not found')\nclass GroupItemSupply(Resource):\n decorators = [auth.login_required]\n\n @ns.response(204, 'Group successfully supply')\n @ns.expect(group_supply_model)\n def post(self, id):\n \"\"\"\n Supply account\n \"\"\"\n data = request.json\n gr = Group.objects.get_or_404(id=id)\n c = Campus.objects.get_or_404(id=data['campus'])\n\n if gr.project.campus.id != c.id:\n abort(400, error='Not authorized')\n\n if c.balance < data['value']:\n abort(400, error='Insufficient funds')\n\n make_transfer(current_app.config['IOTA_HOST'], {\n 'recipient_address': gr.deposit_address.address,\n 'message': 'From EPSI',\n 'tag': 'SUPPLYGROUP',\n 'value': data['value'],\n 'seed': c.seed,\n 'deposit_address': c.deposit_address.address\n })\n\n return 'Group successfully supply', 204\n\n\n@ns.route('//students/')\n@ns.response(404, 'Group not found')\nclass GroupItemStudent(Resource):\n decorators = [auth.login_required]\n\n @ns.response(204, 'Student successfully added')\n def post(self, id, sid):\n \"\"\"\n Add student\n \"\"\"\n gr = Group.objects.get_or_404(id=id)\n s = Student.objects.get_or_404(id=sid)\n\n if gr.project.campus.id != s.campus.id:\n abort(400, error='Not authorized')\n\n if s in gr.students:\n abort(400, error='Student already exist')\n\n gr.students.append(s)\n gr.save()\n\n return 'Student successfully added', 204\n\n @ns.response(204, 'Student successfully removed')\n def delete(self, id, sid):\n \"\"\"\n Remove student\n \"\"\"\n gr = Group.objects.get_or_404(id=id)\n s = Student.objects.get_or_404(id=sid)\n\n if gr.project.campus.id != s.campus.id:\n abort(400, error='Not authorized')\n\n if s not in gr.students:\n abort(400, error='Student not exist in group')\n\n gr.students.remove(s)\n\n gr.save()\n\n return 'Student successfully removed', 204\n","sub_path":"app/admin/endpoints/groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":4822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"624547178","text":"from flask import Flask, request, jsonify\nimport boto3\nimport tensorflow_text as text\nimport tensorflow as tf\nimport json\n\napp = Flask(__name__)\n\n\n@app.route('/predict', methods=['GET', 'POST'])\ndef create_task():\n\n if request.method == \"POST\":\n sentences = request.get_json(force=True)\n if type(sentences) is str:\n sentence = json.loads(sentences)\n else:\n sentence = sentences\n output = dict()\n output[\"input\"] = sentence\n output[\"pred\"] = []\n\n checkpoint = load_model_on_keras()\n\n for s in sentence['data']:\n result = predict(checkpoint, s)\n output[\"pred\"].append([result])\n\n return jsonify(output), 201\n\n if request.method == \"GET\":\n msg = f\"Please compose your request in POST type with data.\"\n return jsonify({\"msg\": msg})\n\n\ndef get_model_from_s3():\n ACCESS_KEY = ''\n SECRET_KEY = ''\n s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY)\n s3.download_file('bucket', 's3 file location', 'local file location') # change the required\n # return model_h5_file\n\n\ndef load_model_on_keras():\n checkpoint = tf.saved_model.load('location of the model') # change the path\n return checkpoint\n\n\ndef predict(checkpoint, sentence):\n x=[]\n x.append(sentence)\n metric_result = float(tf.sigmoid(checkpoint(tf.constant(x))))\n return metric_result\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=\"5000\", 
debug=True)\n","sub_path":"Inference/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"152233894","text":"\"\"\"----------------------------------------------------------------------------\nMODULE:\n FFXMMConfirmationInSC\n\nDESCRIPTION:\n OPEN EXTENSION MODULE\n State chart creation for FX trade confirmation.\n The definition parameter must completely define the content of the business\n process state chart, including all states and transitions between them. Its\n format is a dictionary of states mapped to a dictionary of transitions as\n event->next_state items, e.g.:\n {'state a': {'event to go to b': 'state b', 'event to go to c': 'state c'}}\n\nFUNCTIONS:\n define_state_transition():\n Defines the state transition for state chart\n create_fx_trade_conf_sc():\n Creates the state chart\n\nVERSION: 3.0.1-0.5.3470\n\nRESTRICTIONS/LIMITATIONS:\n\t1. Any modifications to the script/encrypted module/clear text code within the core is not supported.\n\t2. This module is not customizable.\n\t3. The component may not work as expected with any modifications done to this module at user end.\n----------------------------------------------------------------------------\"\"\"\n\nimport FSwiftReaderLogger\nnotifier = FSwiftReaderLogger.FSwiftReaderLogger('FXMMConfIn', 'FFXMMConfirmationInNotify_Config')\nimport FMTStateChart\n\n\ndef define_state_transition():\n \"\"\"\n Creates a state chart with the given name, if required.\n The definition parameter must completely define the content of the business\n process state chart, including all states and transitions between them. Its\n format is a dictionary of states mapped to a dictionary of transitions as\n event->next_state items, e.g.:\n\n {'state a': {'event to go to b': 'state b', 'event to go to c': 'state c'}}\n\n All defined next_states values must be unique within a state's transitions\n (i.e. multiple events cannot lead to the same next_state).\n \"\"\"\n\n state_chart_name = 'FSwiftFXMMConfirmationIn'\n old_state_chart_name = 'FXTradeConfMsg'\n state_chart = {\n 'Ready' : {'Identified' : 'Paired',\n 'NotIdentified' : 'Unpaired'},\n 'Paired' : {'NoMatch' : 'Difference',\n 'Match' : 'Matched',\n 'Cancel' : 'Cancelled'},\n 'Unpaired' : {'Identified' : 'Paired',\n 'Amend' : 'Amended',\n 'Cancel' : 'Cancelled'},\n 'Difference' : {'ManuallyMatched' : 'Matched',\n 'Unpair' : 'Unpaired',\n 'Re-Match' : 'Paired',\n 'Amend' : 'Amended',\n 'Cancel' : 'Cancelled'},\n 'Matched' : {'Unpair' : 'Unpaired',\n 'Amend' : 'Amended',\n 'Cancel' : 'Cancelled'}\n }\n return state_chart_name, state_chart, old_state_chart_name\n\n\n\n\ndef define_state_chart_narrative_in():\n \"\"\"\n\tCreates a state chart with the given name, if required.\n\tThe definition parameter must completely define the content of the business\n\tprocess state chart, including all states and transitions between them. Its\n\tformat is a dictionary of states mapped to a dictionary of transitions as\n\tevent->next_state items, e.g.:\n\n\t {'state a': {'event to go to b': 'state b', 'event to go to c': 'state c'}}\n\n\tAll defined next_states values must be unique within a state's transitions\n\t(i.e. 
multiple events cannot lead to the same next_state).\n\t\"\"\"\n\n state_chart_name = 'FSwiftNarrativeIn'\n old_state_chart_name = 'DebitCreditConfMsg'\n state_chart = {\n 'Ready' : {'Identified' : 'Paired',\n 'NotIdentified' : 'Unpaired',\n 'IdentifiedForParty' : 'PairedWithParty'},\n 'PairedWithParty' : {'ManuallyPaired' : 'Paired' },\n 'Unpaired' : {'Identified' : 'Paired',\n 'Ignore' : 'Ignored' },\n 'Paired' : {'Unpair' : 'Unpaired'}\n }\n return state_chart_name, state_chart, old_state_chart_name\n\n\ndef create_fx_trade_conf_sc():\n state_chart_name, state_chart_dict, old_state_chart_name = define_state_transition()\n state_chart = FMTStateChart.create_state_chart(state_chart_name, state_chart_dict, old_state_chart_name)\n\n\n\n # EDIT 'coordinate string' ONLY IF STATE CHART MENTIONED ABOVE IS CHANGED.\n co_string = 'Ready,-500,0;Paired,-300,0;Matched,400,0;Unpaired,-300,-200;Cancelled,100,-200;Difference,100,200;Amended,400,200;'\n try:\n FMTStateChart.layout_state_chart(state_chart_name, co_string)\n notifier.INFO('Done with layout of state chart %s.'%state_chart_name)\n except Exception as e:\n notifier.WARN(str(e))\n\n state_chart_name, state_chart_dict, old_state_chart_name = define_state_chart_narrative_in()\n state_chart = FMTStateChart.create_state_chart(state_chart_name, state_chart_dict, old_state_chart_name)\n\n co_string = 'Ready,-500,0;Paired,-300,0;Matched,400,0;Unpaired,-300,-200;PairedWithParty,-300,200;Ignored, 100, -200;'\n try:\n FMTStateChart.layout_state_chart(state_chart_name, co_string)\n print(('Done with layout of state chart %s.' % state_chart_name))\n except Exception as e:\n notifier.WARN(str(e))\n\n#create_fx_trade_conf_sc()\n#delete_state_chart('FSwiftFXMMConfirmationIn')\n\n","sub_path":"Extensions/FSwiftFXMMConfirmation/FPythonCode/FFXMMConfirmationInSC.py","file_name":"FFXMMConfirmationInSC.py","file_ext":"py","file_size_in_byte":6554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"356849594","text":"#imports\r\nimport keyboard\r\nimport time\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nimport time\r\nimport Launchbite_settings as s\r\n\r\n#general setup\r\nwindow = Tk()\r\nstyle = ttk.Style()\r\nwindow.title('Launchdesk')\r\n\r\n#image setup\r\natom_logo = PhotoImage(file='atom_logo.gif')\r\n\r\n#function setup\r\ndef open_chrome():\r\n keyboard.press_and_release('windows+5')\r\ndef open_gitkraken():\r\n keyboard.press_and_release('windows+6')\r\ndef open_fusion():\r\n keyboard.press_and_release('windows+3')\r\ndef open_eagle():\r\n keyboard.press_and_release('windows+4')\r\ndef open_githubDesk():\r\n keyboard.press_and_release('windows+7')\r\ndef open_atom():\r\n keyboard.press_and_release('windows+8')\r\ndef open_mu():\r\n keyboard.press_and_release('windows+9')\r\n\r\n#button setup\r\nchromebutton = ttk.Button(window, text = 'chrome', command = open_chrome)\r\ngitkrakenButton = ttk.Button(window, text = 'GitKraken', command = open_gitkraken)\r\nfusionButton = ttk.Button(window, text='Fusion 360', command = open_fusion)\r\neagleButton = ttk.Button(window, text='Eagle', command = open_eagle)\r\ngithubDeskButton = ttk.Button(window, text='Github Desktop', command = open_githubDesk)\r\natomButton = ttk.Button(window, text='atom', command = open_atom)\r\nmuButton = ttk.Button(window, text='Mu', command = open_mu)\r\n\r\n#style settings\r\nif s.appTheme=='plain':\r\n style.theme_use('default')\r\nif s.appTheme=='retro':\r\n style.theme_use('classic')\r\nif s.appTheme=='big':\r\n 
style.theme_use('clam')\r\nif s.appTheme=='classic':\r\n    style.theme_use('winnative')\r\nif s.appTheme=='other1':\r\n    style.theme_use('alt')\r\nif s.appTheme=='other2':\r\n    style.theme_use('vista')\r\nif s.appTheme=='other3':\r\n    style.theme_use('xpnative')\r\n\r\n#size settings\r\nif s.shape=='slim':\r\n    if s.size=='medium':\r\n        window.geometry('200x250')\r\n    if s.size=='large':\r\n        window.geometry('300x350')\r\nif s.shape=='square':\r\n    if s.size=='micro':\r\n        window.geometry('175x175')\r\n    if s.size=='medium':\r\n        window.geometry('275x275')\r\n    if s.size=='large':\r\n        window.geometry('375x375')\r\n\r\n#load and launch\r\nchromebutton.pack()\r\ngitkrakenButton.pack()\r\nfusionButton.pack()\r\neagleButton.pack()\r\ngithubDeskButton.pack()\r\natomButton.pack()\r\nmuButton.pack()\r\nwindow.mainloop()\r\n","sub_path":"custom desktop.py","file_name":"custom desktop.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"331715545","text":"#!/usr/bin/env python2\n# The MIT License (MIT)\n#\n# Copyright (c) 2013 Wayne Prasek \n# based on work Copyright (c) 2012 Chris Oelmueller \n# based on work Copyright (c) 2010 Thomas Adamcik\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is fur-\n# nished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIA-\n# BILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport collections\nimport csv\nimport datetime\nfrom DateTime import DateTime\nimport json\nimport urllib\nimport pickle \n\nfrom data import DEFAULT_USER, USERNAMES, MILESTONES, STATES, LABELS\n\n#######################################################################\n# Configuration. Who doesn't like configuration.\n#######################################################################\n# Your repository name (the subdirectory we will write data to).\nREPO = 'fife/'\n# Locations of particular files. It is recommended to keep issues\n# and comments in the same directory, which usually is issues/.\nISSUES_PATH = REPO + 'issues/%s.json'\nCOMMENTS_PATH = REPO + 'issues/%s.%s.comments.json'\nMILESTONES_PATH = REPO + 'milestones/%s.json'\n# Path to the ticket pickle file\nTICKET_FILE = REPO + 'tickets.p'\n#\n# Magic report id we used to attach special labels to certain tickets\n# As all EASY_ stuff, commented out but we used it just like that.\n#EASY_NO = 14\n#\n\ndef trac_to_gh(text):\n    \"\"\"Right, fun goes here. If you aspire to write magic syntax conversion\n    from trac to github flavored markdown, this is your place to be. 
We just\n replaced basic markup because even getting the links right was a headache.\n \"\"\"\n t = text.replace('}}}', '```')\n t = t.replace('{{{', '```')\n t = t.replace('[[BR]]', '\\n')\n return t or '.' # No empty issue bodies are supported\n\ndef github_label(text):\n \"\"\"If you do not like the idea of having all your labels converted to\n lower case, now would be a great opportunity to edit one line of code.\n \"\"\"\n return unicode(text.lower())\n\ndef github_time(date):\n \"\"\"Takes trac date, returns github-ready timestamp.\n \"\"\"\n time = DateTime(str(date))\n return time.ISO8601()\n\ndef massage_comment(ticket, date, author, body):\n \"\"\"Expands the ticket comment list for *ticket* with a json comment\n representation of *date*, *author* (mapped to github) and text *body*.\n \"\"\"\n body = trac_to_gh(body)\n\n # Not sure whether we have a related github account for that user.\n if USERNAMES.get(author):\n user = USERNAMES[author]\n else: # If we do not, at least mention the user in our comment body\n user = DEFAULT_USER\n \n body = 'This comment was posted by **{reporter}**\\r\\n\\r\\n'.format(\n reporter=author) + body + \"\\r\\n\\r\\n\" + github_time(date)\n\n return {\n 'body': body,\n 'user': user,\n 'created_at': github_time(date),\n }\n\ndef write_issue(ticket, outfile):\n \"\"\"Dumps a csv line *row* from the issue query to *outfile*.\n \"\"\"\n # Issue text body\n body = ticket[3]['description']\n body = trac_to_gh(body)\n\n # Default state: open (no known resolution)\n state = STATES.get(ticket[3]['status'], 'open')\n\n # Trac will have stored some kind of username.\n reporter = ticket[3]['reporter']\n\n # Not sure whether we have a related github account for that user.\n if USERNAMES.get(reporter):\n userdata = USERNAMES[reporter]\n else: # If we do not, at least mention the user in our issue body\n userdata = DEFAULT_USER\n \n body = ('This issue was reported by **%s**\\r\\n\\r\\n' % reporter) + body\n\n # Whether this is stored in 'milestone' or '__group__' depends on the\n # query type. 
Try to find the data or assign the default milestone 0.\n milestone_info = ticket[3]['milestone']\n milestone = MILESTONES.get(milestone_info, 3)\n\n labels = [] # Collect random tags that might serve as labels\n for tag in ('type', 'component', 'priority'):\n if ticket[3].get(tag) and LABELS.get(ticket[3][tag]):\n label = LABELS[ticket[3][tag]]\n labels.append({'name': github_label(label)})\n\n\n # Dates\n updated_at = DateTime(str(ticket[2])).ISO8601()\n created_at = DateTime(str(ticket[1])).ISO8601()\n \n # Now prepare writing all data into the json files\n dct = {\n 'title': ticket[3]['summary'],\n 'body': body,\n 'state': state,\n 'user': userdata,\n 'milestone': int(milestone),\n 'labels': labels,\n 'updated_at': updated_at,\n 'created_at': created_at,\n }\n\n # Assigned user in trac and github account of that assignee\n# assigned_trac = ticket[3]['owner']\n# assigned = USERNAMES.get(assigned_trac)\n # Assigning really does not make sense without github account\n# if state == 'open' and assigned and assigned['login'] != 'fifengine':\n# print assigned\n# dct['assignee'] = assigned\n\n # Everything collected, write the json file\n json.dump(dct, outfile, indent=5)\n\ndef main():\n #######################################################################\n # Gather information about our tickets (mainly assembles comment list)\n #######################################################################\n # Stores a list of comments for each ticket by ID\n\n #prock - load pickle file here and generate a row\n f = open(TICKET_FILE, 'rb')\n tickets = pickle.load(f)\n f.close()\n\n comment_coll = collections.defaultdict(list)\n\n for ticketid in tickets:\n comments = tickets[ticketid][4]\n for comment in comments:\n if (\"comment\" in comment):\n dct = massage_comment(ticketid, comment[0],comment[1],comment[4])\n comment_coll[ticketid].append(dct)\n\n #######################################################################\n # Write the ticket comments to json files indicating their parent issue\n #######################################################################\n for ticket, data in comment_coll.iteritems():\n if (comment_coll[ticket][0]['body'] != '.'):\n count = 0\n for row in data:\n with open(COMMENTS_PATH % (ticket,count), 'w') as f:\n json.dump(row, f, indent=5)\n count = count + 1\n\n #######################################################################\n # Write the actual ticket data to separate json files (GitHub API v3)\n #######################################################################\n for ticketid in tickets:\n with open(ISSUES_PATH % str(ticketid), 'w') as f:\n write_issue(tickets[ticketid], f)\n\n #######################################################################\n # Finally, dump all milestones and the related data. This script is not\n # attempting to extract due dates or other data. 
We just manually mined\n # the milestone names once and stored that in MILESTONES for reference.\n #######################################################################\n for name, id in MILESTONES.iteritems():\n with open(MILESTONES_PATH % id, 'w') as f:\n dct = {\n 'number': id,\n 'creator': DEFAULT_USER,\n 'title': name,\n }\n json.dump(dct, f, indent=5)\n\nif __name__ == '__main__':\n main()\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":8247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"161113826","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit as fit\nfrom scipy.fft import fft #as fft\nfrom scipy.fft import ifft #as ifft\n\n# Program description\n'''\nUses Fourier method of Sziklas and Seigman (1975) [see also Leavey and Courtial, Young TIM User Guide (2017).] for beam propagation.\nCalculation of:\n1) the effect of transverse displacement of septum window in LO beam path, with a wedge of 0.75˚upon the position and direction of the beam at the OMC waist.\nUse trig to determine displacement of beam for a given displacement of the wedged substrate. Propagate from the wedge and apply an offset to the beam at source.\n'''\n\n# General comment: \n# Erroneous results can occur due to poor fit in k-space. If this is the case, use initial width greater than expected width in Gaussfit. \n# Beam profile requires initial width to be smaller than expected width. \n\n# Inputs\nwav = 1.064e-3 # wavelength in mm\nz0 = -1974.2 # input waist location\nb0 = 701.1 # input Rayleigh range\nw0 = np.sqrt(b0 * wav / np.pi) # input waist size - specified by Rayleigh range\nx0 = 0.0 # initial offset in mm\na0 = 0.0 # initial angle in mrad\nspace_1 = 2220 # LO-VIEW - OM1\nspace_2 = 1590 # OM1 - OM2\nspace_3 = 1220 # OM2 - OMC\nalpha = 0.0059 # Deflection imparted by wedged substrate with wedge angle of 0.75˚\nR1 = 1000 * 5.709 # OM1\nR2 = 1000 * 2.360 # OM2\ntheta_i = (np.pi/180) * 0.75 # wedge angle of 0.75˚, and incidence angle at wedged surface\nn = 1.45 # refractive index\ntheta_r = np.arcsin(n * np.sin(theta_i)) # angle of reflection\ntheta_d = theta_r - theta_i # angle of deflection\ng = np.tan(theta_i) * np.sin(theta_d) # ratio of displacement of beam to displacement of wedged substrate\n\nclass Beam: \n '''Represents Gaussian beam in x, propagating along z. \n Attributes: \n U: 1D array, Complex amplitude\n w: 1D array, width\n z: 1D array, z-co-ordinate\n x: 1D array, x-co-ordinate\n kneg/kpos: 1D arrays, negative/positive components of k-vactor along x-axis (spatial frequencies)\n kx: 1D array, component of k-vector along x-axis (spatial frequency)\n kz: 1D array, component of k-vector along z-axis\n kwav: constant, magnitude of wavevector\n zres: constant: resolution in z\n '''\n def __init__(self, *args): # Initialises amplitude array.\n x0 = 0\n a0 = 0\n W = 200 # Width of window in mm\n xres = 0.01 # 1D array representing kz-space. 
Derived from condition for monochromaticity.\n N = int(W / xres) # Number of x-bins (keeps resolution the same) \n self.x = np.linspace(-W/2, W/2, N) # 1D array representing x-space\n self.kneg = np.linspace(-(np.pi * N)/W, -(2 * np.pi)/W, int(N/2)) # 1D array representing kx-space from max -ve value to min -ve value\n self.kpos = np.linspace((2 * np.pi)/W, (np.pi * N)/W, int(N/2)) # 1D array representing kx-space from min +ve value to max +ve value\n self.kx = np.concatenate((self.kpos,self.kneg), axis = 0) # 1D array representing kx-space. Order of values matches that spatial frequency distribution derived from FFT of amlitude distribution\n self.kwav = 2 * np.pi / wav # Magnitude of wave-vector\n self.kz = np.sqrt(self.kwav**2 - self.kx**2) # 1D array representing kz-space. Derived from condition for monochromaticity. \n self.zres = 3000 # z-resolution in mm: 3000 for most; 50 for beam profile. \n if len(args) == 2: # Two arguments: Instantiates Beam object from waist size, w0, and distance to waist, z0. \n w0 = args[0]\n z0 = args[1]\n elif len(args) == 4: # Four arguments: Instantiates Beam object from waist size, w0, distance to waist, z0, input offset, x0, and input angle, a0. \n w0 = args[0]\n z0 = args[1]\n x0 = args[2]\n a0 = args[3]\n a0 = a0 / 1000 # Converts input angle from mrad to rad\n q0 = z0 - 1j * np.pi * w0**2 / wav # Input beam parameter\n U0 = (1/q0) * np.exp(1j * self.kwav * (self.x-x0)**2 / (2 * q0)) # Input array, offset by x0\n U0 = U0 * np.exp(-1j * self.kwav * self.x * np.sin(a0)) # Tilt beam by initial angle, a0\n self.U = U0 # Initialise amplitude array\n self.w = [Gaussfit(self.x,abs(self.U),1)[2]] # Initialise width list\n self.z = [0] # Initialise z-position list.\n\n def step(self, D): # Propagate input Beam object over distance, D; return Beam object. Fourier algorithm. \n Pin = fft(self.U) # FFT of amplitude distribution, U, gives spatial frequency distribution, Pin at initial position, z.\n Pout = Pin * np.exp(1j * self.kz * D) # Multiply Spatial frequency distribution by phase-factor corresponding to propagation through distance, D, to give new spatial frequency distribution, Pout. \n Uout = ifft(Pout) # IFFT of spatial frequency distribution gives amplitude distribution, Uout, at plane z = z + D\n self.U = Uout\n return self\n\n def propagate(self,distance,profile=False): # Propagate Beam object through distance with resolution, zres; return Beam object. \n Uprev = self\n if profile:\n w = Uprev.w # unpack width_list\n z = Uprev.z # unpack z-position_list\n res = 50 # Set res to 50 if generating plot of beam profile.\n else:\n res = self.zres # Otherwise use global variable, zres\n num = distance // res # number of steps: divide distance by resolution. \n rem = distance % res # remainder of division: final step size. If num = 0, i.e. zres > distance, single step taken, equal to distance. \n for i in range(num): # num steps\n Unext = Uprev.step(res)\n Uprev = Unext\n if profile:\n zprev = z[-1]\n z.append(zprev + res) # Build up z-array as go along. \n wnext = Gaussfit(Unext.x,abs(Unext.U),1)[2]\n w.append(wnext)\n Unext = Uprev.step(rem) # Final step of size rem. \n if profile:\n zprev = z[-1]\n z.append(zprev + rem) \n wnext = Gaussfit(Unext.x,abs(Unext.U),1)[2]\n w.append(wnext)\n Unext.w = w\n Unext.z = z\n return Unext\n\n def tilt(self,angle): # Applies linear phase-gradient, simulating effect of tilting mirror. Input angle in mrad. 
\n Uin = self.U\n a = angle / 1000\n Uout = Uin * np.exp(-1j * self.kwav * self.x * np.sin(a))\n self.U = Uout\n return self\n\n def lens(self,f,x0): # Lens element of focal length, f, centre offset from beam by x0. \n Uin = self.U\n Uout = Uin * np.exp(-1j * self.kwav * (self.x - x0)**2 / (2 * f))\n self.U = Uout\n return self\n\n def wedge(self,alpha,x0): # Wedge imparting deflection, alpha to beam, centre offset from beam by x0. \n Uin = self.U\n Uout = Uin * np.exp(-1j * self.kwav * (self.x - x0) * np.sin(alpha))\n self.U = Uout\n return self\n\n def mirror(self,R): # Mirror element of radius of curvature, R. \n Uin = self.U\n Uout = Uin * np.exp(-1j * self.kwav * self.x**2 / R)\n self.U = Uout\n return self\n\n def amp_plot(self,n=1): # Plot magnitude of Amplitude array in x-space. \n Uplot = abs(self.U)/max(abs(self.U))\n plt.figure(n)\n plt.plot(self.x,Uplot,'o', label = 'model data', markersize = 3)\n axes = plt.gca()\n axes.set_xlim([-2, 2])\n axes.set_ylim([0, 1.1])\n plt.grid(which = 'major', axis = 'both')\n plt.xlabel('x / mm')\n plt.ylabel('Normalised amplitude distribution')\n #plt.legend()\n plt.tight_layout()\n\n def amp_fit(self,plot=False,n=1): # Fit (and Plot) magnitude of Amplitude array in x-space. \n Uplot = abs(self.U)/max(abs(self.U))\n xparams = Gaussfit(self.x,Uplot)\n UFit = Gaussian(self.x, xparams[0], xparams[1], xparams[2])\n if plot == True:\n plt.figure(n)\n plt.plot(self.x,Uplot,'o', label = 'model data', markersize = 3)\n plt.plot(self.x,UFit,'-', label = count)\n axes = plt.gca()\n axes.set_xlim([-1, 1])\n axes.set_ylim([0, 1.1])\n plt.grid(which = 'major', axis = 'both')\n plt.xlabel('x / mm')\n plt.ylabel('Normalised amplitude distribution')\n #plt.legend()\n plt.tight_layout()\n return xparams\n\n def freq_fit(self,plot=False,n=2): # Fit ( and plot) magnitude of Spatial frequency array in k-space. \n P = fft(self.U)\n kplot = np.concatenate((self.kneg,self.kpos), axis = 0) \n Pneg = P[int(len(P)/2):]\n Ppos = P[:int(len(P)/2)]\n Pswap = np.concatenate((Pneg,Ppos), axis = 0)\n Pabs = abs(Pswap)\n Pplot = Pabs/max(Pabs)\n kparams = Gaussfit(kplot,Pplot)\n PFit = Gaussian(kplot, kparams[0], kparams[1], kparams[2])\n if plot == True:\n plt.figure(n)\n plt.plot(kplot,Pplot,'o', label = 'model data', markersize = 3)\n plt.plot(kplot,PFit,'-', label = 'fit')\n axes = plt.gca()\n axes.set_xlim([-25, 25])\n axes.set_ylim([0, 1.1])\n plt.grid(which = 'major', axis = 'both')\n plt.xlabel('k_x / mm^-1')\n plt.ylabel('Normalised spatial frequency distribution')\n #plt.legend()\n plt.tight_layout()\n return kparams\n\ndef Gaussian(space, offset, height, width): # Defines Gaussian function for fitting; space is a 1D array. \n return height * np.exp(-((space-offset)/width)**2)\n\ndef Gaussfit(space,Array,init_width=30):# Fit Gaussian to magnitude of Amplitude array. Return fit parameters. \n init_params = [0.1,1,init_width] # Use initial width parameter smaller than expected width for beam profile. Otherwise, use initial width parameter larger than expected width - avoids bad fit in k-space. \n est_params, est_err = fit(Gaussian, space, Array, p0 = init_params)\n return est_params # [offset, amplitude, width]\n\ndef beam_profile(): \n # Runs series of methods corresponding to propagation of beam through various elements in system. \n # Calculate beam waist along the way ('True' condition passed to propagate). 
\n U = Beam(w0,z0)\n U = U.propagate(space_1,True)\n U = U.mirror(R1)\n U = U.propagate(space_2,True)\n U = U.mirror(R2)\n U = U.propagate(space_3,True)\n U = U.propagate(3500,True)\n width_plot(U.z,U.w)\n\ndef width_plot(distance_list,width_list,n=3): # Plots beam profile for a given waist array. \n zplot = 0.001 * np.asarray(distance_list)\n wplot = np.asarray(width_list)\n plt.figure(figsize=(9, 7), dpi=120)\n plt.plot(zplot,wplot, linewidth = 3)\n axes = plt.gca()\n axes.set_xlim([0, 10])\n axes.set_ylim(0.0,2.6)\n axes.set_xticks(np.linspace(0,10,11))\n axes.set_yticks(np.linspace(0.0,2.6,14))\n plt.grid(which = 'both', axis = 'both', linestyle = '--')\n axes.set_xlabel('Distance from Viewport / m')\n axes.set_ylabel('Beam size / mm')\n #axes.vlines(x = 0.001 * space_0, ymin = 0, ymax = 120,\\\n #linewidth = 2,color = 0.75*np.array([1,0.25,0.25]),linestyles = 'dashed',label = 'Viewport')\n #axes.vlines(x = 0.001 * (space_0 + space_1), ymin = 0, ymax = 120,\\\n #linewidth = 2,color = 0.75*np.array([0.25,1,0.25]),linestyles = 'dashed',label = 'OM1') \n #axes.vlines(x = 0.001 * (space_0 + space_1 + space_2), ymin = 0, ymax = 120,\\\n #linewidth = 2,color = 0.75*np.array([1,1,0.25]),linestyles = 'dashed',label = 'OM2') \n axes.vlines(x = 0.001 * (space_1 + space_2 + space_3), ymin = 0, ymax = 120,\\\n linewidth = 2,color = 0.75*np.array([0.25,0.25,0.25]),linestyles = 'dashed',label = 'OMC')\n plt.legend()\n #plt.title('')\n plt.tight_layout()\n\ndef L1_displ(x0):\n d = x0 * g # displacement of beam given displacement of wedged substrate\n # Runs series of methods corresponding to propagation of beam through various elements in system. Fixed spacings, defined in global variables. \n # Wedged substrate displaced by x0, beam displaced by d. Returns, x and k offsets at OMC waist. \n U = Beam(w0,z0,d,0)\n U = U.propagate(space_1,True)\n U = U.mirror(R1)\n U = U.propagate(space_2,True)\n U = U.mirror(R2)\n U = U.propagate(space_3,True)\n xparams = U.amp_fit()\n Dx = xparams[0] #/ abs(xparams[2]) # normalise offset to width in x-space\n kparams = U.freq_fit()\n Dk = kparams[0] #/ abs(kparams[2]) # normalise offset to width in k-space\n Beta = 1000 * np.arctan(Dk / np.sqrt(U.kwav**2 - Dk**2))\n return (Dx, Beta)\n\ndef L1_test(): # Displace L1 by equal positive and negative amounts. Return x and k offsets at OMC waist.\n x1 = []\n b1 = []\n displ = np.linspace(-1e3, 1e3, 2) # displace wedged substrate by +/- 1 m\n for i in range(len(displ)):\n Dx, Beta = L1_displ(displ[i])\n x1.append(Dx)\n b1.append(Beta)\n return (x1,b1)\n\ndef L1_dep(): # Calculates orthogonality between mirrors: angle between unit vectors corresponding to effect of mirror tilt in x-k space. 
\n    Mtest = L1_test()\n    xb_plot(Mtest[0],Mtest[1])\n\ndef xb_plot(x1,b1,n=4): # Plots displacement in x-k space caused when lens, L1 displaced.\n    plt.figure(n, figsize=(6, 5.5), dpi=120)\n    plt.plot(x1, b1) \n    plt.title('Wedged viewport in LO beam displaced by +/- 1 m') \n    axes = plt.gca()\n    plt.xlabel('Change in position at OMC waist / mm')\n    plt.ylabel('Change in direction at OMC waist / mrad')\n    textstr = 'Wedge angle: 0.75˚\\nSensitivity: %.3f mm/m\\n                     %.3f mrad/m' % ((x1[1] - x1[0])/2,(b1[1] - b1[0])/2)\n    props = dict(boxstyle='square', facecolor='white', alpha=0.5)\n    axes.text(0.02, 0.98, textstr, transform=axes.transAxes, fontsize=10, verticalalignment='top', bbox=props)\n    plt.tight_layout()\n\ndef main():\n    beam_profile()\n    #print(L1_displ(10))\n    #L1_test()\n    L1_dep()\n    plt.show()\n    \nif __name__ == "__main__":\n    main()","sub_path":"LO-VIEW-WEDGE.py","file_name":"LO-VIEW-WEDGE.py","file_ext":"py","file_size_in_byte":15667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"347839836","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport sys\n\nsys.path.append('/home/dima/tavk_bot/')\n\nimport os\nimport traceback\nimport requests\nimport time\nimport telebot\n\nfrom controllers.personController import PersonController\nfrom controllers.timeController import add_row_time\nfrom config import config\nimport models.database_postgers as db\nfrom models import commands\n\n\nbot = telebot.TeleBot(config.token)\n\ndef main():\n    while True:\n        base = db.Database(config.connect)\n        id = base.command_select(commands.select_ids)\n        base.close()\n        id = [item[0] for item in id]\n        id = id[:930]\n        info_about_persons = PersonController().get_users(id) # fetch information about the users\n        for info_about_person in info_about_persons:\n\n            id_person = str(info_about_person['id'])\n            online_person = info_about_person['online']\n\n            base = db.Database(config.connect)\n            online_chats = base.command_select(commands.select_online, person_id=id_person)\n            base.close()\n\n            if online_chats and online_chats[0][0] != online_person and 'deactivated' not in info_about_person:\n                for chat_id in online_chats:\n                    chat_id = chat_id[1]\n\n                    # analyse the user information\n                    person = PersonController(info_about_person)\n\n                    # update online/offline status\n                    base = db.Database(config.connect)\n                    row_id = base.command_select(commands.select_row_id, chat_id=chat_id, person_id=id_person)[0][0]\n                    base.command(commands.update_online, u_id=row_id, online=bool(online_person))\n                    one_person_db = base.command_select(commands.select_one_person, u_id=row_id)\n                    base.close()\n\n                    text = person.result(chat_id, name=one_person_db[0][4])\n\n\n                    notif_online = one_person_db[0][5]\n                    notif_offline = one_person_db[0][6]\n                    # send a message about the user's online activity\n                    if notif_online and online_person or notif_offline and not online_person:\n                        try:\n                            bot.send_message(int(chat_id), text, parse_mode='Markdown',\n                                             disable_web_page_preview=True)\n                        except telebot.apihelper.ApiException:\n                            base = db.Database(config.connect)\n                            base.command(commands.delete_time, u_id=row_id)\n                            base.command(commands.delete_person, u_id=row_id)\n                            base.close()\n\n                    add_row_time(person, chat_id)\n\n        time.sleep(15)\n\nif __name__ == '__main__':\n    try:\n        main()\n    except (requests.exceptions.ReadTimeout, KeyError):\n        with open('push_except.txt', 'a') as file:\n            print(traceback.format_exc(), file=file)\n        bot.send_message(chat_id=220307296, text='Request error #1')\n        time.sleep(15)\n        os.system('./notification1.py 
&')\n    except:\n        with open('push_new_except.txt', 'a') as file:\n            print(traceback.format_exc(), file=file)\n        bot.send_message(chat_id=220307296, text='Unexpected error #1')\n        time.sleep(15)\n        os.system('./notification1.py &')\n\n","sub_path":"views/notification1.py","file_name":"notification1.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"26623315","text":"import socketserver\nimport optparse\nfrom conf import settings\nfrom core import server\nclass main():\n    def __init__(self):\n        self.op = optparse.OptionParser()\n        #self.op.add_option(\"-s\",\"--s\",dest=\"server\")\n        #self.op.add_option(\"-P\",\"--port\",dest='port')\n        options,args = self.op.parse_args()\n        print(args)\n        self.verif_args(options,args)\n\n    def verif_args(self,options,args):\n        cmd=args[0]\n        if hasattr(self,cmd):\n            fun=getattr(self,cmd)\n            fun()\n    def start(self):\n        print(\" ftp is working...\")\n        print(settings.IP)\n        s = socketserver.ThreadingTCPServer((settings.IP,settings.PORT), server.myserver)\n        s.serve_forever()\n\n\n","sub_path":"ftpserver/core/ftp_server_main.py","file_name":"ftp_server_main.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"614241687","text":"import numpy as np\nimport theano\nimport theano.tensor as T\n\nfrom lasagne import *\nfrom crayimage import nn\nfrom crayimage.nn.nn import Expression\n\n__all__ = [\n  'ToyGAN'\n]\n\nclass ToyGAN(object):\n  def _constants(self):\n    self.c_reg_generator = 0.0\n    self.c_reg_objective = 1.0e-3\n\n  def __init__(self, true_net, generator, discriminator, loss, solver, **kwargs):\n    \"\"\"\n    :param true_net: generator for ground truth;\n    :param generator: trainable expression for generator;\n    :param objective: discriminator + it's loss\n    :param kwargs: constants\n    \"\"\"\n    self._constants()\n\n    for k, v in kwargs.items():\n      setattr(self, k, v)\n\n    self.true_net = true_net\n    self.generator = generator\n\n    self.X_real = true_net()\n    self.X_pseudo = generator()\n\n    self.output_real = discriminator(self.X_real)\n    self.output_pseudo = discriminator(self.X_pseudo)\n\n    self.pure_loss_generator, self.pure_loss_discriminator = loss(self.output_real, self.output_pseudo)\n\n    if self.c_reg_objective is not None:\n      self.loss_discriminator = self.pure_loss_discriminator + self.c_reg_objective * discriminator.reg_l2()\n    else:\n      self.loss_discriminator = self.pure_loss_discriminator\n\n    if self.c_reg_generator is not None:\n      self.loss_generator = self.pure_loss_generator + self.c_reg_generator * generator.reg_l2()\n    else:\n      self.loss_generator = self.pure_loss_generator\n\n    self.get_real = theano.function([], self.X_real)\n    self.get_realistic = theano.function([], self.X_pseudo)\n\n    self.train = solver(self)","sub_path":"crayimage/cosmicGAN/gannery/toyGAN.py","file_name":"toyGAN.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"182662945","text":"import numpy as np\r\n\r\nclass Bfee:\r\n\r\n    field_len = 0  # uint16: length of {code, field}\r\n    code = 0  # uint8\r\n\r\n    # fields\r\n    timestamp_low = 0  # uint32: low 32 bits of the NIC's 1MHz clock\r\n    bfee_count = 0  # uint16: number of beamforming measurements the driver has recorded and sent to user space\r\n    Nrx = 0  # uint8: number of antennas used at the receiver\r\n    Ntx = 0  # uint8: number of antennas used at the transmitter\r\n    rssi_a = 0  # uint8: RSSI value measured by the receiving NIC on each antenna\r\n    rssi_b = 0  # uint8\r\n    rssi_c = 0  # uint8\r\n    noise = 0  # int8: in dB\r\n    agc = 0  # uint8: Automatic Gain 
Control\r\n antenna_sel = 0 # uint8\r\n len = 0 # uint16\r\n fake_rate_n_flags = 0 # uint16\r\n\r\n csi = None # CSI raw data, 30 × Ntx × Nrx complex matrix\r\n perm = list(range(3)) # 排列方式:展示NIC如何将3个接收天线的信号排列到3个RF链路上\r\n\r\n def to_dict(self):\r\n bfee_dict = {}\r\n bfee_dict['timestamp_low'] = self.timestamp_low\r\n bfee_dict['bfee_count'] = self.bfee_count\r\n bfee_dict['Nrx'] = self.Nrx\r\n bfee_dict['Ntx'] = self.Ntx\r\n bfee_dict['rssi_a'] = self.rssi_a\r\n bfee_dict['rssi_b'] = self.rssi_b\r\n bfee_dict['rssi_c'] = self.rssi_c\r\n bfee_dict['noise'] = self.noise\r\n bfee_dict['agc'] = self.agc\r\n bfee_dict['antenna_sel'] = self.antenna_sel\r\n bfee_dict['perm'] = self.perm\r\n bfee_dict['len'] = self.len\r\n bfee_dict['fake_rate_n_flags'] = self.fake_rate_n_flags\r\n bfee_dict['csi'] = self.csi\r\n return bfee_dict\r\n\r\n def to_json(self):\r\n import json\r\n\r\n def complex2str_recursively(target):\r\n if type(target[0]) != list:\r\n for i in range(len(target)):\r\n target[i] = str(target[i])[1:-1]\r\n else:\r\n for i in range(len(target)):\r\n complex2str_recursively(target[i])\r\n return target\r\n\r\n bfee_dict = {}\r\n bfee_dict['timestamp_low'] = self.timestamp_low\r\n bfee_dict['rssi_a'] = self.rssi_a\r\n bfee_dict['rssi_b'] = self.rssi_b\r\n bfee_dict['rssi_c'] = self.rssi_c\r\n bfee_dict['agc'] = self.agc\r\n bfee_dict['csi'] = complex2str_recursively(self.csi.tolist())\r\n return json.dumps(bfee_dict)\r\n\r\n def to_simple_bytes(self, encoding_order=\"little\"):\r\n bytes_ = self.timestamp_low.to_bytes(4, encoding_order, signed=False)\r\n bytes_ += self.rssi_a.to_bytes(1, encoding_order, signed=False)\r\n bytes_ += self.rssi_b.to_bytes(1, encoding_order, signed=False)\r\n bytes_ += self.rssi_c.to_bytes(1, encoding_order, signed=False)\r\n bytes_ += self.agc.to_bytes(1, encoding_order, signed=False)\r\n\r\n shape = self.csi.shape\r\n for i in range(shape[0]):\r\n for j in range(shape[1]):\r\n for k in range(shape[2]):\r\n bytes_ += int(self.csi[i, j, k].real).to_bytes(1, \r\n encoding_order, signed=True)\r\n bytes_ += int(self.csi[i, j, k].imag).to_bytes(1,\r\n encoding_order, signed=True)\r\n return bytes_\r\n\r\n\r\n @staticmethod\r\n def records_from_offline_file(filename, timeCount=False):\r\n if timeCount:\r\n import time\r\n time_sta = time.time()\r\n\r\n with open(filename, \"rb\") as f: # 一次将所有的文件内容读取完 => 离线的\r\n array = f.read()\r\n\r\n res = list()\r\n file_len = len(array)\r\n counter = 0\r\n calc_len = 0\r\n\r\n # Initialize variables\r\n cur = 0 # Current offset into file\r\n broken_perm = 0 # Flag marking whether we've encountered a broken CSI yet\r\n triangle = [0, 1, 3] # What perm should sum to for 1,2,3 antennas (0, 1, 2)\r\n\r\n while cur < (file_len - 3):\r\n bfee = Bfee()\r\n\r\n # Read size and code\r\n bfee.field_len = int.from_bytes(\r\n array[cur:cur+2], byteorder='big', signed=False)\r\n bfee.code = array[cur+2]\r\n cur += 3\r\n\r\n # there is CSI in field if code == 187,If unhandled code skip (seek over) the record and continue\r\n if bfee.code != 187:\r\n cur = cur + bfee.field_len - 1 # skip all other info\r\n continue\r\n\r\n # get beamforming or phy data\r\n bfee.timestamp_low = int.from_bytes(\r\n array[cur:cur+4], byteorder='little', signed=False)\r\n bfee.bfee_count = int.from_bytes(\r\n array[cur+4:cur+6], byteorder='little', signed=False)\r\n bfee.Nrx = array[cur+8]\r\n bfee.Ntx = array[cur+9]\r\n bfee.rssi_a = array[cur+10]\r\n bfee.rssi_b = array[cur+11]\r\n bfee.rssi_c = array[cur+12]\r\n bfee.noise = array[cur+13] - 256\r\n 
bfee.agc = array[cur+14]\r\n bfee.antenna_sel = array[cur+15]\r\n bfee.len = int.from_bytes(\r\n array[cur+16:cur+18], byteorder='little', signed=False)\r\n bfee.fake_rate_n_flags = int.from_bytes(\r\n array[cur+18:cur+20], byteorder='little', signed=False)\r\n calc_len = (\r\n 30 * (bfee.Nrx * bfee.Ntx * 8 * 2 + 3) + 6) / 8\r\n bfee.csi = np.zeros(\r\n shape=(30, bfee.Nrx, bfee.Ntx), dtype=np.dtype(np.complex))\r\n bfee.perm[0] = (bfee.antenna_sel) & 0x3\r\n bfee.perm[1] = (bfee.antenna_sel >> 2) & 0x3\r\n bfee.perm[2] = (bfee.antenna_sel >> 4) & 0x3\r\n cur += 20\r\n\r\n # get payload\r\n payload = array[cur:cur+bfee.len]\r\n cur += bfee.len\r\n\r\n # Check that length matches what it should\r\n if (bfee.len != calc_len):\r\n print(\"MIMOToolbox:read_bfee_new:size\",\r\n \"Wrong beamforming matrix size.\")\r\n\r\n # Compute CSI from all this crap\r\n try:\r\n index = 0\r\n for i in range(30):\r\n index += 3\r\n remainder = index % 8\r\n for j in range(bfee.Nrx):\r\n for k in range(bfee.Ntx):\r\n real_bin = bytes([(payload[int(index / 8)] >> remainder) | (\r\n payload[int(index/8+1)] << (8-remainder)) & 0b11111111])\r\n real = int.from_bytes(\r\n real_bin, byteorder='little', signed=True)\r\n imag_bin = bytes([(payload[int(index / 8+1)] >> remainder) | (\r\n payload[int(index/8+2)] << (8-remainder)) & 0b11111111])\r\n imag = int.from_bytes(\r\n imag_bin, byteorder='little', signed=True)\r\n tmp = np.complex(float(real), float(imag))\r\n bfee.csi[i, j, k] = tmp\r\n index += 16\r\n except:\r\n print(\"Illegal data occurred at the {}th bfee\".format(counter))\r\n \r\n\r\n # matrix does not contain default values\r\n if sum(bfee.perm) != triangle[bfee.Nrx-1]:\r\n print('WARN ONCE: Found CSI (', filename, ') with Nrx=',\r\n bfee.Nrx, ' and invalid perm=[', bfee.perm, ']\\n')\r\n else:\r\n temp_csi = np.zeros(\r\n bfee.csi.shape, dtype=np.dtype(np.complex))\r\n for r in range(bfee.Nrx):\r\n temp_csi[:, bfee.perm[r], :] = bfee.csi[:, r, :]\r\n bfee.csi = temp_csi\r\n\r\n res.append(bfee)\r\n counter += 1\r\n\r\n if timeCount:\r\n time_end = time.time()\r\n print(\"time costed during reading data:\", time_end - time_sta, \"s\")\r\n return res\r\n","sub_path":"Bfee.py","file_name":"Bfee.py","file_ext":"py","file_size_in_byte":8143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"492444169","text":"import os\r\nfrom merge import *\r\ndataset_path = './model/Hikvision'\r\n\r\nfileList = os.listdir(dataset_path)\r\n\r\n\r\ndef lack_hyphen(model):\r\n for alphabet in model:\r\n if alphabet == '-':\r\n return False\r\n return True\r\n\r\n\r\nprint(\"Cleaning... 
Hikvision\")\r\nfor file in fileList:\r\n [model, forma] = file.split('.')\r\n if (lack_hyphen(model) or model[0] == 'I') and model != 'others':\r\n os.remove(dataset_path+'/'+file)\r\n","sub_path":"Rule_Based_Nan/Filter/Hikvision.py","file_name":"Hikvision.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"143669424","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 22 12:09:42 2017\n\n@author: hamiri\n\"\"\"\n\n\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt\nimport numpy as np\n#from matplotlib.backends.backend_pdf import PdfPages\n\nMvectorp2 = np.array([0,0,1])\nB = 2.5e-5\nT = 0.006\nBearth = np.array([0,0,B])\n\ngamma = 42577478.9229\n\n\nN = 100\nomega = 1.1*(gamma*B)\nOC = '1.1 *'\nq = '(f)'\ndef g(M,t):\n Mx= M[0]\n My= M[1]\n Mz= M[2] \n if t < 10*T:\n return np.cross((gamma*np.array([Mx, My, Mz])), Bearth)\n if 10*T <= t < 60*T:\n return np.cross((gamma*np.array([Mx, My, Mz])), np.array([(B/N) * np.sin(omega*t), 0, B]))\n if 60*T <= t <= 70*T:\n return np.cross((gamma*np.array([Mx, My, Mz])), Bearth)\n else:\n return np.array([0, 0, 0])\n\n\n\n\ninit = 0,0,1\nt = np.linspace(0,70*T, 1e4)\nsol = odeint(g, init, t)\n\n\n\nplt1 = plt.plot(t, sol[:,1])\nplt.xlabel(\"time (seconds)\")\nplt.ylabel(\"y-component of M (vector spin)\")\nplt.title(\"Precession of spin vector, N = %s, Omega = %s 2$\\pi$$\\omega$$\\gamma$\"%(N, OC))\n#plt.xlim(0.01,0.20)\nplt.savefig(\"%s y-t plane N = %s w = %s 2piwy.pdf\"%(q,N,OC))\n\nplt.show()\n\n#Plot on the x-y plane\n\nplt2 = plt.plot(t, sol[:,0])\nplt.xlabel(\"time (seconds)\")\nplt.ylabel(\"x-component of M (vector spin)\")\nplt.title(\"Precession of spin vector, N = %s, Omega = %s 2$\\pi$$\\omega$$\\gamma$\"%(N, OC))\nplt.savefig(\"%s x-t plane N = %s w = %s 2piwy.pdf\"%(q,N,OC))\nplt.show()\n#This is the x-x plane\n\nplt3 = plt.plot(t, sol[:,2])\nplt.xlabel(\"time (seconds)\")\nplt.ylabel(\"z-component of M (vector spin)\")\nplt.title(\"Precession of spin vector, N = %s, Omega = %s 2$\\pi$$\\omega$$\\gamma$\"%(N, OC))\nplt.savefig(\"%s z-t plane N = %s w = %s 2piwy.pdf\"%(q,N,OC))\nplt.show()\n\nx = sol[:,0]\ny = sol[:,1]\nz = sol[:,2]\n\ntheta = np.arccos(z/np.sqrt(x**2 + y**2 + z**2))\nphi = np.arctan2(y, x)\n\nplt4 = plt.plot(t, theta)\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"$\\Theta$\")\nplt.title(\"Time vs Theta, N = %s Omega = %s 2$\\pi$$\\omega$$\\gamma$\"%(N,OC))\nplt.savefig(\"%s theta-time plane N = %s w = %s 2piwy.pdf\"%(q,N,OC))\nplt.show()\n\nplt5 = plt.plot(t, phi)\nplt.xlabel(\"Time(s)\")\nplt.ylabel(\"$\\phi$\")\nplt.title(\"Time vs Phi, N = %s, Omega = %s 2$\\pi$$\\omega$$\\gamma$\"%(N,OC))\nplt.savefig(\"%s phi-time plane N = %s w = %s 2piwy.pdf\"%(q,N,OC))\nplt.show()\n","sub_path":"sim2.py","file_name":"sim2.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"347913898","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Tests for `pyairfoil` package.\"\"\"\n\nimport pytest\n\nfrom click.testing import CliRunner\n\nfrom pyairfoil.pyairfoil import *\nfrom pyairfoil import cli\n\n\ndef test_airfoil_registration():\n \"\"\" Test airfoil registration \"\"\"\n new_airfoil = AirfoilPritchard()\n assert AirfoilPritchard.__name__ in AIRFOIL_TYPES.keys()\n\n\ndef test_command_line_interface():\n \"\"\"Test the CLI.\"\"\"\n runner = CliRunner()\n result = 
runner.invoke(cli.main)\n assert result.exit_code == 0\n assert 'pyairfoil.cli.main' in result.output\n help_result = runner.invoke(cli.main, ['--help'])\n assert help_result.exit_code == 0\n assert '--help Show this message and exit.' in help_result.output\n","sub_path":"tests/test_pyairfoil.py","file_name":"test_pyairfoil.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"461298138","text":"from __future__ import absolute_import, unicode_literals, division, print_function\nimport numpy as np\nfrom astropy.modeling.core import Model\nfrom astropy import units as u\nfrom . import model_base\n\nfrom .extension import BaseExtension\nfrom jwst.transforms.jwextension import JWSTExtension\nfrom gwcs.extension import GWCSExtension\n\n\njwst_extensions = [GWCSExtension(), JWSTExtension(), BaseExtension()]\n\n\n__all__ = ['DistortionModel', 'DistortionMRSModel', 'SpecwcsModel', 'RegionsModel',\n 'WavelengthrangeModel', 'CameraModel', 'CollimatorModel', 'OTEModel',\n 'FOREModel', \"FPAModel\", 'IFUPostModel', 'IFUFOREModel', 'IFUSlicerModel',\n 'MSAModel', 'FilteroffsetModel', 'DisperserModel']\n\n\nclass _SimpleModel(model_base.DataModel):\n \"\"\"\n A model for a reference file of type \"distortion\".\n \"\"\"\n schema_url = None\n reftype = None\n\n def __init__(self, init=None, model=None, input_units=None, output_units=None, **kwargs):\n\n super(_SimpleModel, self).__init__(init=init, **kwargs)\n if model is not None:\n self.model = model\n if input_units is not None:\n self.meta.input_units = input_units\n if output_units is not None:\n self.meta.output_units = output_units\n if init is None:\n try:\n self.populate_meta()\n except NotImplementedError:\n pass\n\n def on_save(self, path=None):\n self.meta.reftype = self.reftype\n self.meta.telescope = self.meta.telescope\n\n def populate_meta(self):\n \"\"\"\n Subclasses can overwrite this to populate specific meta keywords.\n \"\"\"\n raise NotImplementedError\n\n def to_fits(self):\n raise NotImplementedError(\"FITS format is not supported for this file.\")\n\n def validate(self):\n assert isinstance(self.model, Model)\n assert isinstance(self.meta.input_units, (str, u.NamedUnit))\n assert isinstance(self.meta.output_units, (str, u.NamedUnit))\n assert self.meta.instrument.name in [\"NIRCAM\", \"NIRSPEC\", \"MIRI\", \"TFI\", \"FGS\", \"NIRISS\"]\n assert self.meta.description is not None\n assert self.meta.telescope is not None\n assert self.meta.reftype is not None\n assert self.meta.author is not None\n assert self.meta.pedigree is not None\n\n\nclass DistortionModel(_SimpleModel):\n \"\"\"\n A model for a reference file of type \"distortion\".\n \"\"\"\n schema_url = \"distortion.schema.yaml\"\n reftype = \"distortion\"\n\n def validate(self):\n super(DistortionModel, self).validate()\n if self.meta.instrument.name == 'NIRCAM':\n assert self.meta.instrument.module is not None\n assert self.meta.instrument.channel is not None\n assert self.meta.instrument.p_pupil is not None\n\n\nclass DistortionMRSModel(model_base.DataModel):\n \"\"\"\n A model for a reference file of type \"distortion\" for the MIRI MRS.\n \"\"\"\n schema_url = \"distortion_mrs.schema.yaml\"\n reftype = \"distortion\"\n\n def __init__(self, init=None, x_model=None, y_model=None, alpha_model=None, beta_model=None,\n bzero=None, bdel=None, input_units=None, output_units=None, **kwargs):\n\n super(DistortionMRSModel, self).__init__(init=init, **kwargs)\n\n if x_model is not 
None:\n self.x_model = x_model\n if y_model is not None:\n self.y_model = y_model\n if alpha_model is not None:\n self.alpha_model = alpha_model\n if beta_model is not None:\n self.beta_model = beta_model\n if bzero is not None:\n self.bzero = bzero\n if bdel is not None:\n self.bdel = bdel\n if input_units is not None:\n self.meta.input_units = input_units\n if output_units is not None:\n self.meta.output_units = output_units\n if init is None:\n try:\n self.populate_meta()\n except NotImplementedError:\n pass\n\n def on_save(self, path=None):\n self.meta.reftype = self.reftype\n self.meta.telescope = self.meta.telescope\n\n def populate_meta(self):\n self.meta.instrument.name = \"MIRI\"\n self.meta.exposure.type = \"MIR_MRS\"\n self.meta.input_units = u.pix\n self.meta.output_units = u.arcsec\n\n def to_fits(self):\n raise NotImplementedError(\"FITS format is not supported for this file.\")\n\n def validate(self):\n assert isinstance(self.meta.input_units, (str, u.NamedUnit))\n assert isinstance(self.meta.output_units, (str, u.NamedUnit))\n assert self.meta.instrument.name == \"MIRI\"\n assert self.meta.exposure.type == \"MIR_MRS\"\n assert self.meta.instrument.channel in (\"12\", \"34\", \"1\", \"2\", \"3\", \"4\")\n assert self.meta.instrument.band in (\"SHORT\", \"LONG\", \"MEDIUM\")\n assert self.meta.instrument.detector in (\"MIRIFUSHORT\", \"MIRIFULONG\")\n assert all([isinstance(m, Model) for m in self.x_model])\n assert all([isinstance(m, Model) for m in self.y_model])\n assert all([isinstance(m, Model) for m in self.alpha_model])\n assert all([isinstance(m, Model) for m in self.beta_model])\n assert len(self.abv2v3_model.model) == 2\n assert len(self.abv2v3_model.channel_band) == 2\n assert self.meta.description is not None\n assert self.meta.telescope is not None\n assert self.meta.reftype is not None\n assert self.meta.author is not None\n assert self.meta.pedigree is not None\n\n\nclass SpecwcsModel(_SimpleModel):\n \"\"\"\n A model for a reference file of type \"specwcs\".\n \"\"\"\n schema_url = \"specwcs.schema.yaml\"\n reftype = \"specwcs\"\n\n def validate(self):\n assert isinstance(self.meta.input_units, (str, u.NamedUnit))\n assert isinstance(self.meta.output_units, (str, u.NamedUnit))\n assert self.meta.instrument.name in [\"NIRCAM\", \"NIRSPEC\", \"MIRI\", \"TFI\", \"FGS\", \"NIRISS\"]\n assert self.meta.description is not None\n assert self.meta.telescope is not None\n assert self.meta.reftype is not None\n assert self.meta.author is not None\n assert self.meta.pedigree is not None\n\n\nclass RegionsModel(model_base.DataModel):\n \"\"\"\n A model for a reference file of type \"regions\".\n \"\"\"\n schema_url = \"regions.schema.yaml\"\n reftype = \"regions\"\n\n def __init__(self, init=None, regions=None, **kwargs):\n super(RegionsModel, self).__init__(init=init, **kwargs)\n if regions is not None:\n self.regions = regions\n\n def on_save(self, path=None):\n self.meta.reftype = self.reftype\n self.meta.telescope = self.meta.telescope\n\n def to_fits(self):\n raise NotImplementedError(\"FITS format is not supported for this file.\")\n\n def validate(self):\n assert isinstance(self.regions.copy(), np.ndarray)\n assert self.meta.instrument.name == \"MIRI\"\n assert self.meta.exposure.type == \"MIR_MRS\"\n assert self.meta.instrument.channel in (\"12\", \"34\", \"1\", \"2\", \"3\", \"4\")\n assert self.meta.instrument.band in (\"SHORT\", \"LONG\")\n assert self.meta.instrument.detector in (\"MIRIFUSHORT\", \"MIRIFULONG\")\n assert self.meta.description is not None\n 
assert self.meta.telescope is not None\n        assert self.meta.reftype is not None\n        assert self.meta.author is not None\n        assert self.meta.pedigree is not None\n\n\nclass WavelengthrangeModel(model_base.DataModel):\n    \"\"\"\n    A model for a reference file of type \"wavelengthrange\".\n    \"\"\"\n    schema_url = \"wavelengthrange.schema.yaml\"\n    reftype = \"wavelengthrange\"\n\n    def __init__(self, init=None, channels=None, wrange=None, order=None, wunits=None, **kwargs):\n\n        super(WavelengthrangeModel, self).__init__(init=init, **kwargs)\n        if channels is not None:\n            self.channels = channels\n        if wrange is not None:\n            self.wavelengthrange = wrange\n        if order is not None:\n            self.order = order\n        if wunits is not None:\n            self.meta.wavelength_units = wunits\n\n    def on_save(self, path=None):\n        self.meta.reftype = self.reftype\n        self.meta.telescope = self.meta.telescope\n\n    def to_fits(self):\n        raise NotImplementedError(\"FITS format is not supported for this file.\")\n\n    def validate(self):\n        assert self.meta.instrument.name in (\"MIRI\", \"NIRSPEC\")\n        assert self.meta.exposure.type in (\"MIR_MRS\", \"NRS_AUTOFLAT\", \"NRS_AUTOWAVE\", \"NRS_BOTA\",\n                                           \"NRS_BRIGHTOBJ\", \"NRS_CONFIRM\", \"NRS_DARK\", \"NRS_FIXEDSLIT\",\n                                           \"NRS_FOCUS\", \"NRS_IFU\", \"NRS_IMAGE\", \"NRS_LAMP\", \"NRS_MIMF\",\n                                           \"NRS_MSASPEC\", \"NRS_TACONFIRM\", \"NRS_TACQ\", \"NRS_TASLIT\", \"N/A\",\n                                           \"ANY\")\n        assert self.meta.description is not None\n        assert self.meta.telescope is not None\n        assert self.meta.reftype is not None\n        assert self.meta.author is not None\n        assert self.meta.pedigree is not None\n\n\nclass FPAModel(model_base.DataModel):\n    \"\"\"\n    A model for a NIRSPEC reference file of type \"fpa\".\n    \"\"\"\n    schema_url = \"fpa.schema.yaml\"\n    reftype = \"fpa\"\n\n    def __init__(self, init=None, nrs1_model=None, nrs2_model=None, **kwargs):\n\n        super(FPAModel, self).__init__(init=init, **kwargs)\n        if nrs1_model is not None:\n            self.nrs1_model = nrs1_model\n        if nrs2_model is not None:\n            self.nrs2_model = nrs2_model\n        if init is None:\n            self.populate_meta()\n\n    def on_save(self, path=None):\n        self.meta.reftype = self.reftype\n        self.meta.telescope = self.meta.telescope\n\n    def populate_meta(self):\n        self.meta.instrument.name = \"NIRSPEC\"\n        self.meta.instrument.p_detector = \"NRS1|NRS2|\"\n        self.meta.exposure.p_exptype = \"NRS_TACQ|NRS_TASLIT|NRS_TACONFIRM|\\\n        NRS_CONFIRM|NRS_FIXEDSLIT|NRS_IFU|NRS_MSASPEC|NRS_IMAGE|NRS_FOCUS|\\\n        NRS_MIMF|NRS_BOTA|NRS_LAMP|NRS_BRIGHTOBJ|\"\n\n    def to_fits(self):\n        raise NotImplementedError(\"FITS format is not supported for this file.\")\n\n    def validate(self):\n        assert isinstance(self.nrs1_model, Model)\n        assert isinstance(self.nrs2_model, Model)\n        assert self.meta.description is not None\n        assert self.meta.telescope is not None\n        assert self.meta.reftype is not None\n        assert self.meta.author is not None\n        assert self.meta.pedigree is not None\n\n\nclass IFUPostModel(model_base.DataModel):\n    \"\"\"\n    A model for a NIRSPEC reference file of type \"ifupost\".\n    \"\"\"\n    schema_url = \"ifupost.schema.yaml\"\n    reftype = \"ifupost\"\n\n    def __init__(self, init=None, models=None, **kwargs):\n\n        super(IFUPostModel, self).__init__(init=init, **kwargs)\n        if models is not None:\n            if len(models) != 30:\n                raise ValueError(\"Expected 30 slice models, got {0}\".format(len(models)))\n            else:\n                for i, m in enumerate(models):\n                    setattr(self, \"slice_{0}\".format(i), m)\n\n    def on_save(self, path=None):\n        self.meta.reftype = self.reftype\n        self.meta.telescope = self.meta.telescope\n\n    def populate_meta(self):\n        self.meta.instrument.name = \"NIRSPEC\"\n        self.meta.instrument.p_detector = \"NRS1|NRS2|\"\n        self.meta.exposure.type = \"NRS_IFU\"\n\n    def to_fits(self):\n        raise NotImplementedError(\"FITS format is not supported for this file.\")\n\n    def validate(self):\n        assert self.meta.description is not None\n        assert self.meta.telescope is not None\n        assert self.meta.reftype is not None\n        assert self.meta.author is not None\n        assert self.meta.pedigree is not None\n\n\nclass IFUSlicerModel(model_base.DataModel):\n    \"\"\"\n    A model for a NIRSPEC reference file of type \"ifuslicer\".\n    \"\"\"\n    schema_url = \"ifuslicer.schema.yaml\"\n    reftype = \"ifuslicer\"\n\n    def __init__(self, init=None, model=None, data=None, **kwargs):\n\n        super(IFUSlicerModel, self).__init__(init=init, **kwargs)\n        if model is not None:\n            self.model = model\n        if data is not None:\n            self.data = data\n\n    def on_save(self, path=None):\n        self.meta.reftype = self.reftype\n        self.meta.telescope = self.meta.telescope\n\n    def populate_meta(self):\n        self.meta.instrument.name = \"NIRSPEC\"\n        self.meta.instrument.p_detector = \"NRS1|NRS2|\"\n        self.meta.exposure.type = \"NRS_IFU\"\n\n    def to_fits(self):\n        raise NotImplementedError(\"FITS format is not supported for this file.\")\n\n    def validate(self):\n        assert self.meta.description is not None\n        assert self.meta.telescope is not None\n        assert self.meta.reftype is not None\n        assert self.meta.author is not None\n        assert self.meta.pedigree is not None\n\n\nclass MSAModel(model_base.DataModel):\n    \"\"\"\n    A model for a NIRSPEC reference file of type \"msa\".\n    \"\"\"\n    schema_url = \"msa.schema.yaml\"\n    reftype = \"msa\"\n\n    def __init__(self, init=None, models=None, data=None, **kwargs):\n        super(MSAModel, self).__init__(init=init, **kwargs)\n        if models is not None and data is not None:\n            self.Q1 = {'model': models['Q1'], 'data': data['Q1']}\n            self.Q2 = {'model': models['Q2'], 'data': data['Q2']}\n            self.Q3 = {'model': models['Q3'], 'data': data['Q3']}\n            self.Q4 = {'model': models['Q4'], 'data': data['Q4']}\n            self.Q5 = {'model': models['Q5'], 'data': data['Q5']}\n\n    def on_save(self, path=None):\n        self.meta.reftype = self.reftype\n        self.meta.telescope = self.meta.telescope\n\n    def populate_meta(self):\n        self.meta.instrument.name = \"NIRSPEC\"\n        self.meta.instrument.p_detector = \"NRS1|NRS2|\"\n        self.meta.exposure.p_exptype = \"NRS_TACQ|NRS_TASLIT|NRS_TACONFIRM|\\\n        NRS_CONFIRM|NRS_FIXEDSLIT|NRS_IFU|NRS_MSASPEC|NRS_IMAGE|NRS_FOCUS|\\\n        NRS_MIMF|NRS_BOTA|NRS_LAMP|NRS_BRIGHTOBJ|\"\n\n    def to_fits(self):\n        raise NotImplementedError(\"FITS format is not supported for this file.\")\n\n    def validate(self):\n        assert self.meta.description is not None\n        assert self.meta.telescope is not None\n        assert self.meta.reftype is not None\n        assert self.meta.author is not None\n        assert self.meta.pedigree is not None\n\n\nclass DisperserModel(model_base.DataModel):\n    \"\"\"\n    A model for a NIRSPEC reference file of type \"disperser\".\n    \"\"\"\n    schema_url = \"disperser.schema.yaml\"\n    reftype = \"disperser\"\n\n    def __init__(self, init=None, angle=None, gwa_tiltx=None, gwa_tilty=None,\n                 kcoef=None, lcoef=None, tcoef=None, pref=None, tref=None,\n                 theta_x=None, theta_y=None,theta_z=None, tilt_x=None, tilt_y=None,\n                 **kwargs):\n        super(DisperserModel, self).__init__(init=init, **kwargs)\n        if angle is not None:\n            self.angle = angle\n        if gwa_tiltx is not None:\n            self.gwa_tiltx = gwa_tiltx\n        if gwa_tilty is not None:\n            self.gwa_tilty = gwa_tilty\n        if kcoef is not None:\n            self.kcoef = kcoef\n        if lcoef is not None:\n            self.lcoef = lcoef\n        if tcoef is not None:\n            self.tcoef = tcoef\n        if pref is not None:\n            self.pref = pref\n        if tref is not None:\n            self.tref = tref\n        if theta_x is not None:\n            self.theta_x = theta_x\n        if theta_y is not None:\n            self.theta_y = theta_y\n        if theta_z is not None:\n            self.theta_z = theta_z\n        if tilt_x is not None:\n            self.tilt_x = tilt_x\n        if tilt_y is not None:\n            self.tilt_y = tilt_y\n\n    def on_save(self, path=None):\n        self.meta.reftype = self.reftype\n        self.meta.telescope = self.meta.telescope\n\n    def populate_meta(self):\n        self.meta.instrument.name = \"NIRSPEC\"\n        self.meta.instrument.p_detector = \"NRS1|NRS2|\"\n        self.meta.exposure.p_exptype = \"NRS_TACQ|NRS_TASLIT|NRS_TACONFIRM|\\\n        NRS_CONFIRM|NRS_FIXEDSLIT|NRS_IFU|NRS_MSASPEC|NRS_IMAGE|NRS_FOCUS|\\\n        NRS_MIMF|NRS_BOTA|NRS_LAMP|NRS_BRIGHTOBJ|\"\n        self.meta.instrument.p_grating = \"G140M|G235M|G395M|G140H|G235H|G395H|PRISM|MIRROR|\"\n\n    def to_fits(self):\n        raise NotImplementedError(\"FITS format is not supported for this file.\")\n\n    def validate(self):\n        assert self.meta.description is not None\n        assert self.meta.telescope is not None\n        assert self.meta.reftype is not None\n        assert self.meta.author is not None\n        assert self.meta.pedigree is not None\n\n\nclass FilteroffsetModel(model_base.DataModel):\n    \"\"\"\n    A model for a MIRI reference file of type \"filteroffset\".\n    \"\"\"\n    schema_url = \"filteroffset.schema.yaml\"\n    reftype = \"filteroffset\"\n\n    def __init__(self, init=None, filters=None, **kwargs):\n        super(FilteroffsetModel, self).__init__(init, **kwargs)\n        if filters is not None:\n            self.filters = filters\n\n    def populate_meta(self):\n        self.meta.instrument.name = \"MIRI\"\n        self.meta.instrument.detector = \"MIRIMAGE\"\n        self.meta.instrument.pfilter = \"F1130W|F1140C|F2300C|F2100W|F1800W|\\\n        F1550C|F560W|F2550WR|FND|F2550W|F1500W|F1000W|F1065C|F770W|F1280W|\"\n\n    def on_save(self, path=None):\n        self.meta.reftype = self.reftype\n        self.meta.telescope = self.meta.telescope\n\n    def validate(self):\n        assert self.meta.instrument.name == \"MIRI\"\n        assert self.meta.instrument.detector == \"MIRIMAGE\"\n        assert self.meta.description is not None\n        assert self.meta.telescope is not None\n        assert self.meta.reftype is not None\n        assert self.meta.author is not None\n        assert self.meta.pedigree is not None\n\n\nclass IFUFOREModel(_SimpleModel):\n    \"\"\"\n    A model for a NIRSPEC reference file of type \"ifufore\".\n    \"\"\"\n    schema_url = \"ifufore.schema.yaml\"\n    reftype = \"ifufore\"\n\n    def populate_meta(self):\n        self.meta.instrument.name = \"NIRSPEC\"\n        self.meta.instrument.p_detector = \"NRS1|NRS2|\"\n        self.meta.exposure.type = \"NRS_IFU\"\n\n\nclass CameraModel(_SimpleModel):\n    \"\"\"\n    A model for a reference file of type \"camera\".\n    \"\"\"\n    schema_url = \"camera.schema.yaml\"\n    reftype = 'camera'\n\n    def populate_meta(self):\n        self.meta.instrument.name = \"NIRSPEC\"\n        self.meta.instrument.p_detector = \"NRS1|NRS2|\"\n        self.meta.exposure.p_exptype = \"NRS_TACQ|NRS_TASLIT|NRS_TACONFIRM|\\\n        NRS_CONFIRM|NRS_FIXEDSLIT|NRS_IFU|NRS_MSASPEC|NRS_IMAGE|NRS_FOCUS|\\\n        NRS_MIMF|NRS_BOTA|NRS_LAMP|NRS_BRIGHTOBJ|\"\n\n\nclass CollimatorModel(_SimpleModel):\n    \"\"\"\n    A model for a reference file of type \"collimator\".\n    \"\"\"\n    schema_url = \"collimator.schema.yaml\"\n    reftype = 'collimator'\n\n    def populate_meta(self):\n        self.meta.instrument.name = \"NIRSPEC\"\n        self.meta.instrument.p_detector = \"NRS1|NRS2|\"\n\n\nclass OTEModel(_SimpleModel):\n    \"\"\"\n    A model for a 
reference file of type \"ote\".\n    \"\"\"\n    schema_url = \"ote.schema.yaml\"\n    reftype = 'ote'\n\n    def populate_meta(self):\n        self.meta.instrument.name = \"NIRSPEC\"\n        self.meta.instrument.p_detector = \"NRS1|NRS2|\"\n\n\nclass FOREModel(_SimpleModel):\n    \"\"\"\n    A model for a reference file of type \"fore\".\n    \"\"\"\n    schema_url = \"fore.schema.yaml\"\n    reftype = 'fore'\n\n    def populate_meta(self):\n        self.meta.instrument.name = \"NIRSPEC\"\n        self.meta.instrument.p_detector = \"NRS1|NRS2|\"\n        self.meta.instrument.p_filter = \"CLEAR|F070LP|F100LP|F110W|F140X|F170LP|F290LP|\"\n","sub_path":"jwst/datamodels/wcs_ref_models.py","file_name":"wcs_ref_models.py","file_ext":"py","file_size_in_byte":19714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"155752186","text":"\"\"\"Manages the traffic offenders\"\"\"\n\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator\nfrom django.conf import settings\n\n\nclass InfratorManager(models.Manager):\n    def get_page(self, page, procurar):\n\n        if procurar is not None and procurar != '':\n            condutores = self.filter(Q(nome__icontains=procurar) |\n                                     Q(cnh__icontains=procurar) |\n                                     Q(cpf__icontains=procurar) |\n                                     Q(rg__icontains=procurar))\n        else:\n            condutores = self.filter()\n\n        condutores = condutores.order_by('nome')\n\n        paginator = Paginator(condutores, settings.NR_REGISTROS_PAGINA)\n        try:\n            condutores_page = paginator.page(page)\n        except:\n            condutores_page = paginator.page(paginator.num_pages)\n\n        return condutores_page\n","sub_path":"detransapp/manager/infrator.py","file_name":"infrator.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"203970601","text":"import random\nimport math\nimport config\n\nclass Individuale(object):\n    def __init__(self):\n        self.chrom = [random.randint(0,1) for _ in range(config.N)]\n        self.fitness = 0.0\n        self.capacity = 0.0\n    \n    def evaluate(self):\n        self.fitness = 0.0\n        self.capacity = 0.0\n        for i in range(config.N):\n            self.fitness += (self.chrom[i]) * config.ITEMS[i][1]\n            self.capacity += (self.chrom[i]) * config.ITEMS[i][0]\n        if self.capacity > config.B:\n            self.fitness = 0.0\n    \n    def crossover(self, p1, p2):\n        point = random.randint(0,config.N-2)\n        for i in range(point):\n            self.chrom[i] = p1.chrom[i]\n        for i in range(point, config.N):\n            self.chrom[i] = p2.chrom[i]\n\n    def mutate(self):\n        for i in range(config.N):\n            if random.random() < config.MUTATE_PROB:\n                self.chrom[i] = 1 - self.chrom[i]","sub_path":"sample-code/week1-GeneticAlgorithm/GA-KnapsackProblem/Individual.py","file_name":"Individual.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"177648648","text":"# -*- coding= utf-8 -*-\n\nfrom cat.models import Questionnaires, Questions, Subjects, ProjectAdminUser, QuizProject, Company, JobGroup\nfrom django.contrib.auth.models import User, Group\nimport numpy as np\nimport random\nfrom itertools import izip\nfrom guardian.shortcuts import assign_perm\n\nfor name in ('default',):\n    q = Questionnaires.objects.db_manager(name).create(questionnaires='test',\n                                                       levels_count='3,3,3',\n                                                       questionnaires_type='brm')\n    q1 = Questionnaires.objects.db_manager(name).create(questionnaires='test1',\n                                                        levels_count='4,4,4',\n                                                        questionnaires_type='brm')\n\n    q2 = Questionnaires.objects.db_manager(name).create(questionnaires='test2',\n                                                        
levels_count='4,4,4',\n questionnaires_type='grm')\n\n    questions_text = ['test_%s' % i for i in range(300)]\n    a = np.random.uniform(1, 3, 300)\n    a.sort()\n    b = np.random.normal(size=300)\n    random_choices = ['1,0,0,0', '0,1,0,0', '0,0,1,0', '0,0,0,1']\n    choices = [random.choice(random_choices) for i in range(300)]\n    level = [1 for i in range(100)]\n    level.extend([2 for i in range(100)])\n    level.extend([3 for i in range(100)])\n    # materialize the generator: a bare izip would be exhausted after the first bulk_create below\n    all_set = list(izip(questions_text, a, b, choices, level))\n\n    Questions.objects.db_manager(name).bulk_create(Questions(question=each[0],\n slop=each[1],\n threshold=each[2],\n choice_text=each[3],\n choice_value=each[3],\n level=each[4],\n questionnaires=q) for each in all_set)\n\n    Questions.objects.db_manager(name).bulk_create(Questions(question=each[0],\n slop=each[1],\n threshold=each[2],\n choice_text=each[3],\n choice_value=each[3],\n level=each[4],\n questionnaires=q) for each in all_set)\n\n    all_set = izip(questions_text, a, b, choices, level)\n\n    Questions.objects.db_manager(name).bulk_create(Questions(question=each[0],\n slop=each[1],\n threshold=each[2],\n choice_text=each[3],\n choice_value=each[3],\n level=each[4],\n questionnaires=q1) for each in all_set)\n\n    # =================================================================================================================\n    b = np.array([])\n    for i in range(50):\n        b1 = np.random.uniform(-3, -2, size=1)\n        b2 = np.random.uniform(-2.1, -1, size=1)\n        b3 = np.random.uniform(-1.1, 0, size=1)\n        b = np.append(b, np.array([[b1, b2, b3]]))\n\n    for i in range(100):\n        b1 = np.random.uniform(-2, -1, size=1)\n        b2 = np.random.uniform(-1.1, 0, size=1)\n        b3 = np.random.uniform(0.1, 1, size=1)\n        b = np.append(b, np.array([[b1, b2, b3]]))\n\n    for i in range(100):\n        b1 = np.random.uniform(-1, 0, size=1)\n        b2 = np.random.uniform(0.1, 1, size=1)\n        b3 = np.random.uniform(1.1, 2, size=1)\n        b = np.append(b, np.array([[b1, b2, b3]]))\n\n    for i in range(50):\n        b1 = np.random.uniform(0, 1, size=1)\n        b2 = np.random.uniform(1.1, 2, size=1)\n        b3 = np.random.uniform(2.1, 3, size=1)\n        b = np.append(b, np.array([[b1, b2, b3]]))\n\n    len(b)\n    b.shape = 300, 3\n\n    b = list(list(_) for _ in b)\n\n    b = [','.join([str(__) for __ in _]) for _ in b]\n\n    grm_random_choices = ['1,2,3,4', '2,1,3,4', '3,1,2,4', '4,1,2,3']\n\n    grm_choices = [random.choice(grm_random_choices) for i in range(300)]\n\n    all_set = izip(questions_text, a, b, grm_choices, level)\n\n    Questions.objects.db_manager(name).bulk_create(Questions(question=each[0],\n slop=each[1],\n\n thresholds=each[2],\n choice_text=each[3],\n choice_value=each[3],\n level=each[4],\n questionnaires=q2,\n )\n for each in all_set)\n\n    job_group_list = [JobGroup(name=u'销售'), JobGroup(name=u'研发'), JobGroup(name='会计')]\n    for g in job_group_list:\n        g.save()\n\n    c = Company.objects.create(company_name=u'英特尔')\n\n    p = QuizProject.objects.create(project_name='hello')\n\n    assign_perm('cat.change_quizproject', c.companyadminuser.user, p)\n    assign_perm('cat.view_quizproject', c.companyadminuser.user, p)\n\n    for i in range(1, 11):\n        u = Subjects.objects.db_manager(name).create(name='name%s' % i, project=p)\n\n\n    g = JobGroup.objects.get(id=1)\n    assign_perm('cat.do_questionnaire', g.group, q)\n\n\n","sub_path":"cat/create_model.py","file_name":"create_model.py","file_ext":"py","file_size_in_byte":5606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"154816407","text":"import cv2\nfrom cv2 import imread\nfrom keras.models import load_model\nfrom PIL import 
Image\nimport numpy as np\n\n\"\"\"\nLive Predict\nThis script loads a pre-trained CNN model and classifies American Sign Language\nfinger spelling from live camera input with no filtering on the camera frame\nSignum: Software Design SP18 Final Project\nIsaac Vandor, Utsav Gupta, Diego Berny\n\"\"\"\n\n#model = load(trained_model='model.h5')\nmodel = load_model('models/model.h5')\n#result = predict(trained_model=model, test_image=frame)\n#'Data/OutputData/scaledoutput.jpg'\nalphabets = {\"A\": 0, \"B\":1, \"C\": 2, \"D\":3, \"E\": 4, \"F\": 5,\n \"G\": 6, \"H\": 7, \"I\": 8, \"J\": 9, \"K\": 10, \"L\": 11,\n \"M\": 12, \"N\": 13, \"O\": 14, \"P\": 15, \"Q\": 16, \"S\": 17,\n \"T\": 18, \"U\": 19, \"V\": 20, \"W\": 21, \"X\": 22, \"Y\": 23}\n\n#alphabet = find_alphabet(letter_list=result, letter_dict=alphabets)\n#print(\"The alphabet is: \" + alphabet)\n\ncapture_region_x=0.55 # roi x start point\ncapture_region_y=0.9 # roi y start point\nsize = 200\n\n# ====================== Live loop ======================\n# =======================================================\ncamera_input = int(input('Enter camera number: '))\nvideo_capture = cv2.VideoCapture(camera_input)\nvideo_capture.set(10,200)\n#print(\"Press b to capture background & begin detection or r to reset\")\n\nwhile video_capture.isOpened():\n    # Capture frame-by-frame\n    ret, frame = video_capture.read()\n    frame = cv2.flip(frame, 1) # flip the frame horizontally\n    cv2.rectangle(frame, (int(capture_region_x * frame.shape[1]), 0),\n (frame.shape[1], int(capture_region_y * frame.shape[0])), (255, 0, 0), 2)\n\n    frame = cv2.resize(frame, (size, size))\n    img = frame\n    img = img.astype(np.float32)/255.0 # convert to float32\n    img = img[:,:,::-1] # convert from RGB to BGR\n\n    result = model.predict(np.expand_dims(frame, axis=0))[0]\n\n    letter_list=result\n    letter_dict = alphabets\n    idx = letter_list.argmax(axis=0) # find the index of the biggest argument\n\n    # look for the key corresponding to the biggest argument\n    decoded = [key for key, value in letter_dict.items() if value == idx]\n    alphabet = decoded[0]\n\n    width = int(video_capture.get(3) + 0.25)\n    height = int(video_capture.get(4) + 0.25)\n\n    # Annotate image with most probable prediction\n    cv2.putText(frame, text=alphabet,\n org=(width // 2 + 50, height // 2 + 50),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=10, color=(255, 255, 0),\n thickness=15, lineType=cv2.LINE_AA)\n\n    print(\"The alphabet is: \" + alphabet)\n\n    # Display the resulting frame\n    cv2.imshow('Video', frame)\n\n    k = cv2.waitKey(10)\n    if k == 27: # press ESC to exit\n        break\n# Release the capture\nvideo_capture.release()\ncv2.destroyAllWindows()\n","sub_path":"live_predict.py","file_name":"live_predict.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"237079962","text":"import random\nfrom tqdm import tqdm\nimport spacy\nimport ujson as json\nfrom collections import Counter\nimport numpy as np\nimport os.path\nimport argparse\nimport torch\n# import pickle\nimport torch\nimport os\nfrom joblib import Parallel, delayed\n\nimport torch\n\nnlp = spacy.blank(\"en\")\nfrom nltk.tag import StanfordNERTagger\nfrom nltk.tokenize import word_tokenize\nimport bisect\nimport re\nimport pandas as pd\nimport csv\n\n\ndef word_tokenize(sent):\n    doc = nlp(sent)\n    return [token.text for token in 
doc]\n\ndef find_tokenized_length(token_list):\n    length = 0\n    for token in token_list:\n        if token not in [' ',' ',' ']:\n            length+=1\n    return length\ndef _process_article(article, config):\n    paragraphs = article['context']\n    # some articles in the fullwiki dev/test sets have zero paragraphs\n    if len(paragraphs) == 0:\n        paragraphs = [['some random title', 'some random stuff']]\n#     st = StanfordNERTagger('/home/mwdgdx/stanford-ner-2018-10-16/classifiers/english.muc.7class.distsim.crf.ser.gz','/home/mwdgdx/stanford-ner-2018-10-16/stanford-ner.jar',encoding='utf-8')\n\n    context_tokens = []\n    offsets = []\n    sent_map = [] # (sent_idx, is_sup_fact=True/False) \n    sent_idx=0\n    para_map = [] \n    sup_list=[]\n    def _process(sent_tokens, is_sup_fact, is_title=False):\n        nonlocal context_tokens, offsets, sent_map,sent_idx, para_map, para_idx\n        my_N_tokens = len(sent_tokens)\n        context_tokens.extend(sent_tokens)\n        sent_map += [(sent_idx,para_idx,is_sup_fact,sent_tokens[i]) for i in range(my_N_tokens) if sent_tokens[i] not in [' ',' ',' ']]\n        sent_idx+=1 \n\n    if 'supporting_facts' in article:\n        sp_set = set(list(map(tuple, article['supporting_facts'])))\n    else:\n        sp_set = set()\n\n    st = StanfordNERTagger('/home/mwdgdx/stanford-ner-2018-10-16/classifiers/english.muc.7class.distsim.crf.ser.gz','/home/mwdgdx/stanford-ner-2018-10-16/stanford-ner.jar',encoding='utf-8')\n    token_list = word_tokenize(article['question'])\n    ques_length = find_tokenized_length(token_list)\n    for para_idx , para in enumerate(paragraphs):\n        cur_title, cur_para = para[0], para[1]\n        token_list += word_tokenize(cur_title)\n        _process(word_tokenize(cur_title), False, is_title=True)\n        c_p= [word_tokenize(sent) for sent in cur_para]\n        for sent in cur_para:\n            token_list +=word_tokenize(sent)\n        for sent_id, sent in enumerate(c_p):\n            is_sup_fact = (cur_title, sent_id) in sp_set\n            if is_sup_fact:\n                sup_list +=[sent_idx]\n            _process(sent, is_sup_fact)\n#     ori_list = token_list\n    token_list = st.tag(token_list)\n    ques_tokens= token_list[:ques_length]\n    context_tokens = token_list[ques_length:]\n#     print(len(sent_map), len(context_tokens))\n#     testing NER Efficiency \n#     qi = pd.DataFrame({'q+c': [i for i in ori_list if i!=' ' and i !=' ' and i !='']})\n#     qi.to_csv('NER seperate'+article['_id']+'.csv', header=True,index=False, sep=',')\n#     qi = pd.DataFrame({'q+c': [e[0] for e in token_list ]})\n#     qi.to_csv('NER token_list'+article['_id']+'.csv', header=True,index=False, sep=',')\n    example = {'context_tokens': context_tokens, 'ques_tokens': ques_tokens, 'id': article['_id'], 'sent_map': sent_map, 'sup': sup_list}\n    return example\n\ndef process_file(filename, config, word_counter=None, char_counter=None):\n    data = json.load(open(filename, 'r'))\n\n    examples = []\n    data=data[:]\n# initially only look at 10 articles\n    outputs = Parallel(n_jobs=12, verbose=10)(delayed(_process_article)(article, config) for article in data)\n    # outputs = [_process_article(article, config) for article in data]\n    examples = [e for e in outputs]\n\n    print(\"{} questions in total\".format(len(examples)))\n\n    return examples\n\n\n\n\n# record:\n# node is_support degree type\ndef graph_process(text, ques, sent_map,sup_list,art_idx):\n    print(\"length of text and sent_map\",len(text), len(sent_map))\n    if len(text)!=len(sent_map):\n        print(\"error\")\n        return 1\n    \n    sup_set=set()\n    nodes=set()\n    q_s=set()\n    for e in ques:\n        if e[1]!='O':\n            q_s.add(e[0].lower())\n# handle upper/lower case\n    w_m={}\n    s_m={}\n#     qp_m={}\n#     tp_m={}\n# nodes is the index of each entity\n# first filled with entities in question\n    for i in range(len(text)):\n        if 
text[i][1]!='O':\n            word=text[i][0].lower()\n            for q_w in q_s:\n                if (word in q_w) or (q_w in word):\n                    nodes.add(i)\n# add next sentence\n                    nextSentIdx=i+1\n                    while nextSentIdx< len(sent_map) and sent_map[nextSentIdx][0]<=(sent_map[i][0]+1):\n                        if text[nextSentIdx] in {\"she\",\"it\",\"they\",\"he\",\"their\",\"her\",\"his\",\"its\",\"him\",\"them\"}:\n                            s_m.add(sent_map[nextSentIdx][0])\n                            break\n                        nextSentIdx+=1\n# add word in word_map\n                    if q_w in w_m:\n                        w_m[q_w].add(i)\n                    else:\n                        w_m[q_w]={i}\n# add sent in sent_map\n                    if sent_map[i][0] in s_m:\n                        s_m[sent_map[i][0]].add(i)\n                    else:\n                        s_m[sent_map[i][0]]={i}\n#                     if sent_map[i][1] in qp_m:\n#                         qp_m[sent_map[i][1]].add(i)\n#                     else:\n#                         qp_m[sent_map[i][1]]={i}\n                    break\n    is_changed=True\n    while is_changed:\n        is_changed =False\n        for i in range(len(text)):\n            if ((text[i][1]!='O') and (i not in nodes)):\n                word=text[i][0].lower()\n                for q_w in w_m:\n                    if (word in q_w) or (q_w in word):\n                        nodes.add(i)\n                        nextSentIdx=i+1\n                        while nextSentIdx< len(sent_map) and sent_map[nextSentIdx][0]<=(sent_map[i][0]+1):\n                            if text[nextSentIdx] in {\"she\",\"it\",\"they\",\"he\",\"their\",\"her\",\"his\",\"its\",\"him\",\"them\"}:\n                                s_m.add(sent_map[nextSentIdx][0])\n                                break\n                            nextSentIdx+=1\n                        is_changed = True\n                        if q_w in w_m:\n                            w_m[q_w].add(i)\n                        else:\n                            w_m[q_w]={i}\n                        if sent_map[i][0] in s_m:\n                            s_m[sent_map[i][0]].add(i)\n                        else:\n                            s_m[sent_map[i][0]]={i} \n                        break\n                if sent_map[i][0] in s_m:\n                    nodes.add(i)\n                    nextSentIdx=i+1\n                    while nextSentIdx< len(sent_map) and sent_map[nextSentIdx][0]<=(sent_map[i][0]+1):\n                        if text[nextSentIdx] in {\"she\",\"it\",\"they\",\"he\",\"their\",\"her\",\"his\",\"its\",\"him\",\"them\"}:\n                            s_m.add(sent_map[nextSentIdx][0])\n                            break\n                        nextSentIdx+=1\n                    is_changed = True\n# this may get added into a different list here\n                    if word in w_m:\n                        w_m[word].add(i)\n                    else:\n                        w_m[word]={i}\n                    if sent_map[i][0] in s_m:\n                        s_m[sent_map[i][0]].add(i)\n                    else:\n                        s_m[sent_map[i][0]]={i} \n\n            #             if sent_map[i][1] in qp_m:\n#                     if sent_map[i][1] in tp_m:\n#                         tp_m[sent_map[i][1]]+=1\n#                     else:\n#                         tp_m[sent_map[i][1]]= 1\n    n_info={}\n    is_node = [0 for e in text]\n    for node in nodes: \n        n_info[node]=[0,int(sent_map[node][2] == True)]\n        is_node[node]=1\n        if sent_map[node][2] == True:\n            sup_set.add(sent_map[node][0])\n    for w_s in w_m.values():\n        for w in w_s:\n            n_info[w][0]+=len(w_s)\n    \n    for s_s in s_m.values():\n        for w in s_s:\n            n_info[w][0]+=len(s_s)\n    print(art_idx, len(sup_set),len(sup_list))\n#     for key, value in qp_m.items():\n#         for w in value:\n#             if key in tp_m:\n#                 n_info[w][0]+=tp_m[key]\n#     edge= [value[0] for value in n_info.values()]\n#     is_sup = [value[1] for value in n_info.values()]\n#     gi = pd.DataFrame({'text':[e for e in text], 'is_sup': [i[2] for i in sent_map], 'is_node':is_node})\n#     gi.to_csv('graph_info'+str(art_idx)+'.csv', header=False,index=False, sep=',')\n#     qi = pd.DataFrame({'question': [e for e in ques]})\n#     qi.to_csv('ques_info'+str(art_idx)+'.csv', header=False,index=False, sep=',')\n    return int(len(sup_set) != len(sup_list))\n\ndef prepro(config):\n#     word_counter, char_counter = Counter(), Counter()\n#     examples = process_file(config.data_file, config, word_counter, char_counter)\n#     torch.save(examples, '/scratch/qmei_fluxg/mwdgdx/graph_process_371.pkl') \n    examples = torch.load('/scratch/qmei_fluxg/mwdgdx/graph_process.pkl')\n    missed =0\n    for i, example in enumerate(examples):\n        text = example['context_tokens']\n        ques = example['ques_tokens']\n        sent_map = example['sent_map']\n        sup_list = example['sup']\n        missed += graph_process(text, ques,sent_map,sup_list, i)\n    print(\"missed\" , missed, 
\"from\", len(examples))\n print (\"percentile\", missed/len(examples))\n\n","sub_path":"process_graph.py","file_name":"process_graph.py","file_ext":"py","file_size_in_byte":9595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"637921729","text":"from django.urls import path, include\n\nfrom . import views\nfrom django.contrib import admin\nfrom survey.dash_apps.finished_apps import main_dashboard\n\nurlpatterns = [\n # Doesn't do anything now\n path('', views.index, name='index'),\n\n path('login/', views.login, name='login'),\n path('logout/', views.logout, name='logout'),\n path('user/', views.user, name='user'),\n path('register/', views.register, name='register'),\n\n path('resources/', views.resources, name='resources'),\n path('admin/', admin.site.urls),\n\n #for dashboard\n path('django_plotly_dash/', include('django_plotly_dash.urls')),\n\n # Individual Observations\n path('observation_ind_new/', views.observation_ind_new, name='observation_ind_new'),\n path('observation_ind_detail//', views.observation_ind_detail, name='observation_ind_detail'),\n\n # General Observations\n path('observation_new/', views.general_observation, name='general_observation'),\n path('observation_detail//', views.observation_detail, name='observation_detail'),\n\n\n # dashboard for graphs\n path('dashboard/', views.dashboard, name=\"dashboard\"),\n\n # Survey Individual\n path('survey_ind_detail//', views.survey_ind_detail, name='survey_ind_detail'),\n path('survey_new_ind/', views.survey_individual, name='survey_individual'),\n path('survey_ind_extra_detail///', views.survey_ind_extra_detail, name='survey_ind_extra_detail'),\n\n # Survey\n path('survey_detail//', views.survey_detail, name='survey_detail'),\n path('survey_new/', views.survey_new, name='survey_new'),\n\n]","sub_path":"survey/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"529521006","text":"#!/usr/bin/env/ python3\n\nimport random\nF = open('wordlist.txt')\nwords = F.readlines()\nF.close()\nwhile True:\n word = words[random.randrange(len(words))]\n while len(word) > 5 or len(word) == 0:\n word = words[random.randrange(0, len(words))]\n word = word.rstrip()\n old_word = word\n word = list(word)\n while word:\n print(word.pop(random.randrange(len(word))), end = ' ')\n print('\\nType your answer')\n match_word = input()\n new_word = match_word + '\\n'\n if new_word in words and set(match_word) == set(old_word):\n print('You win.')\n else:\n print('The answer is ' + old_word)\n","sub_path":"assignments/ass1/example-code/jumble.py","file_name":"jumble.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"90602625","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nobjects = {}\norbits = {}\ndistance = {}\noutputDataFileAJ281 = np.zeros(24*3).reshape(24,3)\ncount1 = 0\ncount2 = 1\ncount3 = 2\nfor i in range(1,9):\n print(i)\n PATH = './USQHPCResultsAndPrograms/4.5Gyr_180000OutputModifiedTest/AJ281'+str(i)+'.csv'\n objects[\"df{0}\".format(i)] = pd.read_csv(PATH)\n distance[\"AJ281{0}\".format(i)] = np.zeros(len(objects['df'+str(i)]))\n for j in range(len(distance[\"AJ281\"+str(i)])):\n distance[\"AJ281\"+str(i)][j] = 
math.sqrt((math.pow(objects['df'+str(i)].values[j,0],2))+(math.pow(objects['df'+str(i)].values[j,1],2))+(math.pow(objects['df'+str(i)].values[j,2],2)))\n\nprint(distance['AJ2811'])\nprint('YES')\nprint(distance['AJ2812'])\n\nPATH1 = './USQHPCResultsAndPrograms/FINALGGDATA/jupiterXYZAJ281.csv'\nPATH2 = './USQHPCResultsAndPrograms/FINALGGDATA/saturnXYZAJ281.csv'\nPATH3 = './USQHPCResultsAndPrograms/FINALGGDATA/uranusXYZAJ281.csv'\nPATH4 = './USQHPCResultsAndPrograms/FINALGGDATA/neptuneXYZAJ281.csv'\ndf1 = pd.read_csv(PATH1)\ndf2 = pd.read_csv(PATH2)\ndf3 = pd.read_csv(PATH3)\ndf4 = pd.read_csv(PATH4)\n\nprint('YES GG READ')\nprint('YES GG READ')\n\njupiterDistanceAJ281 = np.zeros(len(df1))\nsaturnDistanceAJ281 = np.zeros(len(df2))\nuranusDistanceAJ281 = np.zeros(len(df3))\nneptuneDistanceAJ281 = np.zeros(len(df4))\n\nprint('READ GG ARRAY')\nprint('READ GG ARRAY')\n\nAJ2811Distance = np.zeros(len(distance['AJ2811']))\nAJ2812Distance = np.zeros(len(distance['AJ2812']))\nAJ2813Distance = np.zeros(len(distance['AJ2813']))\nAJ2814Distance = np.zeros(len(distance['AJ2814']))\nAJ2815Distance = np.zeros(len(distance['AJ2815']))\nAJ2816Distance = np.zeros(len(distance['AJ2816']))\nAJ2817Distance = np.zeros(len(distance['AJ2817']))\nAJ2818Distance = np.zeros(len(distance['AJ2818']))\n\nprint('Dictionary items read into arrays')\nprint('Dictionary items read into arrays')\n\nfor i in range(len(df1)):\n jupiterDistanceAJ281[i] = math.sqrt((math.pow(df1.values[i,0],2))+(math.pow(df1.values[i,1],2))+(math.pow(df1.values[i,2],2)))\n saturnDistanceAJ281[i] = math.sqrt((math.pow(df2.values[i,0],2))+(math.pow(df2.values[i,1],2))+(math.pow(df2.values[i,2],2)))\n uranusDistanceAJ281[i] = math.sqrt((math.pow(df3.values[i,0],2))+(math.pow(df3.values[i,1],2))+(math.pow(df3.values[i,2],2)))\n neptuneDistanceAJ281[i] = math.sqrt((math.pow(df4.values[i,0],2))+(math.pow(df4.values[i,1],2))+(math.pow(df4.values[i,2],2)))\n\nprint('GG distance read into arrays')\nprint('GG distance read into arrays')\n\nfinalGGData = np.zeros(180000*4).reshape(180000,4)\nfor i in range(len(finalGGData)):\n finalGGData[i,0] = jupiterDistanceAJ281[i]\n finalGGData[i,1] = saturnDistanceAJ281[i]\n finalGGData[i,2] = uranusDistanceAJ281[i]\n finalGGData[i,3] = neptuneDistanceAJ281[i]\n\nprint('Final GG array made')\nprint('Final GG array made')\n\nfor i in range(len(distance['AJ2811'])):\n AJ2811Distance[i] = distance['AJ2811'][i]\n \nfor i in range(len(distance['AJ2812'])):\n AJ2812Distance[i] = distance['AJ2812'][i]\n\nfor i in range(len(distance['AJ2813'])):\n AJ2813Distance[i] = distance['AJ2813'][i]\n\nfor i in range(len(distance['AJ2814'])):\n AJ2814Distance[i] = distance['AJ2814'][i]\n\nfor i in range(len(distance['AJ2815'])):\n AJ2815Distance[i] = distance['AJ2815'][i]\n\nfor i in range(len(distance['AJ2816'])):\n AJ2816Distance[i] = distance['AJ2816'][i]\n\nfor i in range(len(distance['AJ2817'])):\n AJ2817Distance[i] = distance['AJ2817'][i]\n \nfor i in range(len(distance['AJ2818'])):\n AJ2818Distance[i] = distance['AJ2818'][i]\n\nprint('Clones added to their respective arrays')\nprint('Clones added to their respective arrays')\n\nnp.savetxt(\"AJ2811Distance.csv\", AJ2811Distance, delimiter=\",\")\ndf = pd.read_csv('AJ2811Distance.csv', header=None)\ndf.rename(columns={0: 'Distance'}, inplace=True)\ndf.to_csv('AJ2811Distance.csv', index=False) \n\nnp.savetxt(\"AJ2812Distance.csv\", AJ2812Distance, delimiter=\",\")\ndf = pd.read_csv('AJ2812Distance.csv', header=None)\ndf.rename(columns={0: 'Distance'}, 
inplace=True)\ndf.to_csv('AJ2812Distance.csv', index=False)\n\nnp.savetxt(\"AJ2813Distance.csv\", AJ2813Distance, delimiter=\",\")\ndf = pd.read_csv('AJ2813Distance.csv', header=None)\ndf.rename(columns={0: 'Distance'}, inplace=True)\ndf.to_csv('AJ2813Distance.csv', index=False)\n\nnp.savetxt(\"AJ2814Distance.csv\", AJ2814Distance, delimiter=\",\")\ndf = pd.read_csv('AJ2814Distance.csv', header=None)\ndf.rename(columns={0: 'Distance'}, inplace=True)\ndf.to_csv('AJ2814Distance.csv', index=False)\n\nnp.savetxt(\"AJ2815Distance.csv\", AJ2815Distance, delimiter=\",\")\ndf = pd.read_csv('AJ2815Distance.csv', header=None)\ndf.rename(columns={0: 'Distance'}, inplace=True)\ndf.to_csv('AJ2815Distance.csv', index=False)\n\nnp.savetxt(\"AJ2816Distance.csv\", AJ2816Distance, delimiter=\",\")\ndf = pd.read_csv('AJ2816Distance.csv', header=None)\ndf.rename(columns={0: 'Distance'}, inplace=True)\ndf.to_csv('AJ2816Distance.csv', index=False)\n\nnp.savetxt(\"AJ2817Distance.csv\", AJ2817Distance, delimiter=\",\")\ndf = pd.read_csv('AJ2817Distance.csv', header=None)\ndf.rename(columns={0: 'Distance'}, inplace=True)\ndf.to_csv('AJ2817Distance.csv', index=False)\n\nnp.savetxt(\"AJ2818Distance.csv\", AJ2818Distance, delimiter=\",\")\ndf = pd.read_csv('AJ2818Distance.csv', header=None)\ndf.rename(columns={0: 'Distance'}, inplace=True)\ndf.to_csv('AJ2818Distance.csv', index=False)\n\nnp.savetxt(\"FinalGGDataAJ281.csv\", finalGGData, delimiter=\",\")\ndf = pd.read_csv('FinalGGDataAJ281.csv', header=None)\ndf.rename(columns={0: 'Jupiter', 1: 'Saturn', 2: 'Uranus', 3: 'Neptune'}, inplace=True)\ndf.to_csv('FinalGGDataAJ281.csv', index=False) \nprint('Complete 1')\nprint('Complete 1')\n\n","sub_path":"FinalPrograms/AJ281Distance_0.py","file_name":"AJ281Distance_0.py","file_ext":"py","file_size_in_byte":5699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"209637000","text":"class Solution:\n def strStr(self, haystack: str, needle: str) -> int:\n needleLen, haystackLen = len(needle), len(haystack)\n if needleLen == 0:\n return 0\n if needleLen > haystackLen:\n return -1\n base, modulus = 26, 2 ** 32\n \n def charToInt(c):\n return ord(c) - ord('a')\n \n haystackHash = needleHash = 0\n for i in range(needleLen):\n needleHash = (needleHash * base + charToInt(needle[i])) % modulus\n haystackHash = (haystackHash * base + charToInt(haystack[i])) % modulus\n if needleHash == haystackHash:\n return 0\n baseL = pow(base, needleLen, modulus)\n for i in range(1, haystackLen - needleLen + 1):\n haystackHash = ((haystackHash * base) - (charToInt(haystack[i - 1]) * baseL) + charToInt(haystack[i + needleLen - 1])) % modulus\n if needleHash == haystackHash:\n return i\n return -1","sub_path":"LeetCode/Implement strStr().py","file_name":"Implement strStr().py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"561215796","text":"# You're given strings J representing the types of stones that are jewels, and S representing the stones you have. Each character in S is a type of stone you have. You want to know how many of the stones you have are also jewels.\n\n# The letters in J are guaranteed distinct, and all characters in J and S are letters. 
Letters are case sensitive, so \"a\" is considered a different type of stone from \"A\".\n\n# Example 1:\n\n# Input: J = \"aA\", S = \"aAAbbbb\"\n# Output: 3\n# Example 2:\n\n# Input: J = \"z\", S = \"ZZ\"\n# Output: 0\n# Note:\n\n# S and J will consist of letters and have length at most 50.\n# The characters in J are distinct.\n\n# Answers\n\nfrom collections import defaultdict  # needed by Sol 2\n\nclass Solution:\n\n    # Sol1\n#     def numJewelsInStones(self, J: str, S: str) -> int:\n#         print(J)\n#         print(S)\n#         j = list(J)\n#         s = list(S)\n#         print(j)\n#         print(s)\n        \n#         counter = 0 \n#         for i in s:\n#             if i in j:\n#                 counter+=1\n#         return counter\n\n    # Sol 2\n    \n    def numJewelsInStones(self, J: str, S: str) -> int:\n        stones = defaultdict(int)\n\n        for s in S:\n            stones[s] += 1\n\n        return sum([stones.get(j, 0) for j in J])","sub_path":"771. Jewels and Stones.py","file_name":"771. Jewels and Stones.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"159667166","text":"\"\"\"\n@ProjectName: DXY-2019-nCoV-Crawler\n@FileName: script.py\n@Author: Jiabao Lin\n@Date: 2020/1/31\n\"\"\"\nfrom git import Repo\nfrom pymongo import MongoClient\nimport os\nimport time\nimport logging\nimport datetime\nimport pandas as pd\n\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')\nlogger = logging.getLogger(__name__)\n\nuri = '**Confidential**'\nclient = MongoClient(uri)\ndb = client['2019-nCoV']\n\ncollections = ('DXYOverall', 'DXYArea', 'DXYNews', 'DXYRumors')\ntime_types = ('pubDate', 'createTime', 'modifyTime', 'dataInfoTime', 'crawlTime', 'updateTime')\n\n\ndef git_manager(changed_files):\n    repo = Repo(path=os.path.split(os.path.realpath(__file__))[0])\n    repo.index.add(changed_files)\n    repo.index.commit(message='{datetime} - Change detected!'.format(datetime=datetime.datetime.now()))\n    origin = repo.remote('origin')\n    origin.push()\n    logger.info('Pushing to GitHub successfully!')\n\n\nclass DB:\n    def __init__(self):\n        self.db = db\n\n    def count(self, collection):\n        return self.db[collection].count_documents(filter={})\n\n    def dump(self, collection):\n        return self.db[collection].aggregate(\n            pipeline=[\n                {\n                    '$sort': {\n                        'updateTime': -1,\n                        'crawlTime': -1\n                    }\n                }\n            ]\n        )\n\n\nclass Listener:\n    def __init__(self):\n        self.db = DB()\n        self.counter = dict()\n\n    def run(self):\n        while True:\n            self.listener()\n            time.sleep(3600)\n\n    def listener(self):\n        changed_files = list()\n        for collection in collections:\n            if not self.counter.get(collection, None):\n                self.counter[collection] = self.db.count(collection=collection)\n            else:\n                if self.counter[collection] != self.db.count(collection=collection):\n                    self.dumper(collection=collection)\n                    changed_files.append(collection + '.csv')\n                    self.counter[collection] = self.db.count(collection=collection)\n                    logger.info('{collection} updated!'.format(collection=collection))\n        if changed_files:\n            git_manager(changed_files=changed_files)\n\n    def dumper(self, collection):\n        if collection == 'DXYArea':\n            structured_results = list()\n            results = self.db.dump(collection=collection)\n            for province_dict in results:\n                if province_dict.get('cities', None):\n                    for city_counter in range(len(province_dict['cities'])):\n                        city_dict = province_dict['cities'][city_counter]\n                        result = dict()\n                        result['provinceName'] = province_dict['provinceName']\n                        result['cityName'] = city_dict['cityName']\n\n                        result['province_confirmedCount'] = province_dict['confirmedCount']\n                        result['province_suspectedCount'] = province_dict['suspectedCount']\n 
result['province_curedCount'] = province_dict['curedCount']\n result['province_deadCount'] = province_dict['deadCount']\n\n result['city_confirmedCount'] = city_dict['confirmedCount']\n result['city_suspectedCount'] = city_dict['suspectedCount']\n result['city_curedCount'] = city_dict['curedCount']\n result['city_deadCount'] = city_dict['deadCount']\n\n result['updateTime'] = datetime.datetime.fromtimestamp(province_dict['updateTime']/1000)\n\n structured_results.append(result)\n df = pd.DataFrame(structured_results)\n df.to_csv(\n path_or_buf=os.path.join(\n os.path.split(os.path.realpath(__file__))[0], collection + '.csv'),\n index=False, encoding='utf_8_sig'\n )\n else:\n df = pd.DataFrame(data=self.db.dump(collection=collection))\n for time_type in time_types:\n if time_type in df.columns:\n df[time_type] = df[time_type].apply(lambda x: datetime.datetime.fromtimestamp(x / 1000) if not pd.isna(x) else '')\n df.to_csv(\n path_or_buf=os.path.join(\n os.path.split(os.path.realpath(__file__))[0], collection + '.csv'),\n index=False, encoding='utf_8_sig'\n )\n\n\nif __name__ == '__main__':\n listener = Listener()\n listener.run()\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"135013533","text":"#! /usr/bin/env python\nimport rospy\nimport actionlib\n\nfrom riptide_msgs.msg import DepthCommand, AttitudeCommand, LinearCommand, Imu, Object, Depth\nfrom std_msgs.msg import String, Int32, Float32, Float64\nfrom darknet_ros_msgs.msg import BoundingBoxes\nimport riptide_controllers.msg\n\nimport time\n\nclass WaitAction(object):\n\n def __init__(self):\n self._as = actionlib.SimpleActionServer(\n \"wait\", riptide_controllers.msg.WaitAction, execute_cb=self.execute_cb, auto_start=False)\n self._as.start()\n\n def execute_cb(self, goal):\n rospy.loginfo(\"Waiting for object %s\", goal.object)\n # Wait until you see the object a few times\n count = 0\n lastTime = time.time()\n while count < goal.times:\n boxes = rospy.wait_for_message(\"/state/bboxes\", BoundingBoxes)\n for a in boxes.bounding_boxes:\n if a.Class == goal.object:\n if (time.time() - lastTime) < 0.3:\n count += 1\n else:\n count = 0\n lastTime = time.time()\n if self._as.is_preempt_requested():\n rospy.loginfo('Preempted Wait')\n self._as.set_preempted()\n return\n\n rospy.loginfo(\"Found object %s\", goal.object)\n self._as.set_succeeded()\n\nif __name__ == '__main__':\n rospy.init_node('wait')\n server = WaitAction()\n rospy.spin()\n","sub_path":"riptide_controllers/action/wait.py","file_name":"wait.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"23717024","text":"import unittest\nfrom Wpp.WppCore import WppCore\nfrom Wpp.WppModule import WppModule\nfrom Wpp.Context import Context\nfrom out.OutContextMemoryStream import OutContextMemoryStream\nfrom core.ErrorTaxon import ErrorTaxon\n\nclass TestWppModule(unittest.TestCase):\n\tdef testCreateInstance(self):\n\t\tcore = WppCore.createInstance()\n\t\tself.assertEqual(core.getDebugStr(), 'WppCore')\n\t\tmodule = core.creator('module')()\n\t\tself.assertEqual(module.type, 'module')\n\n\tdef testCreateMemModule(self):\n\t\tsource = ''\n\t\tmodule = WppCore.createMemModule(source, 'myModule.mem')\n\t\tself.assertEqual(module.type, 'module')\n\n\tdef testComments(self):\n\t\tsource = \"\"\"\n# Hello!\n# Module description\n\"\"\"\n\t\tmodule = 
WppCore.createMemModule(source, 'myModule.mem')\n\t\tctx = OutContextMemoryStream()\n\t\tmodule.export(ctx)\n\t\tself.assertEqual(str(ctx), source.strip())\n\n\tdef testDuplicate(self):\n\t\tsource = \"\"\"\nvar public abcd: double = 123\nfunc public abcd: double\n\treturn 123\n\"\"\"\n\t\twith self.assertRaises(ErrorTaxon) as cm:\n\t\t\tmodule = WppCore.createMemModule(source, 'dup.mem')\n\t\tself.assertEqual(cm.exception.args[0], 'Duplicate identifier \"abcd\"')\n\n","sub_path":"src3/Wpp/tests/testWppModule.py","file_name":"testWppModule.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"551920890","text":"\"\"\"\r\n@author: J.W.Spaak\r\nNumerically compute ND and FD for a model\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom scipy.optimize import brentq, fsolve\r\nfrom warnings import warn\r\n\r\ndef NFD_model(f, n_spec = 2, args = (), monotone_f = True, pars = None,\r\n experimental = False, from_R = False, xtol = 1e-5,\r\n estimate_N_star_mono = False):\r\n    \"\"\"Compute the ND and FD for a differential equation f\r\n    \r\n    Compute the niche difference (ND), niche overlap (NO), \r\n    fitness difference (FD) and conversion factors (c)\r\n    \r\n    Parameters\r\n    -----------\r\n    f : callable ``f(N, *args)``\r\n        Per-capita growth rate of the species.\r\n        1/N dN/dt = f(N)\r\n        \r\n    n_spec : int, optional, default = 2\r\n        number of species in the system\r\n    args : tuple, optional\r\n        Any extra arguments to `f`\r\n    monotone_f : boolean or array of booleans (length: n_spec), default = True\r\n        Whether ``f_i(N_i,0)`` is monotonically decreasing in ``N_i``\r\n        Can be specified for each function separately by passing an array.\r\n    pars : dict, default {}\r\n        A dictionary to pass arguments to help numerical solvers.\r\n        The entries of this dictionary might be changed during the computation\r\n        \r\n        ``N_star`` : ndarray (shape = (n_spec, n_spec))\r\n            N_star[i] starting guess for equilibrium density with species `i`\r\n            absent. N_star[i,i] is set to 0 \r\n        ``r_i`` : ndarray (shape = n_spec)\r\n            invasion growth rates of the species\r\n        ``c`` : ndarray (shape = (n_spec, n_spec))\r\n            Starting guess for the conversion factors from one species to the\r\n            other. `c` is assumed to be symmetric and only the upper triangular\r\n            values are relevant\r\n    experimental: boolean, default False\r\n        Automatically set to True when used in combination with data of\r\n        experiments. Do not set this to True manually! \r\n    from_R: boolean, default False\r\n        Set to True if function is called via R by reticulate package.\r\n        Converts types of f and equilibria.\r\n    xtol: float, default 1e-5\r\n        Precision requirement of solving\r\n    estimate_N_star_mono: boolean, default False\r\n        If True, then N_star[i,j] will be estimated with monoculture \r\n        equilibrium density of species j.\r\n        Setting to True will potentially reduce speed, but result in more\r\n        robust behaviour.\r\n        Can only be used if ``f`` is monotone, i.e. monotone_f == True\r\n        \r\n    Returns\r\n    -------\r\n    pars : dict\r\n        A dictionary with the following keys: \r\n            \r\n        ``N_star`` : ndarray (shape = (n_spec, n_spec))\r\n            N_star[i] equilibrium density with species `i`\r\n            absent. N_star[i,i] is 0\r\n        ``r_i`` : ndarray (shape = n_spec)\r\n            invasion growth rates of the species\r\n        ``c`` : ndarray (shape = (n_spec, n_spec))\r\n            The conversion factors from one species to the\r\n            other. 
\r\n        ``ND`` : ndarray (shape = n_spec)\r\n            Niche difference of the species to the other species\r\n            ND = (r_i - eta)/(\mu -eta)\r\n        ``NO`` : ndarray (shape = n_spec)\r\n            Niche overlap of the species (NO = 1-ND)\r\n        ``FD`` : ndarray (shape = n_spec)\r\n            Fitness difference according to Spaak and De Laender 2020\r\n            FD = fc/f0\r\n        ``f0``: ndarray (shape = n_spec)\r\n            no-competition growth rate, f(0)\r\n        ``fc``: ndarray (shape = n_spec)\r\n            no-niche growth rate f(\sum c_j^i N_j^(-i),0)\r\n        ``eta``: ndarray (shape = n_spec)\r\n            no-niche growth rate f(\sum c_j^i N_j^(-i),0)\r\n            eta and fc are identical, but both are maintained for compatibility\r\n        ``mu``: ndarray (shape = n_spec)\r\n            intrinsic growth rate f(0,0)\r\n            mu and f0 are identical, but both are maintained for compatibility\r\n        ``F``: Fitness differences according to Spaak, Godoy and DeLaender\r\n            F = -eta/(mu - eta)\r\n    \r\n    Raises:\r\n        InputError:\r\n            Is raised if system cannot automatically solve equations.\r\n            Starting estimates for N_star and c should be passed.\r\n    \r\n    Examples:\r\n        See \"Example,compute NFD.py\" and \"Complicated examples for NFD.py\"\r\n        for applications for models\r\n        See \"Exp_plots.py\" for application to experimental data\r\n    \r\n    Debugging:\r\n        If InputError is raised the problem-causing information is saved in\r\n        pars.\r\n        To access it rerun the code in the following way (or similar)\r\n        \r\n        pars = {}\r\n        pars = NFD_model(f, pars = pars)\r\n        print(pars)\r\n        \r\n        pars will then contain additional information \r\n    \r\n    Literature:\r\n        \"Intuitive and broadly applicable definitions of \r\n        niche and fitness differences\", J.W.Spaak, F. deLaender\r\n        DOI: https://doi.org/10.1101/482703 \r\n    \"\"\"\r\n    if n_spec == 1:\r\n        # single-species case: ND and FD are not defined\r\n        raise InputError(\"ND and FD are not (properly) defined for a single \"\r\n                \"species community. \"\r\n                \"If needed assign manually ND = 1 and FD = 0 for this case\")\r\n    \r\n    if from_R:\r\n        if n_spec-int(n_spec) == 0:\r\n            n_spec = int(n_spec)\r\n        else:\r\n            raise InputError(\"Number of species (`n_spec`) must be an integer\")\r\n        fold = f\r\n        #f(0)\r\n        def f(N, *args):\r\n            # translate dataframes, matrices etc to np.array\r\n            return np.array(fold(N, *args)).reshape(-1)\r\n    \r\n    if not(pars is None):\r\n        try:\r\n            for key in pars.keys(): # convert to np array and make writable\r\n                pars[key] = np.array(pars[key])\r\n        except AttributeError:\r\n            raise InputError(\"Argument ``pars`` must be a dictionary or a \"\r\n                \"labeled list. e.g. 
``pars = list(N_star = N_star)\")\r\n # check input on correctness\r\n monotone_f = __input_check__(n_spec, f, args, monotone_f, pars)\r\n \r\n if experimental:\r\n if not (\"c\" in pars.keys()):\r\n pars[\"c\"] = np.ones((n_spec, n_spec))\r\n if not (\"r_i\" in pars.keys()):\r\n pars[\"r_i\"] = np.array([f(pars[\"N_star\"][i], *args)[i] \r\n for i in range(n_spec)])\r\n if not experimental:\r\n # obtain equilibria densities and invasion growth rates \r\n pars = preconditioner(f, args,n_spec, pars, xtol, monotone_f,\r\n estimate_N_star_mono) \r\n # list of all species\r\n l_spec = list(range(n_spec))\r\n # compute conversion factors\r\n c = np.ones((n_spec,n_spec))\r\n for i in l_spec:\r\n for j in l_spec:\r\n if i>=j: # c is assumed to be symmetric, c[i,i] = 1\r\n continue\r\n c[[i,j],[j,i]] = solve_c(pars,[i,j],\r\n monotone_f[i] and monotone_f[j],xtol=xtol)\r\n\r\n # compute NO and FD\r\n NO = np.empty(n_spec)\r\n FD = np.empty(n_spec)\r\n \r\n for i in l_spec:\r\n # creat a list with i at the beginning [i,0,1,...,i-1,i+1,...,n_spec-1]\r\n sp = np.array([i]+l_spec[:i]+l_spec[i+1:])\r\n # compute NO and FD\r\n if (c[i, sp[1:]] == 0).all():\r\n NO[i] = 0 # species does not interact with each other species\r\n else:\r\n NO[i] = NO_fun(pars, c[i, sp[1:]], sp)\r\n FD[i] = FD_fun(pars, c[i, sp[1:]], sp)\r\n \r\n # prepare returning values\r\n pars[\"NO\"] = NO\r\n pars[\"ND\"] = 1-NO\r\n pars[\"FD\"] = FD\r\n pars[\"c\"] = c\r\n pars[\"f0\"] = pars[\"f\"](np.zeros(n_spec)) # monoculture growth rate\r\n pars[\"fc\"] = FD*pars[\"f0\"] # no niche growth rate\r\n \r\n # add new parameters according to Spaak, Godoy and De Laender 2021\r\n pars[\"eta\"] = pars[\"fc\"]\r\n pars[\"mu\"] = pars[\"f0\"]\r\n pars[\"F\"] = -pars[\"eta\"]/(pars[\"mu\"] - pars[\"eta\"])\r\n return pars\r\n \r\ndef __input_check__(n_spec, f, args, monotone_f, pars):\r\n # check input on (semantical) correctness\r\n if not isinstance(n_spec, int):\r\n raise InputError(\"Number of species (`n_spec`) must be an integer\")\r\n \r\n # check whether `f` is a function and all species survive in monoculutre\r\n try:\r\n f0 = f(np.zeros(n_spec), *args)\r\n if f0.shape != (n_spec,):\r\n if not (pars is None):\r\n pars[\"function_call\"] = \"f(0)\"\r\n pars[\"return_value\"] = f0\r\n raise InputError(\"`f` must return an array of length `n_spec`\") \r\n except TypeError:\r\n print(\"function call of `f` did not work properly\")\r\n raise\r\n except AttributeError:\r\n fold = f\r\n f = lambda N, *args: np.array(fold(N, *args))\r\n f0 = f(np.zeros(n_spec), *args)\r\n warn(\"`f` does not return a proper `np.ndarray`\")\r\n \r\n if (not np.all(np.isfinite(f0))):\r\n raise InputError(\"All species must have positive monoculture growth\"\r\n +\"i.e. `f(0)>0`. Especially this value must be defined\")\r\n # broadcast monotone_f if necessary\r\n return np.logical_and(monotone_f, np.full(n_spec, True, bool))\r\n \r\nclass InputError(Exception):\r\n pass\r\n \r\ndef preconditioner(f, args, n_spec, pars, xtol, monotone_f,\r\n estimate_N_star_mono):\r\n \"\"\"Returns equilibria densities and invasion growth rates for system `f`\r\n \r\n Parameters\r\n -----------\r\n same as `find_NFD`\r\n \r\n Returns\r\n -------\r\n pars : dict\r\n A dictionary with the keys:\r\n \r\n ``N_star`` : ndarray (shape = (n_spec, n_spec))\r\n N_star[i] is the equilibrium density of the system with species \r\n i absent. 
The density of species i is set to 0.\r\n ``r_i`` : ndarray (shape = n_spec)\r\n invsaion growth rates of the species\r\n \"\"\" \r\n if pars is None:\r\n pars = {}\r\n \r\n # expected shapes of pars\r\n pars_def = {\"c\": np.ones((n_spec,n_spec)),\r\n \"r_i\": np.zeros(n_spec)}\r\n \r\n warn_string = \"pars[{}] must be array with shape {}.\"\\\r\n +\" The values will be computed automatically\"\r\n # check given keys of pars for correctness\r\n for key in pars_def.keys():\r\n try:\r\n if pars[key].shape == pars_def[key].shape:\r\n pass\r\n else: # `pars` doesn't have expected shape\r\n pars[key] = pars_def[key]\r\n warn(warn_string.format(key,pars_def[key].shape))\r\n except KeyError: # key not present in `pars`\r\n pars[key] = pars_def[key]\r\n except AttributeError: #`pars` isn't an array\r\n pars[key] = pars_def[key]\r\n warn(warn_string.format(key,pars_def[key].shape))\r\n \r\n # estimate N_star as monoculture equilibrium densities\r\n try:\r\n if pars[\"N_star\"].shape == (n_spec, n_spec):\r\n pass # correct shape\r\n elif pars[\"N_star\"].shape == (n_spec):\r\n # assume pars[\"N_star\"][i] is monoculture of species i\r\n pars[\"N_star\"] = pars[\"N_star\"]*np.ones((n_spec, n_spec))\r\n else: # `pars` doesn't have expected shape\r\n pars[\"N_star\"] = np.ones(n_spec)\r\n estimate_N_star_mono = True\r\n warn(warn_string.format(key,pars_def[key].shape))\r\n except KeyError: # key not present in `pars`\r\n pars[\"N_star\"] = np.ones(n_spec)\r\n estimate_N_star_mono = True\r\n except AttributeError: #`pars` isn't an array\r\n pars[key] = pars_def[key]\r\n warn(warn_string.format(key,pars_def[key].shape)) \r\n \r\n def save_f(N):\r\n # allow passing infinite species densities to per capita growthrate\r\n if np.isinf(N).any():\r\n return np.full(N.shape, -np.inf)\r\n else:\r\n N = N.copy()\r\n N[N<0] = 0 # function might be undefined for negative densities\r\n return f(N, *args)\r\n pars[\"f\"] = save_f\r\n # monoculture growth rate\r\n pars[\"f0\"] = pars[\"f\"](np.zeros(n_spec))\r\n \r\n if estimate_N_star_mono:\r\n if np.ndim(pars[\"N_star\"])==2:\r\n N_star_mono = np.mean(pars[\"N_star\"], axis = 0)\r\n else:\r\n N_star_mono = pars[\"N_star\"]\r\n \r\n # starting estimates for N_star must be positive real numbers\r\n N_star_mono[N_star_mono<=0] = 1\r\n N_star_mono[~np.isfinite(N_star_mono)] = 1\r\n \r\n # estimate N_star via brentq algorithm\r\n for i in range(n_spec):\r\n if pars[\"f0\"][i]<0:\r\n continue # species can't survive in monoculture\r\n \r\n if monotone_f[i]:\r\n \r\n counter = 0\r\n # to avoid overflow, how often can we double?\r\n max_counter = (np.log(np.finfo(float).max) - \r\n np.log(N_star_mono[i]))/np.log(2)\r\n growth = pars[\"f\"](np.insert(np.zeros(n_spec-1), i,\r\n N_star_mono[i]))[i]\r\n while (growth>0 and counter < max_counter-2):\r\n N_star_mono[i] *= 2\r\n growth = pars[\"f\"](np.insert(np.zeros(n_spec-1), i,\r\n N_star_mono[i]))[i]\r\n counter += 1\r\n if counter >= max_counter-2:\r\n raise InputError(('Monoculture growth rate of species {i} '\r\n 'does not become negative with increasing N_{i}, '\r\n 'i.e. 
``f_{i}(N_{i})``>0 for any N').format(i=i))\r\n N_star_mono[i] = brentq(lambda N: pars[\"f\"](\r\n np.insert(np.zeros(n_spec-1), i,N))[i],\r\n 0, N_star_mono[i])\r\n else:\r\n N_star_mono[i] = fsolve(pars[\"f\"](\r\n np.insert(np.zeros(n_spec-1), i,N_star_mono))[i])\r\n # estimate that equilibrium density in each community is the \r\n # monoculture equilibrium\r\n pars[\"N_star\"] = N_star_mono*np.ones((n_spec, n_spec))\r\n # remove species from own resident community\r\n pars[\"N_star\"][np.arange(n_spec), np.arange(n_spec)] = 0\r\n \r\n # c must be a positive real number\r\n if (np.any(~np.isfinite(pars[\"c\"])) or np.any(pars[\"c\"]<=0) \r\n or pars[\"c\"].dtype != float):\r\n warn(\"Some entries in pars['c'] were not positive real numbers.\"\r\n \"These are replaced with 1\")\r\n pars[\"c\"] = np.real(pars[\"c\"])\r\n pars[\"c\"][pars[\"c\"] <= 0] = 1\r\n pars[\"c\"][~np.isfinite(pars[\"c\"])] = 1\r\n \r\n for i in range(n_spec):\r\n # to set species i to 0\r\n ind = np.arange(n_spec) != i\r\n # solve for equilibrium, use equilibrium dens. of previous run\r\n N_pre,info,a ,b = fsolve(lambda N: pars[\"f\"](np.insert(N,i,0))[ind],\r\n pars[\"N_star\"][i,ind], full_output = True,\r\n xtol = xtol)\r\n \r\n # Check stability of equilibrium\r\n # Jacobian of system at equilibrium\r\n r = np.zeros((n_spec-1, n_spec-1))\r\n r[np.triu_indices(n_spec-1)] = info[\"r\"].copy()\r\n jac = np.diag(N_pre).dot(info[\"fjac\"].T).dot(r)\r\n # check whether we found equilibrium\r\n if np.amax(np.abs(info[\"fvec\"]))>xtol:\r\n pars[\"equilibrium found with spec{} absent\".format(i)] = N_pre\r\n pars[\"growth at found equilibrium\"] = info[\"fvec\"]\r\n try:\r\n pars[\"eigenvalues equilibrium\"] = np.linalg.eigvals(jac)\r\n except np.linalg.LinAlgError:\r\n pass\r\n pars[\"fsolve output\"] = info\r\n raise InputError(\"Not able to find resident equilibrium density, \"\r\n + \"with species {} absent.\".format(i)\r\n + \" Please provide manually via the `pars` argument\")\r\n \r\n # check whether equilibrium is feasible, i.e. positive\r\n if not (np.all(N_pre>0) and np.all(np.isfinite(N_pre))):\r\n pars[\"equilibrium found with spec{} absent\".format(i)] = N_pre\r\n pars[\"growth at found equilibrium\"] = info[\"fvec\"]\r\n try:\r\n pars[\"eigenvalues equilibrium\"] = np.linalg.eigvals(jac)\r\n except np.linalg.LinAlgError:\r\n pass\r\n pars[\"fsolve output\"] = info\r\n raise InputError(\"Found equilibrium is not feasible (i.e. 
N*>0), \"\r\n + \"with species {} absent.\".format(i)\r\n + \" Please provide manually via the `pars` argument\")\r\n \r\n # check whether real part of eigenvalues is negative\r\n if max(np.real(np.linalg.eigvals(jac)))>0:\r\n pars[\"equilibrium found with spec{} absent\".format(i)] = N_pre\r\n pars[\"growth at found equilibrium\"] = info[\"fvec\"]\r\n try:\r\n pars[\"eigenvalues equilibrium\"] = np.linalg.eigvals(jac)\r\n except np.linalg.LinAlgError:\r\n pass\r\n pars[\"fsolve output\"] = info\r\n raise InputError(\"Found equilibrium is not stable, \"\r\n + \"with species {} absent.\".format(i)\r\n + \" Please provide manually via the `pars` argument\") \r\n \r\n # save equilibrium density and invasion growth rate\r\n pars[\"N_star\"][i] = np.insert(N_pre,i,0)\r\n pars[\"r_i\"][i] = pars[\"f\"](pars[\"N_star\"][i])[i]\r\n return pars\r\n \r\ndef solve_c(pars, sp = [0,1], monotone_f = True, xtol = 1e-10):\r\n \"\"\"find the conversion factor c for species sp\r\n \r\n Parameters\r\n ----------\r\n pars : dict\r\n Containing the N_star and r_i values, see `preconditioner`\r\n sp: array-like\r\n The two species to convert into each other\r\n \r\n Returns\r\n -------\r\n c : float, the conversion factor c_sp[0]^sp[1]\r\n \"\"\"\r\n # check for special cases first\r\n if ((pars[\"N_star\"][sp[0], sp[1]] == 0) or\r\n (pars[\"N_star\"][sp[1],sp[0]] == 0)):\r\n return 0,0\r\n \r\n NO_values = [NO_fun(pars,1, sp), NO_fun(pars,1, sp[::-1])]\r\n # do species interact?\r\n if np.isclose(NO_values, [0,0]).any():\r\n return special_case(np.isclose(NO_values, [0,0]), sp)\r\n # has one species reached minimal growth rate?\r\n if np.isinf(NO_values).any():\r\n return special_case_mort(np.isinf(NO_values), sp)\r\n \r\n \r\n \r\n sp = np.asarray(sp)\r\n \r\n def inter_fun(c):\r\n # equation to be solved\r\n NO_ij = np.abs(NO_fun(pars,c, sp))\r\n NO_ji = np.abs(NO_fun(pars,1/c,sp[::-1]))\r\n return NO_ij-NO_ji\r\n \r\n # use a generic numerical solver when `f` is not montone\r\n # potentially there are multiple solutions\r\n if not monotone_f: \r\n print(pars[\"N_star\"].shape)\r\n c = fsolve(inter_fun,pars[\"c\"][sp[0],sp[1]],xtol = xtol)[0]\r\n if np.abs(inter_fun(c))>xtol:\r\n pars[\"c found by fsolve\"] = c\r\n raise InputError(\"Not able to find c_{}^{}.\".format(*sp) +\r\n \"Please pass a better guess for c_i^j via the `pars` argument\")\r\n return c, 1/c\r\n \r\n # if `f` is monotone then the solution is unique, find it with a more\r\n # robust method\r\n \r\n # find interval for brentq method\r\n a = pars[\"c\"][sp[0],sp[1]]\r\n # find which species has higher NO for c0\r\n direction = np.sign(inter_fun(a))\r\n \r\n if direction == 0: # starting guess for c is correct\r\n return a, 1/a\r\n fac = 2**direction\r\n if not np.isfinite(direction):\r\n pars[\"function inputs\"] = [switch_niche(pars[\"N_star\"][es[0]],es,c)\r\n for c in [0,a, 1/a] for es in [sp, sp[::-1]]]\r\n pars[\"function outputs\"] = [pars[\"f\"](inp) for \r\n inp in pars[\"function inputs\"]]\r\n raise InputError(\"function `f` seems to be returning nonfinite values\")\r\n b = float(a*fac)\r\n # change searching range to find c with changed size of NO\r\n while np.sign(inter_fun(b)) == direction:\r\n a = b\r\n b *= fac\r\n # test whether a and be behave as they should (e.g. nonfinite)\r\n if not((2*a == b) or (2*b == a)) or np.sign(b-a) != direction:\r\n raise InputError(\"Not able to find c_{}^{}.\".format(*sp) +\r\n \"Please pass a better guess for c_i^j via the `pars` argument\"+\r\n \". 
Please also check for non-positive entries in pars[``c``]\")\r\n # solve equation\r\n try:\r\n c = brentq(inter_fun,a,b)\r\n except ValueError:\r\n raise ValueError(\"f does not seem to be monotone. Please run with\"\r\n +\"`monotone_f = False`\")\r\n # test whether c actually is correct\r\n # c = 0 implies issue with brentq\r\n if (c==0) or inter_fun(c)>xtol:\r\n pars[\"c\"][sp[0],sp[1]] = c\r\n raise InputError(\"Not able to find c_{}^{}.\".format(*sp) +\r\n \"Please pass a better guess for c_i^j via the `pars` argument\"+\r\n \". Please also check for non-positive entries in pars[``c``]\")\r\n return c, 1/c # return c_i and c_j = 1/c_i\r\n\r\ndef special_case(no_comp, sp):\r\n # Return c for special case where one spec is not affected by competition\r\n \r\n warn(\"Species {} and {} do not seem to interact.\".format(sp[0], sp[1]) +\r\n \" This may result in nonfinite c, ND and FD values.\")\r\n \r\n if no_comp.all():\r\n return 0, 0 # species do not interact at all, c set to zero\r\n elif (no_comp == [True, False]).all():\r\n return 0, np.inf # only first species affected\r\n elif (no_comp == [False, True]).all():\r\n return np.inf, 0\r\n \r\ndef special_case_mort(mort, sp):\r\n # Return c for special case where one spec is not affected by itself\r\n \r\n warn(\"Species {} or {} reached mortality rate.\".format(sp[0], sp[1]) +\r\n \" This may result in nonfinite c, ND and FD values.\")\r\n \r\n if mort.all():\r\n return 0, 0 # both species have reached mortality rate\r\n elif (mort == [True, False]).all():\r\n return np.inf, 0 # only first species affected\r\n elif (mort == [False, True]).all():\r\n return 0, np.inf\r\n \r\ndef NO_fun(pars,c, sp):\r\n # Compute NO for specis sp and conversion factor c\r\n f0 = pars[\"f\"](switch_niche(pars[\"N_star\"][sp[0]],sp))[sp[0]]\r\n fc = pars[\"f\"](switch_niche(pars[\"N_star\"][sp[0]],sp,c))[sp[0]]\r\n\r\n if f0 == fc:\r\n return np.sign(f0-pars[\"r_i\"])[sp[0]]*np.inf\r\n \r\n return (f0-pars[\"r_i\"][sp[0]])/(f0-fc)\r\n \r\ndef FD_fun(pars, c, sp):\r\n # compute the FD for species sp and conversion factor c\r\n f0 = pars[\"f\"](switch_niche(pars[\"N_star\"][sp[0]],sp))[sp[0]]\r\n fc = pars[\"f\"](switch_niche(pars[\"N_star\"][sp[0]],sp,c))[sp[0]]\r\n \r\n return fc/f0\r\n \r\ndef switch_niche(N,sp,c=0):\r\n # switch the niche of sp[1:] into niche of sp[0]\r\n N = N.copy()\r\n N[sp[0]] += np.nansum(c*N[sp[1:]])\r\n N[sp[1:]] = 0\r\n return N","sub_path":"numerical_NFD.py","file_name":"numerical_NFD.py","file_ext":"py","file_size_in_byte":22718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"573742725","text":"\n#import numpy as np\nimport cv2\nimport os\n\nssdcocoModelPath = os.path.dirname(os.path.abspath(__file__))\n\nprototxt = os.path.join(ssdcocoModelPath, 'deploy.prototxt')\nmodelFile = os.path.join(ssdcocoModelPath, 'VGG_coco_SSD_300x300_iter_400000.caffemodel')\nprototxtFaces = os.path.join(ssdcocoModelPath, 'deploy_Faces.prototxt')\nmodelFileFaces = os.path.join(ssdcocoModelPath, 'res10_300x300_ssd_iter_140000_fp16_Faces.caffemodel')\n\nif not os.path.exists(prototxt) or not os.path.exists(modelFile) or not os.path.exists(prototxtFaces) or not os.path.exists(modelFileFaces):\n\tif not os.path.isfile(prototxt) or not os.path.isfile(modelFile) or not os.path.isfile(prototxtFaces) or not os.path.isfile(modelFileFaces):\n\t\tprint('exiting: could not find ssdcoco models')\n\t\tprint('download the model from: 
https://drive.google.com/file/d/0BzKzrI_SkD1_dUY1Ml9GRTFpUWc/view')\n\t\tprint('download the model from (for Faces): https://github.com/opencv/opencv_3rdparty/raw/19512576c112aa2c7b6328cb0e8d589a4a90a26d/res10_300x300_ssd_iter_140000_fp16.caffemodel')\n\t\traise SystemExit(1)\n\n# initialize the ssdcoco model from prototxt and modelFile for class-based object detection\nnet = cv2.dnn.readNetFromCaffe(prototxt, modelFile)\n\n# initialize the ssdcoco model from prototxtFaces and modelFileFaces for face detection\nnetFaces = cv2.dnn.readNetFromCaffe(prototxtFaces, modelFileFaces)\n\n","sub_path":"object_detector_cocossd_py/load_cocossd.py","file_name":"load_cocossd.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"109879322","text":"import os\nimport sys\nROOTDIR = os.path.abspath(os.path.join(sys.path[0]))\nsys.path.append(ROOTDIR)\nimport tensorflow as tf\nimport numpy as np\nfrom Dl3dDataset import DataSet\n\n## read numpy data\ndef load_data(filename):\n    try:\n        data = np.load(filename)\n        ds = DataSet(data['imgs'], data['features'], data['labels'])\n    except:\n        print(\"Can not find data file\")\n        ds = None\n    finally:\n        return ds\n\n# help functions to build graph\ndef weight_variable(shape):\n    initial = tf.truncated_normal(shape, stddev=0.1)\n    return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n    initial = tf.constant(0.1, shape=shape)\n    return tf.Variable(initial)\n\n\ndef conv2d(x, W, strides=[1, 1, 1, 1]):\n    return tf.nn.conv2d(x, W, strides=strides, padding='SAME')\n\n\ndef max_pool_2x2(x):\n    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef single_net(RES):\n    with tf.name_scope('input'):\n        x = tf.placeholder(tf.float32, shape=[None, RES, RES], name='x')\n        ft = tf.placeholder(tf.float32, shape=[None, 37], name='ft')\n        y_ = tf.placeholder(tf.float32, shape=[None, 5], name='y')\n\n    x_image = tf.reshape(x, [-1, RES, RES, 1], name='x-reshape')\n\n    # first layer\n    with tf.name_scope('layer1'):\n        W_conv1 = weight_variable([3, 3, 1, 16])\n        b_conv1 = bias_variable([16])\n\n        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n\n        h_pool1 = max_pool_2x2(h_conv1)\n        # [-1, 64, 64, 16]\n\n    # second layer\n    with tf.name_scope('layer2'):\n        W_conv2 = weight_variable([3, 3, 16, 32])\n        b_conv2 = bias_variable([32])\n\n        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2, strides=[1, 2, 2, 1]) + b_conv2)\n        h_pool2 = max_pool_2x2(h_conv2)\n        # [-1, 16, 16, 32]\n\n    with tf.name_scope('layer3'):\n        W_conv3 = weight_variable([3, 3, 32, 64])\n        b_conv3 = bias_variable([64])\n\n        h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3, strides=[1, 2, 2, 1]) + b_conv3)\n        h_pool3 = max_pool_2x2(h_conv3)\n        # [-1, 4, 4, 64] = [-1, 1024]\n\n    with tf.name_scope('fc1'):\n        W_fc1 = weight_variable([4 * 4 * 64, 512])\n        b_fc1 = bias_variable([512])\n\n        h_pool3_flat = tf.reshape(h_pool3, [-1, 4 * 4 * 64])\n        h_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)\n        # [-1, 512]\n\n    return x, ft, y_, h_fc1\n\nclass DLSpMVModel(object):\n    def __init__(self, train_data, test_data, model_data, result_data):\n\n        self.RES = 0\n        self.mean = 0\n        self.std = 1\n\n        self.train = load_data(train_data)\n        print(self.train.images.shape, self.train.labels.shape)\n\n        if self.train:\n            self.RES = self.train.images.shape[-1] # 128\n            self.mean = np.mean(self.train.images[:,0,:,:], axis=0)\n            self.std = np.std(self.train.images[:,0,:,:], axis=0)\n        #print(self.train.images)\n        self.test = load_data(test_data)\n 
print(self.test.images.shape, self.test.labels.shape)\n if self.test and self.RES == 0:\n print(self.test.images.shape, self.test.labels.shape)\n self.RES = self.test.images.shape[-1] # 128\n\n self.STEPS = 6000\n self.output = result_data\n self.model = model_data\n\n def build_graph(self):\n pass\n\n\n def training(self):\n\n print(\"Model is in training mode\")\n assert self.train is not None and self.test is not None, \"data not loaded\"\n\n with tf.name_scope('fc_snip1'):\n x, ft, y_, h_fc1_snip1 = single_net(self.RES)\n #[-1, 512]\n\n with tf.name_scope('fc_snip2'):\n x2, ft2, y2_, h_fc1_snip2 = single_net(self.RES)\n\n h_fc1 = tf.concat([h_fc1_snip1, h_fc1_snip2], axis=1)\n # [-1, 512 * 2]\n\n with tf.name_scope('dropout'):\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n with tf.name_scope('fc2'):\n W_fc2 = weight_variable([512 * 2, 64])\n b_fc2 = bias_variable([64])\n h1_fc2 = tf.add(tf.matmul(h_fc1_drop, W_fc2), b_fc2)\n\n h_fc2 = tf.concat([h1_fc2, ft], axis=1)\n\n with tf.name_scope('out'):\n W_fc3 = weight_variable([64 + 37, 5])\n b_fc3 = bias_variable([5])\n\n y_conv = tf.add(tf.matmul(h_fc2, W_fc3), b_fc3, name='y_conv_restore')\n\n with tf.name_scope('cross_entropy'):\n cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n labels=y_, logits=y_conv) # takes unnormalized output\n )\n\n with tf.name_scope('train'):\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(\n tf.cast(correct_prediction, tf.float32), name='acc_to_restore')\n tf.summary.scalar('accuracy', accuracy)\n\n merged = tf.summary.merge_all()\n\n saver = tf.train.Saver() # traditional saving api\n\n # train the model\n with tf.Session() as sess:\n\n sess.run(tf.global_variables_initializer())\n for i in range(self.STEPS):\n batch = self.train.next_batch(100)\n if i % 100 == 0:\n train_accuracy = sess.run(accuracy, feed_dict={x: batch[0][:,0,:,:], ft: batch[1], y_: batch[2], x2: batch[0][:,1,:,:], ft2: batch[1], y2_: batch[2], keep_prob: 1.0})\n print('step %d, training accuracy %g' % (i, train_accuracy))\n else:\n _ = sess.run(train_step, feed_dict={x: batch[0][:,0,:,:], ft: batch[1], y_: batch[2], x2: batch[0][:,1,:,:], ft2: batch[1], y2_: batch[2], keep_prob: 0.5})\n # test\n print('test accuracy %g' % accuracy.eval(feed_dict={x: self.test.images[:,0,:,:], ft: self.test.features, y_: self.test.labels, x2: self.test.images[:,1,:,:], ft2: self.test.features, y2_: self.test.labels, keep_prob: 1.0}))\n\n # save model and checkpoint\n save_path = saver.save(sess, os.path.join(ROOTDIR, self.model, \"model-{}.ckpt\".format(self.STEPS)))\n print(\"Model saved in file %s\" % save_path)\n\n def testing(self):\n \"\"\" restore model and checkpoint\n\n [description]\n \"\"\"\n print(\"Model is in testing mode\")\n assert self.test is not None, \"data not loaded\"\n\n tf.reset_default_graph() # the graph is empty now, must build graph before restore value\n\n with tf.Session() as sess:\n # retore graph\n saver = tf.train.import_meta_graph(os.path.join(ROOTDIR, self.model, 'model-{}.ckpt.meta'.format(self.STEPS)))\n # the current graph can be explored by\n graph = tf.get_default_graph()\n # restore value\n saver.restore(sess, tf.train.latest_checkpoint(os.path.join(ROOTDIR, self.model)))\n print(\"Model restored\")\n\n x = graph.get_tensor_by_name(\"fc_snip1/input/x:0\")\n ft = 
graph.get_tensor_by_name(\"fc_snip1/input/ft:0\")\n y_ = graph.get_tensor_by_name(\"fc_snip1/input/y:0\")\n x2 = graph.get_tensor_by_name(\"fc_snip2/input/x:0\")\n ft2 = graph.get_tensor_by_name(\"fc_snip2/input/ft:0\")\n y2_ = graph.get_tensor_by_name(\"fc_snip2/input/y:0\")\n\n keep_prob = graph.get_tensor_by_name(\"dropout/keep_prob:0\")\n #acc = graph.get_tensor_by_name('train/acc_to_restore:0')\n y_conv = graph.get_tensor_by_name('out/y_conv_restore:0')\n print(\"-------------------------------------------------------\")\n out_y = sess.run(y_conv, feed_dict={x: self.test.images[:,0,:,:], ft: self.test.features, y_: self.test.labels, x2: self.test.images[:,1,:,:], ft2: self.test.features, y2_: self.test.labels, keep_prob: 1.0})\n\n wrongIds = np.zeros((self.test.labels.shape[0], 2), dtype='int32')\n for i in range(self.test.labels.shape[0]):\n wrongIds[i][0] = np.argmax(self.test.labels[i])\n wrongIds[i][1] = np.argmax(out_y[i])\n np.savez('{}'.format(self.output), wrongIds=wrongIds)\n # test\n #print(\"-------------------------------------------------------\")\n #print('Test accuracy %g' % sess.run(acc, feed_dict={x: self.test.images[:,0,:,:], y_: self.test.labels, x2: self.test.images[:,1,:,:], y2_: self.test.labels, keep_prob: 1.0}))\n print(\"-------------------------------------------------------\")\n\n\ndef main():\n if len(sys.argv) < 6:\n print(\"usage: {} flag{{train, test}} {{train data}} {{test data}} {{model data}} {{result data}}\".format(sys.argv[0]))\n exit()\n\n FLAG = sys.argv[1].lower()\n train_data = sys.argv[2]\n test_data = sys.argv[3]\n model_data = sys.argv[4]\n result_data = sys.argv[5]\n\n print(train_data)\n print(test_data)\n print(model_data)\n print(result_data)\n\n model = DLSpMVModel(os.path.join(ROOTDIR, train_data),\n os.path.join(ROOTDIR, test_data),\n os.path.join(ROOTDIR, model_data),\n os.path.join(ROOTDIR, result_data))\n\n if FLAG == 'train':\n model.training()\n elif FLAG == 'test':\n model.testing()\n\nif __name__ == '__main__':\n main()\n","sub_path":"software/supervised/3d-tensor/Dl3dNet.py","file_name":"Dl3dNet.py","file_ext":"py","file_size_in_byte":9343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"82214653","text":"__author__ = 'tfan'\n\nimport pypyodbc\nimport time\nimport os\nfrom datetime import datetime\nimport sys\nimport socket\nimport getpass\n\n#Temp variables to hold connection string\nServer = ''\nUsername = ''\nPassword = ''\nSyslogIP = ''\n\n\n#Variable holds total number of incident processed\n#global Totalincident\n#Totalincident = 0\n\n#Variable holds starting time of script\nTempTimelower = datetime.now()\nTimeLowerboundary = TempTimelower.strftime('%Y-%m-%d %H:%M:%S')\n\n#variable holds the upper limit of the incident time when query\nTimeUpperboundary = 0\n\n#Variable holds total running time\nTempTotalruntime = datetime.now()\nTotalruntime = TempTotalruntime.strftime('%Y-%m-%d %H:%M:%S')\n\n#Variable holds number of iteration\nIterationNum = 0\n\n\nFACILITY = {\n 'kern': 0, 'user': 1, 'mail': 2, 'daemon': 3,\n 'auth': 4, 'syslog': 5, 'lpr': 6, 'news': 7,\n 'uucp': 8, 'cron': 9, 'authpriv': 10, 'ftp': 11,\n 'local0': 16, 'local1': 17, 'local2': 18, 'local3': 19,\n 'local4': 20, 'local5': 21, 'local6': 22, 'local7': 23,\n}\n\nLEVEL = {\n 'emerg': 0, 'alert':1, 'crit': 2, 'err': 3,\n 'warning': 4, 'notice': 5, 'info': 6, 'debug': 7\n}\n\n#Define the function to send syslog message\ndef syslog(message, level=LEVEL['notice'], facility=FACILITY['daemon'], host='localhost', port=514):\n \"\"\" Send 
syslog UDP packet to given host and port. \"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n data = '<%d>%s' % (level + facility*8, message)\n\n #[debug]: print out message send to syslog\n print(data)\n sock.sendto(bytes(data, 'UTF-8'), (host, port))\n sock.close()\n#End of syslog sending function\n\n#Define log file checking function\ndef Logfilechecking (str):\n\n if os.path.exists('./Scriptlog.log') == False:\n\n #Create a log file if does not exist, indicate script run for 1st time\n Log_file = open('Scriptlog.log','w')\n text = \"%s -- New log file created\" %str\n print (\"File does not exist, creating new log file\")\n\n #[Debug]: enable logging debug\n #print (text)\n Log_file.writelines(text)\n text = \"\\n%s -- Script started first time\" %str\n Log_file.writelines(text)\n Log_file.close()\n\n else:\n\n #Log file exists, writing script start time\n Log_file = open('Scriptlog.log','a')\n text = \"\\n%s -- Script started\" % str\n Log_file.writelines(text)\n\n #[Debug] Print text for debug purpose, turn off\n #print (text)\n\n Log_file.close()\n#End of log file checking function\n\n#Define log file insert function, this will write query result to log file, the input is a time stamp and string\ndef Logfilewrite (str1, str2):\n\n Log_file = open('Scriptlog.log','a')\n text = \"\\n%s -- %s\" % (str1, str2)\n Log_file.writelines(text)\n Log_file.close()\n#End of log file write function\n\n#Defination of the DBQuery function with passing parameters, SQL IP, Username and password\ndef SQLQuery (arg1, arg2, arg3, arg4, arg5):\n #Variable holds the name of online_active partition\n partition = 0\n\n # Get a connection to MSSQL ODBC DSN via pypyodbc, and assign it to conn\n conn= pypyodbc.connect(driver='{SQL Server}', server='%s' % arg1, database='wbsn-data-security', uid='%s' % arg2, pwd='%s' % arg3)\n\n # Give me a cursor so I can operate the database with the cursor\n cur = conn.cursor()\n\n # Select current active-online partition\n cur.execute('''select PARTITION_INDEX from dbo.PA_EVENT_PARTITION_CATALOG WHERE STATUS='ONLINE_ACTIVE' ''')\n\n #for d in cur.description:\n\n #[Debug]: print out column name\n #print(d[0], end=\" \")\n\n for row in cur.fetchall():\n for field in row:\n Partition = field\n\n #print (field, end=\" \")\n\n # [Debug]: print current partition name\n #print (Partition)\n\n print('')\n\n\n\n cur.execute('''\n SELECT [APP_version]\n ,PA_EVENTS_%s.ID\n ,[STATUS]\n ,CASE PA_EVENTS_20140526.ACTION_TYPE\n\t\t\t\t\t\tWHEN 1 THEN ' act=Audited'\n\t\t\t\t\t\tWHEN 100 THEN ' act=Quarantined'\n\t\t\t\t\t\tWHEN 2 THEN ' act=Blocked'\n\t\t\t\t\t\tWHEN 3 THEN ' act=Encrypted'\n \t\t\t\t\t\tWHEN 4 THEN ' act=Released'\n\t\t\t\t\t\tWHEN 5 THEN ' act=Run Command'\n\t\t\t\t\t\tWHEN 6 THEN ' act=Permitted'\n\t\t\t\t\t\tWHEN 7 THEN ' act=Notify'\n\t\t\t\t\t\tWHEN 8 THEN ' act=Endpoint Confirm Abort'\n\t\t\t\t\t\tWHEN 9 THEN ' act=Endpoint Confirm Continue'\n\t\t\t\t\t\tWHEN 10 THEN ' act=Endpoint Run Command'\n\t\t\t\t\t\tWHEN 11 THEN ' act=Drop attachments'\n\t\t\t\t\t\tWHEN 13 THEN ' act=Encrypt with Password'\n\t\t\t\tEND\n ,[DESTINATIONS]\n ,[ATT_NAMES]\n ,[SUBJECT]\n ,PA_MNG_USERS.LOGIN_NAME\n ,[POLICY_CATEGORIES]\n ,[ANALYZED_BY]\n\n FROM dbo.PA_EVENTS_%s, [dbo].[PA_MNG_USERS]\n WHERE PA_EVENTS_%s.SOURCE_ID = PA_MNG_USERS.ID AND\n dbo.PA_EVENTS_%s.INSERT_DATE <= '%s'AND\n dbo.PA_EVENTS_%s.INSERT_DATE >= '%s' ''' % (Partition, Partition, Partition, Partition, arg5, Partition,arg4))\n\n # Print the table headers (column descriptions)\n #for d in 
cur.description:\n # [Debug]: print out column header\n #print(d[0], end=\" \")\n\n # Start a new line\n print('')\n\n message = 'CEF:0|Websense|Data Security|'\n\n # Print the table, one row per line\n for row in cur.fetchall():\n n = 0\n\n for field in row:\n if n == 0 or n == 1:\n message = message + '%s|' % field\n\n elif n == 2:\n message = message + 'DLP Syslog|%s|' % field\n\n elif n == 4:\n message = message + ' duser=%s ' % field\n\n elif n == 5:\n message = message + 'fname=%s ' % field\n\n elif n == 6:\n message = message + 'msg=%s ' % field\n\n elif n == 7:\n message = message + 'suser=%s ' % field\n\n elif n == 8:\n message = message + 'cat=%s ' % field\n\n elif n == 9:\n message = message + 'PE=%s ' % field\n\n\n else:\n message = message + '%s' % field\n\n #print(message + '%s' % field, end=\"|\")\n n += 1\n\n #print(message)\n\n #[Debug]: Write the entry to log file as well\n Logfilewrite(TimeUpperboundary, message)\n\n syslog(message, level=6, facility=1, host='%s'%SyslogIP, port=514)\n #global Totalincident += 1\n\n message = 'CEF:0|Websense|Data Security|'\n\n print('')\n\n # I have done all the things, you can leave me and serve for others!\n\n cur.close()\n conn.close()\n #print(global Totalincident + 'Incident processed')\n\n return\n#End of SQLQuery\n\n#call for function to check the log file, pass current time as parameter\nLogfilechecking(TimeLowerboundary);\n\n#define command line validation\ntotal = len(sys.argv)\n#[Debug]: print out total number of args passed to command\n#print (\"The total numbers of args passed to the script: %d \" % total)\n\n#checking length of cmd string, exit and prompt syntax\nif total != 5:\n print (\"Error, incorrect Syntax!\")\n print (\"Usage: Script \")\n sys.exit()\n\nelse:\n #Ask for password when syntax is correct and continue the rest of program\n Server = str(sys.argv[1])\n Username =str(sys.argv[3])\n Password = getpass.getpass(prompt='Enter password for %s: ' % str(sys.argv[3]))\n SyslogIP = str(sys.argv[2])\n\n #[Debug]: Print out password, this should be comment out\n #print('You entered:', Password)\n\n DelayTimer = float(sys.argv[4])\n\n #Clear the screen\n os.system('cls')\n print('Syslog script started:')\n\n#Infite loop that fire up the query in a configurable interval, changing the sleep timer(in sec)\nwhile True:\n try:\n #To sleep 5 seconds\n time.sleep(DelayTimer)\n\n TempTimeUpper = datetime.now()\n TimeUpperboundary=TempTimeUpper.strftime('%Y-%m-%d %H:%M:%S')\n\n #[DEBUG]: Print out time interval,\n print('Time: '+TimeLowerboundary + ' --- '+ TimeUpperboundary)\n\n #Calling SQL query to output content\n SQLQuery(Server, Username, Password, TimeLowerboundary, TimeUpperboundary)\n\n #Re-assign low with upper value to move on to next iteration\n TimeLowerboundary = TimeUpperboundary\n\n #Keep record of number of loop\n IterationNum += 1\n\n #Catch the pypyodbc database related error\n except pypyodbc.DatabaseError:\n print(\"There was a database error, script terminated\")\n Log_file = open('Scriptlog.log','a')\n text = \"\\n%s -- Script terminated with database error\" %TimeLowerboundary\n Log_file.writelines(text)\n Log_file.close()\n sys.exit()\n\n except KeyboardInterrupt:\n print (\"Ctrl-C detected, script terminated.\")\n Log_file = open('Scriptlog.log','a')\n text = \"\\n%s -- Script terminated by Ctrl-C\" %TimeLowerboundary\n Log_file.writelines(text)\n text = \"\\n%s -- Script fired up %s times, terminated at %s \" %(TimeLowerboundary, IterationNum, TimeUpperboundary)\n Log_file.writelines(text)\n 
print(\"Script fired up %s times, terminated at %s \" %(IterationNum,TimeUpperboundary))\n Log_file.close()\n sys.exit()\n\n\n\n","sub_path":"MainScript.py","file_name":"MainScript.py","file_ext":"py","file_size_in_byte":9274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"347485092","text":"# Ignore user warnings to keep the terminal clean\nimport warnings\nwarnings.simplefilter(\"ignore\", UserWarning)\n\n# Import the necessary python library modules\nimport numpy as np\nfrom scipy.signal import lsim\nfrom scipy.special import gamma\nfrom scipy import optimize\nimport control\nimport os\nimport sys\nfrom sklearn.neighbors import NearestNeighbors\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\nimport pdb\n\n# Add my local path to the relevant modules list\nsys.path.append('/Users/Daniel/Github/Crawlab-Student-Code/Daniel Newman/Python Modules')\n\n# Import my python modules\nimport InputShaping as shaping\nimport Generate_Plots as genplt\nimport twomass \n\nfolder = 'Figures/{}/'.format(sys.argv[0])\n\nk = (2 * np.pi)**2\nc = 0.1\n\nk_d = 1\nk_p = 30\nm_1 = 1\nm_2 = 1\n\nk_d_vals = np.arange(1,100,1)\nreal_roots = np.zeros([len(k_d_vals),4])\nimag_roots = np.zeros([len(k_d_vals),4])\n\nfor j in range(0,len(k_d_vals)):\n\tk_d = k_d_vals[j]\n\n\tA = np.array([[0,1,0,0],[-(k+k_p)/m_1,-(c+k_d)/m_1,k/m_1,c/m_1],[0,0,0,1],[k/m_2,c/m_2,-k/m_2,-c/m_2]])\n\tB = np.array([[0,1,0,0]]).T\n\tC = np.array([0,0,1,0])\n\tD = 0\n\n\tsys = control.ss(A,B,C,D)\n\n\troots = control.pole(sys)\n\tzetas = np.sqrt(roots.real**2 / (np.abs(roots.imag) + roots.real**2))\n\n\tfor i in range(0,len(roots)):\n\t\t#if zetas[i] < 1:\n\t\treal_roots[j,i] = roots[i].real\n\t\timag_roots[j,i] = roots[i].imag\n\nprint(real_roots.shape)\nreal_kp30 = np.array([real_roots.flatten()])\nimag_kp30 = np.array([imag_roots.flatten()])\n\n#points = np.delete(points,np.where(points[:,1] == 0),axis=0)\n#pdb.set_trace()\n\nk_p = 5\n\nreal_roots = np.zeros([len(k_d_vals),4])\nimag_roots = np.zeros([len(k_d_vals),4])\n\nfor j in range(0,len(k_d_vals)):\n\tk_d = k_d_vals[j]\n\n\tA = np.array([[0,1,0,0],[-(k+k_p)/m_1,-(c+k_d)/m_1,k/m_1,c/m_1],[0,0,0,1],[k/m_2,c/m_2,-k/m_2,-c/m_2]])\n\tB = np.array([[0,1,0,0]]).T\n\tC = np.array([0,0,1,0])\n\tD = 0\n\n\tsys = control.ss(A,B,C,D)\n\n\troots = control.pole(sys)\n\tzetas = np.sqrt(roots.real**2 / (np.abs(roots.imag) + roots.real**2))\n\n\tfor i in range(0,len(roots)):\n\t\t#if zetas[i] < 1:\n\t\treal_roots[j,i] = roots[i].real\n\t\timag_roots[j,i] = roots[i].imag\n\nreal_kp5 = np.array([real_roots.flatten()])\nimag_kp5 = np.array([imag_roots.flatten()])\n\nk_p = 100\n\nreal_roots = np.zeros([len(k_d_vals),4])\nimag_roots = np.zeros([len(k_d_vals),4])\n\nfor j in range(0,len(k_d_vals)):\n\tk_d = k_d_vals[j]\n\n\tA = np.array([[0,1,0,0],[-(k+k_p)/m_1,-(c+k_d)/m_1,k/m_1,c/m_1],[0,0,0,1],[k/m_2,c/m_2,-k/m_2,-c/m_2]])\n\tB = np.array([[0,1,0,0]]).T\n\tC = np.array([0,0,1,0])\n\tD = 0\n\n\tsys = control.ss(A,B,C,D)\n\n\troots = control.pole(sys)\n\tzetas = np.sqrt(roots.real**2 / (np.abs(roots.imag) + roots.real**2))\n\n\tfor i in range(0,len(roots)):\n\t\t#if zetas[i] < 1:\n\t\treal_roots[j,i] = roots[i].real\n\t\timag_roots[j,i] = roots[i].imag\n\nreal_kp100 = np.array([real_roots.flatten()])\nimag_kp100 = np.array([imag_roots.flatten()])\n\nk_p = 60\n\nreal_roots = np.zeros([len(k_d_vals),4])\nimag_roots = np.zeros([len(k_d_vals),4])\n\nfor j in range(0,len(k_d_vals)):\n\tk_d = k_d_vals[j]\n\n\tchar_eq = [ 1,\n\t\t\t\t(c * m_1 + c 
* m_2 + k_d * m_2) / (m_1 * m_2),\n\t\t\t\t(c * k_d + k * m_1 + k * m_2 + k_p * m_2) / (m_1 * m_2),\n\t\t\t\t(k * k_d + c * k_p) / (m_1 * m_2),\n\t\t\t\t(k * k_p) / (m_1 * m_2)]\n\n\troots = np.roots(char_eq)\n\n\tA = np.array([[0,1,0,0],[-(k+k_p)/m_1,-(c+k_d)/m_1,k/m_1,c/m_1],[0,0,0,1],[k/m_2,c/m_2,-k/m_2,-c/m_2]])\n\tB = np.array([[0,1,0,0]]).T\n\tC = np.array([0,0,1,0])\n\tD = 0\n\n\tsys = control.ss(A,B,C,D)\n\n\t#roots = control.pole(sys)\n\n\tzetas = np.sqrt(roots.real**2 / (np.abs(roots.imag) + roots.real**2))\n\n\tfor i in range(0,len(roots)):\n\t\t#if zetas[i] < 1:\n\t\treal_roots[j,i] = roots[i].real\n\t\timag_roots[j,i] = roots[i].imag\n\nreal_kp60 = np.array([real_roots.flatten()])\nimag_kp60 = np.array([imag_roots.flatten()])\n\nreal_roots = np.concatenate((real_kp5,real_kp30,real_kp60,real_kp100)).T\nimag_roots = np.concatenate((imag_kp5,imag_kp30,imag_kp60,imag_kp100)).T\n\nreal_roots[np.where(imag_roots == 0)] = -20\n\ngenplt.compare_scatter(real_roots,imag_roots,[r'$K_P=5$',r'$K_P=30$',r'$K_P=60$',r'$K_P=100$'],name_append='ScatterTest',\n\t\t\t\t\t\tylabel='Imaginary',xlabel='Real',xmin = -10,xmax=0,\n\t\t\t\t\t\t\t\t\t\t\t folder=folder,showplot=False,grid=True,ncol=2,legend_loc='top',ymax=0.1) ","sub_path":"Code/Paper_Simulations/ACC_ConcurrentDesign_2017/2d_contour.py","file_name":"2d_contour.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"292664681","text":"from setuptools import setup, find_packages\nimport os\nimport sys\n\nfrom version import __version__\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nextra = {}\nrequirements = ['python-ldap'],\ntests_require = ['nose', 'Mock', 'coverage', 'unittest2', 'python-ldap']\n\nsetup(\n name = \"fakeldap\",\n version = __version__,\n #packages = find_packages('fakeldap'),\n #include_package_data=True,\n py_modules = ['fakeldap'],\n install_requires = requirements,\n\n tests_require=tests_require,\n setup_requires='nose',\n test_suite = \"nose.collector\",\n extras_require={'test': tests_require},\n\n author = \"Christo Buschek\",\n author_email = \"crito@30loops.net\",\n url = \"https://github.com/zulip/fakeldap\",\n description = \"An implementation of a LDAPObject to fake a ldap server in unittests.\",\n long_description = read('README.rst'),\n classifiers = [\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development :: Testing',\n ],\n **extra\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"278304891","text":"import numpy as np\nfrom numpy import array\nfrom scipy import stats\nfrom numpy.testing import assert_almost_equal, assert_array_almost_equal\n\ndef create_dataset():\n # Create a 2x3 grid with hard coded float values for 11 years, from 2012 to 2022\n fp_2022 = np.array([[1.2, 1.3, 1.4],\n [2.2, 2.3, 2.4]])\n\n fp_2021 = np.array([[1.1, 1.2, 1.3],\n [2.1, 2.2, 2.3]])\n\n fp_2020 = np.array([[1.0, 1.1, 1.2],\n [2.0, 2.1, 2.2]])\n\n fp_2019 = np.array([[0.9, 1.0, 1.1],\n [1.9, 2.0, 2.1]])\n\n fp_2018 = np.array([[0.8, 0.9, 1.0],\n [1.8, 1.9, 2.0]])\n\n fp_2017 = np.array([[0.7, 0.8, 0.9],\n 
[1.7, 1.8, 1.9]])\n\n fp_2016 = np.array([[0.6, 0.7, 0.8],\n [1.6, 1.7, 1.8]])\n\n fp_2015 = np.array([[0.5, 0.6, 0.7],\n [1.5, 1.6, 1.7]])\n\n fp_2014 = np.array([[0.4, 0.5, 0.6],\n [1.4, 1.5, 1.6]])\n\n fp_2013 = np.array([[0.3, 0.4, 0.5],\n [1.3, 1.4, 1.5]])\n\n fp_2012 = np.array([[0.2, 0.3, 0.4],\n [1.2, 1.3, 1.4]])\n \n # Stack the historical data together along a new time axis\n historical_data = np.stack([fp_2012, fp_2013, fp_2014, fp_2015, fp_2016, fp_2017, fp_2018, fp_2019,\n fp_2020, fp_2021, fp_2022], axis=0)\n\n return historical_data\n\ndef create_region_mask():\n # Create a 2x3 mask grid with three cells masked (represented by 1)\n region_mask = np.array([[1, 0, 0]\n , [1, 1, 0]])\n return region_mask\n\ndef create_weights():\n # create a 2x3 grid with hard coded float values to be used as normalized weights for each of the cells\n weights = np.array([[0.1, 0.2, 0.3], [0.2, 0.1, 0.1]])\n return weights\n\n# this is a utility function for testing.\ndef assert_list_dict_almost_equal(list1, list2, decimal=7):\n # check that the lists have the same length\n assert len(list1) == len(list2)\n # iterate over the lists\n for dict1, dict2 in zip(list1, list2):\n # check that the dictionaries have the same keys\n assert dict1.keys() == dict2.keys()\n # iterate over the keys\n for key in dict1.keys():\n # check that the values are numpy arrays\n assert isinstance(dict1[key], np.ndarray)\n assert isinstance(dict2[key], np.ndarray)\n # compare the arrays element-wise with the given precision\n assert_almost_equal(dict1[key], dict2[key], decimal=decimal)\n\n\ndef get_historical_mean_and_percentiles_and_proportions(historical_data, weights, region_mask=None):\n results = []\n\n if region_mask is not None:\n # Apply the region mask to the data\n #data = np.multiply(data, region_mask)\n historical_data = np.where(region_mask == 0, np.nan, historical_data) # Replace masked regions with np.nan instead of with 0\n # Adjust the weights according to the region_mask\n weights = np.multiply(weights, region_mask)\n\n # Calculate the weighted mean for each year\n weighted_mean = np.nansum(historical_data * weights, axis=(1, 2)) / np.nansum(weights, axis=(0, 1))\n\n # Calculate the percentiles for each grid cell over time\n percentiles = np.nanpercentile(historical_data, [0, 5, 20, 80, 95], axis=0, method='linear')# linear interpolation used for percentiles\n\n # Extract percentile values into variables\n zerothPct, fifthPct, twentiethPct, eightiethPct, ninetyfifthPct = percentiles\n\n # Calculate the proportions of the region in each percentile range\n total_cells = np.count_nonzero(~np.isnan(historical_data), axis=0) # Count non-NaN cells\n props = np.zeros((3,) + historical_data.shape[1:])\n for i in range(historical_data.shape[1]):\n for j in range(historical_data.shape[2]):\n props[0, i, j] = np.nansum((historical_data[:, i, j] >= zerothPct[i, j]) & (historical_data[:, i, j] <= twentiethPct[i, j])) / total_cells[i, j]\n props[1, i, j] = np.nansum((historical_data[:, i, j] > twentiethPct[i, j]) & (historical_data[:, i, j] <= eightiethPct[i, j])) / total_cells[i, j]\n props[2, i, j] = np.nansum(historical_data[:, i, j] > eightiethPct[i, j]) / total_cells[i, j]\n\n results.append({\"mean\": weighted_mean, \n \"5th_pct\": fifthPct, \n \"95th_pct\": ninetyfifthPct, \n \"prop_0_20\": props[0], \n \"prop_20_80\": props[1], \n \"prop_80_up\": props[2]})\n\n return results\n\ndef process_historical_data(get_historical_mean_and_percentiles_and_proportions, historical_data, region_mask, weights):\n # 
Calculate the percentiles and proportions for the historical data without the region mask\n results = get_historical_mean_and_percentiles_and_proportions(historical_data, weights)\n expected_results = [{'mean': array([0.71, 0.81, 0.91, 1.01, 1.11, 1.21, 1.31, 1.41, 1.51, 1.61, 1.71]), \n '5th_pct': array([[0.25, 0.35, 0.45], \n [1.25, 1.35, 1.45]]), \n '95th_pct': array([[1.15, 1.25, 1.35], \n [2.15, 2.25, 2.35]]), \n 'prop_0_20': array([[0.27272727, 0.27272727, 0.27272727], \n [0.27272727, 0.27272727, 0.27272727]]), \n 'prop_20_80': array([[0.54545455, 0.54545455, 0.54545455], \n [0.54545455, 0.54545455, 0.54545455]]), \n 'prop_80_up': array([[0.18181818, 0.18181818, 0.18181818], \n [0.18181818, 0.18181818, 0.18181818]])\n }\n ]\n\n \n assert_list_dict_almost_equal(expected_results, results, decimal=6)\n \n # calculate the percentiles for the first grid cell values over the years from 2012 to 2022\n first_grd_cell = [1.2,1.1,1.0,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2]\n percentiles = np.nanpercentile(first_grd_cell, [0, 5, 20, 80, 95], axis=0, method='linear')#linear interpolation used for percentiles\n\n assert np.isclose(results[0]['5th_pct'][0,0], percentiles[1])# equal to .25\n assert np.isclose(results[0]['95th_pct'][0,0], percentiles[4])# equal to 1.15\n\n print(f\"Yearly_historical_results_without_mask {results}\")\n \n # Calculate the percentiles and proportions for the historical data with the region mask\n results = get_historical_mean_and_percentiles_and_proportions(historical_data, weights, region_mask)\n expected_results = [{'mean': array([0.975, 1.075, 1.175, 1.275, 1.375, 1.475, 1.575, 1.675, 1.775, 1.875, 1.975]), \n '5th_pct': array([[0.25, np.nan, np.nan],\n [1.25, 1.35, np.nan]]), \n '95th_pct': array([[1.15, np.nan, np.nan],\n [2.15, 2.25, np.nan]]), \n 'prop_0_20': array([[0.27272727, np.nan, np.nan],\n [0.27272727, 0.27272727, np.nan]]), \n 'prop_20_80': array([[0.54545455, np.nan, np.nan], \n [0.54545455, 0.54545455, np.nan]]), \n 'prop_80_up': array([[0.18181818, np.nan, np.nan], \n [0.18181818, 0.18181818, np.nan]])\n }\n ]\n\n\n assert_list_dict_almost_equal(expected_results, results, decimal=6)\n\n print(f\"Yearly_historical_results_with_mask {results}\")\n \n\nhistorical_data = create_dataset()\nregion_mask = create_region_mask()\nweights = create_weights()\n\nprocess_historical_data(get_historical_mean_and_percentiles_and_proportions, historical_data, region_mask, weights)","sub_path":"dews_analysis/ABARES_expectations.py","file_name":"ABARES_expectations.py","file_ext":"py","file_size_in_byte":7929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"473563813","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 24 17:23:22 2021\r\n\r\n@author: zengg\r\n\"\"\"\r\n\r\n\r\nimport os\r\nprint(os.getcwd())#显示当前路径\r\n\r\nos.chdir('D:/ZG/tools/python')#更改路径\r\n\r\n\r\n# 导入柱状图-Bar\r\nfrom pyecharts.charts import Bar\r\n\r\nbar = Bar()\r\nbar.add_xaxis([\"衬衫\", \"羊毛衫\", \"雪纺衫\", \"裤子\", \"高跟鞋\", \"袜子\"])\r\nbar.add_yaxis(\"商家A\", [5, 20, 36, 10, 75, 90])\r\n# render 会生成本地 HTML 文件,默认会在当前目录生成 render.html 文件\r\n# 也可以传入路径参数,如 bar.render(\"mycharts.html\")\r\nbar.render()\r\n\r\n\r\nbar = (\r\n Bar()\r\n .add_xaxis([\"衬衫\", \"羊毛衫\", \"雪纺衫\", \"裤子\", \"高跟鞋\", \"袜子\"])\r\n .add_yaxis(\"商家A\", [5, 20, 36, 10, 75, 90])\r\n)\r\nbar.render()\r\n\r\nfrom pyecharts import options as opts\r\n\r\n# V1 版本开始支持链式调用\r\n# 你所看到的格式其实是 `black` 格式化以后的效果\r\n# 可以执行 `pip install black` 下载使用\r\nbar = (\r\n Bar()\r\n .add_xaxis([\"衬衫\", \"羊毛衫\", \"雪纺衫\", \"裤子\", 
\"高跟鞋\", \"袜子\"])\r\n .add_yaxis(\"商家A\", [5, 20, 36, 10, 75, 90])\r\n .set_global_opts(title_opts=opts.TitleOpts(title=\"主标题\", subtitle=\"副标题\"))\r\n # 或者直接使用字典参数\r\n # .set_global_opts(title_opts={\"text\": \"主标题\", \"subtext\": \"副标题\"})\r\n)\r\nbar.render()\r\n\r\n# 定制主题\r\nfrom pyecharts.globals import ThemeType\r\n\r\nbar = (\r\n Bar(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))\r\n .add_xaxis([\"衬衫\", \"羊毛衫\", \"雪纺衫\", \"裤子\", \"高跟鞋\", \"袜子\"])\r\n .add_yaxis(\"商家A\", [5, 20, 36, 10, 75, 90])\r\n .add_yaxis(\"商家B\", [15, 6, 45, 20, 35, 66])\r\n .set_global_opts(title_opts=opts.TitleOpts(title=\"主标题\", subtitle=\"副标题\"))\r\n)\r\nbar.render()","sub_path":"pyecharts/pyecharts.py","file_name":"pyecharts.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"572941490","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/mburger/Work/Research/NeutralCloudModel/nexoclom/build/lib/nexoclom/bouncepackets.py\n# Compiled at: 2018-12-18 15:28:51\n# Size of source mod 2**32: 2380 bytes\nimport numpy as np\nfrom .input_classes import AngularDist\nfrom .source_distribution import angular_distribution\n\ndef bouncepackets(self, t1, x1, v1, f1, hhh):\n x0 = x1[(0, hhh)]\n y0 = x1[(1, hhh)]\n z0 = x1[(2, hhh)]\n r0 = np.sqrt(x0 ** 2 + y0 ** 2 + z0 ** 2)\n vx0 = v1[(0, hhh)]\n vy0 = v1[(1, hhh)]\n vz0 = v1[(2, hhh)]\n a = vx0 ** 2 + vy0 ** 2 + vz0 ** 2\n b = 2 * (x0 * vx0 + y0 * vy0 + z0 * vz0)\n c = x0 ** 2 + y0 ** 2 + z0 ** 2 - 1.0\n dd = b ** 2 - 4 * a * c\n assert np.all(dd >= 0)\n t0 = (-b - np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)\n t1 = (-b + np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)\n t = (t0 <= 0) * t0 + (t1 < 0) * t1\n x2 = x0 + vx0 * t\n y2 = y0 + vy0 * t\n z2 = z0 + vz0 * t\n assert np.all(np.isfinite(x2))\n assert np.all(np.isfinite(y2))\n assert np.all(np.isfinite(z2))\n lonhit = (np.arctan2(x2, -y2) + 2 * np.pi) % (2 * np.pi)\n lathit = np.arcsin(z2)\n x1[(0, hhh)] = x2\n x1[(1, hhh)] = y2\n x1[(2, hhh)] = z2\n PE = 2 * self.GM * (1.0 / r0 - 1)\n vv02 = a + PE\n vv02[vv02 < 0] = 0.0\n assert np.all(np.isfinite(vv02))\n if self.inputs.sticking_info.emitfn.lower() == 'maxwellian':\n assert 0, 'Not set up yet'\n if self.inputs.sticking_info.Tsurf == 0:\n surftemp = SurfaceTemperatue(self.inputs.geometry, lonhit, lathit)\n else:\n pass\n else:\n if self.inputs.sticking_info.emitfn.lower() == 'elastic scattering':\n vv2 = np.sqrt(vv02)\n else:\n if not 0:\n raise AssertionError('Emit function not set up yet')\n else:\n angdist = AngularDist({'type':'costheta', 'altitude':f\"0,{np.pi / 2}\", \n 'azimuth':f\"0,{2 * np.pi}\"}, None)\n VV = angular_distribution(angdist, x1[:, hhh], vv2)\n v1[:, hhh] = VV\n if self.inputs.sticking_info.stickcoef > 0:\n f1[hhh] *= 1 - self.inputs.sticking_info.stickcoef\n else:\n assert 0","sub_path":"pycfiles/nexoclom-2.0.18-py3.7/bouncepackets.cpython-37.py","file_name":"bouncepackets.cpython-37.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"65153362","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\n\n\ndef output_to_csv(data):\n data.to_csv('./pandas_no_ki.csv')\n\n\ndef main():\n items = ['name', 'phone', 'address']\n df = pd.DataFrame([], columns=items)\n print(df)\n\n sample = [['taro', '090-xxxx-xxxx', 'Tokyo'], 
['jiro', '090-xxxx-xxxx', 'Okinawa'], ['saburo', '090-xxxx-xxxx', 'Hokkaido']]\n\n for i in range(len(sample)):\n print(sample[i])\n series = pd.Series(sample[i], index = df.columns)\n print(series)\n df = df.append(series, ignore_index = True)\n\n print(df)\n\n output_to_csv(df)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n#ref: https://qiita.com/567000/items/d8a29bb7404f68d90dd4\n\n\n","sub_path":"python/pandas_no_ki.py","file_name":"pandas_no_ki.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"399501506","text":"#!/usr/bin/env python3\n\"\"\"\n@author: Magnus Erik Hvass Pedersen: https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/02_Convolutional_Neural_Network.ipynb\nModified into a Python script instead of a Jupyter Notebook and uses Tensorflow's MNIST data set.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\ntf.reset_default_graph()\nimport numpy as np\nimport time\nfrom datetime import timedelta\nimport math\n#from PIL import Image, ImageFilter\n\nfrom tensorflow.examples.tutorials.mnist import input_data\ntf.set_random_seed(1234)\nnp.random.seed(1234)\n\nimport pdb #Equivalent of keyboard in MATLAB, just add \"pdb.set_trace()\"\n\n###############################################################################\n# Helper Functions #\n###############################################################################\n#####################\n# Plotting Images #\n#####################\ndef plot_images(images, cls_true, cls_pred=None):\n assert len(images) == len(cls_true) == 9\n \n # Create figure with 3x3 sub-plots.\n fig, axes = plt.subplots(3, 3)\n fig.subplots_adjust(hspace=0.3, wspace=0.3)\n\n for i, ax in enumerate(axes.flat):\n # Plot image.\n ax.imshow(images[i].reshape(img_shape), cmap='binary')\n\n # Show true and predicted classes.\n if cls_pred is None:\n xlabel = \"True: {0}\".format(cls_true[i])\n else:\n xlabel = \"True: {0}, Pred: {1}\".format(cls_true[i], cls_pred[i])\n\n # Show the classes as the label on the x-axis.\n ax.set_xlabel(xlabel)\n \n # Remove ticks from the plot.\n ax.set_xticks([])\n ax.set_yticks([])\n \n # Ensure the plot is shown correctly with multiple plots\n # in a single Notebook cell.\n plt.show()\n\n############################\n# New Weights and Biases #\n############################\ndef new_weights(layer_num, shape):\n #return tf.Variable(tf.random_normal(shape, stddev=0.05))\n return tf.get_variable(name = 'W' + layer_num, shape = shape, initializer = tf.random_normal_initializer(stddev=0.05))\ndef new_biases(layer_num, length):\n #return tf.Variable(tf.constant(0.0, shape=[length]))\n return tf.get_variable(name = 'b' + layer_num, shape = [length], initializer = tf.constant_initializer(0))\n\n########################################\n# Convolutional Neural Network Layer #\n########################################\ndef new_conv_layer(layer_num, # For name of tf.get_variable\n input, # The previous layer.\n num_input_channels, # Num. channels in prev. layer.\n filter_size, # Width and height of each filter.\n num_filters, # Number of filters.\n use_pooling=True, # Use 2x2 max-pooling.\n use_activation=True): # Use activation.\n\n # Shape of the filter-weights for the convolution.\n # This format is determined by the TensorFlow API.\n shape = [filter_size, filter_size, num_input_channels, num_filters]\n\n # Create new weights aka. 
filters with the given shape.\n weights = new_weights(layer_num=layer_num, shape=shape)\n \n # Create new biases, one for each filter.\n biases = new_biases(layer_num=layer_num ,length=num_filters)\n\n # Create the TensorFlow operation for convolution.\n # Note the strides are set to 1 in all dimensions.\n # The first and last stride must always be 1,\n # because the first is for the image-number and\n # the last is for the input-channel.\n # But e.g. strides=[1, 2, 2, 1] would mean that the filter\n # is moved 2 pixels across the x- and y-axis of the image.\n # The padding is set to 'SAME' which means the input image\n # is padded with zeroes so the size of the output is the same.\n layer = tf.nn.conv2d(input=input,\n filter=weights,\n strides=[1, 1, 1, 1],\n padding='SAME')\n\n # Add the biases to the results of the convolution.\n # A bias-value is added to each filter-channel.\n layer += biases\n\n # Use pooling to down-sample the image resolution?\n if use_pooling:\n # This is 2x2 max-pooling, which means that we\n # consider 2x2 windows and select the largest value\n # in each window. Then we move 2 pixels to the next window.\n layer = tf.nn.max_pool(value=layer,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # Rectified Linear Unit (ReLU).\n # It calculates max(x, 0) for each input pixel x.\n # This adds some non-linearity to the formula and allows us\n # to learn more complicated functions.\n if use_activation:\n layer = tf.nn.relu(layer) \n\n # Note that ReLU is normally executed before the pooling,\n # but since relu(max_pool(x)) == max_pool(relu(x)) we can\n # save 75% of the relu-operations by max-pooling first.\n\n # We return both the resulting layer and the filter-weights\n # because we will plot the weights later.\n return layer, weights\n\n#######################################################\n# Flatten Layer for Input to Fully-Connected Layers #\n#######################################################\ndef flatten_layer(layer):\n # Get the shape of the input layer.\n layer_shape = layer.get_shape()\n\n # The shape of the input layer is assumed to be:\n # layer_shape == [num_images, img_height, img_width, num_channels]\n\n # The number of features is: img_height * img_width * num_channels\n # We can use a function from TensorFlow to calculate this.\n num_features = layer_shape[1:4].num_elements()\n \n # Reshape the layer to [num_images, num_features].\n # Note that we just set the size of the second dimension\n # to num_features and the size of the first dimension to -1\n # which means the size in that dimension is calculated\n # so the total size of the tensor is unchanged from the reshaping.\n layer_flat = tf.reshape(layer, [-1, num_features])\n\n # The shape of the flattened layer is now:\n # [num_images, img_height * img_width * num_channels]\n\n # Return both the flattened layer and the number of features.\n return layer_flat, num_features\n\n############################\n# Fully-Connected Layer #\n############################\ndef new_fc_layer(layer_num, # For name of tf.get_variable\n input, # The previous layer.\n num_inputs, # Num. inputs from prev. layer.\n num_outputs, # Num. 
outputs.\n use_relu=True): # Use Rectified Linear Unit (ReLU)?\n\n # Create new weights and biases.\n weights = new_weights(layer_num, shape=[num_inputs, num_outputs])\n biases = new_biases(layer_num, length=num_outputs)\n\n # Calculate the layer as the matrix multiplication of\n # the input and weights, and then add the bias-values.\n layer = tf.matmul(input, weights) + biases\n\n # Use ReLU?\n if use_relu:\n layer = tf.nn.relu(layer)\n\n return layer, weights\n\n###########################\n# Train Neural Network #\n###########################\ndef optimize(num_iterations,train_batch_size):\n global total_iterations # Ensure we update the global variable rather than a local copy.\n \n start_time = time.time() # Start-time used for printing time-usage below.\n \n for i in range(total_iterations,\n total_iterations + num_iterations):\n\n # Get a batch of training examples.\n # x_batch now holds a batch of images and\n # y_true_batch are the true labels for those images.\n x_batch, y_true_batch = data.train.next_batch(batch_size=train_batch_size)\n\n # Put the batch into a dict with the proper names\n # for placeholder variables in the TensorFlow graph.\n feed_dict_train = {x: x_batch,\n y_true: y_true_batch}\n\n # Run the optimizer using this batch of training data.\n # TensorFlow assigns the variables in feed_dict_train\n # to the placeholder variables and then runs the optimizer.\n session.run(optimizer, feed_dict=feed_dict_train)\n\n # Print status every 100 iterations.\n if i % 55 == 0:\n # Calculate the accuracy on the training-set.\n acc = session.run(accuracy, feed_dict=feed_dict_train)\n\n # Message for printing.\n msg = \"Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}\"\n\n # Print it.\n print(msg.format(i + 1, acc))\n \n end_time = time.time() # Ending time for 100 batches\n time_dif = end_time - start_time # Difference between start and end-times. 
\n print(\"Time usage: \" + str(timedelta(seconds=int(round(time_dif))))) # Print the time-usage.\n start_time = time.time() # Start-time used for printing time-usage below.\n\n total_iterations += num_iterations # Update the total number of iterations performed.\n\n\n################################\n# Print Accuracy on Test Set #\n################################\ndef print_test_accuracy():\n\n num_test = len(data.test.images) # Number of images in the test-set.\n\n # Allocate an array for the predicted classes which\n # will be calculated in batches and filled into this array.\n cls_pred = np.zeros(shape=num_test, dtype=np.int)\n\n # Now calculate the predicted classes for the batches.\n # We will just iterate through all the batches.\n\n i = 0 # The starting index for the next batch is denoted i.\n\n while i < num_test:\n j = min(i + test_batch_size, num_test) # The ending index for the next batch is denoted j.\n images = data.test.images[i:j, :] # Get the images from the test-set between index i and j.\n labels = data.test.labels[i:j, :] # Get the associated labels.\n feed_dict = {x: images, y_true: labels} # Create a feed-dict with these images and labels.\n cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict) # Calculate the predicted class using TensorFlow.\n\n # Set the start-index for the next batch to the\n # end-index of the current batch.\n i = j\n\n cls_true = data.test.cls # Convenience variable for the true class-numbers of the test-set.\n correct = (cls_true == cls_pred) # Create a boolean array whether each image is correctly classified.\n\n # Calculate the number of correctly classified images.\n # When summing a boolean array, False means 0 and True means 1.\n correct_sum = correct.sum()\n\n # Classification accuracy is the number of correctly classified\n # images divided by the total number of images in the test-set.\n acc = float(correct_sum) / num_test\n\n # Print the accuracy.\n msg = \"Accuracy on Test-Set: {0:.1%} ({1} / {2})\"\n print(msg.format(acc, correct_sum, num_test))\n\n################################\n# PLot Convolutional Weights #\n################################\ndef plot_conv_weights(weights, input_channel=0):\n # Assume weights are TensorFlow ops for 4-dim variables\n # e.g. weights_conv1 or weights_conv2.\n \n # Retrieve the values of the weight-variables from TensorFlow.\n # A feed-dict is not necessary because nothing is calculated.\n w = session.run(weights)\n\n # Get the lowest and highest values for the weights.\n # This is used to correct the colour intensity across\n # the images so they can be compared with each other.\n w_min = np.min(w)\n w_max = np.max(w)\n\n # Number of filters used in the conv. layer.\n num_filters = w.shape[3]\n\n # Number of grids to plot.\n # Rounded-up, square-root of the number of filters.\n num_grids = math.ceil(math.sqrt(num_filters))\n \n # Create figure with a grid of sub-plots.\n fig, axes = plt.subplots(num_grids, num_grids)\n\n # Plot all the filter-weights.\n for i, ax in enumerate(axes.flat):\n # Only plot the valid filter-weights.\n if i height: # check which dimension is bigger\n # Width is bigger. 
Width becomes 20 pixels.\n nheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width\n if (nheight == 0): # rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight) / 2), 0)) # calculate horizontal position\n newImage.paste(img, (4, wtop)) # paste resized image on white canvas\n else:\n # Height is bigger. Heigth becomes 20 pixels.\n nwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height\n if (nwidth == 0): # rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition\n newImage.paste(img, (wleft, 4)) # paste resized image on white canvas\n\n tv = list(newImage.getdata()) # get pixel values\n\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\n #tva = [x * 1.0 / 255.0 for x in tv] # Use this for white background\n tva = [(255 - x) * 1.0 / 255.0 for x in tv] # Use this for black background\n return tva\n\n\n###############################################################################\n# Driver #\n###############################################################################\n##################################\n# Display Data Set Information #\n##################################\ndata = input_data.read_data_sets('data/MNIST/', one_hot=True)\nprint(\"Size of:\")\nprint(\"- Training-set:\\t\\t{}\".format(len(data.train.labels)))\nprint(\"- Test-set:\\t\\t{}\".format(len(data.test.labels)))\nprint(\"- Validation-set:\\t{}\".format(len(data.validation.labels)))\ndata.test.cls = np.argmax(data.test.labels,axis=1)\n\n#=== Labels ===#\nimg_size = 28 # The number of pixels in each dimension of an image.\nimg_size_flat = img_size*img_size # The images are stored in one-dimensional arrays of this length.\nimg_shape = (img_size,img_size) # Tuple with height and width of images used to reshape arrays.\nnum_classes = 10 # Number of classes, one class for each of 10 digits.\nnum_channels = 1 # Number of colour channels for the images: 1 channel for gray-scale.\n\n#=== Plot a Few Images ===#\nimages = data.test.images[0:9] # Get the first images from the test-set.\ncls_true = data.test.cls[0:9] # Get the true classes for those images.\nprint(\"\\nNine Digits from Test Set with Labels:\")\nplot_images(images=images, cls_true=cls_true) # Plot the images and labels using our helper-function above.\n\n##########################\n# Network Architecture #\n##########################\n#=== Convolutional Layer 1 ===#\nfilter_size1 = 1 # Convolution filters are 5 x 5 pixels.\nnum_filters1 = 64 # There are 16 of these filters.\n\n#=== Convolutional Layer 2 ===#\nfilter_size2 = 3 # Convolution filters are 5 x 5 pixels.\nnum_filters2 = 64 # There are 36 of these filters.\n\n#=== Convolutional Layer 2 ===#\nfilter_size3 = 1 # Convolution filters are 5 x 5 pixels.\nnum_filters3 = 1 # There are 36 of these filters.\n\n#=== Fully-connected layer ===#\nfc_size = 128 # Number of neurons in fully-connected layer.\n\n#####################\n# Construct Model #\n#####################\nx = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')\nx_image = tf.reshape(x, [-1, img_size, img_size, num_channels])\ny_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')\ny_true_cls = tf.argmax(y_true, axis=1)\n\nlayer_conv1, weights_conv1 
= \\\n new_conv_layer(layer_num = '1',\n input=x_image,\n num_input_channels=num_channels,\n filter_size=filter_size1,\n num_filters=num_filters1,\n use_pooling=False,\n use_activation = False)\n \nlayer_conv2, weights_conv2 = \\\n new_conv_layer(layer_num = '2',\n input=layer_conv1,\n num_input_channels=num_filters1,\n filter_size=filter_size2,\n num_filters=num_filters2,\n use_pooling=False,\n use_activation = True)\n \nlayer_conv3, weights_conv3 = \\\n new_conv_layer(layer_num = '3',\n input=layer_conv2,\n num_input_channels=num_filters2,\n filter_size=filter_size3,\n num_filters=num_filters3,\n use_pooling=False,\n use_activation = False)\n \nlayer_flat, num_features = flatten_layer(layer_conv3)\n\nlayer_fc, weights_class = new_fc_layer(layer_num = '4',\n input=layer_flat,\n num_inputs=num_features,\n num_outputs=num_classes,\n use_relu=False)\n\ny_pred = tf.nn.softmax(layer_fc)\ny_pred_cls = tf.argmax(y_pred, axis=1)\n\n#################\n# Train Model #\n#################\n#=== Define Optimizer Properties ===#\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc,labels=y_true)\ncost = tf.reduce_mean(cross_entropy)\noptimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)\ncorrect_prediction = tf.equal(y_pred_cls, y_true_cls)\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n#=== Define Training Parameters ===#\nnum_iterations = 1000\ntrain_batch_size = 1000\ntotal_iterations = 0 # Counter for total number of iterations performed so far.\ntest_batch_size = 1000\n\n#=== Train Network ===#\nsession = tf.Session()\nsession.run(tf.global_variables_initializer())\n\nweights_1 = session.run(weights_conv1)\nweights_2 = session.run(weights_conv2)\nweights_3 = session.run(weights_conv3)\nweights_4 = session.run(weights_class)\n\noptimize(num_iterations,train_batch_size)\nprint_test_accuracy()\n\n################################################\n# Feed Forward and Prediction for Input Data #\n################################################\nimage = data.test.images[1256]\ndigit = imageprepare('./new_images/five_1.png')#file path here\ndigit = np.array(digit)\nimage = digit.reshape((784,))\nplot_image(image)\n\nplot_conv_weights(weights=weights_conv1)\nplot_conv_layer(layer=layer_conv1, image=image)\n\nplot_conv_weights(weights=weights_conv2, input_channel=0)\nplot_conv_layer(layer=layer_conv2, image=image)\n\nprint(session.run(tf.argmax(layer_fc, 1), feed_dict={x:image.reshape(1,784)}))\n\n\n","sub_path":"Codes_TF/Other_Codes/Hvass_CNN.py","file_name":"Hvass_CNN.py","file_ext":"py","file_size_in_byte":21658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"15191918","text":"# -*- coding: utf-8 -*-\n\nimport random\n\nfrom flask import Flask, session, render_template, request, flash\n\n\napp = Flask(__name__)\napp.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', title=u'숫자야구 : 인덱스')\n\n\n@app.route('/answer', methods=['GET','POST'])\ndef answer():\n if not session.get('answer'):\n session['answer'] = ''.join(map(str,random.sample(range(0,10),4)))\n session['chance'] = 10\n session['log'] = ''\n flash(u'새 문제가 출제되었습니다.')\n else:\n if 'answer' in request.form.keys():\n input = list(request.form['answer'])\n ans = list(session['answer'])\n if session['chance'] > 1:\n if input == ans:\n flash(u'4 strike. 
정답은 ' + request.form['answer'] + u'입니다.')\n session['answer'] = None\n return render_template('alert.html', title=u'숫자야구 : 정답')\n elif len(input) != 4:\n flash(u'4자리 숫자로만 시도해주세요.')\n else:\n session['chance'] -= 1\n \n strike = ball = 0\n for i, c in enumerate(input):\n if c == ans[i]:\n strike += 1\n elif c in ans:\n ball += 1\n\n log = session['log'].split('|')\n for x in log:\n flash(x)\n\n flash(u'%s - %d strike, %d ball, 기회가 %d번 남았습니다.' % (request.form['answer'], strike, ball, session['chance']))\n\n log.append(u'%s - %d strike, %d ball' % (request.form['answer'], strike, ball))\n session['log'] = '|'.join(log)\n else:\n flash(u'기회를 모두 소진하셨습니다. 정답은 ' + session['answer'] + u'입니다.')\n session['answer'] = None\n return render_template('alert.html', title=u'숫자야구 : 패배')\n else:\n flash(u'값을 입력해주세요.')\n return render_template('answer.html', title=u'숫자야구 : 도전')","sub_path":"Python/baseball/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"81094638","text":"# -*- coding: utf-8 -*-\nfrom django.views.generic import CreateView, ListView, DeleteView\nfrom django.template.loader import render_to_string\nfrom django.core.mail import EmailMessage\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.http import HttpResponseRedirect\nfrom braces.views import LoginRequiredMixin, StaffuserRequiredMixin\n\nfrom .models import NotasTecnicas\nfrom .forms import NotasTecnicasCreateForm, NotasTecnicasSearchForm\nfrom ..edificios.models import Edificios\nfrom ..perfiles.models import Perfiles\n\n\nclass NotasTecnicasCreateView(LoginRequiredMixin, StaffuserRequiredMixin, CreateView):\n \"\"\"\n Vista para la Creación de una Nota Técnica\n \"\"\"\n model = NotasTecnicas\n form_class = NotasTecnicasCreateForm\n raise_exception = True\n\n def get_success_url(self):\n messages.success(self.request, \"Se ha creado una nueva Nota Técnica.\")\n return reverse('notas-tecnicas:create')\n\n def form_valid(self, form):\n # -- enviar por mail, solo si desde\n # -- el form se indicar True\n if form.cleaned_data['enviado']:\n if self._enviar_aviso_por_email(form):\n form.instance.mail_recibido = True\n\n return super(NotasTecnicasCreateView, self).form_valid(form)\n\n def form_invalid(self, form):\n return super(NotasTecnicasCreateView, self).form_invalid(form)\n\n def _enviar_aviso_por_email(self, form):\n try:\n # -- obtengo Id de usuario\n if self.request.POST.get('edificio') is not None:\n user = Edificios.usuario_por_edificio(self.request.POST.get('edificio'))\n\n # -- obtengo direccion de mail\n email = Perfiles.obtener_mail_por_usuario(user)\n\n # -- tiene email cargado?\n if len(email) > 0:\n subject = \"[STIB] Nota Técnica - \"\n if self.request.POST.get('edificio') is not None:\n subject += str(form.cleaned_data['edificio'])\n\n ctx = {'link_vista': 'http://google.com'}\n\n return _send_email(email, subject, ctx)\n else:\n return False\n except:\n messages.error(self.request, \"Se produjo un error en el envío del email.\")\n return False\n\n\nclass NotasTecnicasListView(LoginRequiredMixin, StaffuserRequiredMixin, ListView):\n \"\"\" Listado de notas técnicas \"\"\"\n model = NotasTecnicas\n context_object_name = 'notas_tecnicas'\n raise_exception = True\n\n def get_context_data(self, **kwargs):\n ctx = super(NotasTecnicasListView, self).get_context_data(**kwargs)\n 
ctx['search_form'] = NotasTecnicasSearchForm\n return ctx\n\n def get_queryset(self):\n qs = super(NotasTecnicasListView, self).get_queryset()\n\n # -- busqueda x titulo? --\n titulo = self.request.GET.get('titulo', None)\n if titulo:\n qs = qs.filter(titulo__icontains=titulo)\n # -- busqueda x descripcion? --\n descripcion = self.request.GET.get('descripcion', None)\n if descripcion:\n qs = qs.filter(descripcion__icontains=descripcion)\n # -- leido? --\n leido = self.request.GET.get('leido', None)\n if leido:\n qs = qs.filter(leido=True if leido == \"1\" else False)\n # -- Mail enviado? --\n mail_enviado = self.request.GET.get('mail', None)\n if mail_enviado:\n qs = qs.filter(enviado=True if mail_enviado == \"1\" else False)\n # -- Mail recibido? --\n mail_recibido = self.request.GET.get('mail_recibido', None)\n if mail_recibido:\n qs = qs.filter(mail_recibido=True if mail_recibido == \"1\" else False)\n # -- estado? --\n estado = self.request.GET.get('estado')\n if estado:\n qs = qs.filter(estado=estado)\n # -- fechas desde/hasta --\n fecha_desde = self.request.GET.get('fecha_desde')\n fecha_hasta = self.request.GET.get('fecha_hasta')\n if fecha_desde and fecha_hasta:\n fecha_desde = fecha_desde.split('/')\n fecha_hasta = fecha_hasta.split('/')\n fecha_desde = fecha_desde[2] + \"-\" + fecha_desde[1] + \"-\" + fecha_desde[0]\n fecha_hasta = fecha_hasta[2] + \"-\" + fecha_hasta[1] + \"-\" + fecha_hasta[0]\n qs = qs.filter(creado__gte=fecha_desde, creado__lte=fecha_hasta)\n # -- edificio? --\n edificio = self.request.GET.get('edificio', None)\n if edificio:\n qs = qs.filter(edificio=edificio)\n\n return qs\n\n\nclass NotasTecnicasDeleteView(LoginRequiredMixin, StaffuserRequiredMixin, DeleteView):\n \"\"\" Borrado de Nota Técnica \"\"\"\n model = NotasTecnicas\n raise_exception = True\n\n def get_success_url(self):\n messages.success(self.request, 'La nota técnica fue eliminada.')\n return reverse('notas-tecnicas:list')\n\n\ndef _send_email(email_to, subject, context, *args):\n \"\"\"\n Envío de email\n \"\"\"\n try:\n body = render_to_string(\"emails/email_notas_tecnicas.html\", context)\n msg = EmailMessage(subject=subject,\n body=body,\n from_email='no-reply@stibadministraciones.com',\n to=(email_to, ))\n msg.content_subtype = 'html'\n msg.send()\n return True\n except:\n return False\n\n\n@staff_member_required\ndef reenviar_email(request, pk):\n try:\n # -- obtener informacion de la nota técnica\n nt = NotasTecnicas.objects.get(pk=pk)\n # -- obtener el usuario(administracion) dueño del edificio\n user = Edificios.usuario_por_edificio(nt.edificio.id)\n # -- obtengo direccion de mail\n email = Perfiles.obtener_mail_por_usuario(user)\n\n if len(email) > 0:\n subject = \"[STIB] Nota Técnica - \" + str(nt.edificio)\n ctx = {'link_vista': 'http://google.com'}\n\n if _send_email(email, subject, ctx):\n nt.mail_recibido = True\n nt.enviado = True\n nt.save()\n\n messages.success(request, \"Se ha reenviado el email correctamente.\")\n\n else:\n messages.error(request, 'Error al reenviar el email, verifique el email de la administración'\n ' encargada del edificio.')\n\n return HttpResponseRedirect(reverse(\"notas-tecnicas:list\"))\n except:\n messages.error(request, 'Error inesperado al reenviar el email.')\n return HttpResponseRedirect(reverse(\"notas-tecnicas:list\"))","sub_path":"stib-administraciones/notas_tecnicas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"573156490","text":"import re \r\nfrom os import listdir\r\nimport pandas as pd\r\nimport csv\r\nimport os\r\nfrom pathlib import Path\r\n\r\n\r\nclass merger:\r\n\r\n def __init__(self,path):\r\n self.raw_path=path\r\n \r\n def combine(self):\r\n\r\n if not os.path.exists(\"Data/Combine_Data\"):\r\n os.makedirs(\"Data/Combine_Data\")\r\n with open('Data/Combine_Data/'+ 'input_file' + '.csv', 'a') as csvfile:\r\n wr=csv.writer(csvfile,dialect='excel')\r\n wr.writerow([\"LIMIT_BAL\",\"SEX\",\"EDUCATION\",\"MARRIAGE\",\"AGE\",\"PAY_0\",\"PAY_2\",\"PAY_3\",\"PAY_4\",\"PAY_5\",\"PAY_6\",\"BILL_AMT1\",\"BILL_AMT2\",\"BILL_AMT3\",\"BILL_AMT4\",\"BILL_AMT5\",\"BILL_AMT6\",\"PAY_AMT1\",\"PAY_AMT2\",\"PAY_AMT3\",\"PAY_AMT4\",\"PAY_AMT5\",\"PAY_AMT6\",\"default payment next month\"])\r\n\r\n onlyfiles=[f for f in listdir(self.raw_path)]\r\n for file in onlyfiles:\r\n path=str(self.raw_path) + \"/\" + file\r\n with open(path, \"r\") as f:\r\n next(f)\r\n reader = csv.reader(f, delimiter=\"\\n\")\r\n with open('Data/Combine_Data/'+ 'input_file' + '.csv', 'a') as csvfile:\r\n wr=csv.writer(csvfile,dialect='excel') \r\n for line in reader:\r\n wr.writerow(line) \r\n \r\n ","sub_path":"code_testing.py","file_name":"code_testing.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"344885103","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\n\ndef user_input():\n Review = st.text_input('Enter Your Review here')\n data = {'Your Text':Review}\n features = pd.DataFrame(data,index=[0])\n return features\n\nst.title(\"Machine Learning Model\")\nst.subheader(\"SENTIMENT ANALYSIS OF REVIEW\")\ndframe = user_input()\nst.write(dframe)\n\ndf=pd.read_csv('Reviews.csv')\ndf['Sentiment']= np.where(df['Score']>3,'Positive','Negative')\ndf = df.drop(['ProductId','UserId','ProfileName','Id','HelpfulnessNumerator','HelpfulnessDenominator','Score','Time','Summary'], axis=1)\n\nx=df.iloc[:,0].values\ny=df.iloc[:,1].values\ntext_model = Pipeline([('tfidf',TfidfVectorizer(min_df = 5, ngram_range = (1,2))),('model',LogisticRegression())])\ntext_model.fit(x,y)\ny_pred = text_model.predict(dframe)\nypred= {'Sentiment':y_pred}\nst.write(y_pred)\n","sub_path":"Major-Project.py","file_name":"Major-Project.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"228981106","text":"# ====================================== #\n# COVARIANCE AND CORRELATION CALCULATION #\n# ====================================== #\nimport numpy as np\n\n\ndef covariance(A, B):\n \"\"\"\n Calculates covariance between parameters A and B\n :param A: array-like, parameter 1\n :param B: array-like, parameter 2\n :return: covariance between A and B\n \"\"\"\n _A = np.array(A)\n _B = np.array(B)\n return np.dot((_A - _A.mean()), (_B - _B.mean())) / (len(_A) - 1)\n\n\ndef covariance_matrix(params_list):\n \"\"\"\n Calculates covariance matrix of parameter list\n :param params_list: array-like, 2D, each row is list of parameter values\n :return: covariance matrix\n \"\"\"\n cov_matrix = np.zeros(shape=(len(params_list), len(params_list)))\n for i in range(len(params_list)):\n for j in range(len(params_list)):\n cov_matrix[i][j] = 
covariance(params_list[i], params_list[j])\n return cov_matrix\n\n\ndef correlation_coef(A, B):\n \"\"\"\n Calculates Pearson correlation coefficient of A and B\n for SAMPLE (not population!)\n :param A: array-like, parameter 1\n :param B: array-like, parameter 2\n :return: Pearson correlation coefficient r(A,B)\n \"\"\"\n _A = np.array(A)\n _B = np.array(B)\n num = np.dot(_A, _B) - _A.mean() * _B.mean() * len(_A)\n den = (len(_A) - 1) * np.std(_A, ddof=1) * np.std(_B, ddof=1)\n return num / den\n\n\ndef correlation_matrix(params_list):\n \"\"\"\n Calculates correlation matrix of parameter list. Calculates for sample,\n not population!\n :param params_list: array-like, 2D, each row is list of parameter values\n :return: correlation matrix\n \"\"\"\n cor_matrix = np.zeros(shape=(len(params_list), len(params_list)))\n for i in range(len(params_list)):\n for j in range(len(params_list)):\n cor_matrix[i][j] = correlation_coef(params_list[i], params_list[j])\n return cor_matrix\n\n\n# ================================ TESTING ================================== #\nP1 = np.array([1, 2, -1, -2, 3, 4, -3, 5, -2])\nP2 = np.array([3, -1, -3, -2, 0, 4, 5, 4, -7])\nP3 = np.array([8, -5, -1, 9, 3, -3, 5, 2, -6])\nP4 = np.array([0, -1, -3, 2, 4, -4, 1, 7, -3])\n\nparams = np.vstack((P1, P2, P3, P4))\n\nprint(\"COVARIATION MATRICES:\\n\")\nprint(\"NumPy covariance matrix:\\n\", np.cov(params), \"\\n\", \"=\" * 40)\nprint(\"This covariance matrix: \\n\", covariance_matrix(params))\n\nprint(\"=\" * 40, \"\\nCORRELATION MATRICES:\\n\")\n\nprint(\"NumPy correlation matrix:\\n\", np.corrcoef(params), \"\\n\", \"=\" * 40)\nprint(\"This correlation matrix: \\n\", correlation_matrix(params))\n","sub_path":"Statistics/Cov_and_Cor.py","file_name":"Cov_and_Cor.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"100013666","text":"import sys\nimport pysnooper\n\nsys.path.append(\"..\")\nfrom appJar import gui\nimport time\nimport threading\nimport traceback\nimport os\nimport json\nfrom tkinter import *\nimport calendar\nimport datetime\nfrom Converter import *\nfrom gernerate_recorder import recoder_process\n\nclass userGui(object):\n def __init__(self):\n # self._app=self._create_gui()\n self._check_tablename = False\n self._check_productversion = False\n self._check_productname = False\n self._check_outputfolder = False\n self._template_file=None\n self._data_h_order=None\n\n self._json_data = {\n \"template_file生成路径\": '',\n \"h_orders生成路径\": '',\n \"生成路径\": '',\n }\n # self._auto_data = {\"run\": 1}\n # 检查是否有config文件夹,没有则创建\n\n cur_path,file=os.path.split(os.path.realpath(sys.argv[0])) \n current_path = os.getcwd()\n check_path = os.path.join(current_path, 'config')\n if not os.path.exists(check_path):\n os.mkdir(check_path)\n self._config_path = os.path.join(check_path, 'config.json')\n self._config_auto = os.path.join(check_path, 'startparm.json')\n self.config()\n\n def read_config(self):\n data = self.read_json(self._config_path)\n self._json_data = data\n if self._json_data[\"template_file生成路径\"].find('template.xls')>-1 and os.path.exists(self._json_data['template_file生成路径']):\n self._app.setEntry('template_user_fileentry',self._json_data[\"template_file生成路径\"])\n if self._json_data['h_orders生成路径'].find('h_orders.csv')>-1 and os.path.exists(self._json_data['h_orders生成路径']):\n self._data_h_order = read_SAK_file(self._json_data['h_orders生成路径'])\n if self._data_h_order is not None:\n 
self._app.setEntry('h_orders_user_fileentry', self._json_data[\"h_orders生成路径\"])\n self._app.changeOptionBox('TableName_OptionBox', self._data_h_order['AreaId'].drop_duplicates())\n self._app.changeOptionBox('ProductName_OptionBox', self._data_h_order['ProductName'].drop_duplicates())\n self._app.changeOptionBox('ProductVersion_OptionBox',\n self._data_h_order['ProductVersion'].drop_duplicates())\n\n if self._json_data[\"生成路径\"]!='' and os.path.exists(self._json_data['生成路径']):\n self._app.setEntry('ExportFolder',self._json_data[\"生成路径\"])\n def read_json(self, path):\n with open(path, \"r\", encoding='utf-8') as f:\n # data=f.read().decode(encoding='gbk').encoding('utf-8')\n data = json.load(f)\n print(data)\n return data\n\n\n def write_config(self):\n self.write_json(self._config_path, self._json_data)\n print(\"加载入文件完成...\")\n self.stop()\n\n def write_json(self, path, data):\n with open(path, \"w\", encoding='utf-8') as f:\n json.dump(data, f, ensure_ascii=False, sort_keys=True)\n\n def stop(self):\n sys.exit(0)\n\n @property\n def app(self):\n return self._app\n\n def start(self):\n self._app.go(startWindow='login')#\n\n # ---------------------------画面--------------------------\n def config(self):\n row=0\n col=0\n # ----------------------登陆画面-----------------------\n self._app = gui( useTtk=True, showIcon=False)#,\n # self._app.setStopFunction(self.stop)\n self._app.setPollTime(1000)\n self._app.setExpand('both')\n self._app.setSticky(\"news\")\n self._app.startSubWindow('login')\n self._app.setStopFunction(self.write_config)\n with self._app.labelFrame('欢迎使用SAK转换程序', sticky='ew'):\n # self._app.addLabel(\"title\", \"欢迎使用SAK转换程序!\", row, col,2) # Row 0, Column 0, Span 2\n\n self._app.startLabelFrame('template', row + 1, col, 2,label='template文件路径')\n self.add_user_widget_fileentry('template',0,0)\n self._app.stopLabelFrame()\n\n self._app.startLabelFrame('h_order', row + 2, col,2, label='h_order文件路径')\n self.add_user_widget_fileentry('h_orders',0,0)\n self._app.stopLabelFrame()\n\n self._app.startLabelFrame('ExportFolder', row + 3, col,2, label='输出文件保存路径')\n ent = self._app.addDirectoryEntry('ExportFolder', 0, 0)\n ent.bind(\"\", self.set_exportfolder_path)\n self._app.stopLabelFrame()\n\n self._app.startFrame('OptionBoxFrame',row + 6, col,2)\n self._app.addLabel(\"TableName\", \"TableName:\", 0,0)\n # self._app.setLabelAlign(\"TableName\", \"nw\")\n self._app.addOptionBox(\"TableName_OptionBox\", [''], 0,1,1)\n self._app.addLabel(\"ProductName\", \"ProductName:\", 1, 0,1)\n self._app.addOptionBox(\"ProductName_OptionBox\", [''], 1, 1,1)\n self._app.addLabel(\"ProductVersion\", \"ProductVersion:\", 2, 0,1)\n self._app.addOptionBox(\"ProductVersion_OptionBox\", [''], 2, 1,1)\n self._app.stopFrame()\n # 起始日期\n self._app.startLabelFrame('StartDate',row + 4,col,label='起始日期')\n self.add_user_widget_datepick('StartDate',0,0,2)\n self._app.stopLabelFrame()\n # 借宿日期\n self._app.startLabelFrame('EndDate', row + 5, col, label='结束日期')\n self.add_user_widget_datepick('EndDate', 0, 0, 2)\n self._app.stopLabelFrame()\n self._app.setDatePickerChangeFunction('StartDate',self.datechange)\n self._app.setDatePickerChangeFunction('EndDate',self.datechange)\n\n self._app.addButtons([\"转换\", \"关闭\"], self.press, row+7, col,2 ) # Row 3, Column 0, Span 2\n\n\n self._app.stopSubWindow()\n if os.path.exists(self._config_path):\n self.read_config()\n def set_exportfolder_path(self,event):\n self._json_data['生成路径']=self._app.getEntry('ExportFolder')\n def add_user_widget_fileentry(self, title, row=None, 
column=0, colspan=0, rowspan=0):\n self._app.startFrame(title,row,column,colspan,rowspan)\n ent=self._app.addEntry(title+'_user_fileentry',0,0)\n ent.config(state='disabled')\n self._app.addNamedButton('File',title+'_user_fileentry',self.open_file_dialog,0,1)\n self._app.stopFrame()\n def open_file_dialog(self,title):\n\n # 检查文件是否正确\n check_path=''\n if title.find('template')>-1 or title.find('h_orders')>-1:\n tmp=title.split('_user_fileentry')[0]\n check_path=tmp+'.csv' if tmp=='h_orders' else tmp+'.xls'\n\n if check_path !='':\n dir=self._app.openBox(title)\n\n if dir.find(check_path)>-1:\n\n if check_path=='template.xls':\n self._app.setEntry(title, dir)\n self._template_file=dir\n self._json_data['template_file生成路径']=dir\n \n elif check_path=='h_orders.csv':\n self._data_h_order=read_SAK_file(dir)\n if self._data_h_order is not None:\n self._app.setEntry(title, dir)\n self._app.changeOptionBox('TableName_OptionBox',self._data_h_order['AreaId'].drop_duplicates())\n self._app.changeOptionBox('ProductName_OptionBox',self._data_h_order['ProductName'].drop_duplicates())\n self._app.changeOptionBox('ProductVersion_OptionBox',self._data_h_order['ProductVersion'].drop_duplicates())\n self._json_data['h_orders生成路径'] = dir\n else:\n self._app.warningBox('警告', '文件内容不匹配')\n else:\n if self._app.getEntry(title)=='':\n self._app.warningBox('警告',f'请选择文件{check_path}')\n\n def add_user_widget_datepick(self, name, row=None, column=0, colspan=0, rowspan=0):\n ''' adds a date picker at the specified position '''\n self._app.widgetManager.verify(self._app.Widgets.DatePicker, name)\n # initial DatePicker has these dates\n days = range(1, 32)\n self.MONTH_NAMES = list(range(1,13))\n years = range(2000, 3020)\n # create a frame, and add the widgets\n frame = self._app.startFrame(name, row, column, colspan, rowspan)\n self._app.setExpand(\"none\")\n self._app.addLabel(name + \"_DP_DayLabel\", \"日:\", 0, 4)\n self._app.setLabelAlign(name + \"_DP_DayLabel\", \"w\")\n self._app.addOptionBox(name + \"_DP_DayOptionBox\", days, 0, 5)\n self._app.addLabel(name + \"_DP_MonthLabel\", \"月:\", 0, 2)\n self._app.setLabelAlign(name + \"_DP_MonthLabel\", \"w\")\n self._app.addOptionBox(name + \"_DP_MonthOptionBox\", self.MONTH_NAMES, 0, 3)\n self._app.addLabel(name + \"_DP_YearLabel\", \"年:\", 0, 0)\n self._app.setLabelAlign(name + \"_DP_YearLabel\", \"w\")\n self._app.addOptionBox(name + \"_DP_YearOptionBox\", years, 0, 1)\n self._app.setOptionBoxChangeFunction(\n name + \"_DP_MonthOptionBox\",\n self._updateDatePicker)\n self._app.setOptionBoxChangeFunction(\n name + \"_DP_YearOptionBox\",\n self._updateDatePicker)\n self._app.stopFrame()\n frame.isContainer = False\n self._app.widgetManager.add(self._app.Widgets.DatePicker, name, frame)\n # def set_user_widget_DatePickerChangeFunction(self, title, function):\n # self._app.widgetManager.get(self._app.Widgets.DatePicker, title)\n # cmd = self.MAKE_FUNC(function, title)\n # self._app.setOptionBoxChangeFunction(title + \"_DP_DayOptionBox\", cmd)\n # self._app.widgetManager.get(self._app.Widgets.OptionBox, title + \"_DP_DayOptionBox\").function = cmd\n\n def get_user_widget_DatePicker(self, title):\n self._app.widgetManager.get(self._app.Widgets.DatePicker, title)\n day = int(self._app.getOptionBox(title + \"_DP_DayOptionBox\"))\n month = int(self._app.getOptionBox(title + \"_DP_MonthOptionBox\"))\n year = int(self._app.getOptionBox(title + \"_DP_YearOptionBox\"))\n date = datetime.datetime(year, month, day)\n return date\n def _updateDatePicker(self,title):\n if 
title.find(\"_DP_MonthOptionBox\") > -1:\n title = title.split(\"_DP_MonthOptionBox\")[0]\n elif title.find(\"_DP_YearOptionBox\") > -1:\n title = title.split(\"_DP_YearOptionBox\")[0]\n else:\n self._app.warn(\"Can't update days in DatePicker:%s\", title)\n return\n\n day = self._app.getOptionBox(title + \"_DP_DayOptionBox\")\n month = int(self._app.getOptionBox(title + \"_DP_MonthOptionBox\"))\n year = int(self._app.getOptionBox(title + \"_DP_YearOptionBox\"))\n days = range(1, calendar.monthrange(year, month)[1] + 1)\n self._app.changeOptionBox(title + \"_DP_DayOptionBox\", days)\n\n # keep previous day if possible\n # with PauseLogger():\n # self.setOptionBox(title + \"_DP_DayOptionBox\", day, callFunction=False)\n\n box = self._app.widgetManager.get(self._app.Widgets.OptionBox, title + \"_DP_DayOptionBox\")\n if hasattr(box, 'function'):\n box.function()\n def set_user_widget_DatePicker(self, title, date=\"today\"):\n self._app.widgetManager.get(self._app.Widgets.DatePicker, title)\n if date == \"today\":\n date = datetime.date.today()\n self._app.setOptionBox(title + \"_DP_YearOptionBox\", str(date.year))\n self._app.setOptionBox(title + \"_DP_MonthOptionBox\", date.month - 1)\n self._app.setOptionBox(title + \"_DP_DayOptionBox\", date.day - 1)\n# event function\n def file_box_stop(self):\n self._app.hideSubWindow('filebox')\n \n def datechange(self):\n StartTime=self.get_user_widget_DatePicker('StartDate')\n EndTime=self.get_user_widget_DatePicker('EndDate')\n if StartTime>EndTime:\n self.set_user_widget_DatePicker('EndDate',StartTime)\n EndTime=StartTime\n tmp=self._data_h_order[\n (self._data_h_order['StartTime']>=StartTime)&\n (self._data_h_order['StartTime']<=EndTime)\n ]\n if not tmp.empty:\n self._app.changeOptionBox('TableName_OptionBox',tmp['AreaId'].drop_duplicates())\n self._app.changeOptionBox('ProductName_OptionBox',tmp['ProductName'].drop_duplicates())\n self._app.changeOptionBox('ProductVersion_OptionBox',tmp['ProductVersion'].drop_duplicates())\n \n\n\n# ##################################\n def show(self, msg):\n self._app.setTextArea(\"log\", msg + '\\n')\n\n @property\n def action(self):\n return self._action\n\n @action.setter\n def action(self, value):\n if isinstance(value, bool):\n self._action = value\n else:\n print('action type must be boolean')\n\n def press(self, btn):\n\n if btn == \"清空\":\n w = self._app.getEntry(\"通讯费存储路径\")\n self._app.setTextArea(\"log\", w + '\\n')\n if btn == \"关闭\":\n self.write_config()\n if btn == \"转换\":\n entries = self._app.getAllEntries()\n StartTime=self.get_user_widget_DatePicker('StartDate')\n EndTime=self.get_user_widget_DatePicker('EndDate')\n if StartTime>EndTime:\n self._app.warningBox('警告','结束日期不能小于起始日期')\n self.set_user_widget_DatePicker('StartDate')\n self.set_user_widget_DatePicker('EndDate')\n return\n if all(entries.values()):\n self._ProductName_Value=self._app.getOptionBox('ProductName_OptionBox')\n self._ProductVersion_Value=self._app.getOptionBox('ProductVersion_OptionBox')\n self._TableName_Value=self._app.getOptionBox('TableName_OptionBox')\n df=get_Completed_Product_Data(self._ProductName_Value,self._ProductVersion_Value,\n self._TableName_Value,StartTime,EndTime,self._data_h_order)\n if df is None:\n self._app.warningBox('警告','未查找到匹配数据!')\n else:\n # print(df['TraceData'])\n # g=df['TraceData']\n a=converter_data_to_RecodeData(df)\n if a.all_data!=[]:\n a.ex_path=self._app.getEntry('ExportFolder')\n a.template_path=self._template_file\n if a.exception[0]:\n print(a.exception[1])\n sys.exit(0)\n 
a.create_new_file()\n if a.exception[0]:\n print(a.exception[1])\n a.quit()\n sys.exit(0)\n for i in a.all_data:\n a.write_recordersheet(i)\n a.savefile()\n a.quit()\n if a.exception[0]:\n print(a.exception[1])\n \n else:\n self._app.warningBox('警告','转换成功!')\n # rp=recoder_process(self._app.getEntry('ExportFolder'))\n # tmp=rp.current_data\n # a['产品序列号']\n \n else:\n self._app.warningBox('警告','未查询效数据!')\n \n \n else:\n self._app.warningBox('警告','文件路径不能为空')\n return\n\n\n\n\nif __name__ == \"__main__\":\n app = userGui()\n app.start()\n \n\n\n\n\n\n\n\n","sub_path":"temp/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":16114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"534237860","text":"from django.db import models\nfrom django.db.models.sql import compiler\n\nimport caching.base as caching\n\n\nclass IndexQuerySet(caching.CachingQuerySet):\n\n def with_index(self, **kw):\n \"\"\"\n Suggest indexes that should be used with this query as key-value pairs.\n\n qs.with_index(t1='xxx') => INNER JOIN t1 USE INDEX (`xxx`)\n \"\"\"\n q = self._clone()\n if not isinstance(q.query, IndexQuery):\n q.query = self.query.clone(IndexQuery)\n q.query.index_map.update(kw)\n return q\n\n def fetch_missed(self, pks):\n # Remove the indexes before doing the id query.\n if hasattr(self.query, 'index_map'):\n index_map = self.query.index_map\n self.query.index_map = {}\n rv = super(IndexQuerySet, self).fetch_missed(pks)\n self.query.index_map = index_map\n return rv\n else:\n return super(IndexQuerySet, self).fetch_missed(pks)\n\n\nclass IndexQuery(models.query.sql.Query):\n \"\"\"\n Extends sql.Query to make it possible to specify indexes to use.\n \"\"\"\n\n def clone(self, klass=None, **kwargs):\n # Maintain index_map across clones.\n c = super(IndexQuery, self).clone(klass, **kwargs)\n c.index_map = dict(self.index_map)\n return c\n\n def get_compiler(self, using=None, connection=None):\n # Call super to figure out using and connection.\n c = super(IndexQuery, self).get_compiler(using, connection)\n return IndexCompiler(self, c.connection, c.using)\n\n def _setup_query(self):\n if not hasattr(self, 'index_map'):\n self.index_map = {}\n\n def get_count(self, using):\n # Don't use the index for counts, it's slower.\n index_map = self.index_map\n self.index_map = {}\n count = super(IndexQuery, self).get_count(using)\n self.index_map = index_map\n return count\n\n\nclass IndexCompiler(compiler.SQLCompiler):\n\n def get_from_clause(self):\n \"\"\"\n Returns a list of strings that are joined together to go after the\n \"FROM\" part of the query, as well as a list any extra parameters that\n need to be included. Sub-classes, can override this to create a\n from-clause via a \"select\".\n\n This should only be called after any SQL construction methods that\n might change the tables we need. This means the select columns and\n ordering must be done first.\n \"\"\"\n result = []\n qn = self.quote_name_unless_alias\n qn2 = self.connection.ops.quote_name\n index_map = self.query.index_map\n first = True\n from_params = []\n for alias in self.query.tables:\n if not self.query.alias_refcount[alias]:\n continue\n try:\n name, alias, join_type, lhs, join_cols, _, join_field = (\n self.query.alias_map[alias])\n except KeyError:\n # Extra tables can end up in self.tables, but not in the\n # alias_map if they aren't in a join. That's OK. We skip them.\n continue\n alias_str = (alias != name and ' %s' % alias or '')\n ### jbalogh wuz here. 
###\n if name in index_map:\n use_index = 'USE INDEX (%s)' % qn(index_map[name])\n else:\n use_index = ''\n if join_type and not first:\n extra_cond = join_field.get_extra_restriction(\n self.query.where_class, alias, lhs)\n if extra_cond:\n extra_sql, extra_params = extra_cond.as_sql(\n qn, self.connection)\n extra_sql = 'AND (%s)' % extra_sql\n from_params.extend(extra_params)\n else:\n extra_sql = \"\"\n result.append('%s %s%s %s ON (' % (join_type, qn(name),\n alias_str, use_index))\n for index, (lhs_col, rhs_col) in enumerate(join_cols):\n if index != 0:\n result.append(' AND ')\n result.append(\n '%s.%s = %s.%s' %\n (qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))\n result.append('%s)' % extra_sql)\n else:\n connector = connector = '' if first else ', '\n result.append('%s%s%s %s' % (connector, qn(name), alias_str,\n use_index))\n ### jbalogh out. ###\n first = False\n for t in self.query.extra_tables:\n alias, unused = self.query.table_alias(t)\n # Only add the alias if it's not already present (the table_alias()\n # calls increments the refcount, so an alias refcount of one means\n # this is the only reference.\n if (alias not in self.query.alias_map or\n self.query.alias_refcount[alias] == 1):\n connector = not first and ', ' or ''\n result.append('%s%s' % (connector, qn(alias)))\n first = False\n return result, from_params\n","sub_path":"src/olympia/addons/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"48904809","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 24 14:03:23 2015\nРазбираются результаты sparql-запросов к dbpedia\n\nна их основе строится граф, вершины - Артисты, группы, жанры\nсвязи - взяты из dbpedia\n\n\n@author: eugene\n\"\"\"\n\nimport pandas as pd\nimport networkx as nx #графы\nimport urllib.parse\nimport pylast\n\ndef evaluate_str(string,dbpedia_set):\n if (string in dbpedia_set):\n return string\n mod_string = 'http://dbpedia.org/resource/'+urllib.parse.quote(string,safe='')\n if (mod_string in dbpedia_set):\n return mod_string\n else:\n return False\n\n\ndef get_lastfm_tags(artist_name,network,API_KEY,API_SECRET):\n try: \n artist = network.get_artist(artist_name)\n topItems = artist.get_top_tags(limit=None)\n return topItems\n except:\n return []\n\nbands_filename = \"res/bands_with_lastfm.csv\"\ngenres_filename = \"res/genres\"\ngenres_names_filename = \"res/music_genre_names.csv\"\nartists_filename = \"res/merged_artists.csv\"\ngraph_filename = \"res/db-lasfm-without-isolated.gexf\"\n\nAPI_KEY='d6bee4b59046bf1a692997828046ce4c'\nAPI_SECRET='6de7b86fc94b7451b4ee58d80d83cb7a'\nnetwork = pylast.LastFMNetwork(api_key = API_KEY, api_secret =\nAPI_SECRET)\n\n \nG=nx.DiGraph()\n\ndf_data = pd.read_csv(genres_names_filename)\nG.add_nodes_from([[x, {'type':'genre'}] for x in df_data['GenreUri'].tolist()]) # Genres\ngenres_uri_set = set(df_data['GenreUri'].tolist())\ngenres_name_uri_dict = dict()\n\n\nfor row in df_data[['GenreUri','GenreNames']].iterrows():\n GenreUri = str(row[1]['GenreUri'])\n GenreNames = str(row[1]['GenreNames'])\n GenreNamesList = [val.lower() for val in GenreNames.split(' | ')]\n for name in GenreNamesList: \n genres_name_uri_dict[name] = GenreUri\n\n\ndf_data_artists = pd.read_csv(artists_filename)\nartists_set = set(df_data_artists['ArtistURI'].tolist())\nG.add_nodes_from([[x, {'type':'artist'}] for x in df_data_artists['ArtistURI'].tolist()]) # Genres\n\n\ndf_data = 
pd.read_csv(bands_filename)\nbands_set = set(df_data['BandURI'].tolist())\nG.add_nodes_from([[x, {'type':'band'}] for x in df_data['BandURI'].tolist()]) # Genres\n\n\ntags_set = set()\nfor row in df_data.iterrows():\n BandURI = str(row[1]['BandURI'])\n BandAssociatedMusicalArtist = str(row[1]['associatedMusicalArtists'])\n BandName = str(row[1]['BandName']) \n LastFM_uri_tags = str(row[1]['LastFmURITags'])\n LastFM_tags = str(row[1]['LastFmTags'])\n BandAssocBands = str(row[1]['associatedBands'])\n BandGenreURIs = str(row[1]['BandGenreURIs'])\n \n if LastFM_uri_tags != \"nan\":\n LastFM_uri_tags_l = LastFM_uri_tags.split(' | ')\n for tag in LastFM_uri_tags_l:\n G.add_edge(BandURI, tag)\n \n if LastFM_tags != \"nan\":\n LastFM_tags_l = LastFM_tags.split(' | ')\n for tag in LastFM_tags_l:\n if(tag not in tags_set):\n tags_set.add(tag)\n G.add_node(tag,{'type':'lastfm_tag'})\n G.add_edge(BandURI,tag)\n\n if BandGenreURIs != \"nan\":\n BandGenreURIs = BandGenreURIs.split(' | ')\n for genre in BandGenreURIs: #так себе \"сведение\"\n mod_genre=evaluate_str(genre,genres_uri_set) \n if (mod_genre is not False): \n G.add_edge(BandURI, genre)\n \n if BandAssocBands != \"nan\":\n G.add_edges_from([[BandURI,der] for der in BandAssocBands.split(' | ')]) \n \n if BandAssociatedMusicalArtist != \"nan\": #do smth with artists\n BandAssociatedMusicalArtist = BandAssociatedMusicalArtist.split(' | ')\n\n for artist in BandAssociatedMusicalArtist: #так себе \"сведение\"\n mod_artist=evaluate_str(artist,artists_set | bands_set)\n if (mod_artist is not False): \n G.add_edge(BandURI, mod_artist)\n\n \ndf_data = pd.read_csv(genres_filename)\n \n\ninstruments_set = [str(line).split(' | ') for line in df_data['GenreInstruments'].tolist()] \ninstruments_set = set([val for sublist in instruments_set for val in sublist]) # from list_of_lists to list and to set\ninstruments_set.remove('nan')\nG.add_nodes_from([[val,{\"type\":\"instrument\"}] for val in instruments_set]); #instruments\n\n\nfor row in df_data.iterrows():\n GenreURI = str(row[1]['GenreUri'])\n GenreOrigins = str(row[1]['Origins'])\n GenreDer = str(row[1]['Derivatives'])\n GenreInstruments = str(row[1]['GenreInstruments'])\n \n if GenreDer != \"nan\": \n G.add_edges_from([[GenreURI,der] for der in GenreDer.split(' | ')]) \n \n if GenreOrigins != \"nan\": \n G.add_edges_from([[orig,GenreURI] for orig in GenreOrigins.split(' | ')]) \n \n if GenreInstruments != \"nan\": \n G.add_edges_from([[GenreURI,instr] for instr in GenreInstruments.split(' | ')]) \n\n\nfor row in df_data_artists.iterrows():\n ArtistURI = str(row[1]['ArtistURI'])\n ArtistGenre = str(row[1]['ArtistGenres'])\n ArtistAssociatedActs = str(row[1]['ArtistAssociatedActs'])\n ArtistInstruments = str(row[1]['ArtistInstruments'])\n ArtistPast = str(row[1]['ArtistPastMembers'])\n ArtistCurrent = str(row[1]['ArtistCurrentMembers'])\n \n if ArtistGenre != \"nan\":\n ArtistGenres = ArtistGenre.split(' | ')\n for genre in ArtistGenres: #\n if (genre in genres_uri_set): \n G.add_edge(ArtistURI, genre)\n else:\n genres_uri_set.add(genre) \n G.add_node(genre,{'type':'genre'})#если тут нет жанра - он хороший и надо добавить\n G.add_edge(ArtistURI, genre)\n \n if ArtistAssociatedActs!= 'nan':\n ArtistActs = ArtistAssociatedActs.split(' | '); # separator\n for act in ArtistActs:\n mod_act=evaluate_str(act,artists_set | bands_set)\n if (mod_act is not False): \n G.add_edge(ArtistURI, mod_act)\n ArtistActs = ArtistAssociatedActs.split(' , '); #\"natural\" separator\n for act in ArtistActs:\n 
mod_act=evaluate_str(act,artists_set | bands_set)\n            if (mod_act is not False): \n                G.add_edge(ArtistURI, mod_act) \n            \n    if ArtistInstruments!= 'nan':\n        ArtistInstruments = ArtistInstruments.split(' | ')\n        for instrument in ArtistInstruments:\n            instrument_mod=evaluate_str(instrument,instruments_set)\n            if (instrument_mod is not False): \n                G.add_edge(ArtistURI, instrument_mod)\n    \n    if ArtistPast!= 'nan':\n        ArtistPast = ArtistPast.split(' | ')\n        for past in ArtistPast:\n            mod_past=evaluate_str(past,bands_set)\n            if (mod_past is not False): \n                G.add_edge(ArtistURI, mod_past)\n    \n    if ArtistCurrent!= 'nan':\n        ArtistCurrent = ArtistCurrent.split(' | ')\n        for cur in ArtistCurrent:\n            mod_cur=evaluate_str(cur,bands_set)\n            if (mod_cur is not False): \n                G.add_edge(ArtistURI, mod_cur)\n\n\nG = G.to_undirected()\nG.remove_nodes_from(nx.isolates(G))\nnx.write_gexf(G,'res/db-lasfm-without-isolated.gexf')\n    ","sub_path":"create_graph.py","file_name":"create_graph.py","file_ext":"py","file_size_in_byte":7230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"614688429","text":"import quandl as quandl\nimport pandas as pd\n\n#Api Key\nquandl.ApiConfig.api_key = 'KiR8KAswuFtb_FFyj-mN'\n\n# Company tickers\ncompanies = {'AAPL': 'Apple Inc', 'MSFT': 'Microsoft Corp', 'AMZN': 'Amazon.com Inc', 'FB': 'Facebook Inc', 'GOOGL': 'Alphabet Inc', 'GOOG': 'Alphabet Inc', 'INTC': 'Intel Corp', 'NVDA': 'NVIDIA Corp', 'CSCO': 'Cisco Systems Inc', 'CMCSA': 'Comcast Corp', 'ADBE': 'Adobe Inc', 'NFLX': 'Netflix Inc', 'PEP': 'PepsiCo Inc', 'PYPL': 'PayPal Holdings Inc', 'TSLA': 'Tesla Inc', 'COST': 'Costco Wholesale Corp', 'AMGN': 'Amgen Inc', 'TMUS': 'T-Mobile US Inc', 'AVGO': 'Broadcom Inc', 'TXN': 'Texas Instruments Inc', 'CHTR': 'Charter Communications Inc', 'QCOM': 'QUALCOMM Inc', 'SBUX': 'Starbucks Corp', 'GILD': 'Gilead Sciences Inc', 'MDLZ': 'Mondelez International Inc', 'INTU': 'Intuit Inc', 'BKNG': 'Booking Holdings Inc', 'FISV': 'Fiserv Inc', 'ADP': 'Automatic Data Processing Inc', 'ISRG': 'Intuitive Surgical Inc', 'VRTX': 'Vertex Pharmaceuticals Inc', 'REGN': 'Regeneron Pharmaceuticals Inc'}\n \nfinalData = pd.DataFrame()\n\nfor key in companies.keys():\n\n    data = quandl.get('WIKI/' + key, start_date = \"2000-01-01\")\n\n    data['Company'] = companies[key]\n    finalData = finalData.append(data)\n\n#print (finalData)\n\nfinalData.to_csv('Data/dataQuandlLarger.csv')\n\n","sub_path":"Phase2/Zip/Group1_Phase2/Code/GatherDataQuandl.py","file_name":"GatherDataQuandl.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"55158314","text":"# Introsort\n#\n# directement adapté de l'implémentation en C++ de la STL\n\n# Définitions utilitaires\n\n# Seuil de passage au tri par insertion\nthreshold = 16\n\n\n# Logarithme entier en base 2\ndef int_log2(n):\n    k = 0\n    while n:\n        n >>= 1\n        k += 1\n    return k - 1\n\n\n# Tri par tas\n\n\ndef sift_down(l, start, first, end):\n    curr = first\n    child = 2 * curr + 1\n    while child < end:\n        next = curr\n        if l[start + child] > l[start + next]:\n            next = child\n        child += 1\n        if child < end and l[start + child] > l[start + next]:\n            next = child\n        if next == curr:\n            break\n        l[start + curr], l[start + next] = l[start + next], l[start + curr]\n        curr = next\n        child = 2 * curr + 1\n\n\ndef heap_sort(l, first, last):\n    size = last - first\n    for i in range((size - 1) // 2, -1, -1):\n        sift_down(l, first, i, size)\n    for i in range(size - 1, 0, -1):\n        l[first + 
0], l[first + i] = l[first + i], l[first + 0]\n sift_down(l, first, 0, i)\n\n\n# Tri par insertion\n\n\ndef unguarded_linear_insert(l, last):\n val = l[last]\n prev = last - 1\n while val < l[prev]:\n l[last] = l[prev]\n last = prev\n prev -= 1\n l[last] = val\n\n\ndef unguarded_insertion_sort(l, first, last):\n for i in range(first, last):\n unguarded_linear_insert(l, i)\n\n\ndef insertion_sort(l, first, last):\n if first == last:\n return\n for i in range(first + 1, last):\n if l[i] < l[first]:\n val = l[i]\n for j in range(i, 0, -1):\n l[j] = l[j - 1]\n l[0] = val\n else:\n unguarded_linear_insert(l, i)\n\n\ndef final_insertion_sort(l, first, last):\n if last - first > threshold:\n insertion_sort(l, first, first + threshold)\n unguarded_insertion_sort(l, first + threshold, last)\n else:\n insertion_sort(l, first, last)\n\n\n# Tri rapide\n\n\ndef move_median_first(l, a, b, c):\n if l[a] < l[b]:\n if l[b] < l[c]:\n # l[a] < l[b] < l[c]\n l[a], l[b] = l[b], l[a]\n elif l[a] < l[c]:\n # l[a] < l[c] <= l[b]\n l[a], l[c] = l[c], l[a]\n else:\n # l[c] <= l[a] < l[b]\n pass\n else:\n if l[a] < l[c]:\n # l[b] <= l[a] < l[c]\n pass\n elif l[b] < l[c]:\n # l[b] < l[c] <= l[a]\n l[a], l[c] = l[c], l[a]\n else:\n # l[c] <= l[b] <= l[a]\n l[a], l[b] = l[b], l[a]\n\n\ndef unguarded_partition(l, first, last, pivot):\n while True:\n while l[first] < pivot:\n first += 1\n last -= 1\n while pivot < l[last]:\n last -= 1\n if first >= last:\n return first\n l[first], l[last] = l[last], l[first]\n first += 1\n\n\ndef unguarded_partition_pivot(l, first, last):\n mid = first + (last - first) // 2\n move_median_first(l, first, mid, last - 1)\n return unguarded_partition(l, first + 1, last, l[first])\n\n\n# Boucle principale d'introsort\n\n\ndef introsort_loop(l, first, last, depth):\n while (last - first > threshold):\n if depth == 0:\n heap_sort(l, first, last)\n return\n depth -= 1\n cut = unguarded_partition_pivot(l, first, last)\n introsort_loop(l, cut, last, depth)\n last = cut\n\n\n# Fonction principale\n\n\ndef sort(l):\n if len(l) <= 1:\n return\n s = len(l)\n introsort_loop(l, 0, s, 2 * int_log2(s))\n final_insertion_sort(l, 0, s)\n","sub_path":"src/content/articles/quicksort_3/introsort.py","file_name":"introsort.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"505524737","text":"import glob\nimport os\nimport re\n\ntranslations = {}\nBASE_LANG = \"en\"\nfiles = glob.glob(\"translations-spiritual/*.txt\")\nfor path in files:\n with open(path, \"r\", encoding=\"utf-8\") as f:\n content = f.read()\n lang = os.path.splitext(os.path.basename(path))[0]\n pairs = {}\n # match key value pairs and add them to pairs unless they have a \"Needs translation\" comment\n for m in re.findall(r\"(\\S+)\\s*=\\s*\\[\\[([\\S\\s]*?)\\]\\](?!\\s*# Needs translation)\", content):\n key = m[0]\n val = m[1]\n pairs.update({key: val})\n\n translations.update({lang: pairs})\nbase_stream = open(\"translations-spiritual/\"+BASE_LANG+\".txt\", \"r\", encoding=\"utf-8\")\nbase_lines = base_stream.read().splitlines()\nfor lang in translations:\n if lang == BASE_LANG:\n continue\n \n print(\"start writing\")\n os.remove(\"translations/\"+lang+\".txt\")\n target_lang = open(\"translations/\"+lang+\".txt\", \"a\", encoding=\"utf-8\")\n ignore = False\n for line in base_lines:\n if ignore:\n if \"]]\" in line:\n ignore = False\n continue\n m = re.search(r\"(\\S+)\\s*=\", line)\n if m:\n if not \"]]\" in line:\n ignore = True\n key = 
m.group(1)\n if key in translations[lang]:\n target_lang.write(\"{} = [[{}]]\\n\".format(key, translations[lang][key]))\n else:\n target_lang.write(\"{} = [[{}]] # Needs translation\\n\".format(key, translations[BASE_LANG][key]))\n else:\n target_lang.write(line+\"\\n\")\n target_lang.close()\n\nbase_stream.close()\n\n'''issues = {}\nfor lang, t in translations.items():\n if lang == BASE_LANG:\n continue\n\n issues[lang] = {}\n print(\"checking:\", lang)\n for bkey, bval in translations[BASE_LANG].items():\n if bkey not in translations[lang]:\n print(\"missing translation:\", bkey)\n translations[lang].update({bkey: bval})\n issues[lang].update({bkey: \"Needs translation\"})\n\nfor lang, langt in issues.items():\n if len(langt) > 0:\n for key, issue in langt.items():\n print(\"{} = \\\"{}\\\" # {}\".format(key, translations[lang][key], issue))\n \nprint(translations)'''\n","sub_path":"regen-translations.py","file_name":"regen-translations.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"558407085","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimage = mpimg.imread('DrawRectangleOnCars/bbox-example-image.jpg')\n\n# Define a function that takes an image, a list of bounding boxes,\n# and optional color tuple and line thickness as inputs\n# then draws boxes in that color on the output\n\ndef draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):\n # make a copy of the image\n draw_img = np.copy(img)\n # draw each bounding box on your image copy using cv2.rectangle()\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(draw_img, bbox[0], bbox[1], color, thick)\n # return the image copy with boxes drawn\n return draw_img # Change this line to return image copy with boxes\n# Add bounding boxes\nbboxes = [((275, 572), (380, 510)), ((488, 563), (549, 518)), ((554, 543), (582, 522)),\n ((601, 555), (646, 522)), ((657, 545), (685, 517)), ((849, 678), (1135, 512))]\nresult = draw_boxes(image, bboxes)\nplt.imshow(result)\nplt.savefig('DrawRectangleOnCars/RectanglesDrawn.jpg')\n# plt.show()\n\n\n\nimage = mpimg.imread('DrawRectangleOnCars/bbox-example-image.jpg')\n#image = mpimg.imread('temp-matching-example-2.jpg')\ntemplist = ['DrawRectangleOnCars/cutout1.jpg', 'DrawRectangleOnCars/cutout2.jpg', 'DrawRectangleOnCars/cutout3.jpg',\n 'DrawRectangleOnCars/cutout4.jpg', 'DrawRectangleOnCars/cutout5.jpg', 'DrawRectangleOnCars/cutout6.jpg']\n\n# Here is your draw_boxes function from the previous exercise\ndef draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):\n # Make a copy of the image\n imcopy = np.copy(img)\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # Return the image copy with boxes drawn\n return imcopy\n\n\n# Define a function that takes an image and a list of templates as inputs\n# then searches the image and returns the a list of bounding boxes\n# for matched templates\ndef find_matches(img, template_list):\n # Define matching method\n # Other options include: cv2.TM_CCORR_NORMED', 'cv2.TM_CCOEFF', 'cv2.TM_CCORR',\n # 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED'\n method = cv2.TM_CCOEFF_NORMED\n # Make a copy of the image to draw on\n copyimg = np.copy(img)\n # Define an empty list to take bbox coords\n bbox_list = []\n # Iterate through template list\n for temp in template_list:\n # Read in 
templates one by one\n tmp = mpimg.imread(temp)\n # Use cv2.matchTemplate() to search the image\n result = cv2.matchTemplate(img, tmp, method)\n # Use cv2.minMaxLoc() to extract the location of the best match\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)\n # Determine bounding box corners for the match\n w, h = (tmp.shape[1], tmp.shape[0])\n if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\n top_left = min_loc\n else:\n top_left = max_loc\n bottom_right = (top_left[0] + w, top_left[1] + h)\n # Append bbox position to list\n bbox_list.append((top_left, bottom_right))\n # Return the list of bounding boxes\n return bbox_list\n\nbboxes = find_matches(image, templist)\nresult = draw_boxes(image, bboxes)\nplt.imshow(result)\nplt.show()\n","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"617747825","text":"import datetime\n\n\ndef subday(hour):\n if hour >= 0 and hour < 6:\n return 0;\n if hour >= 6 and hour < 12:\n return 0.25;\n if hour >= 12 and hour < 18:\n return 0.5;\n if hour >= 18 and hour < 24:\n return 0.75;\n\ndef subyear(day_score):\n if day_score >= 0 and day_score < 91.25:\n return 1;\n if day_score >= 91.25 and day_score < 182.5:\n return 2;\n if day_score >= 182.5 and day_score < 273.75:\n return 3;\n if day_score >= 273.75 and day_score < 365:\n return 4;\n\ndef ingame_day(x):\n irl_date = float(datetime.date.today().strftime(\"%j\"))\n sub_irl_day = float(subday(float(datetime.datetime.now().hour)))\n var_1 = irl_date + sub_irl_day\n if var_1 >= 0 and var_1 < 91.25:\n var_2 = var_1;\n if var_1 >= 91.25 and var_1 < 182.5:\n var_2 = var_1 - 91.5;\n if var_1 >= 182.5 and var_1 < 273.75:\n var_2 = var_1 - 182.5;\n if var_1 >= 273.75 and var_1 < 365:\n var_2 = var_1 - 273.75;\n return (var_2 * 4) + 4 ;\n\ndef ingame_year(x):\n if datetime.date.today().strftime(\"%Y\") == \"2019\":\n return int(subyear(ingame_day(1)));\n if datetime.date.today().strftime(\"%Y\") == \"2020\":\n return 4 + int(subyear(ingame_day(1)));\n if datetime.date.today().strftime(\"%Y\") == \"2021\":\n return 8 + int(subyear(ingame_day(1)));\n if datetime.date.today().strftime(\"%Y\") == \"2022\":\n return 12 + int(subyear(ingame_day(1)));\n\n\ndef presenter(x):\n year = ingame_year(2)\n daynum = ingame_day(1)\n mydate = datetime.date(year, 1, 1) + datetime.timedelta(daynum-1)\n return (mydate.strftime(\"%A-%d-%B-%Y\"));\n","sub_path":"time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"559306822","text":"\"\"\"\nExample showing how to communicate with an Optronic Laboratories 83A DC Current Source.\n\"\"\"\nfrom msl.equipment import ConnectionRecord\nfrom msl.equipment import EquipmentRecord\n\nrecord = EquipmentRecord(\n manufacturer='Optronic Laboratories',\n model='83A',\n connection=ConnectionRecord(\n address='COM3',\n # properties={'address': 1, 'delay': 0.1}, # optional settings\n timeout=2\n )\n)\n\n# connect to the current source\nol83a = record.connect()\n\n# turn the output off\nol83a.turn_off()\n\n# select a lamp\nol83a.select_lamp(9)\n\n# get target information: lamp number, target value, target unit\nprint('target info: {}'.format(ol83a.target_info()))\n\n# get the output state (on or off)\nprint('is the output on? 
{}'.format(ol83a.state()))\n\n# set the target current\nol83a.set_current(0.2345)\n\n# get the system status byte of the latest command that was executed\nprint('system status byte: {:b}'.format(ol83a.system_status_byte))\n\n# read the output current, voltage and wattage\ncurrent = ol83a.get_current()\nvoltage = ol83a.get_voltage()\nwattage = ol83a.get_wattage()\nprint('output current is {} A'.format(current))\nprint('output voltage is {} V'.format(voltage))\nprint('output wattage is {} W'.format(wattage))\n\n# get the number of hours for lamp 9\nhours = ol83a.get_setup(9, 40)\nprint('hours: {}'.format(hours))\n\n# disconnect from the current source\nol83a.disconnect()\n","sub_path":"msl/examples/equipment/optronic_laboratories/ol83a.py","file_name":"ol83a.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"230370051","text":"import contextlib\nimport datetime\nimport itertools\nimport sys\n\nfrom django.contrib.auth.models import Group, Permission\nfrom django.contrib.sites.models import Site\nfrom django_webtest import WebTest\nimport webtest.forms\n\nfrom workshops.models import (\n Airport,\n Award,\n Badge,\n Event,\n Lesson,\n Person,\n Qualification,\n Organization,\n Role,\n Tag,\n Language,\n)\nfrom workshops.util import universal_date_format\n\n\n@contextlib.contextmanager\ndef dummy_subTest():\n yield\n\n\nclass DummySubTestWhenTestsLaunchedInParallelMixin:\n def subTest(self, *args, **kwargs):\n # If you launch tests in parallel, subTest is not supported yet. To\n # fix that, we provide a dummy subTest implementation in that case.\n if \"--parallel\" in sys.argv:\n return dummy_subTest()\n else:\n return super().subTest(*args, **kwargs)\n\n\nclass SuperuserMixin:\n def _setUpSuperuser(self):\n \"\"\"Set up admin account that can log into the website.\"\"\"\n password = \"admin\"\n self.admin = Person.objects.create_superuser(\n username=\"admin\",\n personal=\"Super\",\n family=\"User\",\n email=\"sudo@example.org\",\n password=password,\n )\n self.admin.data_privacy_agreement = True\n self.admin.save()\n\n def _logSuperuserIn(self):\n \"\"\"Log in superuser (administrator) account.\"\"\"\n password = \"admin\"\n self.client.login(username=self.admin.username, password=password)\n\n\nclass TestBase(\n SuperuserMixin, DummySubTestWhenTestsLaunchedInParallelMixin, WebTest\n): # Support for functional tests (django-webtest)\n \"\"\"Base class for AMY test cases.\"\"\"\n\n def setUp(self):\n \"\"\"Create standard objects.\"\"\"\n\n # self.clear_sites_cache()\n self._setUpOrganizations()\n self._setUpAirports()\n self._setUpLessons()\n self._setUpBadges()\n self._setUpInstructors()\n self._setUpNonInstructors()\n self._setUpPermissions()\n\n from django.conf import settings\n\n Site.objects.clear_cache()\n Site.objects.get_or_create(\n pk=settings.SITE_ID,\n defaults=dict(domain=\"amy.carpentries.org\", name=\"AMY server\"),\n )\n\n def clear_sites_cache(self):\n # we need to clear Sites' cache, because after post_migration signal,\n # there's some junk in the cache that prevents from adding comments\n # (the site in CACHE is not a real Site)\n Site.objects.clear_cache()\n\n def _setUpLessons(self):\n \"\"\"Set up lesson objects.\"\"\"\n\n # we have some lessons in the database because of the migration\n # '0012_auto_20150612_0807.py'\n self.git, _ = Lesson.objects.get_or_create(name=\"swc/git\")\n self.sql, _ = Lesson.objects.get_or_create(name=\"dc/sql\")\n self.matlab, _ = 
Lesson.objects.get_or_create(name=\"swc/matlab\")\n self.r, _ = Lesson.objects.get_or_create(name=\"swc/r\")\n\n def _setUpOrganizations(self):\n \"\"\"Set up organization objects.\"\"\"\n\n self.org_alpha = Organization.objects.create(\n domain=\"alpha.edu\", fullname=\"Alpha Organization\", country=\"Azerbaijan\"\n )\n\n self.org_beta = Organization.objects.create(\n domain=\"beta.com\", fullname=\"Beta Organization\", country=\"Brazil\"\n )\n\n def _setUpAirports(self):\n \"\"\"Set up airport objects.\"\"\"\n\n self.airport_0_10 = Airport.objects.create(\n iata=\"ZZZ\", fullname=\"Airport 0x10\", latitude=0.0, longitude=10.0,\n )\n self.airport_0_0 = Airport.objects.create(\n iata=\"AAA\",\n fullname=\"Airport 0x0\",\n country=\"AL\", # AL for Albania\n latitude=0.0,\n longitude=0.0,\n )\n self.airport_0_50 = Airport.objects.create(\n iata=\"BBB\",\n fullname=\"Airport 0x50\",\n country=\"BG\", # BG for Bulgaria\n latitude=0.0,\n longitude=50.0,\n )\n self.airport_50_100 = Airport.objects.create(\n iata=\"CCC\",\n fullname=\"Airport 50x100\",\n country=\"CM\", # CM for Cameroon\n latitude=50.0,\n longitude=100.0,\n )\n self.airport_55_105 = Airport.objects.create(\n iata=\"DDD\",\n fullname=\"Airport 55x105\",\n country=\"CM\",\n latitude=55.0,\n longitude=105.0,\n )\n\n def _setUpLanguages(self):\n \"\"\"Set up language objects.\"\"\"\n\n self.english, _ = Language.objects.get_or_create(name=\"English\",)\n self.french, _ = Language.objects.get_or_create(name=\"French\",)\n self.latin, _ = Language.objects.get_or_create(name=\"Latin\",)\n\n def _setUpBadges(self):\n \"\"\"Set up badge objects.\"\"\"\n\n self.swc_instructor, _ = Badge.objects.get_or_create(\n name=\"swc-instructor\",\n defaults=dict(\n title=\"Software Carpentry Instructor\", criteria=\"Worked hard for this\"\n ),\n )\n self.dc_instructor, _ = Badge.objects.get_or_create(\n name=\"dc-instructor\",\n defaults=dict(\n title=\"Data Carpentry Instructor\", criteria=\"Worked hard for this\"\n ),\n )\n # lc-instructor is provided via a migration\n self.lc_instructor = Badge.objects.get(name=\"lc-instructor\")\n\n def _setUpInstructors(self):\n \"\"\"Set up person objects representing instructors.\"\"\"\n\n self.hermione = Person.objects.create(\n personal=\"Hermione\",\n family=\"Granger\",\n email=\"hermione@granger.co.uk\",\n gender=\"F\",\n may_contact=True,\n airport=self.airport_0_0,\n github=\"herself\",\n twitter=\"herself\",\n url=\"http://hermione.org\",\n username=\"granger_hermione\",\n country=\"GB\",\n )\n\n # Hermione is additionally a qualified Data Carpentry instructor\n Award.objects.create(\n person=self.hermione,\n badge=self.swc_instructor,\n awarded=datetime.date(2014, 1, 1),\n )\n Award.objects.create(\n person=self.hermione,\n badge=self.dc_instructor,\n awarded=datetime.date(2014, 1, 1),\n )\n Award.objects.create(\n person=self.hermione,\n badge=self.lc_instructor,\n awarded=datetime.date(2018, 12, 25),\n )\n Qualification.objects.create(person=self.hermione, lesson=self.git)\n Qualification.objects.create(person=self.hermione, lesson=self.sql)\n\n self.harry = Person.objects.create(\n personal=\"Harry\",\n family=\"Potter\",\n email=\"harry@hogwarts.edu\",\n gender=\"M\",\n may_contact=True,\n airport=self.airport_0_50,\n github=\"hpotter\",\n twitter=None,\n username=\"potter_harry\",\n country=\"GB\",\n )\n\n # Harry is additionally a qualified Data Carpentry instructor\n Award.objects.create(\n person=self.harry,\n badge=self.swc_instructor,\n awarded=datetime.date(2014, 5, 5),\n )\n 
Award.objects.create(\n person=self.harry,\n badge=self.dc_instructor,\n awarded=datetime.date(2014, 5, 5),\n )\n Qualification.objects.create(person=self.harry, lesson=self.sql)\n\n self.ron = Person.objects.create(\n personal=\"Ron\",\n family=\"Weasley\",\n email=\"rweasley@ministry.gov.uk\",\n gender=\"M\",\n may_contact=False,\n airport=self.airport_50_100,\n github=None,\n twitter=None,\n url=\"http://geocities.com/ron_weas\",\n username=\"weasley_ron\",\n country=\"GB\",\n )\n\n Award.objects.create(\n person=self.ron,\n badge=self.swc_instructor,\n awarded=datetime.date(2014, 11, 11),\n )\n Qualification.objects.create(person=self.ron, lesson=self.git)\n\n def _setUpNonInstructors(self):\n \"\"\"Set up person objects representing non-instructors.\"\"\"\n\n self.spiderman = Person.objects.create(\n personal=\"Peter\",\n middle=\"Q.\",\n family=\"Parker\",\n email=\"peter@webslinger.net\",\n gender=\"O\",\n gender_other=\"Spider\",\n may_contact=True,\n username=\"spiderman\",\n airport=self.airport_55_105,\n country=\"US\",\n )\n\n self.ironman = Person.objects.create(\n personal=\"Tony\",\n family=\"Stark\",\n email=\"me@stark.com\",\n gender=\"M\",\n may_contact=True,\n username=\"ironman\",\n airport=self.airport_50_100,\n country=\"US\",\n )\n\n self.blackwidow = Person.objects.create(\n personal=\"Natasha\",\n family=\"Romanova\",\n email=None,\n gender=\"F\",\n may_contact=False,\n username=\"blackwidow\",\n airport=self.airport_0_50,\n country=\"RU\",\n )\n\n def _setUpUsersAndLogin(self):\n \"\"\"Log superuser in.\"\"\"\n self._setUpSuperuser() # creates self.admin\n # log admin in\n # this user will be authenticated for all self.client requests\n return self._logSuperuserIn()\n\n def _setUpPermissions(self):\n \"\"\"Set up permission objects for consistent form selection.\"\"\"\n badge_admin = Group.objects.create(name=\"Badge Admin\")\n badge_admin.permissions.add(\n *Permission.objects.filter(codename__endswith=\"_badge\")\n )\n add_badge = Permission.objects.get(codename=\"add_badge\")\n self.ironman.groups.add(badge_admin)\n self.ironman.user_permissions.add(add_badge)\n self.ron.groups.add(badge_admin)\n self.ron.user_permissions.add(add_badge)\n self.spiderman.groups.add(badge_admin)\n self.spiderman.user_permissions.add(add_badge)\n\n def _setUpTags(self):\n \"\"\"Set up tags (the same as in production database, minus some added\n via migrations).\"\"\"\n Tag.objects.bulk_create(\n [\n Tag(name=\"TTT\", details=\"\"),\n Tag(name=\"WiSE\", details=\"\"),\n Tag(name=\"LC\", details=\"\"),\n Tag(name=\"DC\", details=\"\"),\n Tag(name=\"SWC\", details=\"\"),\n ]\n )\n\n def _setUpEvents(self):\n \"\"\"Set up a bunch of events and record some statistics.\"\"\"\n\n today = datetime.date.today()\n\n # Create a test host\n test_host = Organization.objects.create(\n domain=\"example.com\", fullname=\"Test Organization\"\n )\n\n # Create one new published event for each day in the next 10 days.\n for t in range(1, 11):\n event_start = today + datetime.timedelta(days=t)\n date_string = universal_date_format(event_start)\n slug = \"{0}-upcoming\".format(date_string)\n url = \"http://example.org/\" + (\"{0}\".format(t) * 20)\n e = Event.objects.create(\n start=event_start,\n slug=slug,\n host=test_host,\n admin_fee=100,\n url=url,\n invoice_status=\"not-invoiced\",\n country=\"US\",\n venue=\"School\",\n address=\"Overthere\",\n latitude=1,\n longitude=2,\n )\n\n # Create one new event for each day from 10 days ago to\n # 3 days ago, half invoiced\n invoice = 
itertools.cycle([\"invoiced\", \"not-invoiced\"])\n for t in range(3, 11):\n event_start = today + datetime.timedelta(days=-t)\n date_string = universal_date_format(event_start)\n Event.objects.create(\n start=event_start,\n slug=\"{0}-past\".format(date_string),\n host=test_host,\n admin_fee=100,\n invoice_status=next(invoice),\n )\n\n # create a past event that has no admin fee specified, yet it needs\n # invoice\n event_start = today + datetime.timedelta(days=-4)\n Event.objects.create(\n start=event_start,\n end=today + datetime.timedelta(days=-1),\n slug=\"{}-past-uninvoiced\".format(universal_date_format(event_start)),\n host=test_host,\n admin_fee=None,\n invoice_status=\"not-invoiced\",\n )\n\n # Create an event that started yesterday and ends tomorrow\n # with no fee, and without specifying whether they've been\n # invoiced.\n event_start = today + datetime.timedelta(days=-1)\n event_end = today + datetime.timedelta(days=1)\n Event.objects.create(\n start=event_start,\n end=event_end,\n slug=\"ends-tomorrow-ongoing\",\n host=test_host,\n admin_fee=0,\n url=\"http://example.org/ends-tomorrow-ongoing\",\n country=\"US\",\n venue=\"School\",\n address=\"Overthere\",\n latitude=1,\n longitude=2,\n )\n\n # Create an event that ends today with no fee, and without\n # specifying whether the fee has been invoiced.\n event_start = today + datetime.timedelta(days=-1)\n event_end = today\n Event.objects.create(\n start=event_start,\n end=event_end,\n slug=\"ends-today-ongoing\",\n host=test_host,\n admin_fee=0,\n url=\"http://example.org/ends-today-ongoing\",\n country=\"US\",\n venue=\"School\",\n address=\"Overthere\",\n latitude=1,\n longitude=2,\n )\n\n # Create an event that starts today with a fee, and without\n # specifying whether the fee has been invoiced.\n event_start = today\n event_end = today + datetime.timedelta(days=1)\n Event.objects.create(\n start=event_start,\n end=event_end,\n slug=\"starts-today-ongoing\",\n host=test_host,\n admin_fee=100,\n )\n\n # create a full-blown event that got cancelled\n e = Event.objects.create(\n start=event_start,\n end=event_end,\n slug=\"starts-today-cancelled\",\n url=\"http://example.org/cancelled-event\",\n latitude=-10.0,\n longitude=10.0,\n country=\"US\",\n venue=\"University\",\n address=\"Phenomenal Street\",\n host=test_host,\n )\n tags = Tag.objects.filter(name__in=[\"SWC\", \"cancelled\"])\n e.tags.set(tags)\n\n # Record some statistics about events.\n self.num_uninvoiced_events = 0\n self.num_upcoming = 0\n for e in Event.objects.all():\n e.is_past_event = e.start < today and (e.end is None or e.end < today)\n if e.invoice_status == \"not-invoiced\" and e.is_past_event:\n self.num_uninvoiced_events += 1\n if e.url and (e.start > today):\n self.num_upcoming += 1\n\n def _setUpRoles(self):\n \"\"\"Before #626, we don't have a migration that introduces roles that\n are currently in the database. 
This is an auxiliary method for adding\n them to the tests, should one need them.\"\"\"\n Role.objects.bulk_create(\n [\n Role(name=\"helper\", verbose_name=\"Helper\"),\n Role(name=\"instructor\", verbose_name=\"Instructor\"),\n Role(name=\"host\", verbose_name=\"Host\"),\n Role(name=\"learner\", verbose_name=\"Learner\"),\n Role(name=\"organizer\", verbose_name=\"Organizer\"),\n Role(name=\"tutor\", verbose_name=\"Tutor\"),\n ]\n )\n\n def saveResponse(self, response, filename=\"error.html\"):\n content = response.content.decode(\"utf-8\")\n with open(filename, \"w\") as f:\n f.write(content)\n\n # Web-test helpers\n def assertSelected(self, field, expected):\n if not isinstance(field, webtest.forms.Select):\n raise TypeError\n\n expected_value = field._get_value_for_text(expected)\n got_value = field.value\n\n # field.options is a list of (value, selected?, verbose name) triples\n selected = [o[2] for o in field.options if o[1]]\n\n self.assertEqual(\n expected_value,\n got_value,\n msg='Expected \"{}\" to be selected '\n \"while {} is/are selected.\".format(expected, selected),\n )\n\n def passCaptcha(self, data_dictionary):\n \"\"\"Extends provided `data_dictionary` with RECAPTCHA pass data.\"\"\"\n data_dictionary.update(\n {\"g-recaptcha-response\": \"PASSED\"} # to auto-pass RECAPTCHA\n )\n\n\nclass FormTestHelper:\n def _test_field_other(\n self,\n Form,\n first_name,\n other_name,\n valid_first,\n valid_other,\n empty_first=\"\",\n empty_other=\"\",\n first_when_other=\"\",\n blank=False,\n ):\n \"\"\"Universal way of testing field `name` and it's \"_other\" counterpart\n `other_name`.\n\n 4 test scenarios are implemented:\n 1) no data in either field - first field throws error if required by\n `blank`\n 2) valid entry in first, requiring no input in the other\n 3) valid entry in second, requiring no input in the first one\n 4) both entries filled, error in the second\"\"\"\n\n # 1: data required\n data = {\n first_name: empty_first,\n other_name: empty_other,\n }\n form = Form(data)\n if blank:\n self.assertNotIn(first_name, form.errors)\n self.assertNotIn(other_name, form.errors)\n else:\n self.assertIn(first_name, form.errors)\n self.assertNotIn(other_name, form.errors)\n\n # 2: valid entry (original field only)\n data = {\n first_name: valid_first,\n other_name: empty_other,\n }\n form = Form(data)\n self.assertNotIn(first_name, form.errors)\n self.assertNotIn(other_name, form.errors)\n\n # 3: valid entry (\"other\" field only)\n data = {\n first_name: first_when_other,\n other_name: valid_other,\n }\n form = Form(data)\n self.assertNotIn(first_name, form.errors)\n self.assertNotIn(other_name, form.errors)\n\n # 4: invalid entry, data in \"other\" is not needed\n data = {\n first_name: valid_first,\n other_name: valid_other,\n }\n form = Form(data)\n self.assertIn(first_name, form.errors)\n self.assertNotIn(other_name, form.errors)\n","sub_path":"amy/workshops/tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":18767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"50786348","text":"\n\n# name= input(\"enter a name \")\n# i=0\n# while i 0 and state[-3] > 0:\n action[4] = +1.0\n else:\n action[4] = 0.0\n\n pos = (state[0], state[1])\n vel = (state[2], state[3])\n angle = state[4]\n angular = state[5]\n state = torch.tensor(state).to(device).unsqueeze(0).double()\n self.state = state\n sample = random.random()\n eps_threshold = EPS_END + (EPS_START - EPS_END) * \\\n math.exp(-1. 
* self.steps_done / EPS_DECAY)\n if is_test or sample > eps_threshold:\n with torch.no_grad():\n value_map = self.policy_net(state)[0][0]\n value_map *= self.grid\n # plt.imshow(value_map.numpy())\n # plt.show()\n col_max, col_max_indice = value_map.max(dim=0)\n max_col_max, max_col_max_indice = col_max.max(dim=0)\n x = max_col_max_indice.item()\n y = col_max_indice[x].item()\n x = x/40.0*8.0\n y = y/25.0*5.0\n else:\n value_map = torch.randn(25, 40).double().to(device)\n value_map *= self.grid\n # plt.imshow(value_map.numpy())\n # plt.show()\n col_max, col_max_indice = value_map.max(0)\n max_col_max, max_col_max_indice = col_max.max(0)\n x = max_col_max_indice.item()\n y = col_max_indice[x].item()\n x = x/40.0*8.0\n y = y/25.0*5.0\n #x, y = random.random()*8.0, random.random()*5.0\n\n self.target = (x, y)\n try:\n self.move.setGoal(pos, self.target)\n except:\n return action\n self.steps_done += 1\n\n action = self.move.moveTo(pos, vel, angle, angular, action)\n return action\n\n def push(self, next_state, reward):\n device = self.device\n target = torch.tensor(self.target, device=device).double()\n next_state = torch.tensor(next_state).to(device).unsqueeze(0).double()\n reward = torch.tensor([reward], device=device).double()\n self.memory.push(self.state, target, next_state, reward)\n\n def optimize_model(self):\n if len(self.memory) < BATCH_SIZE:\n return\n device = self.device\n transitions = self.memory.sample(BATCH_SIZE)\n # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for\n # detailed explanation). This converts batch-array of Transitions\n # to Transition of batch-arrays.\n batch = Transition(*zip(*transitions))\n\n # Compute a mask of non-final states and concatenate the batch elements\n # (a final state would've been the one after which simulation ended)\n non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,\n batch.next_state)), device=device, dtype=torch.uint8)\n non_final_next_states = torch.cat([s for s in batch.next_state\n if s is not None])\n state_batch = torch.cat(batch.state)\n action_batch = torch.cat(batch.action)\n reward_batch = torch.cat(batch.reward)\n\n # Compute Q(s_t, a) - the model computes Q(s_t), then we select the\n # columns of actions taken. 
These are the actions which would've been taken\n # for each batch state according to policy_net\n state_action_values = self.policy_net(state_batch)\n state_action_values = state_action_values.reshape(\n [BATCH_SIZE, -1]).max(dim=1)[0]\n\n # Compute V(s_{t+1}) for all next states.\n # Expected values of actions for non_final_next_states are computed based\n # on the \"older\" target_net; selecting their best reward with max(1)[0].\n # This is merged based on the mask, such that we'll have either the expected\n # state value or 0 in case the state was final.\n next_state_values = torch.zeros(BATCH_SIZE, device=device).double()\n value = self.target_net(non_final_next_states)\n value = value.reshape([BATCH_SIZE, -1]).max(dim=1)[0].detach()\n next_state_values[non_final_mask] = value\n # Compute the expected Q values\n expected_state_action_values = (\n next_state_values * GAMMA) + reward_batch\n\n # Compute Huber loss\n loss = F.smooth_l1_loss(state_action_values,\n expected_state_action_values)\n\n # Optimize the model\n self.optimizer.zero_grad()\n loss.backward()\n for param in self.policy_net.parameters():\n if(param.grad is None): continue\n param.grad.data.clamp_(-1, 1)\n self.optimizer.step()\n\n def update_target_net(self):\n self.target_net.load_state_dict(self.policy_net.state_dict())\n\n def save(self):\n torch.save(self.policy_net.state_dict(), \"ICRA.model\")\n\n def load(self):\n self.policy_net.load_state_dict(torch.load(\n \"ICRA.model\", map_location=self.device))\n self.target_net.load_state_dict(self.policy_net.state_dict())\n","sub_path":"Agent/DQNAgent.py","file_name":"DQNAgent.py","file_ext":"py","file_size_in_byte":7254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"594786950","text":"# -*- coding: utf-8 -*-\n#\n# Author: jinlong.yang\n#\n\nfrom osmo.core import log as logging\n\nLOG = logging.getLogger(__name__)\n\n\nclass Worker(object):\n\n def run(self, data):\n result = \"thread handle: %s\" % data\n return result\n","sub_path":"newblance/daemon/core/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"135798676","text":"from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter\n\n\nclass Command(BaseXpressDemocracyClubCsvImporter):\n council_id = \"SKE\"\n addresses_name = \"2021-10-07/SouthKestevenDemocracy_Club__28October2021.tsv\"\n stations_name = \"2021-10-07/SouthKestevenDemocracy_Club__28October2021.tsv\"\n elections = [\"2021-10-28\"]\n csv_delimiter = \"\\t\"\n\n def address_record_to_dict(self, record):\n uprn = record.property_urn.strip().lstrip(\"0\")\n\n if uprn in [\n \"100030946733\", # TOLL BAR HOUSE UFFINGTON ROAD, STAMFORD\n \"100030901277\", # 64 BURGHLEY STREET, BOURNE\n \"100030901276\", # 60 BURGHLEY STREET, BOURNE\n \"10007275381\", # FLAT 59 LONDON ROAD, GRANTHAM\n \"10007276237\", # 86 THE DEEPINGS CARAVAN PARK TOWNGATE EAST, MARKET DEEPING\n ]:\n return None\n\n if record.addressline6 in [\n \"NG32 1AT\",\n \"NG33 4JQ\",\n \"NG33 4HE\",\n \"NG33 4SP\",\n \"PE9 4PE\",\n \"PE10 0AA\",\n \"PE10 9RP\",\n \"PE9 2XG\",\n \"NG32 3AU\",\n \"NG32 3AY\",\n \"NG23 5HN\",\n \"NG32 2LW\",\n \"NG31 8RJ\",\n \"NG31 9JZ\",\n \"NG31 7QP\",\n \"NG31 8AB\",\n \"NG31 8NH\",\n ]:\n return None\n\n return 
super().address_record_to_dict(record)\n","sub_path":"polling_stations/apps/data_importers/management/commands/import_south_kesteven.py","file_name":"import_south_kesteven.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"626096972","text":"from pymongo import MongoClient\nimport uuid\n\nclient = MongoClient('localhost', 27017)\ndb = client.contact\ncoll = db.users\nchange_flag = str(uuid.uuid4()).replace('-', '_')\n\n_garbage = \"\"\nfor i in range(200):\n _garbage += \"this only to occupy the space for user, avoid mongo to move the data when it get bigger. \"\n\ndef format_userid(str_old):\n str_fmt = \"10010000000\"\n if not str_old:\n return \"\"\n\n copy_len = len(str_fmt) - len(str_old)\n if copy_len <= 0:\n return str_old\n\n str_new = str_fmt[0:copy_len] + str_old\n return str_new\n\ndef format_contact(userid):\n if not userid:\n return\n\n coll.remove( {\"id\":userid}, True)\n\n\nfor line in open('users_ids.txt'):\n format_contact(str(line).strip())\n\n\n","sub_path":"testscript/userdel.py","file_name":"userdel.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"249754174","text":"\"\"\"SyncService Tests\"\"\"\nimport os\nimport pytest\nfrom hamcrest import contains_string, assert_that\n\n# pylint: disable=redefined-outer-name\n@pytest.fixture()\ndef get_ansible_vars(host):\n \"\"\"Define get_ansible_vars\"\"\"\n java_role = \"file=../../../java/vars/main.yml name=java_role\"\n common_vars = \"file=../../../common/vars/main.yml name=common_vars\"\n common_defaults = \"file=../../../common/defaults/main.yml name=common_defaults\"\n common_hosts = \"../../group_vars/all.yml name=common_hosts\"\n syncservices = \"file=../../vars/main.yml name=syncservices\"\n ansible_vars = host.ansible(\"include_vars\", java_role)[\"ansible_facts\"][\"java_role\"]\n ansible_vars.update(host.ansible(\"include_vars\", java_role)[\"ansible_facts\"][\"java_role\"])\n ansible_vars.update(host.ansible(\"include_vars\", common_vars)[\"ansible_facts\"][\"common_vars\"])\n ansible_vars.update(host.ansible(\"include_vars\", common_hosts)[\"ansible_facts\"][\"common_hosts\"])\n ansible_vars.update(host.ansible(\"include_vars\", common_defaults)[\"ansible_facts\"][\"common_defaults\"])\n ansible_vars.update(host.ansible(\"include_vars\", syncservices)[\"ansible_facts\"][\"syncservices\"])\n return ansible_vars\n\ntest_host = os.environ.get('TEST_HOST')\n\ndef test_sync_log_exists(host, get_ansible_vars):\n \"\"\"Check that Sync Service log exists\"\"\"\n assert_that(host.file(\"{}/sync-service.log\".format(get_ansible_vars[\"logs_folder\"])).exists, get_ansible_vars[\"logs_folder\"])\n\ndef test_sync_service(host, get_ansible_vars):\n \"Check that Sync Service is enabled and running\"\n assert_that(host.service(\"alfresco-sync\").is_running)\n assert_that(host.service(\"alfresco-sync\").is_enabled)\n\ndef test_sync_health(host, get_ansible_vars):\n \"\"\"Check Sync Service health\"\"\"\n cmd = host.run(\"curl -iL http://{}:9090/alfresco/healthcheck\".format(test_host))\n assert_that(cmd.stdout, contains_string(\"ActiveMQ connection Ok\"))\n assert_that(cmd.stdout, contains_string(\"Database connection Ok\"))\n assert_that(cmd.stdout, contains_string(\"Repository connection Ok\"))\n assert_that(cmd.stdout, contains_string(\"HTTP/1.1 200\"))\n\ndef test_environment_jvm_opts(host, get_ansible_vars):\n \"Check that 
overwritten JVM_OPTS are taken into consideration\"\n pid = host.run(\"/opt/openjdk*/bin/jps -lV | grep SyncService | awk '{print $1}'\")\n process_map = host.run(\"/opt/openjdk*/bin/jhsdb jmap --heap --pid {}\".format(pid.stdout))\n assert_that(process_map.stdout, contains_string(\"MaxHeapSize = 943718400 (900.0MB)\"))\n","sub_path":"roles/sync/molecule/default/tests/test_sync.py","file_name":"test_sync.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"633302","text":"from flask import Blueprint, render_template, request, session, redirect, url_for\nfrom os import path\nfrom cpatcha.routes import current_user, login_required\n\nhome_blueprint = Blueprint(\n 'home',\n __name__,\n # path.pardir ==> ../\n template_folder=path.join(path.pardir, path.pardir, 'templates', 'home'),\n # Prefix of Route URL\n url_prefix='/home',\n)\n\n\n@home_blueprint.route(\"/\")\n@login_required\ndef home():\n return render_template(\"home.html\")\n\n","sub_path":"cpatcha/routes/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"525974437","text":"# The sum of the squares of the first ten natural numbers is 385\n#\n# The square of the sum of the first ten natural numbers is 3025\n#\n# Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 − 385 = 2640.\n#\n# Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.\n\nsum = 0\nsumOfSquares = 0\n\nfor n in range(1, 101):\n sum += n\n sumOfSquares += n**2\n\nresult = sum**2 - sumOfSquares\n\nprint(\"The difference between sum of the squares and the square of the sum for the first one hundred natural numbers is: %d\" % result)","sub_path":"006. 
Sum Square Difference/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"320804413","text":"'''\nCARD\nSUIT,RANK,VALUE\n'''\nimport random\nsuits=('Hearts','Diamonds','Spades','Clubs')\nranks=('Two','Three','Four','Five','Six','Seven','Eight','Nine','Ten','Jack',\"Queen\",'King','Ace')\nvalue = {'Two':2,'Three':3,'Four':4,'Five':5,'Six':6,'Seven':7,'Eight':8,'Nine':9,'Ten':10,'Jack':11,\"Queen\":12,'King':13,'Ace':14}\nclass Card:\n    def __init__(self,suit,rank):\n        self.suit = suit\n        self.rank = rank\n        self.value = value[rank]\n\n    def __str__(self):\n        return self.rank + \" of \" + self.suit\n\n\n","sub_path":"card_game/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"1196220","text":"from flask import Flask\nimport flask_admin as admin\n\nfrom app.views import UserView, InstitutesView, AnalyticsView, StatisticView, IndexView, CoursesView, GroupsView, BotSendMessageView, \\\n    ScheduleView, ParserStatusView, VkUserView, TG_check_status, VK_check_status\nfrom app.storage import db\n\n# Create the application\napp = Flask(__name__,\n            static_folder='static', )\n\n# ==========Settings==========\n# Create dummy secret key so we can use sessions\napp.config['SECRET_KEY'] = '123456790'\n\n# ==========URLs==========\napp.add_url_rule('/', view_func=IndexView.as_view('index'))\napp.add_url_rule('/status/parser', view_func=ParserStatusView.as_view('parser_status'))\n\napp.add_url_rule('/status/vk_reminders', view_func=VK_check_status.as_view('vk_reminders'))\napp.add_url_rule('/status/tg_reminders', view_func=TG_check_status.as_view('tg_reminders'))\n\n\n# ==========Admin panel==========\nadmin = admin.Admin(app, name='Умное расписание ИРНИТУ ', template_mode='bootstrap3')\n\n# Add the views\nadmin.add_view(UserView(db.users, 'Пользователи Telegram', category='База данных'))\nadmin.add_view(VkUserView(db.VK_users, 'Пользователи Vk', category='База данных'))\n\n\nadmin.add_view(InstitutesView(db.institutes, 'Институты', category='База данных'))\n\nadmin.add_view(AnalyticsView(name='Аналитика', endpoint='analytics'))\nadmin.add_view(StatisticView(db.tg_statistics, 'Статистика Telegram', endpoint='statistics',category='База данных'))\nadmin.add_view(BotSendMessageView(name='Отправка сообщений',\n                                  endpoint='tg_bot_send_messages', category='Телеграм бот'))\n\nadmin.add_view(CoursesView(db.courses, 'Курсы', category='База данных'))\nadmin.add_view(GroupsView(db.groups, 'Группы', category='База данных'))\nadmin.add_view(ScheduleView(db.schedule, 'Расписание', category='База данных'))\n\n","sub_path":"web_manager/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"399721054","text":"import tensorflow as tf\nimport math\nimport numpy as np\nimport tensorflow_probability as tfp\n\nclass Sequential(tf.keras.models.Sequential):\n    \"\"\"\n    Class that extends ``torch.nn.Sequential`` for computing the output of\n    the function alongside with the log-det-Jacobian of such transformation.\n    \"\"\"\n    \n    # def call(self, inputs: tf.Tensor):\n    def call(self, inputs, training=None, mask=None):\n        \"\"\"\n        Parameters\n        ----------\n        inputs : ``torch.Tensor``, required.\n            The input tensor.\n        Returns\n        -------\n        The output tensor and the log-det-Jacobian 
of this transformation.\n \"\"\"\n \n log_det_jacobian = 0.\n # for i, module in enumerate(self._modules.values()):\n for i, layer in enumerate(self.layers):\n inputs, log_det_jacobian_ = layer(inputs)\n log_det_jacobian = log_det_jacobian + log_det_jacobian_\n return inputs, log_det_jacobian\n\n\nclass BNAF(tf.keras.models.Sequential):\n \"\"\"\n Class that extends ``torch.nn.Sequential`` for constructing a Block Neural \n Normalizing Flow.\n \"\"\"\n\n def __init__(self, layers=None, name=None, res: str = None):\n # def __init__(self, *args, res: str = None):\n \"\"\"\n Parameters\n ----------\n *args : ``Iterable[torch.nn.Module]``, required.\n The modules to use.\n res : ``str``, optional (default = None).\n Which kind of residual connection to use. ``res = None`` is no residual \n connection, ``res = 'normal'`` is ``x + f(x)`` and ``res = 'gated'`` is\n ``a * x + (1 - a) * f(x)`` where ``a`` is a learnable parameter.\n \"\"\"\n \n # super(BNAF, self).__init__(*args)\n super(BNAF, self).__init__(name=name)\n self.supports_masking = True\n self._build_input_shape = None\n self._compute_output_and_mask_jointly = True\n\n # Add to the model any layers passed to the constructor.\n if layers:\n for layer in layers:\n self.add(layer)\n\n self.res = res\n \n if res == 'gated':\n self.gate = tf.get_variable(name='gate', shape=1, initializer=tf.initializers.random_normal)\n # self.gate = torch.nn.Parameter(torch.nn.init.normal_(torch.Tensor(1)))\n \n # def forward(self, inputs : tf.Tensor):\n # def call(self, inputs: tf.Tensor):\n def call(self, inputs, training=None, mask=None):\n\n \"\"\"\n Parameters\n ----------\n inputs : ``torch.Tensor``, required.\n The input tensor.\n Returns\n -------\n The output tensor and the log-det-Jacobian of this transformation.\n \"\"\"\n\n outputs = inputs\n grad = None\n\n ### apply layers in TF and get gradients...module in pytorch == layers in tf\n # for module in self._modules.values(): #pytorch implementation\n for layer in self.layers:\n outputs, grad = layer(outputs, grad) #not sure if use \"layer\" or \"layer.call\"\n grad = grad if len(grad.shape) == 4 else tf.reshape(grad, (grad.shape + [1, 1]))\n\n # return outputs, grad ## debug\n\n assert inputs.shape[-1] == outputs.shape[-1]\n grad = tf.squeeze(grad)\n reduce_sum = len(grad.shape) > 1\n\n if reduce_sum:\n if self.res == 'normal':\n return inputs + outputs, tf.reduce_sum(tf.keras.activations.softplus(tf.squeeze(grad)), axis=-1)\n elif self.res == 'gated':\n return tf.nn.sigmoid(self.gate) * outputs + (1 - tf.nn.sigmoid(self.gate)) * inputs, \\\n tf.reduce_sum(tf.nn.softplus(tf.squeeze(grad) + self.gate) - \\\n tf.nn.softplus(self.gate), axis=-1)\n else:\n return outputs, tf.reduce_sum(tf.squeeze(grad), axis=-1)\n else:\n if self.res == 'normal':\n return inputs + outputs, tf.keras.activations.softplus(grad)\n elif self.res == 'gated':\n return tf.nn.sigmoid(self.gate) * outputs + (1 - tf.nn.sigmoid(self.gate)) * inputs, \\\n tf.nn.softplus(grad + self.gate) - tf.nn.softplus(self.gate)\n else:\n return outputs, grad\n \n def _get_name(self):\n return 'BNAF(res={})'.format(self.res)\n \n \n# class Permutation(torch.nn.Module):\nclass Permutation(tf.keras.layers.Layer):\n \"\"\"\n Module that outputs a permutation of its input.\n \"\"\"\n \n def __init__(self, in_features : int, p : list = None):\n \"\"\"\n Parameters\n ----------\n in_features : ``int``, required.\n The number of input features.\n p : ``list`` or ``str``, optional (default = None)\n The list of indices that indicate the permutation. 
When ``p`` is not a\n list, if ``p = 'flip'``the tensor is reversed, if ``p = None`` a random \n permutation is applied.\n \"\"\"\n \n super(Permutation, self).__init__()\n \n self.in_features = in_features\n \n if p is None:\n self.p = tfp.bijectors.Permute(np.random.permutation(in_features))\n elif p == 'flip':\n self.p = tfp.bijectors.Permute(list(reversed(range(in_features))))\n else:\n self.p = tfp.bijectors.Permute(p)\n \n def call(self, inputs : tf.Tensor, **kwargs):\n \"\"\"\n Parameters\n ----------\n inputs : ``torch.Tensor``, required.\n The input tensor.\n Returns\n -------\n The permuted tensor and the log-det-Jacobian of this permutation.\n \"\"\"\n \n # return inputs[:,self.p], 0\n return self.p.forward(inputs), 0\n \n def __repr__(self):\n return 'Permutation(in_features={}, p={})'.format(self.in_features, self.p)\n \n \n# class MaskedWeight(torch.nn.Module):\nclass MaskedWeight(tf.keras.layers.Layer):\n \"\"\"\n Module that implements a linear layer with block matrices with positive diagonal blocks.\n Moreover, it uses Weight Normalization (https://arxiv.org/abs/1602.07868) for stability.\n \"\"\"\n \n def __init__(self, in_features : int, out_features : int, dim : int, bias : bool = True):\n \"\"\"\n Parameters\n ----------\n in_features : ``int``, required.\n The number of input features per each dimension ``dim``.\n out_features : ``int``, required.\n The number of output features per each dimension ``dim``.\n dim : ``int``, required.\n The number of dimensions of the input of the flow.\n bias : ``bool``, optional (default = True).\n Whether to add a parametrizable bias.\n \"\"\"\n\n super(MaskedWeight, self).__init__()\n self.in_features, self.out_features, self.dim = in_features, out_features, dim\n\n weight = np.zeros((out_features, in_features))\n\n ## tensorflow init\n for i in range(dim):\n weight[(i * out_features // dim):((i + 1) * out_features // dim), 0:((i + 1) * in_features // dim)] = \\\n tf.get_variable(\"w\", shape=[out_features // dim, (i + 1) * in_features // dim], initializer=tf.contrib.layers.xavier_initializer(), dtype=tf.float32, trainable=False).numpy()\n # ## torch init\n # for i in range(dim):\n # weight[(i * out_features // dim):((i + 1) * out_features // dim), 0:((i + 1) * in_features // dim)] = torch.nn.init.xavier_uniform_(\n # torch.Tensor(out_features // dim, (i + 1) * in_features // dim)).numpy()\n\n with tf.variable_scope(\"params\", reuse=False):\n self._weight = tf.get_variable(\"off_diagonal\", initializer=tf.cast(weight, dtype=tf.float32), dtype=tf.float32)\n ## tf init\n self._diag_weight = tf.get_variable(\"diag\", initializer=np.log(np.random.uniform(0,1, size=(out_features, 1))).astype(np.float32), dtype=tf.float32) #maybe takes log because we're going to take exp later?\n self.bias = tf.get_variable(\"bias\", shape=out_features, initializer=tf.initializers.random_uniform(-1 / math.sqrt(out_features), 1 / math.sqrt(out_features))) if bias else 0\n # ## torch init\n # self._diag_weight = tf.get_variable(\"diag\", initializer=torch.nn.init.uniform_(torch.Tensor(out_features, 1)).log().numpy(), dtype=tf.float32) #maybe takes log because we're going to take exp later?\n # self.bias = tf.get_variable(\"bias\", initializer=torch.nn.init.uniform_(torch.Tensor(out_features),\n # -1 / math.sqrt(out_features),\n # 1 / math.sqrt(out_features)).numpy()) if bias else 0\n\n mask_d = np.zeros_like(weight)\n for i in range(dim):\n mask_d[i * (out_features // dim):(i + 1) * (out_features // dim),\n i * (in_features // dim):(i + 1) * (in_features 
// dim)] = 1\n\n # self.register_buffer('mask_d', mask_d)\n self.mask_d = tf.constant(name='mask_d', value=mask_d, dtype=tf.float32)\n\n mask_o = np.ones_like(weight)\n for i in range(dim):\n mask_o[i * (out_features // dim):(i + 1) * (out_features // dim),\n i * (in_features // dim):] = 0\n \n # self.register_buffer('mask_o', mask_o)\n self.mask_o = tf.constant(name='mask_o', value=mask_o, dtype=tf.float32)\n\n def get_weights(self):\n \"\"\"\n Computes the weight matrix using masks and weight normalization.\n It also compute the log diagonal blocks of it.\n \"\"\"\n\n # error in original here i think -- should be self._diag_weight or w_squared_norm is not correct\n w = tf.multiply(tf.exp(self._weight), self.mask_d) + tf.multiply(self._weight, self.mask_o)\n # w = tfp.bijectors.transform_diagonal(self._weight)\n # w = tf.multiply(tf.exp(self._diag_weight), self.mask_d) + tf.multiply(self._weight, self.mask_o)\n\n w_squared_norm = tf.reduce_sum(tf.math.square(w), axis=-1, keepdims=True)\n \n w = tf.exp(self._diag_weight) * w / tf.sqrt(w_squared_norm)\n\n ## this piece feeds the log-determinant of the jacobian -- the diagonals are all that are needed\n # and they are extracted with the boolean_mask in the return argument below\n wpl = self._diag_weight + self._weight - 0.5 * tf.log(w_squared_norm)\n\n # return tf.transpose(w), tf.transpose(wpl)[self.mask_d.byte().t()].view(\n # self.dim, self.in_features // self.dim, self.out_features // self.dim)\n\n return tf.transpose(w), tf.reshape(tf.boolean_mask(tf.transpose(wpl),tf.transpose(tf.cast(self.mask_d, tf.bool))),(\n self.dim, self.in_features // self.dim, self.out_features // self.dim))\n\n\n # def forward(self, inputs, grad : torch.Tensor = None):\n def call(self, inputs: tf.Tensor, grad: tf.Tensor = None):\n \"\"\"\n Parameters\n ----------\n inputs : ``torch.Tensor``, required.\n The input tensor.\n grad : ``torch.Tensor``, optional (default = None).\n The log diagonal block of the partial Jacobian of previous transformations.\n Returns\n -------\n The output tensor and the log diagonal blocks of the partial log-Jacobian of previous \n transformations combined with this transformation.\n \"\"\"\n \n w, wpl = self.get_weights()\n \n # g = wpl.transpose(-2, -1).unsqueeze(0).repeat(inputs.shape[0], 1, 1, 1)\n grad_perm = list(range(len(wpl.shape)))\n grad_perm[-1] = len(grad_perm)-2\n grad_perm[-2] = len(grad_perm)-1\n g = tf.tile(tf.expand_dims(tf.transpose(wpl, perm=grad_perm), axis=0), (inputs.shape[0], 1, 1, 1))\n \n # return inputs.matmul(w) + self.bias, torch.logsumexp(\n # g.unsqueeze(-2) + grad.transpose(-2, -1).unsqueeze(-3), -1) if grad is not None else g\n\n if grad is not None:\n grad_perm = list(range(len(grad.shape)))\n grad_perm[-1] = len(grad_perm)-2\n grad_perm[-2] = len(grad_perm)-1\n\n return tf.matmul(inputs, w) + self.bias, tf.reduce_logsumexp(\n tf.expand_dims(g, axis=-2) + tf.expand_dims(tf.transpose(grad, perm=grad_perm), axis=-3), axis=-1) if grad is not None else g\n\n def __repr__(self):\n return 'MaskedWeight(in_features={}, out_features={}, dim={}, bias={})'.format(\n self.in_features, self.out_features, self.dim, not isinstance(self.bias, int))\n\n \nclass Tanh(tf.keras.layers.Layer):\n \"\"\"\n Class that extends ``torch.nn.Tanh`` additionally computing the log diagonal\n blocks of the Jacobian.\n \"\"\"\n\n def call(self, inputs, grad : tf.Tensor = None):\n \"\"\"\n Parameters\n ----------\n inputs : ``torch.Tensor``, required.\n The input tensor.\n grad : ``torch.Tensor``, optional (default = None).\n The 
log diagonal blocks of the partial Jacobian of previous transformations.\n Returns\n -------\n The output tensor and the log diagonal blocks of the partial log-Jacobian of previous \n transformations combined with this transformation.\n \"\"\"\n \n # g = - 2 * (inputs - tf.math.log(2) + tf.keras.activations.softplus(- 2 * inputs))\n # return tf.tanh(inputs), (g.view(grad.shape) + grad) if grad is not None else g\n\n g = - 2 * (inputs - tf.math.log(2.) + tf.keras.activations.softplus(- 2. * inputs))\n return tf.tanh(inputs), (tf.reshape(g,grad.shape) + grad) if grad is not None else g\n\n\n## tensorflow probability implementation\n# class MaskedWeight_tfp(tfp.bijectors.Bijector):\n#\n# def __init__(self, validate_args=False, name=\"MaskedWeight_tfp\", Nin, Nout, init='glorot'):\n# super(MaskedWeight_tfp, self).__init__(\n# validate_args=validate_args,\n# forward_min_event_ndims=0,\n# name=name)\n#\n# self.diag_transform = tfp.bijectors.TransformDiagonal(diag_bijector=tfp.bijectors.Exp())\n# if init=='glorot':\n# pass\n# elif init=='he':\n# pass\n#\n# def _forward(self, x):\n# return tf.matmul(x, self._weights)\n#\n# def _inverse(self, y):\n# return tf.log(y)\n#\n# def _inverse_log_det_jacobian(self, y):\n# return -self._forward_log_det_jacobian(self._inverse(y))\n#\n# def _forward_log_det_jacobian(self, x):\n# # Notice that we needn't do any reducing, even when`event_ndims > 0`.\n# # The base Bijector class will handle reducing for us; it knows how\n# # to do so because we called `super` `__init__` with\n# # `forward_min_event_ndims = 0`.\n# return x","sub_path":"bnaf.py","file_name":"bnaf.py","file_ext":"py","file_size_in_byte":14653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"182218335","text":"#############################################################\n#\n#\tProperty of HABET.\n#\n# Authors:\n# Jared Danner\n#\n#############################################################\n\n# System user id.\nSYSTEM_USER = None\n\n# Serial port objects. 
Object class defined at bottom of communication.py.\n# If left None, it will dynamically find its correct port #.\nPORT_MISSION_CONTROL_LORA = None\nPORT_PAYLOAD_LORA = None\nPORT_RECOVERY_LORA = None\nPORT_ROTOR_CONTROLLER = None\n\n# Node ID's.\nNODE_MISSION_CONTROL_ID = 0\nNODE_PAYLOAD_ID = 1\nNODE_RECOVERY_ID = 2\n\n# Tkinter frame objects.\nmc_class_reference = None\npayload_class_reference = None\n\n# Threaded timer objects.\ntimer_mission_control_lora = None\ntimer_payload_lora = None\ntimer_recovery_lora = None\ntimer_mission_control_contact_timer = None\ntimer_payload_contact_timer = None\ntimer_recovery_contact_timer = None","sub_path":"GUI/Release/L-151-A/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"294415017","text":"from concurrent import futures\nfrom jproperties import Properties\nfrom zeroconf import IPVersion, ServiceInfo, Zeroconf\n\nimport logging\nimport rental_pb2_grpc\nimport rental_pb2\nimport grpc\nimport socket\nimport queue\n\n\nclass RentalService(rental_pb2_grpc.RentalServiceServicer): \n    \n    rentals = ['Hertz', 'Avis Car Rental', 'Alamo', 'Budget Car Rental']\n    \n    def rentalList(self, request, context):\n        self.rentals\n        n1 = \"\\n\"\n        print(f\"{n1}Getting list of rentals from server ...\")\n        for rental in self.rentals:\n            yield rental_pb2.RentalListResponse(result=rental)\n            print(\"Rental: %s\" % rental)\n\n    list = []\n    \n    def rentalBooking(self, request_iterator, context):\n        self.list\n        rental = \"\"\n        car = \"\"\n        rentalDate = \"\"\n        returnDate = \"\"\n        userInput = \"\"\n        n1 = \"\\n\"\n        \n        for value in request_iterator:\n            self.list.append(value)\n            if (len(self.list)) == 1:\n                userInput = value.text\n                if (userInput == 'Hertz' or userInput == 'Avis Car Rental' or userInput == 'Alamo' or userInput == 'Budget Car Rental'):\n                    rental = value.text\n                    print(\"Choose a car from the following list: Bmw, Volkswagen, Audi, Toyota or Honda\")\n                else:\n                    print(f\"{n1}Error, please enter one of the four rental companies\")\n                    self.list.remove(value)\n            elif (len(self.list)) == 2:\n                userInput = value.text\n                if(userInput == 'Bmw' or userInput == 'Volkswagen' or userInput == 'Audi' or userInput == 'Toyota' or userInput == 'Honda'):\n                    car = value.text\n                    print(f\"{n1}Enter the date you want to rent the car\")\n                else:\n                    print(f\"{n1}Error, only cars available are: Bmw, Volkswagen, Audi, Toyota and Honda\")\n                    self.list.remove(value)\n            elif (len(self.list)) == 3:\n                rentalDate = value.text\n                print(f\"{n1}Enter the date you wish to return the car to the rental\")\n            elif (len(self.list)) == 4:\n                returnDate = value.text\n                print(f\"{n1}Rental company: %s\" % rental)\n                print(f\"Car: %s\" % car)\n                print(f\"Rental date: %s\" % rentalDate)\n                print(f\"Return date: %s\" % returnDate)\n                return rental_pb2.RentalBookingResponse(rental=rental, car=car, rentalDate=rentalDate, returnDate=returnDate)\n    \n    \ndef rentalServer():\n    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n    rental_pb2_grpc.add_RentalServiceServicer_to_server(RentalService(), server)\n    server.add_insecure_port('[::]:60003')\n    server.start()\n    n1 = \"\\n\"\n    print(f'{n1}Rental Server Started')\n    server.wait_for_termination()
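\n\n# Minimal client-side sketch (not part of the original service) showing how the\n# server-streaming rentalList RPC above could be called. The stub class name\n# follows standard gRPC codegen for RentalService; the request message name\n# RentalListRequest is an assumption - check rental.proto for the real one.\ndef rentalClientDemo():\n    with grpc.insecure_channel('localhost:60003') as channel:\n        stub = rental_pb2_grpc.RentalServiceStub(channel)\n        # rentalList streams one response per rental company\n        for reply in stub.rentalList(rental_pb2.RentalListRequest()):\n            print(\"Rental: %s\" % reply.result)\n\n \ndef rentalRegister():\n    global zeroconf\n    desc = {'path': 'rental.properties'}\n    \n    info = ServiceInfo(\"_http._tcp.local.\", \"rental._http._tcp.local.\", addresses=[socket.inet_aton(\"192.168.156.161\")], port=60003, properties=desc, 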
server=\"rental.local.\",)\n zeroconf = Zeroconf()\n zeroconf.register_service(info)\n print('registering rental service ...')\n\n \ndef rentalProperties():\n configs = Properties()\n with open('rental.properties', 'rb')as config_file:\n configs.load(config_file)\n print('Service properties')\n print(configs.get(\"rental_service_type\"))\n print(configs.get(\"rental_service_name\"))\n print(configs.get(\"rental_service_description\"))\n print(configs.get(\"rental_service_port\"))\n\n \nif __name__ == \"__main__\":\n logging.basicConfig()\n rentalProperties()\n rentalRegister()\n rentalServer()\n \n","sub_path":"src/main/java/com.alexeyre.grpc.rental/RentalServer.py","file_name":"RentalServer.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"25038280","text":"\ndef is_academic_phrase(phrase):\n if not phrase:\n return False\n\n # if you have academic-sounding tags, you're academic\n sciency_words = [\n \"chemi\", \n \"scien\", \n \"bio\", \n \"econo\", \n \"omics\",\n \"sociology\",\n \"physics\", \n \"psych\", \n \"math\", \n # \"statistics\",\n \"ecolog\", \n \"genetics\",\n # \"analysis\", \n \"department\",\n \"dept of\",\n \"university\",\n \"formatics\",\n \"evolution\",\n\n \"professor\",\n \"doctoral\", \n \"phd\", \n \"postdoc\", \n \"post-doc\",\n\n # \"chemphys\", #cran tag \n # \"experimentaldesign\", \n # \"clinicaltrials\", \n # \"research\", \n # \"medicalimaging\", \n # \"differentialequations\", \n # \"pharmacokinetics\", \n # \"environmetrics\" \n ]\n\n phase_lower = phrase.lower()\n for sciency_word in sciency_words:\n if sciency_word in phase_lower:\n return True\n\n return False\n","sub_path":"models/academic.py","file_name":"academic.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"52576056","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass RegistryCreateParameters(Model):\n \"\"\"The parameters for creating a container registry.\n\n :param tags: The tags for the container registry.\n :type tags: dict[str, str]\n :param location: The location of the container registry. This cannot be\n changed after the resource is created.\n :type location: str\n :param sku: The SKU of the container registry.\n :type sku: ~azure.mgmt.containerregistry.v2017_03_01.models.Sku\n :param admin_user_enabled: The value that indicates whether the admin user\n is enabled. Default value: False .\n :type admin_user_enabled: bool\n :param storage_account: The parameters of a storage account for the\n container registry. 
If specified, the storage account must be in the same\n physical location as the container registry.\n :type storage_account:\n ~azure.mgmt.containerregistry.v2017_03_01.models.StorageAccountParameters\n \"\"\"\n\n _validation = {\n 'location': {'required': True},\n 'sku': {'required': True},\n 'storage_account': {'required': True},\n }\n\n _attribute_map = {\n 'tags': {'key': 'tags', 'type': '{str}'},\n 'location': {'key': 'location', 'type': 'str'},\n 'sku': {'key': 'sku', 'type': 'Sku'},\n 'admin_user_enabled': {'key': 'properties.adminUserEnabled', 'type': 'bool'},\n 'storage_account': {'key': 'properties.storageAccount', 'type': 'StorageAccountParameters'},\n }\n\n def __init__(self, location, sku, storage_account, tags=None, admin_user_enabled=False):\n self.tags = tags\n self.location = location\n self.sku = sku\n self.admin_user_enabled = admin_user_enabled\n self.storage_account = storage_account\n","sub_path":"azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2017_03_01/models/registry_create_parameters.py","file_name":"registry_create_parameters.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"295811946","text":"D = {}\ninFile = open('/g/steinmetz/hsun/TIFproteome/Database/KnownProtein/Saccharomyces_cerevisiae_protein.fa')\nwhile True:\n line1 = inFile.readline().strip()\n line2 = inFile.readline().strip()\n if line1:\n D[line1] = line2\n else:\n break\ninFile.close()\n\ndef protein(inF):\n inFile = open(inF)\n ouFile = open(inF + '-Protein', 'w')\n head = inFile.readline()\n ouFile.write('ProteinPos\\tProteinSeq\\tProteinType\\t' + head)\n for line in inFile:\n fields = line.split('\\t')\n pep = fields[3]\n for k in D:\n if pep in D[k]:\n info = k.split(':')\n ch = info[3]\n start = info[4]\n end = info[5]\n strand = info[6].split()[0]\n gene = info[7].split()[0]\n if strand == '-1':\n strand = '-'\n else:\n strand = '+'\n\n width = int(end) - int(start) + 1\n w = width/3.0\n ProteinType = ''\n if w == len(D[k]) + 1:\n ProteinType = 'ProteinLengthExactMatch'\n ouFile.write(':'.join([ch,strand,start,end,gene]) + '\\t' + D[k]+'\\t'+ProteinType+'\\t' + line)\n break\n inFile.close()\n ouFile.close()\n\nprotein('Yeast-Peptide-N_Terminal-Candidates')\n","sub_path":"TIFproteome/N-Terminal_MSGFPlus_updated/TIFSeq/03-protein.py","file_name":"03-protein.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"388503154","text":"# -*- coding: utf-8 -*-\nfrom openerp import http\nfrom openerp.http import request\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom openerp.addons.website.controllers.main import Website\n\n\nclass WebsiteFsoBase(Website):\n\n # Overwrite original robots controller ('/robots.txt') to use our template and because of 'noupdate' in our xml!\n @http.route()\n def robots(self, **kwargs):\n logger.info(\"Rendering alternative robots.txt from fso_base_website!\")\n cr, uid, context = request.cr, request.uid, request.context\n\n website_id = request.registry['website'].search(cr, uid, [], limit=1, context=context)\n website = request.registry['website'].browse(cr, uid, website_id[0], context=context)\n\n return request.render('fso_base_website.robots_txt_template',\n {'url_root': request.httprequest.url_root,\n 'robots_txt': website.robots_txt if website else '',\n },\n 
mimetype='text/plain')\n","sub_path":"addons-own/fso_base_website/controller/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"577860108","text":"#!/usr/bin/env python\n\nimport rospy\nimport rospkg\nimport numpy as np\n\nfrom sensor_msgs.msg import LaserScan\nfrom geometry_msgs.msg import Twist\n\ndef follow_wall(lidar_scan):\n    min_range, max_range = 0,len(lidar_scan.ranges)-1\n    \n    d_right_list, d_left_list = lidar_scan.ranges[min_range:min_range+100], lidar_scan.ranges[max_range-100:max_range]\n    d_left, d_right = np.median(d_left_list)<1, np.median(d_right_list)<1\n    \n    # bang-bang steering: turn right when the left wall is close, left when the right wall is close\n    cmd_vel_msg.angular.z = - 0.6*d_left + 0.6*d_right\n    cmd.publish(cmd_vel_msg)\n\n\nif __name__ == '__main__':\n    rospy.init_node(\"bang_controller\")\n\n    rospy.loginfo('Cave Exploration Starting')\n\n    cmd_vel_msg = Twist()\n\n    cmd_vel_msg.linear.x = 0.4\n\n    rospy.sleep(1)\n    cmd = rospy.Publisher(\"/phantomx/cmd_vel\",Twist, queue_size=5)\n    lidar_scan = rospy.Subscriber(\"/phantomx/scan\", LaserScan, follow_wall)\n\n\n    while not rospy.is_shutdown():\n        rospy.sleep(1)\n\n    rospy.loginfo('Cave Exploration Finished')\n","sub_path":"workspaceRos/src/phantomx/phantomx_control/src/bang_controller.py","file_name":"bang_controller.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"573451545","text":"# -*- coding: utf-8 -*-\n# @Time  : 2021/2/18 2:48 PM\n# @Author : NewmanZhou\n# @Desc : ==============================================\n# Life is Short I Use Python!!!                      ===\n# If this runs wrong,don't ask me,I don't know why;  ===\n# If this runs right,thank god,and I don't know why. ===\n# Maybe the answer,my friend,is blowing in the wind. 
===\n# ======================================================\n# @Project : StudySpace\n# @FileName: Xdaili.py\n# @Software: PyCharm\nimport requests\nimport urllib3\nimport sys, time, hashlib\n\n\ndef XdailiTest():\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n _version = sys.version_info\n\n is_python3 = (_version[0] == 3)\n\n orderno = \"ZF20212183665Ed5nW3\"\n secret = \"929b51ac17ac4b11b70aac963e31dd50\"\n\n ip = \"forward.xdaili.cn\"\n port = \"80\"\n\n ip_port = ip + \":\" + port\n\n timestamp = str(int(time.time()))\n string = \"\"\n string = \"orderno=\" + orderno + \",\" + \"secret=\" + secret + \",\" + \"timestamp=\" + timestamp\n\n string = string.encode()\n\n md5_string = hashlib.md5(string).hexdigest()\n sign = md5_string.upper()\n # print(sign)\n auth = \"sign=\" + sign + \"&\" + \"orderno=\" + orderno + \"&\" + \"timestamp=\" + timestamp\n\n # print(auth)\n proxy = {\"http\": \"http://\" + ip_port, \"https\": \"https://\" + ip_port}\n\n headers = {\n 'Proxy-Authorization': auth,\n 'sec-ch-ua': '\"Chromium\";v=\"88\", \"Google Chrome\";v=\"88\", \";Not A Brand\";v=\"99\"',\n 'sec-ch-ua-mobile': '?0',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36',\n 'Referer': 'https://www.dalipan.com/',\n }\n\n url = \"https://www.dalipan.com/search?keyword=康宝莱\"\n # url = \"http://www.dalipan.com/search?keyword=三一重工&page=1\"\n response = requests.get(url, headers=headers, proxies=proxy, verify=False, allow_redirects=False)\n\n print(response.text)\n\n\nif __name__ == '__main__':\n XdailiTest()\n","sub_path":"ThreatBook/Xdaili.py","file_name":"Xdaili.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"602416035","text":"from bs4 import BeautifulSoup\nimport requests\nimport datetime\n\nimport pandas as pd\ndf = pd.read_excel('d:/code/Screener.xlsx')\nsymbols = list(df['SYMBOL'])\n# print(symbols)\nprice = {}\nfrom_date = '05-Oct-2017'\nto_date = '04-Oct-2018'\nprint('Here are the returns for stocks during period %s to %s:\\n' %(from_date, to_date))\n# Fetch Nifty Price Change in given period\ntry:\n symbol = 'NIFTY'\n url = 'https://www.nseindia.com/products/dynaContent/equities/indices/historicalindices.jsp?indexType=NIFTY%2050&fromDate=' \\\n + to_date + '&toDate=' + to_date\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n table = soup.find('table')\n tr0 = table.findAll('tr')[3]\n url = 'https://www.nseindia.com/products/dynaContent/equities/indices/historicalindices.jsp?indexType=NIFTY%2050&fromDate=' \\\n + from_date + '&toDate=' + from_date\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n table = soup.find('table')\n tr1 = table.findAll('tr')[-2]\n close0 = float(tr0.findAll('td')[4].text.replace(',',''))\n close1 = float(tr1.findAll('td')[4].text.replace(',',''))\n nifty_change = round(((close0 - close1) * 100 / close1), 2)\n print('%s: (%.2f - %.2f)-> %.2f%%' %(symbol, close0, close1, nifty_change))\n beta = 1\n price.setdefault(symbol, {'CHANGE':nifty_change, 'BETA':beta})\nexcept Exception as err:\n print('ERR %s: '%err)\n\n# Fetch Stock Price Change in given period\n\nfor symbol in symbols:\n try:\n \n if symbol == 'NIFTY' or symbol == 'BANKNIFTY':\n continue\n symbol = symbol.upper()\n url = 
'https://www.nseindia.com/live_market/dynaContent/live_watch/get_quote/getHistoricalData.jsp?symbol=' \\\n + symbol.upper() + '&series=EQ&fromDate=' + from_date + '&toDate=' + to_date + '&datePeriod=undefined'\n\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n\n table = soup.find('table')\n\n tr0 = table.findAll('tr')[1]\n tr1 = table.findAll('tr')[-1]\n close0 = float(tr0.findAll('td')[7].text.replace(',',''))\n close1 = float(tr1.findAll('td')[7].text.replace(',',''))\n diff = round(((close0 - close1) * 100 / close1), 2)\n beta = round(diff/nifty_change, 2)\n print('%s: (%.2f - %.2f)-> %.2f%% => %.1f' %(symbol, close0, close1, diff, beta))\n price.setdefault(symbol, {'CHANGE':diff, 'BETA':beta})\n except Exception as err:\n print('ERR %s: '%err)\n continue\ndf = pd.DataFrame(price)\ndf.to_excel('d:\\code\\portfolio\\stocks_return_beta.xlsx')\n\n\nprint('\\n', price)\n","sub_path":"stock_return_beta.py","file_name":"stock_return_beta.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"76688549","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport json\nfrom optparse import OptionParser\n\n\nimport HiggsAnalysis.HeavyChHiggsToTauNu.tools.LandSTools as lands\n\ndef main(opts):\n bkg = lands.parseLandsMLOutput(opts.bkg)\n sbkg = lands.parseLandsMLOutput(opts.signal)\n\n content = {}\n if not opts.truncate and os.path.exists(opts.output):\n f = open(opts.output)\n content = json.load(f)\n f.close()\n\n content[opts.mass] = {\n \"background\": bkg,\n \"signal+background\": sbkg,\n }\n\n f = open(opts.output, \"w\")\n json.dump(content, f, sort_keys=True, indent=2)\n f.close() \n\n return 0\n\nif __name__ == \"__main__\":\n parser = OptionParser(usage=\"Usage: %prog [options]\")\n parser.add_option(\"-b\", dest=\"bkg\", default=None,\n help=\"File with output of background-only fit\")\n parser.add_option(\"-s\", dest=\"signal\", default=None,\n help=\"File with output of signal+background fit\")\n parser.add_option(\"-m\", dest=\"mass\", default=None,\n help=\"H+ mass point\")\n parser.add_option(\"-o\", dest=\"output\", default=None,\n help=\"Output JSON file name (by default the mass point information is updated, see --truncate\")\n parser.add_option(\"--truncate\", dest=\"truncate\", default=False, action=\"store_true\",\n help=\"Truncate the output JSON file\")\n\n (opts, args) = parser.parse_args()\n\n if opts.bkg is None:\n parser.error(\"-b is missing\")\n if opts.signal is None:\n parser.error(\"-s is missing\")\n if opts.mass is None:\n parser.error(\"-m is missing\")\n if opts.output is None:\n parser.error(\"-o is missing\")\n\n sys.exit(main(opts))\n","sub_path":"HeavyChHiggsToTauNu_REMOVEME/scripts/landsReadMLFit.py","file_name":"landsReadMLFit.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"644199729","text":"'''\n Author: Chen Lin\n Email: chen.lin@emory.edu\n Date created: 2020/2/4 \n Python Version: 3.6\n'''\n\n'''\n Author: Chen Lin\n Email: chen.lin@emory.edu\n Date created: 2020/1/31 \n Python Version: 3.6\n'''\n\nimport sys\nsys.path.append('.')\nfrom airpolnowcast.models.lstm import LSTMModel\n\nimport datetime\n\nimport numpy as np\nimport os\nfrom tensorflow.keras.callbacks import EarlyStopping, TensorBoard\nfrom tensorflow.keras.initializers import he_normal\nfrom tensorflow.keras.layers import Dense, Input, concatenate, Dropout, 
LSTM\nfrom tensorflow.keras.models import Model\n\nn_tasks = 10\n\n\nclass MTLSTM(LSTMModel):\n\n def build_sensor_branch(self, sensor_input_list):\n neuron_num = 128\n\n lstm = LSTM(neuron_num, dropout=0.5, recurrent_dropout=0.5, kernel_initializer=he_normal(seed=1))\n outs = []\n for i, input_i in enumerate(sensor_input_list):\n outs.append(lstm(input_i))\n\n dense = Dense(64, activation='relu', kernel_initializer=he_normal(seed=1))\n outs_2 = []\n for i, out in enumerate(outs):\n outs_2.append(dense(out))\n\n dropout = Dropout(0.5)\n outs_3 = []\n for i, out in enumerate(outs_2):\n outs_3.append(dropout(out))\n return outs_3\n\n def build(self):\n\n sensor_input_list = [Input(shape=(self.seq_length, self.embedding_dim)) for _ in range(n_tasks)]\n\n sensor_output_list = self.build_sensor_branch(sensor_input_list)\n\n outputs = []\n for i in range(n_tasks):\n task_i_predictors = sensor_output_list[i]\n task_i_predictions = Dense(1, activation='sigmoid', kernel_initializer=he_normal(seed=1))(task_i_predictors)\n outputs.append(task_i_predictions)\n\n model = Model(inputs=sensor_input_list, outputs=outputs)\n return model\n\n def fit(self, x_train, x_valid, y_train, y_valid):\n l2 = y_valid.shape[0]\n l = y_train.shape[0]\n\n def make_tl(x):\n l = x.shape[0]\n return [x[int(i):int(i + l / n_tasks)] for i in range(0, int(l / n_tasks) * 10, int(l / n_tasks))]\n\n x_train_list = make_tl(x_train)\n x_valid_list = make_tl(x_valid)\n y_train_list = [y_train[int(i):int(i + l / n_tasks)] for i in range(0, int(l / n_tasks) * 10, int(l / n_tasks))]\n y_valid_list = [y_valid[int(i):int(i + l2 / n_tasks)] for i in\n range(0, int(l2 / n_tasks) * 10, int(l2 / n_tasks))]\n\n # Rest of function assumes input is in tuple of lists format:\n # ([..., task_i_search_input, ...], [...,task_i_sensor_input,...])\n def arr_concate(train_arr, valid_arr):\n return np.concatenate([train_arr, valid_arr], axis=0)\n\n x_train_valid_list = []\n y_train_valid_list = []\n for i in range(len(x_train_list)):\n x_train_valid_list.append(arr_concate(x_train_list[i], x_valid_list[i]))\n y_train_valid_list.append(arr_concate(y_train_list[i], y_valid_list[i]))\n\n # patient early stopping\n es = EarlyStopping(monitor='val_loss', mode='min', verbose=1,\n patience=self.patience)\n\n logdir = os.path.join(self.log_dir, datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n tb = TensorBoard(log_dir=logdir)\n\n # save initial weights\n tmp_model_path = os.path.join('models/interim', str(os.getpid()) + 'model.h5')\n self.model.save_weights(tmp_model_path)\n\n # calculate the weights\n class_weights = []\n for y in y_train_valid_list:\n (_, data_count) = np.unique(y, return_counts=True)\n class_weights.append({0: sum(data_count) / data_count[0], 1: sum(data_count) / data_count[1]})\n\n class_weight = {k: class_weights[i] for i, k in enumerate(\n ['dense_1', 'dense_2', 'dense_3', 'dense_4', 'dense_5', 'dense_6', 'dense_7', 'dense_8', 'dense_9',\n 'dense_10'])}\n print(class_weight)\n max_epochs = 1000\n min_epochs = 15\n\n history = self.model.fit(x_train_list, y_train_list, batch_size=self.batch_size,\n epochs=max_epochs,\n validation_data=(x_valid_list, y_valid_list),\n class_weight=class_weight,\n verbose=1,\n callbacks=[tb, es], shuffle=True)\n\n epochs = max(len(history.epoch) - self.patience, min_epochs)\n # restore initial weights\n self.model.load_weights(tmp_model_path)\n\n self.model.fit(x_train_valid_list, y_train_valid_list,\n batch_size=self.batch_size,\n epochs=epochs, class_weight=class_weight,\n verbose=1)\n # remove model 
file\n os.remove(tmp_model_path)\n\n def predict(self, x_test):\n def make_tl(x):\n l = x.shape[0]\n return [x[int(i):int(i + l / n_tasks)] for i in range(0, int(l / n_tasks) * 10, int(l / n_tasks))]\n\n x_test_list = make_tl(x_test)\n pred_score_list = self.model.predict(x_test_list)\n pred_class_list = [[0 if i < 0.5 else 1 for i in pred_score] for pred_score in pred_score_list]\n\n return pred_class_list, pred_score_list\n","sub_path":"airpolnowcast/models/multitask_lstm.py","file_name":"multitask_lstm.py","file_ext":"py","file_size_in_byte":5272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"620631739","text":"#!/usr/bin/python\n\n\"\"\"\nmenu with message view and physical steering\n\"\"\"\n\nfrom rpilcdmenu import *\nfrom rpilcdmenu.items import *\n\nimport digitalio\nimport board\nimport adafruit_mcp3xxx.mcp3008 as MCP\nfrom adafruit_mcp3xxx.analog_in import AnalogIn\nfrom RPi import GPIO\nimport adafruit_bitbangio as bitbangio\nimport time\n\n\ndef main():\n # create menu as in example3\n menu = RpiLCDMenu(7, 8, [25, 24, 23, 15])\n\n menu.append_item(\n MessageItem('message item',\n 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut '\n 'labore et dolore magna aliqua.',\n menu,\n True)\n )\n\n menu.start()\n\n # setup the rotary encoder pins\n rot_clk = 17\n rot_dt = 27\n # set GPIO pins\n GPIO.setmode(GPIO.BCM)\n # set to GPIO.PUD_UP as the TEAC panel uses an encoder that is grounded\n GPIO.setup(rot_clk, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.setup(rot_dt, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n # set default counter variable\n counter = 0\n clkLastState = GPIO.input(rot_clk)\n\n # mcp3008 button reader setup\n # create software spi\n spi = bitbangio.SPI(board.D11, MISO=board.D9, MOSI=board.D10)\n # create the cs (chip select)\n cs = digitalio.DigitalInOut(board.D22)\n # create the mcp object\n mcp = MCP.MCP3008(spi, cs)\n # create analog input channels on pins 6 and 7 of the mcp3008\n chan1 = AnalogIn(mcp, MCP.P6)\n chan2 = AnalogIn(mcp, MCP.P7)\n\n # main loop\n while True:\n # read button states\n if 0 <= chan1.value <= 1000:\n print(\"Timer button pressed\" + str(chan1.value))\n time.sleep(0.5)\n if 5900 <= chan1.value <= 7000:\n print(\"Time Adj button pressed\" + str(chan1.value))\n time.sleep(0.5)\n if 12000 <= chan1.value <= 13000:\n print(\"Daily button pressed\" + str(chan1.value))\n time.sleep(0.5)\n if 0 <= chan2.value <= 1000:\n print(\"Power button pressed\" + str(chan2.value))\n time.sleep(0.5)\n if 5800 <= chan2.value <= 6100:\n print(\"Band button pressed \" + str(chan2.value))\n time.sleep(0.5)\n if 13000 <= chan2.value <= 14000:\n print(\"Function button pressed\" + str(chan2.value))\n time.sleep(0.5)\n if 26000 <= chan2.value <= 27000:\n print(\"Enter button pressed\" + str(chan2.value))\n menu = menu.processEnter()\n time.sleep(0.5)\n if 19000 <= chan2.value <= 21000:\n print(\"Info button pressed\" + str(chan2.value))\n time.sleep(0.5)\n if 39000 <= chan2.value <= 41000:\n print(\"Auto Tuning button pressed\" + str(chan2.value))\n time.sleep(0.5)\n if 33000 <= chan2.value <= 34000:\n print(\"Memory button pressed\" + str(chan2.value))\n time.sleep(0.5)\n if 44000 <= chan2.value <= 46000:\n print(\"Dimmer button pressed\" + str(chan2.value))\n time.sleep(0.5)\n # buttons depressed\n\n # read rotary encoder states\n clkState = GPIO.input(rot_clk)\n dtState = GPIO.input(rot_dt)\n if clkState != clkLastState:\n if dtState != clkState:\n counter += 1\n menu = 
menu.processUp()\n else:\n counter -= 1\n menu = menu.processDown()\n print(counter)\n clkLastState = clkState\n\ndef exit_sub_menu(submenu):\n return submenu.exit()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"examples/rpi-lcd-menu-scroller.py","file_name":"rpi-lcd-menu-scroller.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"469793751","text":"import sys\nsys.path.append('..')\nimport numpy as np\nfrom flightSim.envs import transfer_function_coef as TF\nfrom flightSim.envs import aerosonde_parameters as MAV\nimport math\n\ngravity = MAV.gravity\nrho = MAV.rho\nsigma = 0.000001 \nVa0 = MAV.Va0\n\nzeta = 0.707\n\n# Tune throttle first by setting everything to 0 in main\n\n#----------roll loop-------------\nw_phi = np.sqrt(TF.a_phi2) # 6.\ndelta_a_max, error_phi_max = np.radians(30), 0.2 # np.radians(30) # Arbitrary\nroll_kp = delta_a_max / error_phi_max\nroll_kd = (2.0 * zeta * w_phi - TF.a_phi1) / TF.a_phi2\n\n#----------course loop-------------\nbandwidth_separation_chi = 1 \nVg = Va0 \nw_chi = 0.5 # (1.0 / bandwidth_separation_chi) * w_phi # 0.5\ncourse_kp = (2.5 * zeta * w_chi * Vg) / gravity \ncourse_ki = 0.001 # (w_chi**2) * Vg / gravity # 0.001\n\n#----------sideslip loop-------------\ndelta_r_max, error_beta_max = np.radians(30), 1. # Chosen arbitrarily\nsideslip_kp = delta_r_max / error_beta_max\nw_beta = (TF.a_beta1 + TF.a_beta2 * sideslip_kp) / (2.0 * zeta)\nsideslip_ki = (w_beta**2) / TF.a_beta2 #1\n\n#----------yaw damper-------------\nyaw_damper_tau_r = 0.05\nyaw_damper_kp = 0.05\n\n#----------pitch loop-------------\nerror_theta_max = np.radians(30) # Chosen arbitrarily\ndelta_e_max = np.radians(30) # Chosen to be 30 \npitch_kp = np.sign(TF.a_theta3) * delta_e_max / error_theta_max \nw_theta = 0.2 # math.sqrt(TF.a_theta2 + pitch_kp * TF.a_theta3) # 0.2\npitch_kd = (2.0 * zeta * w_theta - TF.a_theta1) / (TF.a_theta3)\nK_theta_DC = (pitch_kp * TF.a_theta3) / (TF.a_theta2 + pitch_kp * TF.a_theta3)\n\n#----------altitude loop-------------\nbandwidth_separation_altitude = 3.\nVa = Va0 \nw_altitude = 0.1 # (1.0/bandwidth_separation_altitude) * w_theta # 0.3\naltitude_kp = (2.0 * zeta * w_altitude) / (K_theta_DC * Va)\naltitude_ki = (w_altitude**2) / (K_theta_DC * Va)\naltitude_zone = 20.0 ######\n\n#---------airspeed hold using throttle---------------\nw_v = 2.0 # np.sqrt(TF.a_V2) # 0.3\nzeta_throttle = 0.707\nairspeed_throttle_ki = (w_v**2) / TF.a_V2\nairspeed_throttle_kp = (2. 
* zeta_throttle * w_v - TF.a_V1 ) / TF.a_V2\n","sub_path":"flightSim/flightSim/envs/control_parameters.py","file_name":"control_parameters.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"618659063","text":"import tensorflow as tf\nimport numpy\nimport valohai\n\ndef logMetadata(epoch, logs):\n with valohai.metadata.logger() as logger:\n logger.log(\"epoch\", epoch)\n logger.log(\"accuracy\", logs['accuracy'])\n logger.log(\"loss\", logs['loss'])\n\nmy_parameters = {\n 'epoch': 5\n}\n\nmy_inputs = {\n 'mnist': 's3://onboard-sample/tf-sample/mnist.npz'\n}\n\nvalohai.prepare(\n step=\"train-model\",\n image='tensorflow/tensorflow:2.4.1',\n default_parameters=my_parameters,\n default_inputs=my_inputs\n)\n\nmnist_file_path = valohai.inputs('mnist').path()\n\nwith numpy.load(mnist_file_path, allow_pickle=True) as f:\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10)\n])\n\npredictions = model(x_train[:1]).numpy()\npredictions\n\ntf.nn.softmax(predictions).numpy()\n\nloss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n\nloss_fn(y_train[:1], predictions).numpy()\n\nmodel.compile(optimizer='adam',\n loss=loss_fn,\n metrics=['accuracy'])\n\nmetadataCallback = tf.keras.callbacks.LambdaCallback(on_epoch_end=logMetadata)\nmodel.fit(x_train, y_train, epochs=valohai.parameters('epoch').value, callbacks=[metadataCallback])\n\nsave_path = valohai.outputs().path('model.h5')\nmodel.save(save_path)","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"199878172","text":"'''\nSentinel 2 Geomedian generation using the DEA\ntiles processing method.\nTakes a list of tiles in TILELIST.\n'''\n\n#What can be deleted here?\n#import warnings; warnings.simplefilter('ignore')\nimport datacube\n#import fnmatch\nimport os\nimport pandas as pd\nimport geopandas as gpd\nfrom odc.algo import to_f32, xr_geomedian, int_geomedian\n\n#get the DEA version of the plotting functions\nimport sys\n#sys.path.append(os.path.abspath('/g/data/r78/DPIPWE_lm/repos/dea-notebooks/Scripts'))\n#from dea_datahandling import load_ard\nsys.path.append(os.path.abspath('/g/data/r78/DPIPWE_lm/repos/dea-notebooks/Tools'))\nfrom dea_tools.datahandling import load_ard\nfrom datacube_stats.statistics import GeoMedian\nfrom datacube.utils.cog import write_cog\nfrom datacube.drivers.netcdf import write_dataset_to_netcdf\nimport xarray as xr\n\n#Specify output directory\noutputdir = '/g/data/r78/DPIPWE_lm/output_data/'\n#outputdir = './'\nif not os.path.exists(outputdir):\n print(\"output directory doesn't exist\")\n exit()\n \n# Connect to datacube containing Sentinel 2 data\ndc = datacube.Datacube(app='load_ard_and_geomedian')\n\n#########################################################\n\nsubset = True\nlabel = None\nalbers = gpd.read_file('/g/data/r78/DPIPWE_lm/test_burn_mapping/reference_data/Albers_Australia_Coast_Islands_Reefs.shp')\n\nif len(sys.argv)==2:\n label = sys.argv[1]\nelif len(sys.argv)==3:\n label = \"{},{}\".format(sys.argv[1], sys.argv[2])\n\nif label:\n index = 
albers[albers['label']==label].index[0]\n x = (albers.loc[index]['X_MIN'], albers.loc[index]['X_MAX'])\n y = (albers.loc[index]['Y_MIN'], albers.loc[index]['Y_MAX'])\n #output_filename = outputdir + '/month_gm_2016-2017_'+'_'.join(label.split(','))+'.nc'\n output_filename = outputdir + '/month_gm_2016-2017_'+'_'.join(label.split(','))\n print(\"Working on tile {}...\".format(label))\nelse:\n x, y = (1385000.0, 1375000.0), (-4570000.0, -4580000.0)\n if subset:\n #output_filename = 'S2_ARD_gm_2021_test_subset.nc'\n output_filename = 'S2_ARD_gm_2021_test_subset'\n else:\n #output_filename = 'S2_ARD_gm_2021_test_one.nc'\n output_filename = 'S2_ARD_gm_2021_test_one'\n\nif os.path.exists(output_filename):\n print(\"output file already exists.\")\n exit()\n\n#####################################################\n\ndef load_ds(x, y):\n query = {'x': x,\n 'y': y,\n 'crs': 'EPSG:3577',\n 'time': ('2021'),\n 'measurements': ['nbart_blue', 'nbart_green', 'nbart_red', 'nbart_nir_1'], # Can add nbart_swir2 for true flase colour but change res to 20\n 'resolution': (-20, 20),\n 'group_by': 'solar_day',\n 'output_crs': 'EPSG:3577'}\n \n # Load available data from both Sentinel 2 satellites\n ds = load_ard(dc=dc,\n products=['s2a_ard_granule', 's2b_ard_granule'],\n dask_chunks={'time':1},\n **query)\n\n '''\n # function to return months of interest (cm = composite month)\n def is_cm(month):\n return (month >= 11) | (month <= 4)\n\n # extract just the months of interest\n ds_cm = ds.sel(time=is_cm(ds['time.month']))\n\n ds_cm['time.month'] #take a look at what months we have...\n '''\n\n '''\n Alternate method for extracting months via list\n c_months = [11,12,1,2,3]\n ds_cm = ds.sel(time=(ds['time.month'].isin(c_months)).dropna(dim='time'))\n '''\n\n '''\n # Compute geomedian here is necessary - either for dataset or subset months\n ds_gm = GeoMedian().compute(ds)\n return ds_gm.copy()\n '''\n \n #geomedian = int_geomedian(ds)\n #geomedian = geomedian.compute()\n #return geomedian\n \n # compute geomedian\n ds_gm = GeoMedian().compute(ds)\n return ds_gm.copy()\n \n#####################################################\nxm, ym = (x[0]+x[1])/2, (y[0]+y[1])/2\nx1, x2 = (x[0], xm), (xm, x[1])\ny1, y2 = (y[0], ym), (ym, y[1])\nif subset:\n out1 = load_ds(x1, y)\n out2 = load_ds(x2, y)\n out = xr.concat([out1, out2], dim='x')\nelse:\n out = load_ds(x, y)\n\n \n''' \n# Output to netcdf\ndatacube.storage.storage.write_dataset_to_netcdf(out, output_filename)\n'''\n\n# Here we can export the geomedian\n# for COG we need an array not a dataset\nout_da = out.to_array()\n\n# Write multi-band GeoTIFF to a location\nwrite_cog(geo_im=out_da,\n fname=output_filename,\n overwrite=True)\n","sub_path":"NCI_scripts/NCI_S2_ARD_Geomedian_tiles.py","file_name":"NCI_S2_ARD_Geomedian_tiles.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"636249997","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function\n\n__all__ = [\"build\"]\n\nimport os\nimport glob\nimport h5py\nimport fitsio\nimport logging\nimport numpy as np\nfrom scipy.linalg import cho_factor, cho_solve\n\nfrom ..pcp import pcp\n\n\ndef build(lc_pattern, outfile, sigma_maxiter=50, sigma_clip=7.0,\n pcp_mu=1e-2, pcp_maxiter=10):\n # Loop over the files and load the aperture photometry.\n print(\"Loading light curves...\")\n lcs = []\n for fn in glob.iglob(lc_pattern):\n hdr = fitsio.read_header(fn)\n\n # Skip custom targets.\n if hdr[\"KEPLERID\"] 
< 201000000:\n continue\n if not os.path.exists(fn):\n logging.warn(\"{0} doesn't exist\".format(fn))\n continue\n try:\n aps = fitsio.read(fn, 2)\n except ValueError:\n logging.warn(\"{0} is corrupted\".format(fn))\n continue\n i = np.argmin(np.abs(aps[\"cdpp6\"]))\n data = fitsio.read(fn)\n lcs.append(data[\"flux\"][:, i])\n print(\"Found {0} light curves...\".format(len(lcs)))\n\n # Sigma clip then normalize the data.\n X = np.empty((len(lcs), len(lcs[0])), dtype=np.float64)\n x = np.arange(len(lcs[0]))\n for i, l in enumerate(lcs):\n m = np.isfinite(l)\n m0 = np.ones_like(m)\n m0[~m] = False\n for j in range(sigma_maxiter):\n mu = np.mean(l[m & m0])\n std = np.std(l[m & m0])\n count = m0.sum()\n m0[m] = np.abs(l[m] - mu) < sigma_clip * std\n if count == m0.sum():\n break\n mu = np.mean(l[m0])\n std = np.std(l[m0])\n X[i] = (l - mu) / std\n X[i, ~m0] = np.nan\n\n # Run robust PCA.\n print(\"Running PCA...\")\n L, S, (u, s, v) = pcp(X, verbose=True, maxiter=pcp_maxiter, mu=pcp_mu,\n svd_method=\"exact\")\n\n # Fit the light curves to build an empirical prior.\n print(\"Generating empirical prior...\")\n factor = cho_factor(np.dot(v, v.T))\n weights = np.empty((len(lcs), len(v)))\n for i, lc in enumerate(lcs):\n m = np.isfinite(lc)\n lc = 1e3 * (lc / np.median(lc[m]) - 1.0)\n lc[~m] = 0.0\n weights[i] = cho_solve(factor, np.dot(v, lc))\n\n # Normalize the basis so that it has a unit Gaussian prior in PPT.\n basis = np.array(v)\n basis *= np.sqrt(np.median(weights**2, axis=0))[:, None]\n\n # Save the basis.\n print(\"Saving to {0}...\".format(outfile))\n with h5py.File(outfile, \"w\") as f:\n f.create_dataset(\"basis\", data=basis, compression=\"gzip\")\n # f.create_dataset(\"power\", data=s, compression=\"gzip\")\n\n\"lightcurves/c1/*/*/*.fits\"\n\"lightcurves/c1-basis.h5\"\n","sub_path":"ketu/k2/basis.py","file_name":"basis.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"353883143","text":"import torch.multiprocessing\ntorch.multiprocessing.set_sharing_strategy('file_system')\n\nimport csv\nimport pickle\nimport PIL\nimport pprint\nimport random\nimport argparse\nimport os,sys,inspect\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom glob import glob\nfrom os.path import exists, join\nfrom tqdm import tqdm as tqdm_base\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torchvision, torchvision.transforms\nimport skimage.transform\nimport sklearn.metrics\nimport sklearn, sklearn.model_selection\nfrom sklearn.metrics import roc_auc_score, accuracy_score\n\nimport torchxrayvision as xrv\n\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\nparser = argparse.ArgumentParser(description='X-RAY Pathology Detection')\nparser.add_argument('--seed', type=int, default=0, help='')\nparser.add_argument('--dataset_dir', type=str, default=\"./data/\")\nparser.add_argument('--dataset_name', type=str, default=\"nih\")\n\n### Data loader\nparser.add_argument('--cuda', type=bool, default=True, help='')\nparser.add_argument('--batch_size', type=int, default=64, help='')\nparser.add_argument('--shuffle', type=bool, default=False, help='')\nparser.add_argument('--num_workers', type=int, default=0, help='')\nparser.add_argument('--num_batches', type=int, default=430, help='')\n\n### Data Augmentation \nparser.add_argument('--data_aug_rot', type=int, default=45, 
help='')\nparser.add_argument('--data_aug_trans', type=float, default=0.15, help='')\nparser.add_argument('--data_aug_scale', type=float, default=0.15, help='')\n\n\ncfg = parser.parse_args()\nprint(cfg)\n\ndef tqdm(*args, **kwargs):\n if hasattr(tqdm_base, '_instances'):\n for instance in list(tqdm_base._instances):\n tqdm_base._decr_instances(instance)\n return tqdm_base(*args, **kwargs)\n\ndevice = 'cuda' if cfg.cuda else 'cpu'\nif not torch.cuda.is_available() and cfg.cuda:\n device = 'cpu'\n print(\"WARNING: cuda was requested but is not available, using cpu instead.\")\nprint(f'Using device: {device}')\n\n\ntransforms = torchvision.transforms.Compose([xrv.datasets.XRayCenterCrop(), xrv.datasets.XRayResizer(112)])\n\nif \"nih\" in cfg.dataset_name:\n ### Load NIH Dataset ### \n NIH_dataset = xrv.datasets.NIH_Dataset(\n imgpath=cfg.dataset_dir + \"/images-224-NIH\", \n csvpath=cfg.dataset_dir + \"/Data_Entry_2017_v2020.csv.gz\",\n bbox_list_path=cfg.dataset_dir + \"/BBox_List_2017.csv.gz\",\n transform=transforms, data_aug=None, unique_patients=False)\n xrv.datasets.relabel_dataset(xrv.datasets.default_pathologies, NIH_dataset)\n test_data = NIH_dataset\n\nif \"mc\" in cfg.dataset_name:\n # ### Load MIMIC_CH Dataset ###\n MIMIC_CH_dataset = xrv.datasets.MIMIC_Dataset(\n imgpath=cfg.dataset_dir + \"/images-224-MIMIC/files\",\n csvpath=cfg.dataset_dir + \"/MIMICCXR-2.0/mimic-cxr-2.0.0-chexpert.csv.gz\",\n metacsvpath=cfg.dataset_dir + \"/MIMICCXR-2.0/mimic-cxr-2.0.0-metadata.csv.gz\",\n transform=transforms, data_aug=None, unique_patients=False)\n xrv.datasets.relabel_dataset(xrv.datasets.default_pathologies, MIMIC_CH_dataset)\n test_data = MIMIC_CH_dataset \n\nif \"cx\" in cfg.dataset_name:\n ## Load CHEXPERT Dataset ###\n CHEX_dataset = xrv.datasets.CheX_Dataset(\n imgpath=cfg.dataset_dir + \"/CheXpert-v1.0-small\",\n csvpath=cfg.dataset_dir + \"/CheXpert-v1.0-small/train.csv\",\n transform=transforms, data_aug=None, unique_patients=False)\n xrv.datasets.relabel_dataset(xrv.datasets.default_pathologies, CHEX_dataset)\n test_data = CHEX_dataset\n\nif \"pc\" in cfg.dataset_name:\n ### Load PADCHEST Dataset ###\n PC_dataset = xrv.datasets.PC_Dataset(\n imgpath=cfg.dataset_dir + \"/PC/images-224\",\n csvpath=cfg.dataset_dir + \"/PC/PADCHEST_chest_x_ray_images_labels_160K_01.02.19.csv\",\n transform=transforms, data_aug=None, unique_patients=False)\n xrv.datasets.relabel_dataset(xrv.datasets.default_pathologies, PC_dataset)\n test_data = PC_dataset\n\nif \"gg\" in cfg.dataset_name:\n ### Load GOOGLE Dataset ###\n GOOGLE_dataset = xrv.datasets.NIH_Google_Dataset(\n imgpath=cfg.dataset_dir + \"/images-224-NIH\",\n csvpath=cfg.dataset_dir + \"/google2019_nih-chest-xray-labels.csv.gz\",\n transform=transforms, data_aug=None\n )\n xrv.datasets.default_pathologies = ['Pneumothorax', 'Fracture']\n xrv.datasets.relabel_dataset(xrv.datasets.default_pathologies, GOOGLE_dataset)\n test_data = GOOGLE_dataset\n\nif \"op\" in cfg.dataset_name:\n ### Load OPENI Dataset ###\n OPENI_dataset = xrv.datasets.Openi_Dataset(\n imgpath=cfg.dataset_dir + \"/images-openi/\",\n xmlpath=cfg.dataset_dir + \"/NLMCXR_reports.tgz\", \n dicomcsv_path=cfg.dataset_dir + \"/nlmcxr_dicom_metadata.csv.gz\",\n tsnepacsv_path=cfg.dataset_dir + \"/nlmcxr_tsne_pa.csv.gz\",\n transform=transforms, data_aug=None\n )\n xrv.datasets.default_pathologies = ['Effusion', 'Cardiomegaly', 'Edema']\n xrv.datasets.relabel_dataset(xrv.datasets.default_pathologies, OPENI_dataset)\n test_data = OPENI_dataset\n\nif \"rs\" in cfg.dataset_name: \n 
### Load RSNA Dataset ###\n RSNA_dataset = xrv.datasets.RSNA_Pneumonia_Dataset(\n imgpath=cfg.dataset_dir + \"/kaggle-pneumonia-jpg/stage_2_train_images_jpg\",\n csvpath=cfg.dataset_dir + \"/kaggle-pneumonia-jpg/stage_2_train_labels.csv\",\n dicomcsvpath=cfg.dataset_dir + \"/kaggle_stage_2_train_images_dicom_headers.csv.gz\",\n transform=transforms, data_aug=None, unique_patients=False\n )\n xrv.datasets.default_pathologies = ['Lung Opacity', 'Pneumonia']\n xrv.datasets.relabel_dataset(xrv.datasets.default_pathologies, RSNA_dataset)\n test_data = RSNA_dataset\n\nprint(f\"Common pathologies among all train and validation datasets: {xrv.datasets.default_pathologies}\")\n \nnp.random.seed(cfg.seed)\nrandom.seed(cfg.seed)\ntorch.manual_seed(cfg.seed)\n\nif cfg.cuda:\n torch.cuda.manual_seed_all(cfg.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n \ntest_loader = DataLoader(test_data,\n batch_size=cfg.batch_size,\n shuffle=False,\n num_workers=cfg.num_workers,\n pin_memory=True,\n drop_last=True)\n\n###################################### Test ######################################\ndef inference(name, model, device, data_loader, criterion, limit=None):\n model.eval()\n\n avg_loss = []\n task_outputs={}\n task_targets={}\n for task in range(data_loader.dataset[0][\"lab\"].shape[0]):\n task_outputs[task] = []\n task_targets[task] = []\n \n with torch.no_grad():\n t = tqdm(data_loader)\n for batch_idx, samples in enumerate(t):\n\n if limit and (batch_idx >= limit):\n print(\"breaking out\")\n break\n \n images = samples[\"img\"].to(device)\n targets = samples[\"lab\"].to(device)\n\n outputs = model(images)\n \n loss = torch.zeros(1).to(device).double()\n for task in range(targets.shape[1]):\n task_output = outputs[:,task]\n task_target = targets[:,task]\n mask = ~torch.isnan(task_target)\n task_output = task_output[mask]\n task_target = task_target[mask]\n if len(task_target) > 0:\n loss += criterion(task_output.double(), task_target.double())\n \n task_outputs[task].append(task_output.detach().cpu().numpy())\n task_targets[task].append(task_target.detach().cpu().numpy())\n\n loss = loss.sum()\n \n avg_loss.append(loss.detach().cpu().numpy())\n \n for task in range(len(task_targets)):\n task_outputs[task] = np.concatenate(task_outputs[task])\n task_targets[task] = np.concatenate(task_targets[task])\n \n task_aucs = []\n for task in range(len(task_targets)):\n if len(np.unique(task_targets[task]))> 1:\n task_auc = sklearn.metrics.roc_auc_score(task_targets[task], task_outputs[task])\n task_aucs.append(task_auc)\n else:\n task_aucs.append(np.nan)\n\n task_aucs = np.asarray(task_aucs)\n auc = np.mean(task_aucs[~np.isnan(task_aucs)])\n print(f'{name} - Avg AUC = {auc:4.4f}')\n\n return auc, np.mean(avg_loss), task_aucs\n\nmodel = xrv.models.DenseNet(weights=\"all\")\nmodel = model.to(device)\n\ncriterion = torch.nn.BCEWithLogitsLoss()\n\ntest_auc, test_loss, task_aucs = inference(name='Test',\n model=model,\n device=device,\n data_loader=test_loader,\n criterion=criterion,\n limit=cfg.num_batches//2)\n\nprint(f\"Average AUC for all pathologies {test_auc:4.4f}\")\nprint(f\"Test loss: {test_loss:4.4f}\") \nprint(f\"AUC for each task {[round(x, 4) for x in task_aucs]}\")\n\n","sub_path":"xrv_test.py","file_name":"xrv_test.py","file_ext":"py","file_size_in_byte":9270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"119941783","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ 
]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n# %matplotlib notebook\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot') # ggplot seaborn-poster\n# basic handling\nimport os\nimport glob\nimport pickle\nimport h5py\nimport numpy as np\nimport sklearn\n# audio\nimport librosa\nimport librosa.display\nimport IPython.display\n\nget_ipython().run_line_magic('load_ext', 'autoreload')\nget_ipython().run_line_magic('autoreload', '2')\n\nprint(os.getcwd())\n\n\n# # Prepare\n\n# In[ ]:\n\n\nimport sys\nsys.path.append('..')\nfrom speechemotion.mlcode.helper_functions import *\n\n\n# In[ ]:\n\n\nfrom speechemotion.mlcode.merge_feature import merge_all\n\nproj_root_path = '../'\n\ncsv_label_path = proj_root_path + 'data/emodb/datalist.csv'\n\nfrom speechemotion.mlcode.data_manager import MLDataSet\n\nCLASS_COL_NAME = 'emotion_en'\nCLASS_NAMES=(\"neutral\", \"angry\", \"happy\", \"sad\", \"afraid\", \"boring\", \"disgust\")\n\nfile_path = '../fusion/tmp_merged.csv'\nFE_file_path = '../fusion/temp_data_after_FE.csv'\nser_datasets = MLDataSet(file_path)\nser_datasets.feature_engineering(class_col_name=CLASS_COL_NAME, class_namelist=CLASS_NAMES, drop_cols=None)\nser_datasets.feature_filter(feature_regex='^%s_*' % 'CPE16')\nser_datasets.write_tmp_df(FE_file_path)\nprint()\nser_datasets.df.iloc[:, 0:16].describe()\n\n\n# In[ ]:\n\n\nimport os\nfrom speechemotion.mlcode.data_splitter import KFoldSplitter\ndata_splitter = KFoldSplitter()\n\n\n# In[ ]:\n\n\n\n\n\n# # Deep Learning Dataset\n\n# In[ ]:\n\n\nget_ipython().system('ls ../data/emodb/')\nget_ipython().system('ls ../fusion/')\n\n\n# In[ ]:\n\n\nfrom speechemotion.dlcode.dl_data_manager import DLDataSet\nDL_FILE_PATH = '../data/emodb/acoustic_emnet.hdf5'\ndl_dataset = DLDataSet(DL_FILE_PATH, FE_file_path,len(CLASS_NAMES))\n\n\n# In[ ]:\n\n\ndl_dataset.get_input_shape()\n\n\n# In[ ]:\n\n\nshape_stat = dl_dataset.describe_data()\n\n\n# # \n\n# In[ ]:\n\n\nimport h5py\ndef length_of_sentences(shape_stat):\n global dl_dataset\n dl_dataset.get_input_shape()\n counts, bins, patch = plt.hist(shape_stat[:, 0]) # , bins=[50 * i for i in range(10)]\n for indx in range(len(counts)):\n plt.text(bins[indx], counts[indx], '%d'%counts[indx])\n plt.title('Length of Sentence')\n plt.show()\n # shape_stat[-40:, 0]\n\n max_length_value = np.max(shape_stat[:, 0])\n max_value_indexs = np.where(shape_stat[:, 0] == max_length_value)\n print('Max length is:', max_length_value, '\\tCorresponding indexs:', max_value_indexs)\n# with h5py.File(DL_FILE_PATH, \"r\") as feat_clps:\n# print('Clip_id:', list(feat_clps.keys())[max_value_indexs[0][0]])\n# for indx in range(shape_stat.shape[0]):\n# if shape_stat[indx, 0] >= 3000:\n# print(list(feat_clps.keys())[indx], end=' ')\n \nlength_of_sentences(shape_stat)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\nX_train, X_test, Y_train, Y_test, info_dict = dl_dataset.get_data_scaled(1998, 2, normlize=True, data_splitter=data_splitter)\n# 计算方差和均值不会消耗太多内存,载入数据集X到内存约花费14G空间\nprint('-> X shape:', X_train.shape, X_test.shape)\nprint('-> Y shape:', Y_train.shape, Y_test.shape)\nprint(info_dict.keys())\n\n\n# In[ ]:\n\n\n# 测试extract_feature是否正常\n\ndef display_feature(x_i, figsize=(10,6), vmin=-10, vmax=10):\n print('x_i shape:', x_i.shape)\n plt.figure(figsize=figsize)\n librosa.display.specshow(x_i[:,:].T, sr=100, hop_length=1, x_axis='time', \n cmap='viridis', vmin=vmin, vmax=vmax)\n plt.colorbar()\n plt.title('Feature')\n plt.show()\n plt.tight_layout()\n\ntest_no = 31\nx_i = 
X_train[test_no]\nprint(info_dict['train_index'][test_no])\ndisplay_feature(x_i)\nprint ('x_i var:', x_i.var(axis=0))\nprint ('x_i mean:', x_i.mean(axis=0))\n\n\n# In[ ]:\n\n\n# x_i_x = np.linspace(0, len(x_i)/100.0, num=len(x_i))\n# plt.plot(x_i_x, x_i[:, 0], x_i_x, x_i[:, 28]+5)\n# plt.show()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\ndel X_train, X_test\n\n\n# # DeepLearning Models\n\n# In[ ]:\n\n\nimport keras\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\nprint(keras.__version__)\n\n\n# In[ ]:\n\n\n# 没有问题的话就开始搭建模型\nfrom speechemotion.dlcode.nn_model import KerasModelAdapter\nimport functools\n\nUTT_LENGTH = 512\n# dl_dataset.describe_data()\ndl_dataset.set_data_length(UTT_LENGTH)\nprint(dl_dataset.get_input_shape())\n\n\nfrom keras.models import Sequential\nfrom keras.layers import Input, Concatenate, Flatten, Reshape\nfrom keras.layers import Dense, Dropout, Activation, BatchNormalization, GaussianNoise\nfrom keras.layers import Conv2D, MaxPooling2D, Conv1D, MaxPooling1D, LeakyReLU, AveragePooling1D\n# from keras.layers import LSTM\nfrom keras.layers import CuDNNLSTM as LSTM\nfrom keras.layers import CuDNNGRU as GRU\nfrom keras.layers import Bidirectional\n\n\nfrom keras.regularizers import l1, l2\n\n\n# In[ ]:\n\n\n# model_creator = functools.partial(model_factory, model_choose='cnn_1')\n\n# def model_creator(input_shape):\n# model = Sequential()\n# # default \"image_data_format\": \"channels_last\"\n\n# model.add(Conv1D(64, 3, strides=1, input_shape=input_shape))\n# model.add(Activation('relu'))\n# model.add(MaxPooling1D(2))\n# model.add(Dropout(0.5))\n\n# for filter_num in [64, 128]:\n# model.add(Conv1D(filter_num, 3, strides=1, padding='same'))\n# model.add(Activation('relu'))\n# model.add(MaxPooling1D(2))\n# model.add(Dropout(0.5))\n\n# model.add(Flatten())\n# model.add(Dense(7, activation='softmax'))\n# return model\n\n# def model_creator2(input_shape):\n# model = Sequential()\n# # default \"image_data_format\": \"channels_last\"\n# # assert K.image_data_format() == 'channels_last':\n\n# model.add(Reshape((*input_shape, 1), input_shape=input_shape))\n# model.add(Conv2D(64, (6,1), strides=1))\n# model.add(Activation('relu'))\n# model.add(MaxPooling2D(pool_size=(4, 1)))\n# model.add(Dropout(0.5))\n# model.add(Reshape((-1,64*input_shape[1])))\n\n# for filter_num in [64, 128]:\n# model.add(Conv1D(filter_num, 3, strides=1, padding='same'))\n# model.add(Activation('relu'))\n# model.add(MaxPooling1D(2))\n# model.add(Dropout(0.5))\n\n# model.add(Flatten())\n# model.add(Dense(7, activation='softmax'))\n# return model\n\n\ndef model_creator3(input_shape):\n model = Sequential()\n # default \"image_data_format\": \"channels_last\"\n # assert K.image_data_format() == 'channels_last':\n\n model.add(Reshape((*input_shape, 1), input_shape=input_shape))\n model.add(Conv2D(8, (6,1), strides=1, padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=(4, 1)))\n model.add(Dropout(0.5))\n# model.add(Conv2D(8, (6,1), strides=1, padding='same', activation='relu'))\n# model.add(MaxPooling2D(pool_size=(4, 1)))\n# model.add(Dropout(0.5))\n model.add(Reshape((-1,8*input_shape[1])))\n\n\n model.add(Conv1D(64, 3, strides=1, padding='same', activation='relu'))\n model.add(MaxPooling1D(2))\n model.add(Dropout(0.5))\n\n model.add(Flatten())\n model.add(Dense(7, activation='softmax'))\n return model\n \ndef EmNet_creator(input_shape):\n model = Sequential()\n # default \"image_data_format\": \"channels_last\"\n # assert K.image_data_format() == 'channels_last':\n\n model.add(Reshape((*input_shape, 
1), input_shape=input_shape))\n model.add(GaussianNoise(0.05))\n model.add(Conv2D(64, (6,1), strides=1, padding='same')) # , kernel_regularizer=l1(0.001)\n model.add(Activation('relu'))\n# model.add(LeakyReLU())\n model.add(MaxPooling2D(pool_size=(4, 1)))\n\n model.add(Reshape((-1,64*input_shape[1])))\n model.add(Conv1D(128, 3, strides=1, padding='same')) # , kernel_regularizer=l1(0.00001)\n model.add(Activation('relu'))\n# model.add(LeakyReLU())\n model.add(MaxPooling1D(2))\n\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n model.add(Bidirectional(LSTM(48, return_sequences=True), merge_mode='concat')) # returns a sequence of vectors , dropout=0.25\n model.add(Dropout(0.5)) # Attention\n model.add(Bidirectional(LSTM(48, return_sequences=True), merge_mode='concat')) # return a single vector , dropout=0.25\n model.add(MaxPooling1D(pool_size=64))\n \n model.add(Flatten())\n model.add(Dense(32, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(7, activation='softmax'))\n return model\n\n\ndef get_2d_conv_model(input_shape):\n ''' CNN-LSTM'''\n inp = Input(shape=input_shape)\n \n x = Reshape((*input_shape, 1))(inp)\n x = GaussianNoise(0.05)(x)\n x = Conv2D(64, (6,1), strides=1, padding='same')(x)\n x = Activation(\"relu\")(x)\n x = MaxPooling2D(pool_size=(4, 1))(x)\n \n x = Reshape((-1,64*input_shape[1]))(x)\n x = Conv1D(128, 3, strides=1, padding='same', kernel_regularizer=l1(0.0005))(x)\n x = Activation('relu')(x)\n x = MaxPooling1D(2)(x)\n x = BatchNormalization()(x)\n\n x = Dropout(0.5)(x)\n x = Bidirectional(LSTM(48, return_sequences=True), merge_mode='concat')(x) # returns a sequence of vectors , dropout=0.25\n x = Dropout(0.5)(x) # TODO: Try Attention\n x = Bidirectional(LSTM(48, return_sequences=False), merge_mode='concat')(x) # return a single vector , dropout=0.25\n# x1 = MaxPooling1D(pool_size=64)(x)\n# x2 = AveragePooling1D(pool_size=64)(x)\n# x = Concatenate()([x1, x2])\n \n# x = Flatten()(x)\n# x = Dense(32, activation='relu')(x)\n# x = Dropout(0.5)(x)\n out = Dense(7, activation='softmax')(x)\n\n model = keras.models.Model(inputs=inp, outputs=out)\n return model\n\n\nhyper_params = {\n 'lr':0.001,\n 'epochs':250,\n 'lr_decay':0.05,\n 'gpus':1,\n 'batch_size':64\n}\nmodel = KerasModelAdapter(dl_dataset.get_input_shape(), model_creator=get_2d_conv_model, **hyper_params)\nprint(model)\n# visualize model layout with pydot_ng\nmodel.plot_model()\n\n\n# In[ ]:\n\n\n# from speechemotion.mlcode.pipelineCV import PipelineCV\n\n# pipelineCV = PipelineCV(model, dl_dataset, data_splitter, n_splits=10)\n# result = pipelineCV.run_pipeline(2000)\n# from speechemotion.mlcode.main_exp import gen_report, save_exp_log\n# print(result['conf_mx'])\n# gen_report(result['fold_metrics'])\n\n\n# In[ ]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nfrom speechemotion.mlcode.main_exp import main_experiment\n\nresult = main_experiment(dl_dataset, data_splitter, model)\n\nconf_mx = result['conf_mx_sum']\nreport = result['report']\n\n\n# result_df_stat # .describe()\n# UAR\ndisplay(report)\n\n\n# In[ ]:\n\n\nfrom speechemotion.mlcode.main_exp import save_exp_log, gen_report\nplt.style.use('ggplot')\n\n# show_confusion_matrix(conf_mx, save_pic_path='./log/cconf_mx.png')\nplot_confusion_matrix(conf_mx, classes=CLASS_NAMES, figsize=(7,7))\n\nsave_exp_log({\n 'Memo': '|'.join(CLASS_NAMES),\n 'Data': 'File: %s\\n' % (DL_FILE_PATH),\n 'Model': '\\n%s\\n' % str(model),\n 'Report': report, # gen_report(result['fold_metrics']),\n 'Confusion Matrix': '\\n%s\\n' % 
repr(result['conf_mx_sum']),\n 'CV_result_detail': result['cv_metrics_stat'].describe() # fold_metrics\n}, name_str='DeepLearning' )\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\nexit(0)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"notebook/DL1-OpenSMILE_LLDs.py","file_name":"DL1-OpenSMILE_LLDs.py","file_ext":"py","file_size_in_byte":11025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"602845061","text":"from Model import SignalError\nfrom Dataset.Simulation.GaussCurve_TF import FBG_spectra\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\ne_model, f_model = SignalError.ErrorModel()\n\nsamples = 100000\nfbgs = 3\n\n\ndef normalize(spectra):\n maximum = tf.expand_dims(tf.reduce_max(spectra, axis=1), axis=1)\n minimum = tf.expand_dims(tf.reduce_min(spectra, axis=1), axis=1)\n return (spectra-minimum)/(maximum-minimum)\n\n\nx_coord = tf.linspace(0.0, 1.0, 1000)\n\nX1 = tf.random.uniform([samples, fbgs])\nI1 = (tf.random.uniform([samples, fbgs])-0.5)*0.1 + \\\n tf.repeat([[1, 0.5, 0.25]], samples, axis=0)\nW1 = tf.ones([samples, fbgs]) * tf.random.uniform([1], 0.05, 0.15)\nspectrums1 = normalize(FBG_spectra(x_coord, X1, I1, W1))\n\nX2 = tf.random.uniform([samples, fbgs])\nI2 = I1 + (tf.random.uniform([samples, fbgs])-0.5)*0.01\nW2 = W1 + (tf.random.uniform([samples, fbgs])-0.5)*0.005\nspectrums2 = normalize(FBG_spectra(x_coord, X2, I1, W2) +\n (tf.random.uniform([samples, 1000])-0.5)*1e-5)\n\ntrain_X = tf.concat([tf.expand_dims(spectrums1, axis=1),\n tf.expand_dims(spectrums2, axis=1)], axis=1)\n\ntrain_Y = tf.reduce_mean(tf.abs(X2-X1), axis=1)\n\nplt.plot(spectrums1[0])\nplt.plot(spectrums2[0])\nplt.show()\n\ne_model.summary()\ne_model.load_weights('./SavedModel/SignalErrorModel.hdf5')\n\ne_model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-2), loss=\"mse\")\n\nfor i in range(10):\n print(\"training cycle\", i)\n e_model.fit(train_X, train_Y, epochs=10, batch_size=2000, validation_split=0.2, shuffle=True)\n e_model.save_weights('./SavedModel/SignalErrorModel.hdf5')\n\npred_Y = e_model(train_X)[:, 0]\nprint(pred_Y.shape, train_Y.shape)\nplt.plot(pred_Y-train_Y, \"o\")\nplt.show()\n","sub_path":"ML_SpectraError.py","file_name":"ML_SpectraError.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"215576607","text":"import pyupbit\r\nimport time\r\nimport datetime\r\n\r\n# 1초에 한번씩 현재가와 현재시간 출력\r\n# while True :\r\n# now = datetime.datetime.now()\r\n# price = pyupbit.get_current_price(\"KRW-BTC\")\r\n# print(price)\r\n# print(now, price)\r\n# time.sleep(1) # API를 너무 빨리 호출하면 호출 제한이 걸리게 됨. 
주의할 것\r\n\r\n\r\n# 목표가 계산 함수\r\ndef cal_target(ticker):\r\n df = pyupbit.get_ohlcv(ticker, \"day\")\r\n print(df.tail())\r\n yesterday = df.iloc[-2]\r\n today = df.iloc[-1]\r\n yesterday_range = yesterday['high']-yesterday['low']\r\n target = today['open'] + yesterday_range * 0.5\r\n return target\r\n\r\ntarget = cal_target(\"KRW-BTC\")\r\nprint(target)\r\n\r\n\r\n# 목표가 갱신 (매일 아침 9시 이후 목표가 갱신 필요)\r\nwhile True :\r\n now = datetime.datetime.now()\r\n #9시 0분 20초~ 30초 사이에 \r\n if now.hour == 9 and now.minute == 0 and (20 <= now.second <= 30):\r\n target = cal_target(\"KRW-BTC\")\r\n time.sleep(10)\r\n \r\n price = pyupbit.get_current_price(\"KRW-BTC\")\r\n print(now, price)\r\n time.sleep(1)\r\n \r\n","sub_path":"coin_project/stategy_lw.py","file_name":"stategy_lw.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"276865298","text":"from app.models.like_model import Like\n\n\ndef verify_match(like: Like):\n # Se o dislike for --False--, verificar se possui algum like dado a ele\n if not like.dislike:\n # Se sim, verificar se o id do dog dado o like está entre os que deram\n # o like para ele e o dislike for --False--\n like_received = Like.query.filter_by(\n dog_id_receive=like.dog_id_give,\n dog_id_give=like.dog_id_receive,\n dislike=False).first()\n\n if like_received is not None:\n # Se sim, modificar o match de ambos como --True--\n like.match = True\n like_received.match = True\n","sub_path":"app/services/like_services.py","file_name":"like_services.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"214952161","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, unicode_literals, print_function\n\nimport io\nimport sys\n\nfrom astropy.extern import six\n\nimport numpy as np\nfrom numpy import ma\nfrom numpy.testing import assert_array_equal\n\nimport yaml\n\nfrom ....tests import helpers\nfrom .... import asdf\n\nfrom .. 
import ndarray\n\n\ndef test_sharing(tmpdir):\n x = np.arange(0, 10, dtype=np.float)\n tree = {\n 'science_data': x,\n 'subset': x[3:-3],\n 'skipping': x[::2]\n }\n\n def check_asdf(asdf):\n tree = asdf.tree\n\n assert_array_equal(tree['science_data'], x)\n assert_array_equal(tree['subset'], x[3:-3])\n assert_array_equal(tree['skipping'], x[::2])\n\n assert tree['science_data'].ctypes.data == tree['skipping'].ctypes.data\n\n assert len(list(asdf.blocks.internal_blocks)) == 1\n assert next(asdf.blocks.internal_blocks)._size == 80\n\n tree['science_data'][0] = 42\n assert tree['skipping'][0] == 42\n\n def check_raw_yaml(content):\n assert b'!core/ndarray' in content\n\n helpers.assert_roundtrip_tree(tree, tmpdir, check_asdf, check_raw_yaml)\n\n\ndef test_byteorder(tmpdir):\n tree = {\n 'bigendian': np.arange(0, 10, dtype=str('>f8')),\n 'little': np.arange(0, 10, dtype=str(''\n assert tree['little'].dtype.byteorder == '='\n else:\n assert tree['bigendian'].dtype.byteorder == '='\n assert tree['little'].dtype.byteorder == '<'\n\n def check_raw_yaml(content):\n assert b'byteorder: little' in content\n assert b'byteorder: big' in content\n\n helpers.assert_roundtrip_tree(tree, tmpdir, check_asdf, check_raw_yaml)\n\n\ndef test_all_dtypes(tmpdir):\n tree = {}\n for byteorder in ('>', '<'):\n for dtype in ndarray._datatype_names.values():\n # Python 3 can't expose these dtypes in non-native byte\n # order, because it's using the new Python buffer\n # interface.\n if six.PY3 and dtype in ('c32', 'f16'):\n continue\n\n if dtype == 'b1':\n arr = np.array([True, False])\n else:\n arr = np.arange(0, 10, dtype=str(byteorder + dtype))\n\n tree[byteorder + dtype] = arr\n\n helpers.assert_roundtrip_tree(tree, tmpdir)\n\n\ndef test_dont_load_data():\n x = np.arange(0, 10, dtype=np.float)\n tree = {\n 'science_data': x,\n 'subset': x[3:-3],\n 'skipping': x[::2]\n }\n ff = asdf.AsdfFile(tree)\n\n buff = io.BytesIO()\n ff.write_to(buff)\n\n buff.seek(0)\n ff = asdf.AsdfFile.read(buff)\n\n ff.run_hook('pre_write')\n\n # repr and str shouldn't load data\n str(ff.tree['science_data'])\n repr(ff.tree)\n\n for block in ff.blocks.internal_blocks:\n assert block._data is None\n\n\ndef test_table_inline(tmpdir):\n table = np.array(\n [(0, 1, (2, 3)), (4, 5, (6, 7))],\n dtype=[(str('MINE'), np.int8),\n (str(''), np.float64),\n (str('arr'), '>i4', (2,))])\n\n tree = {'table_data': table}\n\n def check_raw_yaml(content):\n content = b'\\n'.join(content.splitlines()[4:-1])\n tree = yaml.load(content)\n\n assert tree == {\n 'datatype': [\n {'datatype': 'int8', 'name': 'MINE'},\n {'datatype': 'float64', 'name': 'f1'},\n {'datatype': 'int32', 'name': 'arr', 'shape': [2]}\n ],\n 'data': [[0, 1.0, [2, 3]], [4, 5.0, [6, 7]]],\n 'shape': [2]\n }\n\n helpers.assert_roundtrip_tree(\n tree, tmpdir, None, check_raw_yaml, {'auto_inline': 64})\n\n\ndef test_auto_inline_recursive(tmpdir):\n from astropy.modeling import models\n aff = models.AffineTransformation2D(matrix=[[1, 2], [3, 4]])\n tree = {'test': aff}\n\n def check_asdf(asdf):\n assert len(list(asdf.blocks.internal_blocks)) == 0\n\n helpers.assert_roundtrip_tree(\n tree, tmpdir, check_asdf, None, {'auto_inline': 64})\n\n\ndef test_table(tmpdir):\n table = np.array(\n [(0, 1, (2, 3)), (4, 5, (6, 7))],\n dtype=[(str('MINE'), np.int8),\n (str(''), np.float64),\n (str('arr'), '>i4', (2,))])\n\n tree = {'table_data': table}\n\n def check_raw_yaml(content):\n content = b'\\n'.join(content.splitlines()[4:-1])\n tree = yaml.load(content)\n\n assert tree == {\n 'datatype': [\n 
{'byteorder': 'big', 'datatype': 'int8', 'name': 'MINE'},\n {'byteorder': 'little', 'datatype': 'float64', 'name': 'f1'},\n {'byteorder': 'big', 'datatype': 'int32', 'name': 'arr', 'shape': [2]}\n ],\n 'shape': [2],\n 'source': 0,\n 'byteorder': 'big'\n }\n\n helpers.assert_roundtrip_tree(tree, tmpdir, None, check_raw_yaml)\n\n\ndef test_table_nested_fields(tmpdir):\n table = np.array(\n [(0, (1, 2)), (4, (5, 6)), (7, (8, 9))],\n dtype=[(str('A'), np.int64),\n (str('B'), [(str('C'), np.int64), (str('D'), np.int64)])])\n\n tree = {'table_data': table}\n\n def check_raw_yaml(content):\n content = b'\\n'.join(content.splitlines()[4:-1])\n tree = yaml.load(content)\n\n assert tree == {\n 'datatype': [\n {'datatype': 'int64', 'name': 'A', 'byteorder': 'little'},\n {'datatype': [\n {'datatype': 'int64', 'name': 'C', 'byteorder': 'little'},\n {'datatype': 'int64', 'name': 'D', 'byteorder': 'little'}\n ], 'name': 'B', 'byteorder': 'big'}],\n 'shape': [3],\n 'source': 0,\n 'byteorder': 'big'\n }\n\n helpers.assert_roundtrip_tree(tree, tmpdir, None, check_raw_yaml)\n\n\ndef test_inline():\n x = np.arange(0, 10, dtype=np.float)\n tree = {\n 'science_data': x,\n 'subset': x[3:-3],\n 'skipping': x[::2]\n }\n\n buff = io.BytesIO()\n\n with asdf.AsdfFile(tree) as ff:\n ff.blocks[tree['science_data']].array_storage = 'inline'\n ff.write_to(buff)\n\n buff.seek(0)\n with asdf.AsdfFile.read(buff, mode='rw') as ff:\n helpers.assert_tree_match(tree, ff.tree)\n assert len(list(ff.blocks.internal_blocks)) == 0\n buff = io.BytesIO()\n with asdf.AsdfFile(ff).write_to(buff):\n pass\n\n assert b'[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]' in buff.getvalue()\n\n\ndef test_inline_bare():\n content = \"arr: !core/ndarray [[1, 2, 3, 4], [5, 6, 7, 8]]\"\n buff = helpers.yaml_to_asdf(content)\n\n ff = asdf.AsdfFile.read(buff)\n\n assert_array_equal(ff.tree['arr'], [[1, 2, 3, 4], [5, 6, 7, 8]])\n\n\ndef test_mask_roundtrip(tmpdir):\n x = np.arange(0, 10, dtype=np.float)\n m = ma.array(x, mask=x > 5)\n tree = {\n 'masked_array': m,\n 'unmasked_array': x\n }\n\n def check_asdf(asdf):\n tree = asdf.tree\n\n m = tree['masked_array']\n x = tree['unmasked_array']\n\n print(m)\n print(m.mask)\n assert np.all(m.mask[6:])\n assert len(asdf.blocks) == 2\n\n helpers.assert_roundtrip_tree(tree, tmpdir, check_asdf)\n\n\ndef test_mask_nan():\n content = \"\"\"\n arr: !core/ndarray\n data: [[1, 2, 3, .NaN], [5, 6, 7, 8]]\n mask: .NaN\n \"\"\"\n\n buff = helpers.yaml_to_asdf(content)\n ff = asdf.AsdfFile.read(buff)\n\n assert_array_equal(\n ff.tree['arr'].mask,\n [[False, False, False, True], [False, False, False, False]])\n\n\ndef test_string(tmpdir):\n tree = {\n 'ascii': np.array([b'foo', b'bar', b'baz']),\n 'unicode': np.array(['სამეცნიერო', 'данные', 'வடிவம்'])\n }\n\n helpers.assert_roundtrip_tree(tree, tmpdir)\n\n\ndef test_string_table(tmpdir):\n tree = {\n 'table': np.array([(b'foo', 'სამეცნიერო', 42, 53.0)])\n }\n\n helpers.assert_roundtrip_tree(tree, tmpdir)\n\n\ndef test_inline_string():\n content = \"arr: !core/ndarray ['a', 'b', 'c']\"\n buff = helpers.yaml_to_asdf(content)\n\n ff = asdf.AsdfFile.read(buff)\n\n assert_array_equal(ff.tree['arr']._make_array(), ['a', 'b', 'c'])\n\n\ndef test_inline_structured():\n content = \"\"\"\n arr: !core/ndarray\n datatype: [['ascii', 4], uint16, uint16, ['ascii', 4]]\n data: [[M110, 110, 205, And],\n [ M31, 31, 224, And],\n [ M32, 32, 221, And],\n [M103, 103, 581, Cas]]\"\"\"\n\n buff = helpers.yaml_to_asdf(content)\n\n ff = asdf.AsdfFile.read(buff)\n\n assert 
ff.tree['arr']['f1'].dtype.char == 'H'\n\n\ndef test_simple_table():\n table = np.array(\n [(10.683262825012207, 41.2674560546875, 0.13, 0.12, 213.916),\n (10.682777404785156, 41.270111083984375, 0.1, 0.09, 306.825),\n (10.684737205505371, 41.26903533935547, 0.08, 0.07, 96.656),\n (10.682382583618164, 41.26792526245117, 0.1, 0.09, 237.145),\n (10.686025619506836, 41.26922607421875, 0.13, 0.12, 79.581),\n (10.685656547546387, 41.26955032348633, 0.13, 0.12, 55.219),\n (10.684028625488281, 41.27090072631836, 0.13, 0.12, 345.269),\n (10.687610626220703, 41.270301818847656, 0.18, 0.14, 60.192)],\n dtype=[\n (str('ra'), str('= 8.0:\n f.write(u'title:{}\\nrating value:{}\\nurl:{}\\n\\n'.format(book[0], book[1], book[2]).encode('utf8'))\n f.flush()\n\n print('\\nFinished\\n')\n\n\n\ndef parse_line(line):\n if line[0:5] == 'title':\n return (line[:5], line[6:-1])\n elif line[0:12] == 'rating value':\n return (line[:12], line[14:-2])\n elif line[0:3] == 'url':\n return (line[:3], line[4:-1])\n else:\n return (None,)\n\ngood_format_list = ('txt', 'mobi', 'epub')\ngood_score_list = ('score-four', 'score-four-point-five', 'score-five')\n\ndef get_book_format_list(book_info):\n print('getting book format info for {}'.format(book_info))\n format_list = []\n r = requests.get(book_info['url'])\n soup = BeautifulSoup(r.text,'html.parser')\n table = soup.find('table')\n if not table:\n return format_list\n tr_list = table.find_all('tr')\n for tr in tr_list:\n td_list = tr.find_all('td')\n if td_list:\n format_info = td_list[0].text \n if format_info in good_format_list:\n div = td_list[1].find('div')\n if div is not None:\n score_info = div.attrs['class'][1]\n format_list.append((format_info, score_info))\n return format_list\n\n \n\ndef isgood2push(book_info):\n format_list = get_book_format_list(book_info)\n book_info.setdefault('format list', [])\n for format in format_list:\n if format[1] in good_score_list:\n book_info['format list'].append(format)\n return True\n return False\n\ndef pickup_good_books_to_push(sourcefile='jisihuibooks.txt', destfile='jisihuibooks-topush.txt'):\n with open(sourcefile, 'r') as fsource, open(destfile, 'w') as fdest:\n book_info = {}\n for line in fsource:\n line_info = parse_line(line) \n if line_info[0] == 'title':\n book_info[line_info[0]] = line_info[1]\n elif line_info[0] == 'rating value':\n book_info[line_info[0]] = line_info[1]\n elif line_info[0] == 'url':\n book_info[line_info[0]] = line_info[1]\n if isgood2push(book_info):\n print(book_info)\n fdest.write('title:{}\\nrating value:{}\\nurl:{}\\nformat list:{}\\npushed flag: False\\n\\n'.\\\n format(book_info['title'], book_info['rating value'], book_info['url'], book_info.get('format list','none')))\n fdest.flush()\n else:\n book_info = {}\n \n# 集思会登陆\nJISIHUI_LOGIN_URL = 'http://www.kindlepush.com/user/login'\ndef jisihui_login(username=None,password=None):\n '''\n 集思会登陆接口\n 参数:\n username: 登陆用户名\n password: 登陆密码\n 返回:\n 登陆成功,返回相应的requests库的session\n 登陆失败,触发相关异常\n '''\n if username is None or password is None:\n raise Exception('username or password is none')\n login_data = {'username':username, 'password':password, 'rememberMe':'false'}\n jisihui_session = requests.Session()\n r = jisihui_session.post(JISIHUI_LOGIN_URL, login_data)\n if r.text != 'true':\n raise Exception('login fail!')\n return jisihui_session\n\n#集思会推送指定id的书到我的kindle\n#\nJISIHUI_PUSH_BOOK_URL_PATTERN = 'http://www.kindlepush.com/book/push/{}'\ndef jisihui_push_book(session=None, book_id=None):\n if session is None:\n raise Exception('session is 
none')\n if book_id is None:\n raise Exception('invalid book id')\n r = session.post(JISIHUI_PUSH_BOOK_URL_PATTERN.format(book_id)) \n if r.text[:7] != 'success':\n raise Exception('push book {} fail resean({})'.format(book_id, r.text))\n\n#集思会收藏指定id的书本\nJISIHUI_MARK_BOOK_URL_PATTERN = 'http://www.kindlepush.com/book/push/{}'\ndef jisihui_mark_book(session=None, book_id=None):\n if session is None:\n raise Exception('session is none')\n if book_id is None:\n raise Exception('invalid book id')\n pass\n \n\nJISIHUI_BOOK_COLLECT_URL = 'http://www.kindlepush.com/user/collect'\ndef jisihui_get_collect_book_list(session=None):\n if session is None:\n raise Exception('session is none')\n r = session.get(JISIHUI_BOOK_COLLECT_URL)\n soup = BeautifulSoup(r.text, 'html.parser')\n \n\n\n\n\n\nif __name__ == '__main__':\n jisihui_collect_book_list()\n pickup_good_books_to_push() \n\n\n","sub_path":"jisihui-spider.py","file_name":"jisihui-spider.py","file_ext":"py","file_size_in_byte":6091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"406954666","text":"def fibonacci(x):\n\n\tfibonacci = [0,1]\n\ta = 0\n\tb = 1\n\n\tfor i in range(x - 2):\n\n\t\tif a > b:\n\t\t\tb += a\n\t\t\tfibonacci.append(b)\n\n\t\telif a <= b:\n\t\t\ta += b\n\t\t\tfibonacci.append(a)\n\n\treturn(fibonacci[x - 1])\n","sub_path":"python3/fibonacci/Function/fibonacci-function.py","file_name":"fibonacci-function.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"403453299","text":"import warnings\nimport pandas as pd\n\nwarnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)\n\ndef get_values(match_id):\n \"\"\"\n Loads a dataframe from a pickle with all the values from an specific match\n \"\"\"\n values_df = pd.read_pickle('match_values/'+str(match_id)+'.pkl')\n\n return values_df\n\n\ndef player_value(player_id, values_df, is_gk):\n \"\"\"\n Returns the value of an specific player\n \"\"\"\n if is_gk == False:\n player_columns = values_df[values_df['player_id'] == player_id]\n \n player_value = player_columns['total_value'].sum()\n else:\n team_id = values_df.loc[0, 'team_id']\n player_columns = values_df[values_df['player_id'] == player_id]\n player_columns = player_columns.reset_index(drop=True)\n if player_columns.loc[0, 'team_id'] == team_id:\n player_value = player_columns.loc[0, 'team2_xG'] - player_columns.loc[0, 'team2_goals']\n else:\n player_value = player_columns.loc[0, 'team1_xG'] - player_columns.loc[0, 'team1_goals']\n\n\n return player_value\n\n\ndef all_players_value(players_df,values_df):\n \"\"\"\n Returns a dataframe with the value for all players\n \"\"\"\n gk = players_df[players_df['player_position'] == 'Goalkeeper']\n gk = gk['player_id'].to_list()\n player_ids = players_df['player_id'].to_list()\n player_values = []\n for id in player_ids:\n p_value = player_value(id, values_df, True if id in gk else False)\n\n player_values.append({'player_id': id, 'value': p_value})\n\n player_values = pd.DataFrame(player_values)\n\n\n return player_values\n\ndef get_rating(players_values, players_df):\n \"\"\"\n Transform the players value to rating for 90 minutes.\n \"\"\"\n player_minutes = players_df[['player_id', 'player_name', 'minutes_played']]\n\n players_values = pd.merge(players_values, player_minutes, on='player_id')\n\n players_values['rating'] = (90 / players_values['minutes_played']) * players_values['value'] \n\n return 
players_values\n\ndef get_best_ratings(all_players_df, goalkeepers, n_players=10, minutes_played=900):\n \"\"\"\n Prints a dataframe with the n best ratings, for the players or goalkeepers that played more than\n minutes_played\n \"\"\"\n mask = all_players_df['minutes_played'] > minutes_played\n if goalkeepers:\n mask2 = all_players_df['player_position'] == 'Goalkeeper'\n else:\n mask2 = all_players_df['player_position'] != 'Goalkeeper'\n\n all_players_df = all_players_df[mask]\n all_players_df = all_players_df[mask2]\n \n\n all_players_df = all_players_df.sort_values('rating', ascending=False)\n\n if n_players == 'All':\n pd.set_option('display.max_rows', None)\n print(all_players_df)\n else:\n print(all_players_df.head(n_players))\n\n\n\n\n\n\n\n \n\n\n\n","sub_path":"value_players/value_to_players.py","file_name":"value_to_players.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"88189251","text":"# %%\nimport gc\nimport json\nimport os\nimport pickle\nimport random\nimport re\nimport string\nimport shutil\nimport sys\nimport time\nimport glob\nfrom typing import Callable\nimport warnings\nfrom functools import partial\nfrom pathlib import Path\n\nfrom omegaconf import OmegaConf\nimport matplotlib.pyplot as plt\nimport mlflow\nimport numpy as np\nimport pandas as pd\nimport pytorch_lightning as pl\nfrom pytorch_lightning.core.saving import CHECKPOINT_PAST_HPARAMS_KEYS\nimport seaborn as sns\nimport torch\nimport transformers\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import KFold, StratifiedKFold, GroupShuffleSplit\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.optim import optimizer\nfrom torch.utils.data import DataLoader\nimport torchmetrics\nfrom tqdm import tqdm\n\nwarnings.simplefilter(\"ignore\")\n\n# %%\n\n# ====================================================\n# preprocess\n# ====================================================\n\n\ndef preprocess_df(df):\n df[\"target\"] = df[\"target\"].astype(np.float32)\n df = df.loc[\n ~((df[\"target\"] == 0) & (df[\"standard_error\"] == 0))\n ].sort_values(\"id\").reset_index(drop=True)\n\n # sentence tokenize\n from nltk import sent_tokenize\n df[\"excerpt_ls\"] = df[\"excerpt\"].apply(sent_tokenize)\n\n result_df = pd.DataFrame()\n for i, row in tqdm(df.iterrows()):\n r = row.copy()\n for t in row[\"excerpt_ls\"]:\n r[\"excerpt\"] = t\n result_df = result_df.append(r)\n\n result_df.drop(\"excerpt_ls\", axis=1, inplace=True)\n\n return result_df.reset_index(drop=True)\n\n# ====================================================\n# metric\n# ====================================================\n\n\nclass RMSE(torchmetrics.Metric):\n def __init__(self):\n super().__init__(compute_on_step=False)\n self.add_state(\n \"sum_squared_errors\",\n torch.tensor(0.0),\n dist_reduce_fx=\"sum\",\n )\n self.add_state(\n \"n_observations\",\n torch.tensor(0.0),\n dist_reduce_fx=\"sum\",\n )\n\n def update(self, preds, target):\n self.sum_squared_errors += torch.sum((preds - target) ** 2)\n self.n_observations += preds.numel()\n\n def compute(self):\n return torch.sqrt(self.sum_squared_errors / self.n_observations)\n\n\n# ====================================================\n# transform\n# ====================================================\n\n\nclass Transform():\n def __init__(self, data, tokenizer_name, tokenizer_max_length):\n self.data = data\n self.tokenizer_name = tokenizer_name\n 
self.tokenizer_max_length = tokenizer_max_length\n\n self.tokenizer = self.get_tokenizer()\n\n def get_tokenizer(self):\n config = transformers.AutoConfig.from_pretrained(\n self.tokenizer_name\n )\n tokenizer = transformers.AutoTokenizer.from_pretrained(\n self.tokenizer_name,\n config=config,\n )\n return tokenizer\n\n def get_transform_fn(self):\n def transform(text):\n tokens = self.tokenizer.encode_plus(\n text,\n truncation=True,\n padding=\"max_length\",\n pad_to_max_length=True,\n max_length=self.tokenizer_max_length,\n )\n return tokens\n\n return transform\n\n def get_collate_fn(self):\n return None\n\n# ====================================================\n# loss\n# ====================================================\n\n\ndef get_loss(loss_name, loss_params):\n return getattr(\n torch.nn,\n loss_name\n )(**loss_params)\n\n# ====================================================\n# optimizer\n# ====================================================\n\n\ndef get_optimizer(model, optimizer_name, optimizer_params):\n return getattr(\n torch.optim,\n optimizer_name\n )(model.parameters(), **optimizer_params)\n\n# ====================================================\n# scheduler\n# ====================================================\n\n\ndef get_scheduler(optimizer, scheduler_name, scheduler_params):\n return getattr(\n torch.optim.lr_scheduler,\n scheduler_name\n )(optimizer, **scheduler_params)\n\n\n# ====================================================\n# dataset\n# ====================================================\nclass DatasetBase(torch.utils.data.Dataset):\n def __init__(\n self,\n texts,\n transform\n ):\n self.texts = texts\n self.transform = transform\n self.length = len(texts)\n\n def __len__(\n self,\n ):\n return self.length\n\n def __getitem__(\n self,\n idx\n ):\n text = self.texts[idx]\n if self.transform:\n text = self.transform(text)\n text = {k: torch.tensor(v, dtype=torch.long)\n for k, v in text.items()}\n return text\n return {\"text\": text}\n\n\nclass Dataset(DatasetBase):\n def __init__(\n self,\n texts,\n labels,\n transform,\n ):\n super().__init__(texts, transform)\n self.labels = labels\n\n def __getitem__(\n self,\n idx,\n ):\n text = super().__getitem__(idx)\n label = label = torch.tensor(self.labels[idx]).float()\n return text, label\n\n\nclass TestDataset(DatasetBase):\n def __init__(\n self,\n texts,\n transform,\n ):\n super().__init__(texts, transform)\n\n def __getitem__(\n self,\n idx,\n ):\n text = super().__getitem__(idx)\n return text\n\n# ====================================================\n# model\n# ====================================================\n\n\nclass BaseModel(nn.Module):\n def __init__(\n self,\n basemodel_name: str,\n multisample_dropout: int,\n multisample_dropout_rate: float,\n model_params,\n ):\n super().__init__()\n\n # model\n config = transformers.AutoConfig.from_pretrained(\n basemodel_name\n )\n config.update(model_params)\n self.model = transformers.AutoModel.from_pretrained(\n basemodel_name,\n config=config,\n )\n self.middle_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-07)\n self.middle_linear = torch.nn.utils.weight_norm(\n nn.Linear(\n config.hidden_size, config.hidden_size // 2\n )\n )\n self.layer_norm = nn.LayerNorm(config.hidden_size // 2, eps=1e-07)\n self.dropouts = nn.ModuleList([\n nn.Dropout(multisample_dropout_rate) for _ in range(multisample_dropout)\n ])\n self.regressors = nn.ModuleList([\n torch.nn.utils.weight_norm(nn.Linear(config.hidden_size // 2, 1)) for _ in 
range(multisample_dropout)\n ])\n\n def forward(self, x):\n output = self.model(**x)[1]\n output = F.relu(self.middle_linear(self.middle_layer_norm(output)))\n output = self.layer_norm(output)\n logits = torch.stack(\n [\n regressor(dropout(output)) for regressor, dropout in zip(self.regressors, self.dropouts)\n ]\n ).mean(axis=0)\n return logits.flatten()\n\n\nclass Model(pl.LightningModule):\n def __init__(\n self,\n basemodel_name: str,\n multisample_dropout: int,\n multisample_dropout_rate: float,\n model_params: dict,\n loss_name: str,\n loss_params: dict,\n optimizer_name: str,\n optimizer_params: dict,\n scheduler_name: str,\n scheduler_params: dict,\n scheduler_interval: str,\n ):\n super().__init__()\n\n # model\n self.model = BaseModel(\n basemodel_name=basemodel_name,\n multisample_dropout=multisample_dropout,\n multisample_dropout_rate=multisample_dropout_rate,\n model_params=model_params,\n )\n\n # optimizer and shceduler\n self.optimizer_name = optimizer_name\n self.optimizer_params = optimizer_params\n self.scheduler_name = scheduler_name\n self.scheduler_params = scheduler_params\n self.scheduler_interval = scheduler_interval\n\n # critetria\n self.criterion = get_loss(loss_name, loss_params)\n\n # metrics\n self.train_rmse = RMSE()\n self.valid_rmse = RMSE()\n\n # init model training histories\n self.history = {\n \"train_rmse\": [],\n \"valid_rmse\": [],\n \"lr\": []\n }\n\n def forward(self, x):\n output = self.model(x)\n return output\n\n def configure_optimizers(self):\n optimizer = get_optimizer(\n self.model,\n optimizer_name=self.optimizer_name,\n optimizer_params=self.optimizer_params,\n )\n if self.scheduler_name is None:\n return {\"optimizer\": optimizer, }\n else:\n scheduler = get_scheduler(\n optimizer,\n scheduler_name=self.scheduler_name,\n scheduler_params=self.scheduler_params,\n )\n\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\n \"scheduler\": scheduler,\n \"monitor\": \"valid_rmse\",\n \"interval\": self.scheduler_interval,\n }\n }\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n loss = self.criterion(y_hat, y)\n self.train_rmse(y_hat, y)\n self.log(\n name=\"train_rmse\",\n value=self.train_rmse,\n prog_bar=True,\n logger=False,\n on_step=False,\n on_epoch=True,\n )\n self.history[\"lr\"].append(\n self.optimizers(False).param_groups[0][\"lr\"]\n )\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n loss = self.criterion(y_hat, y)\n self.valid_rmse(y_hat, y)\n self.log(\n name=\"valid_rmse\",\n value=self.valid_rmse,\n prog_bar=True,\n logger=False,\n on_step=False,\n on_epoch=True,\n )\n return loss\n\n def on_train_epoch_end(self) -> None:\n self.history[\"train_rmse\"].append(\n self.train_rmse.compute().detach().cpu().numpy()\n )\n return super().on_train_epoch_end()\n\n def on_validation_epoch_end(self) -> None:\n self.history[\"valid_rmse\"].append(\n self.valid_rmse.compute().detach().cpu().numpy()\n )\n return super().on_validation_epoch_end()\n\n# ====================================================\n# dataloader\n# ====================================================\n\n\ndef get_dataloader(\n texts,\n labels,\n transform,\n collate_fn,\n loader_params,\n):\n ds = Dataset(texts, labels, transform)\n dl = DataLoader(ds, collate_fn=collate_fn, **loader_params)\n\n return dl\n\n\ndef get_test_dataloader(\n texts,\n transform,\n collate_fn,\n loader_params,\n):\n ds = TestDataset(texts, transform)\n dl = DataLoader(ds, collate_fn=collate_fn, 
**loader_params)\n\n return dl\n\n# ====================================================\n# plots\n# ====================================================\n\n\ndef plot_dist(ytrue, ypred, filename):\n plt.figure()\n plt.hist(ytrue, alpha=0.5, bins=100)\n plt.hist(ypred, alpha=0.5, bins=100)\n plt.legend([\"ytrue\", \"ypred\"])\n plt.savefig(filename)\n plt.close()\n\n\ndef plot_training_curve(train_history, valid_history, filename):\n plt.figure()\n legends = []\n plt.plot(range(len(train_history)), train_history,\n marker=\".\", color=\"skyblue\")\n legends.append(\"train\")\n plt.plot(range(len(valid_history)), valid_history,\n marker=\".\", color=\"orange\")\n legends.append(\"valid\")\n plt.legend(legends)\n plt.savefig(filename)\n plt.close()\n\n\ndef plot_lr_scheduler(lr_history, filename, steps_per_epoch, accumulate_grad_batches):\n epoch_index = [\n step for step in range(len(lr_history)) if step % (steps_per_epoch * accumulate_grad_batches) == 0\n ]\n plt.figure()\n plt.plot(range(len(lr_history)), lr_history)\n plt.plot(\n [i for i in range(len(lr_history)) if i in epoch_index],\n [lr_history[i] for i in range(len(lr_history)) if i in epoch_index],\n color=\"orange\",\n linestyle=\"None\",\n marker=\"D\"\n )\n plt.xlabel(\"step\")\n plt.ylabel(\"lr\")\n plt.legend([\"lr\", \"epoch\"])\n plt.savefig(filename)\n plt.close()\n\n\n# ====================================================\n# util\n# ====================================================\n\ndef detect_device():\n import torch\n if torch.cuda.is_available():\n return {\"gpus\": 1}\n return {\"gpus\": None}\n\n# ====================================================\n# inference\n# ====================================================\n\n\ndef inference(\n test_ids,\n test_texts,\n trainer,\n model,\n tokenizer_name,\n tokenizer_max_length,\n loader_params,\n):\n print(f\"inference check: model.model.training={model.model.training}\")\n transform = Transform(\n data=\"test\",\n tokenizer_name=tokenizer_name,\n tokenizer_max_length=tokenizer_max_length\n )\n dataloader = get_test_dataloader(\n texts=test_texts,\n transform=transform.get_transform_fn(),\n collate_fn=transform.get_collate_fn(),\n loader_params=loader_params,\n )\n prediction = torch.cat(\n trainer.predict(\n model=model,\n dataloaders=dataloader,\n )\n ).detach().cpu().numpy()\n df = pd.DataFrame()\n df[\"id\"] = test_ids\n df[\"target\"] = prediction\n return df\n\n# ====================================================\n# train fold\n# ====================================================\n\n\ndef train_fold(\n fold,\n train_texts,\n train_labels,\n train_idx,\n valid_idx,\n logger,\n CFG\n):\n print(\"#\" * 30, f\"fold: {fold}\", \"#\" * 30)\n # device\n device_params = detect_device()\n\n model = Model(\n basemodel_name=CFG.model.name,\n multisample_dropout=CFG.model.multisample_dropout,\n multisample_dropout_rate=CFG.model.multisample_dropout_rate,\n model_params=CFG.model.params,\n loss_name=CFG.loss.name,\n loss_params=CFG.loss.params,\n optimizer_name=CFG.optimizer.name,\n optimizer_params=CFG.optimizer.params,\n scheduler_name=CFG.scheduler.name,\n scheduler_params=CFG.scheduler.params,\n scheduler_interval=CFG.scheduler.interval,\n )\n\n transform_train = Transform(\n data=\"train\",\n tokenizer_name=CFG.tokenizer.name,\n tokenizer_max_length=CFG.tokenizer.max_length,\n )\n\n train_dataloader = get_dataloader(\n texts=train_texts[train_idx],\n labels=train_labels[train_idx],\n transform=transform_train.get_transform_fn(),\n 
collate_fn=transform_train.get_collate_fn(),\n loader_params=CFG.loader.train,\n )\n valid_dataloader = get_dataloader(\n texts=train_texts[valid_idx],\n labels=train_labels[valid_idx],\n transform=transform_train.get_transform_fn(),\n collate_fn=transform_train.get_collate_fn(),\n loader_params=CFG.loader.train,\n )\n\n CHECKPOINT_NAME = \\\n f\"fold{fold}_{CFG.model.name}_\"\"{epoch:02d}_{valid_rmse:.3f}\"\n checkpoint_callback = pl.callbacks.ModelCheckpoint(\n filename=CHECKPOINT_NAME,\n monitor='valid_rmse',\n mode='min',\n save_top_k=1,\n save_weights_only=True,\n )\n\n CFG.training.steps_per_epoch = (\n len(train_dataloader) + CFG.training.accumulate_grad_batches - 1\n ) // CFG.training.accumulate_grad_batches\n\n trainer = pl.Trainer(\n max_epochs=CFG.training.epochs,\n logger=logger,\n benchmark=True,\n deterministic=True,\n callbacks=[checkpoint_callback],\n num_sanity_val_steps=0,\n accumulate_grad_batches=CFG.training.accumulate_grad_batches,\n precision=CFG.training.precision,\n stochastic_weight_avg=CFG.training.stochastic_weight_avg,\n **device_params,\n )\n\n trainer.fit(\n model,\n train_dataloader=train_dataloader,\n val_dataloaders=valid_dataloader,\n )\n\n del train_dataloader, valid_dataloader, trainer, transform_train\n gc.collect()\n torch.cuda.empty_cache()\n pl.utilities.memory.garbage_collection_cuda()\n\n plot_training_curve(\n model.history[\"train_rmse\"],\n model.history[\"valid_rmse\"],\n filename=f\"training_curve_fold{fold}.png\"\n )\n\n plot_lr_scheduler(\n model.history[\"lr\"],\n filename=f\"lr_scheduler_fold{fold}.png\",\n steps_per_epoch=CFG.training.steps_per_epoch,\n accumulate_grad_batches=CFG.training.accumulate_grad_batches,\n )\n\n model.load_state_dict(\n torch.load(checkpoint_callback.best_model_path)[\"state_dict\"],\n )\n\n model.freeze()\n model.eval()\n\n return model\n\n# ====================================================\n# for kaggle notebook inference\n# ====================================================\n\n\ndef inference_main(CFG, checkpoint_paths):\n os.chdir(CFG.dir.work_dir)\n # seed\n pl.seed_everything(CFG.general.seed)\n\n # device\n device_params = detect_device()\n\n pl.seed_everything(CFG.general.seed)\n test_df = pd.read_csv(\n os.path.join(\n CFG.dir.input_dir,\n \"test.csv\"\n )\n )\n test_df[\"target\"] = -1\n test_df[\"standard_error\"] = -1\n test_df = preprocess_df(test_df)\n\n test_ids = test_df[\"id\"].values\n test_texts = test_df[\"excerpt\"].values\n\n predict_trainer = pl.Trainer(\n precision=CFG.training.precision,\n logger=None,\n **device_params,\n )\n\n predictions_df = pd.DataFrame()\n\n for checkpoint_path in checkpoint_paths:\n\n model = Model.load_from_checkpoint(\n checkpoint_path,\n basemodel_name=CFG.model.name,\n multisample_dropout=CFG.model.multisample_dropout,\n multisample_dropout_rate=CFG.model.multisample_dropout_rate,\n model_params=CFG.model.params,\n loss_name=CFG.loss.name,\n loss_params=CFG.loss.params,\n optimizer_name=CFG.optimizer.name,\n optimizer_params=CFG.optimizer.params,\n scheduler_name=CFG.scheduler.name,\n scheduler_params=CFG.scheduler.params,\n scheduler_interval=CFG.scheduler.interval,\n )\n\n model.freeze()\n model.eval()\n\n prediction = inference(\n test_ids,\n test_texts,\n trainer=predict_trainer,\n model=model,\n tokenizer_name=CFG.tokenizer.name,\n tokenizer_max_length=CFG.tokenizer.max_length,\n loader_params=CFG.loader.test,\n )\n\n predictions_df = pd.concat([predictions_df, prediction], axis=0)\n\n predictions_df = \\\n 
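# CFG.training.steps_per_epoch above uses integer ceil-division so the LR
# scheduler is stepped once per *optimizer* step under gradient accumulation.
# The arithmetic in isolation:
import math

def optimizer_steps_per_epoch(n_batches: int, accumulate: int) -> int:
    # equivalent to math.ceil(n_batches / accumulate), in pure integers
    return (n_batches + accumulate - 1) // accumulate

assert optimizer_steps_per_epoch(10, 3) == math.ceil(10 / 3) == 4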
predictions_df.groupby(\"id\").mean().reset_index(drop=False)\n predictions_df.sort_values(\"id\", inplace=True)\n predictions_df.reset_index(drop=True, inplace=True)\n predictions_df.to_csv(\"submission.csv\", index=False)\n\n\ndef main(CFG):\n os.chdir(CFG.dir.work_dir)\n # seed\n pl.seed_everything(CFG.general.seed)\n\n # device\n device_params = detect_device()\n\n # prepare df\n train_df = pd.read_csv(\n os.path.join(\n CFG.dir.input_dir,\n \"train.csv\"\n )\n )\n train_df = preprocess_df(train_df)\n\n if CFG.general.debug:\n train_df = train_df.sample(200)\n CFG.training.epochs = 5\n CFG.training.n_fold = 3\n CFG.log.mlflow.experiment_name = \"debug\"\n\n # logger\n MLFLOW_LOGGER = pl.loggers.MLFlowLogger(\n experiment_name=CFG.log.mlflow.experiment_name,\n save_dir=CFG.log.mlflow.save_dir,\n )\n\n train_texts = train_df[\"excerpt\"].values\n train_labels = train_df[\"target\"].values\n\n # train\n oof_df = pd.DataFrame()\n\n # fold\n kf = GroupShuffleSplit(\n CFG.training.n_fold,\n random_state=CFG.general.seed,\n )\n fold_x = train_texts\n fold_y = pd.cut(train_labels, 30).codes\n fold_group = train_df[\"id\"].values\n\n for fold, (train_idx, valid_idx) in enumerate(kf.split(fold_x, fold_y, fold_group)):\n model = train_fold(\n fold,\n train_texts,\n train_labels,\n train_idx,\n valid_idx,\n logger=MLFLOW_LOGGER,\n CFG=CFG\n )\n\n predict_trainer = pl.Trainer(\n precision=CFG.training.precision,\n logger=None,\n callbacks=None,\n\n **device_params,\n )\n\n oof_ids = train_df[\"id\"].values[valid_idx]\n oof_texts = train_df[\"excerpt\"].values[valid_idx]\n oof_prediction = inference(\n oof_ids,\n oof_texts,\n trainer=predict_trainer,\n model=model,\n tokenizer_name=CFG.tokenizer.name,\n tokenizer_max_length=CFG.tokenizer.max_length,\n loader_params=CFG.loader.test,\n )\n\n oof_df = pd.concat([oof_df, oof_prediction], axis=0)\n\n del predict_trainer, model, oof_ids, oof_texts\n gc.collect()\n torch.cuda.empty_cache()\n pl.utilities.memory.garbage_collection_cuda()\n\n oof_df = oof_df.groupby(\"id\").mean().reset_index(drop=False)\n oof_df.sort_values(\"id\", inplace=True)\n oof_df.reset_index(drop=True, inplace=True)\n oof_df.to_csv(\"oof.csv\", index=False)\n\n validation_score = mean_squared_error(\n train_df[\"target\"], oof_df[\"target\"], squared=False\n )\n print(f\"validation score: {validation_score}\")\n\n plot_dist(\n train_df[\"target\"],\n oof_df[\"target\"],\n filename=\"oof_dist.png\"\n )\n\n MLFLOW_LOGGER.log_hyperparams(CFG)\n open(\"config.yaml\", \"w\").write(OmegaConf.to_yaml(CFG))\n\n MLFLOW_LOGGER.log_metrics({\"validation_score\": validation_score})\n if globals().get(\"__file__\"):\n MLFLOW_LOGGER.experiment.log_artifact(MLFLOW_LOGGER._run_id, __file__)\n MLFLOW_LOGGER.experiment.log_artifact(MLFLOW_LOGGER._run_id, \"config.yaml\")\n MLFLOW_LOGGER.experiment.log_artifact(MLFLOW_LOGGER._run_id, \"oof.csv\")\n MLFLOW_LOGGER.experiment.log_artifact(\n MLFLOW_LOGGER._run_id, \"oof_dist.png\")\n for fold in range(CFG.training.n_fold):\n MLFLOW_LOGGER.experiment.log_artifact(\n MLFLOW_LOGGER._run_id, f\"training_curve_fold{fold}.png\"\n )\n MLFLOW_LOGGER.experiment.log_artifact(\n MLFLOW_LOGGER._run_id, f\"lr_scheduler_fold{fold}.png\"\n )\n\n # inference check\n CHECKPOINT_PATHS = glob.glob(\n os.path.join(\n CFG.log.mlflow.save_dir,\n MLFLOW_LOGGER.experiment_id,\n MLFLOW_LOGGER._run_id,\n \"checkpoints\",\n \"*.ckpt\"\n )\n )\n\n # check inference main\n inference_main(CFG, CHECKPOINT_PATHS)\n\n\nif __name__ == \"__main__\":\n CFG = OmegaConf.load(\n 
\"/workspaces/commonlitreadabilityprize/config/sentence_config.yaml\"\n )\n main(CFG)\n\n\n# %%\n","sub_path":"src/sentence_work.py","file_name":"sentence_work.py","file_ext":"py","file_size_in_byte":23167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"398664828","text":"# -*- coding:utf-8 -*-\nimport os\nimport time\nfrom PIL import Image\nfrom ..models import DBSession\nfrom ..models.picture import Picture\nfrom .. import ApplicationPath\n\n\nclass ImageManager:\n image = None\n thumbnail = None\n\n @classmethod\n def build(cls, raw_image):\n ins = cls()\n ins.image = ins._resize(raw_image, 3072)\n ins.thumbnail = ins._resize(ins.image, 512)\n return ins\n\n @classmethod\n def create(cls, file, filename):\n ex = Picture.PictureExtension.get_from_filename(filename)\n if ex is None:\n return None\n tmp_path = os.path.join(ApplicationPath, \"tmp\", str(time.time()))\n with open(tmp_path, 'wb') as f:\n f.write(file.read())\n raw_img = Image.open(tmp_path)\n image = cls.build(raw_img)\n picture = Picture.new(ex)\n image._save(picture)\n DBSession.add(picture)\n DBSession.merge(picture)\n return picture\n\n @classmethod\n def delete_image(cls, picture):\n os.remove(picture.picture_path)\n os.remove(picture.thumbnail_path)\n DBSession.delete(picture)\n\n @classmethod\n def _resize(cls, image, longer_side_max_px):\n image = image.copy()\n size = image.size\n orientation = int(size[0] > size[1])\n if size[1 - orientation] > longer_side_max_px:\n new_size = list(size)\n new_size[1 - orientation] = longer_side_max_px\n new_size[orientation] = int(size[orientation] * size[1 - orientation] / longer_side_max_px)\n image.thumbnail(tuple(new_size))\n return image\n\n def _save(self, picture):\n self.image.save(picture.picture_path)\n self.thumbnail.save(picture.thumbnail_path)\n","sub_path":"shoyu/services/image_manager.py","file_name":"image_manager.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"461790392","text":"import numpy as np\nimport cv2\n\n\nLINE_LENGTH = 5000\n\n\ndef show(i):\n cv2.imshow('image', i)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef add_lines(lines, i):\n for line in lines:\n rho = line[0][0]\n theta = line[0][1]\n\n a = np.cos(theta)\n b = np.sin(theta)\n\n x0 = a*rho\n y0 = b*rho\n x1 = int(x0 + LINE_LENGTH*(-b))\n y1 = int(y0 + LINE_LENGTH*(a))\n x2 = int(x0 - LINE_LENGTH*(-b))\n y2 = int(y0 - LINE_LENGTH*(a))\n\n cv2.line(i, (x1,y1), (x2,y2), (0,0,255), 1)\n\n\nimg = cv2.imread('design1.png', 1)\ngrey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ncorners = cv2.goodFeaturesToTrack(grey, 50, 0.01, 100)\n\nfor i in corners:\n x,y = i.ravel()\n cv2.circle(img, (x,y), 3, (0,0,255), -1)\n\nshow(img)","sub_path":"cornerDetection.py","file_name":"cornerDetection.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"255047246","text":"import sys\nfrom math import pi, sin, cos, log\n\ndef rad(d): return (d/360.0) * 2 * pi\n\n# format: lat,lon\ndef project(p0,l0,v=False):\n a = 6378206.4 # m\n e = 0.0822719\n e_sq = 0.00676866\n \n # standard calcs\n P0 = rad(p0) \n L0 = rad(l0)\n\n # reference latitudes\n p1,p2 = 29.5, 45.5\n if v: print('p1: %.1f' % p1)\n if v: print('p2: %.1f' % p2)\n P1,P2 = rad(p1),rad(p2)\n \n S1 = sin(P1)\n S2 = sin(P2)\n C1 = cos(P1)\n C2 = cos(P2)\n \n # ------\n \n def get_q(p):\n P = rad(p)\n SP = 
sin(P)\n f = e * SP\n \n g1 = 1 - e_sq\n g2 = SP/(1-f**2)\n g3 = 1.0/(2*e)\n g4 = log((1 - f)/(1 + f))\n return g1 * (g2 - (g3 * g4))\n \n q0 = get_q(p0)\n q1 = get_q(p1)\n q2 = get_q(p2)\n \n if v: print('q0: %.7f' % q0)\n if v: print('q1: %.7f' % q1)\n if v: print('q2: %.7f' % q2)\n \n def get_m(p):\n P = rad(p)\n SP = sin(P)\n f = (1 - e_sq * SP**2)**0.5\n return cos(P)/f\n \n m1 = get_m(p1)\n m2 = get_m(p2)\n \n if v: print('m1: %.7f' % m1)\n if v: print('m2: %.7f' % m2)\n\n n = (m1**2 - m2**2)/(q2 - q1)\n if v: print('n: %.7f' % n)\n \n C = m1**2 + n * q1\n if v: print('C: %.7f' % C)\n \n def rho(q):\n part = (C - (n * q))**0.5\n return a * part / n\n \n R0 = rho(q0)\n if v: print('R0: %.1f' % R0)\n \n # specific calcs\n def f(p,l):\n P = rad(p)\n q = get_q(p)\n if v: print('q: %.7f' % q)\n \n R = rho(q)\n if v: print('R: %.1f' % R)\n \n theta = n * (l - l0)\n if v: print('t: %.7f' % theta)\n T = rad(theta)\n \n x = R * sin(T)\n y = R0 - R * cos(T)\n return x,y\n return f\n\ndef test1():\n print('test1')\n p0 = 23\n l0 = -96\n print('p0: %.1f' % p0)\n print('l0: %.1f' % l0)\n \n g = project(p0,l0)\n \n p = 35\n l = -75\n print('p: %.1f' % p)\n print('l: %.1f' % l)\n\n x,y = g(p,l)\n \n print('x: %.1f' % x)\n print('y: %.1f' % y)\n\nif __name__ == \"__main__\":\n test1()\n \n ","sub_path":"data/ellipsoid.py","file_name":"ellipsoid.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"526157120","text":"from PyQt5 import QtOpenGL, QtWidgets\nimport ModernGL as GL\nimport struct, time\n\ncontext = {\n\t'width' : 800,\n\t'height' : 600,\n\n\t'dmx' : 0.0,\n\t'dmy' : 0.0,\n\t'ds' : 0.0,\n\n\t'mx': 0,\n\t'my' : 0,\n\t's' : -1,\n}\n\nclass QGLControllerWidget(QtOpenGL.QGLWidget):\n\tdef __init__(self, format = None):\n\t\tsuper(QGLControllerWidget, self).__init__(format, None)\n\n\tdef initializeGL(self):\n\t\ttry:\n\t\t\tGL.InitializeModernGL()\n\t\t\tGL.Viewport(0, 0, context['width'], context['height'])\n\n\t\t\tvert = GL.NewVertexShader('''\n\t\t\t\t#version 330\n\n\t\t\t\tuniform vec2 pos;\n\t\t\t\tuniform float zoom;\n\n\t\t\t\tin vec2 vert;\n\t\t\t\tout vec2 textcoord;\n\n\t\t\t\tvoid main() {\n\t\t\t\t\tgl_Position = vec4(vert, 0.0, 1.0);\n\t\t\t\t\ttextcoord = ((vert + pos) * zoom) / 2.0 + vec2(0.5, 0.5);\n\t\t\t\t}\n\t\t\t''')\n\n\t\t\tfrag = GL.NewFragmentShader('''\n\t\t\t\t#version 330\n\t\t\t\t\n\t\t\t\tin vec2 textcoord;\n\t\t\t\tout vec4 color;\n\n\t\t\t\tuniform int iter;\n\n\t\t\t\tvoid main() {\n\t\t\t\t\tvec2 z = vec2(3.0 * (textcoord.x - 0.5), 2.0 * (textcoord.y - 0.5));\n\t\t\t\t\tvec2 c = vec2(0.0, 1.0);\n\n\t\t\t\t\tint i;\n\t\t\t\t\tfor(i = 0; i < iter; i++) {\n\t\t\t\t\t\tfloat x = (z.x * z.x - z.y * z.y) + c.x;\n\t\t\t\t\t\tfloat y = (z.y * z.x + z.x * z.y) + c.y;\n\t\t\t\t\t\tif ((x * x + y * y) > 4.0) break;\n\t\t\t\t\t\tz.x = x;\n\t\t\t\t\t\tz.y = y;\n\t\t\t\t\t}\n\n\t\t\t\t\tfloat cm = fract((i == iter ? 
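# The fragment shader below iterates z <- z^2 + c with fixed c = (0, 1) and
# colours by escape time. The same escape-time loop in plain Python for one
# sample point (iteration cap of 100 matches the shader's "iter" uniform):
def julia_escape_time(zx: float, zy: float, max_iter: int = 100) -> int:
    cx, cy = 0.0, 1.0  # the shader's fixed c
    for i in range(max_iter):
        zx, zy = zx * zx - zy * zy + cx, 2.0 * zx * zy + cy
        if zx * zx + zy * zy > 4.0:
            return i
    return max_iter

print(julia_escape_time(0.3, -0.2))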
0.0 : float(i)) * 10 / iter);\n\t\t\t\t\tcolor = vec4(fract(cm + 0.0 / 3.0), fract(cm + 1.0 / 3.0), fract(cm + 2.0 / 3.0), 1.0);\n\t\t\t\t}\n\t\t\t''')\n\n\t\t\tprog, iface = GL.NewProgram([vert, frag])\n\t\t\tcontext['pos'] = iface['pos']\n\t\t\tcontext['zoom'] = iface['zoom']\n\n\t\t\tvbo = GL.NewVertexBuffer(struct.pack('8f', -1.0, -1.0, -1.0, 1.0, 1.0, -1.0, 1.0, 1.0))\n\t\t\tcontext['vao'] = GL.NewVertexArray(prog, vbo, '2f', ['vert'])\n\n\t\t\tGL.SetUniform(iface['iter'], 100)\n\t\t\t\n\t\texcept GL.Error as error:\n\t\t\tprint(error)\n\t\t\texit(1)\n\n\tdef paintGL(self):\n\t\tz = 0.5 ** context['s']\n\t\tcontext['mx'] += context['dmx'] * z\n\t\tcontext['my'] += context['dmy'] * z\n\t\tcontext['s'] += context['ds']\n\t\tGL.Clear(240, 240, 240)\n\n\t\tGL.SetUniform(context['pos'], context['mx'] / z, context['my'] / z)\n\t\tGL.SetUniform(context['zoom'], z)\n\t\tGL.RenderTriangleStrip(context['vao'], 4)\n\t\tself.update()\n\n\tdef keyPressEvent(self, event):\n\t\tif event.key() == ord('D'):\n\t\t\tcontext['dmx'] += 0.01\n\t\tif event.key() == ord('A'):\n\t\t\tcontext['dmx'] -= 0.01\n\t\tif event.key() == ord('W'):\n\t\t\tcontext['dmy'] += 0.01\n\t\tif event.key() == ord('S'):\n\t\t\tcontext['dmy'] -= 0.01\n\t\tif event.key() == ord('Q'):\n\t\t\tcontext['ds'] += 0.01\n\t\tif event.key() == ord('E'):\n\t\t\tcontext['ds'] -= 0.01\n\n\tdef keyReleaseEvent(self, event):\n\t\tif event.key() == ord('D'):\n\t\t\tcontext['dmx'] -= 0.01\n\t\tif event.key() == ord('A'):\n\t\t\tcontext['dmx'] += 0.01\n\t\tif event.key() == ord('W'):\n\t\t\tcontext['dmy'] -= 0.01\n\t\tif event.key() == ord('S'):\n\t\t\tcontext['dmy'] += 0.01\n\t\tif event.key() == ord('Q'):\n\t\t\tcontext['ds'] -= 0.01\n\t\tif event.key() == ord('E'):\n\t\t\tcontext['ds'] += 0.01\n\nclass GLCanvas(QtWidgets.QMainWindow):\n\n\tdef keyPressEvent(self, event):\n\t\tself.widget.keyPressEvent(event)\n\n\tdef keyReleaseEvent(self, event):\n\t\tself.widget.keyReleaseEvent(event)\n\n\tdef __init__(self, parent = None):\n\t\tsuper(GLCanvas, self).__init__(parent)\n\n\t\tfmt = QtOpenGL.QGLFormat()\n\t\tfmt.setVersion(3, 3)\n\t\tfmt.setProfile(QtOpenGL.QGLFormat.CoreProfile)\n\t\tfmt.setSampleBuffers(True)\n\n\t\tself.setFixedSize(context['width'], context['height'])\n\t\tself.widget = QGLControllerWidget(fmt)\n\t\tself.setCentralWidget(self.widget)\n\t\tself.show()\n\napp = QtWidgets.QApplication([])\nwindow = GLCanvas()\nwindow.show()\napp.exec_()\n","sub_path":"Examples/PyQt5/JuliaFractal-Zoom.py","file_name":"JuliaFractal-Zoom.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"440624158","text":"from handy.accountDetails import AccountDetail\nfrom handy.useful_functions import grab_username_from_email\nfrom pages.stark import Stark\n\nnot_okay = True\nemail_id=\"\"\nwhile not_okay:\n email_id = input(\"Enter your email-ID only gmail : \")\n if email_id.split('@')[1] == 'gmail.com':\n print(\"Thank you!\")\n not_okay = False\nusername = grab_username_from_email(email_id)\naccountdetail = AccountDetail(get_state=\"ohio\")\nstudent_info = accountdetail.get_student_info()\nprint(student_info)\nstark = Stark(username, student_info)\nstark.start_process()\n","sub_path":"run_me.py","file_name":"run_me.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"185744013","text":"from django.shortcuts import render, get_object_or_404\nfrom django.views.generic 
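# paintGL above scales the pan deltas by the current zoom factor z = 0.5**s so
# the apparent pan speed stays constant while zooming in. The state update in
# isolation, with illustrative starting values:
state = {"mx": 0.0, "my": 0.0, "s": -1.0, "dmx": 0.01, "dmy": 0.0, "ds": 0.02}

def step(st):
    z = 0.5 ** st["s"]           # zoom factor; halves each time s grows by 1
    st["mx"] += st["dmx"] * z    # pan distance shrinks as we zoom in
    st["my"] += st["dmy"] * z
    st["s"] += st["ds"]

step(state)
print(state)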
import ListView, DetailView, CreateView, UpdateView, DeleteView\nfrom .models import Post, Category, PostComment, Tag, PostMedia, UserProfile\nfrom .forms import PostForm, EditPostForm, NewPostForm\nfrom django.urls import reverse_lazy, reverse\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nimport json\n# Create your views here.\n\nclass HomeView(ListView):\n model = Post\n template_name = \"home.html\"\n ordering = ['-id']\n paginate_by = 5 # for pagination\n context_object_name = 'posts' # for pagination\n \n\n def get_context_data(self, *args, **kwargs):\n cat_menu = Category.objects.all().values('name', 'slug')\n context = super(HomeView, self).get_context_data(*args, **kwargs)\n context[\"cat_menu\"] = cat_menu\n return context\n\nclass ArticleDetailView(DetailView):\n model = Post\n template_name = \"article_detail.html\"\n \n def get_context_data(self, *args, **kwargs):\n cat_menu = Category.objects.all().values('name', 'slug')\n context = super(ArticleDetailView, self).get_context_data(*args, **kwargs)\n context[\"cat_menu\"] = cat_menu\n\n # Get the total likes of the post\n thisArticle = get_object_or_404(Post, id=self.kwargs['pk'])\n total_likes = thisArticle.totalLikes()\n context[\"total_likes\"] = total_likes\n\n liked = False\n if thisArticle.likes.filter(id=self.request.user.id).exists():\n liked = True\n context[\"liked\"] = liked\n\n return context\n\nclass addPostView(CreateView):\n model = Post\n template_name = \"new_post.html\"\n form_class = NewPostForm\n #fields = '__all__'\n #fields = ('title', 'body')\n\n def form_valid(self, form):\n user = self.request.user\n tags_obj = []\n files_obj = []\n postmedias = self.request.FILES.getlist('postmedias')\n title = form.cleaned_data.get('title')\n category = form.cleaned_data.get('category')\n body = form.cleaned_data.get('body')\n tags_list = list(form.cleaned_data.get('tags').split(','))\n\n for tag in tags_list:\n t, created = Tag.objects.get_or_create(title=tag.strip())\n tags_obj.append(t)\n\n for file in postmedias:\n file_instance = PostMedia(file=file, user=self.request.user)\n file_instance.save()\n files_obj.append(file_instance)\n\n post, created = Post.objects.update_or_create(author=user, title=title, category=category, body=body)\n post.tags.set(tags_obj)\n post.postmedias.set(files_obj)\n\n post.save() \n return HttpResponseRedirect(reverse_lazy('Home'))\n\ndef newPostView(request):\n \n user = request.user\n tags_obj = []\n files_obj = []\n \n if request.method == 'POST':\n form = NewPostForm(request.POST, request.FILES)\n if form.is_valid():\n\n postmedias = self.request.FILES.getlist('postmedias')\n title = form.cleaned_data.get('title')\n category = form.cleaned_data.get('category')\n body = form.cleaned_data.get('body')\n tags_list = list(form.cleaned_data.get('tags').split(','))\n\n for tag in tags_list:\n t, created = Tag.objects.get_or_create(title=tag.strip())\n tags_obj.append(t)\n\n for file in postmedias:\n file_instance = PostMedia(file=file, user=self.request.user)\n file_instance.save()\n files_obj.append(file_instance)\n\n post, created = Post.objects.update_or_create(author=user, title=title, category=category, body=body)\n post.tags.set(tags_obj)\n post.postmedias.set(files_obj)\n\n post.save() \n return HttpResponseRedirect(reverse_lazy('Home'))\n else:\n form = NewPostForm()\n\n context = {\n 'form': form\n }\n \n return render(request, 'new_post.html', context)\n\n \nclass 
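# addPostView above splits a comma-separated tag line and materialises each
# tag with get_or_create. (Note the function-based newPostView above still
# refers to self.request, left over from the class-based version; a plain view
# must use its request argument.) The tag normalisation, framework-free and
# with case-insensitive de-duplication added:
def parse_tags(tag_line: str) -> list:
    seen, tags = set(), []
    for raw in tag_line.split(","):
        name = raw.strip()
        if name and name.lower() not in seen:
            seen.add(name.lower())
            tags.append(name)
    return tags

assert parse_tags("django, Python ,django,") == ["django", "Python"]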
updatePostView(UpdateView):\n model = Post\n template_name = \"update_post.html\"\n form_class = NewPostForm\n #fields = '__all__'\n #fields = ('title', 'body')\n\n def get_context_data(self, **kwargs):\n context = super(updatePostView, self).get_context_data(**kwargs)\n # Get the total likes of the post\n thisArticle = get_object_or_404(Post, id=self.kwargs['pk'])\n tags_line = thisArticle.tag_str()\n context['form'] = self.form_class(instance=thisArticle, initial={'tags':tags_line})\n context['thisPost'] = thisArticle\n\n return context\n\n def form_valid(self, form):\n user = self.request.user\n tags_obj = []\n files_obj = []\n\n postmedias = self.request.FILES.getlist('postmedias')\n title = form.cleaned_data.get('title')\n category = form.cleaned_data.get('category')\n body = form.cleaned_data.get('body')\n tags_list = list(form.cleaned_data.get('tags').split(','))\n\n for tag in tags_list:\n t, created = Tag.objects.get_or_create(title=tag.strip())\n tags_obj.append(t)\n\n post, created = Post.objects.update_or_create(id=self.kwargs['pk'])\n post.author = user\n\n for file in post.postmedias.all():\n files_obj.append(file)\n \n for file in postmedias:\n file_instance = PostMedia(file=file, user=self.request.user)\n file_instance.save()\n files_obj.append(file_instance)\n\n #post.header_image = header_image\n #if post.header_image == False:\n # post.header_image = None\n\n post.title = title\n post.category = category\n post.body = body\n post.tags.set(tags_obj)\n post.postmedias.set(files_obj)\n\n post.save() \n return HttpResponseRedirect(reverse_lazy('Home'))\n\n\n \n\nclass deletePostView(DeleteView):\n model = Post\n template_name = \"delete_post.html\"\n success_url = reverse_lazy('Home')\n\n\nclass addCategoryView(CreateView):\n model = Category\n template_name = \"add_category.html\"\n #form_class = PostForm\n #fields = '__all__'\n fields = ('name',)\n\ndef categoryPostView(request, cats):\n cats_list = Category.objects.filter(slug=cats).values_list('name')\n cats_name = cats_list[0][0]\n\n post_full = Post.objects.filter(category=cats_name).order_by('-post_date')\n cat_menu = Category.objects.all().values('name', 'slug')\n \n #numbers_list = range(1, 1000)\n page = request.GET.get('page', 1)\n paginator = Paginator(post_full, 5)\n try:\n post_list = paginator.page(page)\n except PageNotAnInteger:\n post_list = paginator.page(1)\n except EmptyPage:\n post_list = paginator.page(paginator.num_pages)\n\n return render(request, 'category_post.html', {'post_list': post_list, 'cats':cats_name, 'cat_menu':cat_menu})\n\ndef likeView(request, pk):\n post = get_object_or_404(Post, id=request.POST.get('post_id'))\n\n liked = False\n if post.likes.filter(id = request.user.id).exists():\n liked = False\n post.likes.remove(request.user)\n else:\n liked = True\n post.likes.add(request.user)\n return HttpResponseRedirect(reverse('Article_Detail', args=[str(pk)]))\n\n\ndef AddPostCommentView(request, pk):\n \n post = get_object_or_404(Post, id=request.POST.get('post_id')) \n postcomment, created = PostComment.objects.update_or_create(post=post, commenter=request.user, comment=request.POST.get('postcomment'))\n postcomment.save()\n\n return HttpResponseRedirect(reverse('Article_Detail', args=[str(pk)]))\n\n\ndef like_button(request):\n if request.method ==\"POST\":\n if request.POST.get(\"operation\") == \"like_submit\" and request.is_ajax():\n like_post_id = request.POST.get(\"post_id\",None)\n post_id=like_post_id.split(\"_\")[1]\n post = get_object_or_404(Post, id=post_id)\n if 
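# categoryPostView below wraps Paginator.page() in the standard guard for
# non-integer and out-of-range page numbers. The guard as a reusable helper:
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger

def safe_page(objects, page, per_page=5):
    paginator = Paginator(objects, per_page)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)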
post.likes.filter(id = request.user.id).exists():\n liked = False\n post.likes.remove(request.user)\n else:\n liked = True\n post.likes.add(request.user)\n\n total_likes = post.totalLikes()\n \n ctx={\"likes_count\":total_likes,\"liked\":liked,\"post_id\":like_post_id}\n return HttpResponse(json.dumps(ctx), content_type='application/json')\n\ndef delimg_button(request):\n if request.method ==\"POST\":\n if request.POST.get(\"operation\") == \"delimg_submit\" and request.is_ajax():\n delimg_post_id = request.POST.get(\"post_id\",None)\n postmedia_id=delimg_post_id.split(\"_\")[1]\n postmedia = get_object_or_404(PostMedia, id=postmedia_id)\n postmedia.delete()\n \n ctx={\"postmedia_id\":postmedia_id}\n return HttpResponse(json.dumps(ctx), content_type='application/json')\n\n\ndef HomePageView(request, pk): \n post_full = Post.objects.filter(author=pk).order_by('-post_date')\n cat_menu = Category.objects.all().values('name', 'slug')\n\n userprofile = UserProfile.objects.get(user=pk)\n \n #numbers_list = range(1, 1000)\n page = request.GET.get('page', 1)\n paginator = Paginator(post_full, 5)\n\n try:\n post_list = paginator.page(page)\n except PageNotAnInteger:\n post_list = paginator.page(1)\n except EmptyPage:\n post_list = paginator.page(paginator.num_pages)\n\n return render(request, 'homepage.html', {'post_list': post_list, 'cat_menu':cat_menu, 'userprofile':userprofile, })\n\n\n","sub_path":"src/familypost/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"256975805","text":"import contextlib\nimport pathlib\nimport pickle\nfrom typing import Dict, Iterable, Iterator, List, NamedTuple, Optional, Tuple, \\\n Union\nimport warnings\n\nimport atomicwrites\n\nfrom . import pruning\nfrom .ops import Schedule\nfrom .specs import Spec\n\n\nclass CachedSchedule(NamedTuple):\n \"\"\"A container for schedules stored in the cache.\n\n Stores, along with the schedule itself, the cost of the schedule.\n \"\"\"\n\n schedule: Schedule\n cost: int\n\n\nclass _Rect(NamedTuple):\n \"\"\"Stores the best schedule for a region from its used memory to `caps`.\n\n Interpret a _Rect as a record of the best schedule that exists up to `caps`.\n That schedule is no longer the best as soon as any memory capacity is above\n its corresponding level in `caps` or below the memory used at that level by\n the schedule.\n \"\"\"\n\n spec: Spec\n schedule: Optional[CachedSchedule]\n caps: Tuple[int, ...]\n\n @property\n def peak_memory(self) -> Iterable[int]:\n \"\"\"Returns an iterable of peak memory used by the nested schedule.\n\n This is just a convenience accessor.\n \"\"\"\n assert self.schedule is not None\n return iter(self.schedule.schedule.peak_memory)\n\n\nclass ScheduleCache:\n _rects: Dict[Spec, List[_Rect]]\n\n def __init__(self):\n self._rects = {}\n\n def get(\n self, spec: Spec, memory_limits: pruning.MemoryLimits\n ) -> Optional[CachedSchedule]:\n \"\"\"Returns a CachedSchedule or None if no schedule exists below given caps.\n\n Raises KeyError if neither a schedule nor the fact that no schedule\n exists below that cap exists.\n \"\"\"\n if not isinstance(memory_limits, pruning.StandardMemoryLimits):\n # TODO: Add support for PipelineChildMemoryLimits\n warnings.warn(\n \"ScheduleCache only supports StandardMemoryLimits. 
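# ScheduleCache.get (continued below) accepts a cached _Rect when every memory
# level of the query sits between the schedule's peak usage and the rect's
# recorded caps. The interval test in isolation:
def rect_covers(peak, query, caps):
    return all(a <= q <= b for a, q, b in zip(peak, query, caps))

assert rect_covers((2, 4), (3, 8), (8, 8))
assert not rect_covers((2, 4), (1, 8), (8, 8))  # below peak usage: miss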
Queries with\"\n \" other MemoryLimits implementations always miss.\"\n )\n raise KeyError(f\"'{str(spec)}'\")\n\n memory_caps = memory_limits.available\n for rect in self._rects[spec]:\n if rect.schedule is None:\n if all(q <= b for q, b in zip(memory_caps, rect.caps)):\n return None\n else:\n if all(\n a <= q <= b\n for a, q, b in zip(rect.peak_memory, memory_caps, rect.caps)\n ):\n return rect.schedule\n raise KeyError(f\"'{str(spec)}'\")\n\n def put(\n self,\n spec: Spec,\n schedule: Optional[CachedSchedule],\n memory_limits: pruning.MemoryLimits,\n ) -> None:\n assert schedule is None or spec == schedule.schedule.spec\n\n if not isinstance(memory_limits, pruning.StandardMemoryLimits):\n # TODO: Add support for PipelineChildMemoryLimits\n warnings.warn(\n \"ScheduleCache only supports StandardMemoryLimits. Puts with\"\n \" other MemoryLimits implementations always miss.\"\n )\n return\n\n memory_caps = memory_limits.available\n assert schedule is None or all(\n m <= c for m, c in zip(schedule.schedule.peak_memory, memory_caps)\n )\n rects = self._rects.setdefault(spec, [])\n\n for idx in range(len(rects)):\n if schedule is None:\n if rects[idx].schedule is None:\n rects[idx] = _Rect(\n spec,\n None,\n tuple(max(a, b) for a, b in zip(memory_caps, rects[idx].caps)),\n )\n return\n else:\n if rects[idx].schedule is None:\n continue\n if all(\n a <= b <= c\n for a, b, c in zip(\n rects[idx].peak_memory,\n schedule.schedule.peak_memory,\n rects[idx].caps,\n )\n ):\n rects[idx] = _Rect(\n spec,\n schedule,\n tuple(max(a, b) for a, b in zip(memory_caps, rects[idx].caps)),\n )\n return\n # TODO: Assert that there is at most one intersection\n\n # If we haven't returned at this point, then we didn't find a _Rect to\n # update, so add one.\n rects.append(_Rect(spec, schedule, memory_caps))\n\n def update(self, other: \"ScheduleCache\") -> None:\n for spec, rect_schedule, limits in other:\n self.put(spec, rect_schedule, limits)\n\n def specs(self) -> Iterable[Spec]:\n yield from self._rects.keys()\n\n def __iter__(\n self,\n ) -> Iterator[Tuple[Spec, Optional[CachedSchedule], pruning.MemoryLimits]]:\n for spec, rects in self._rects.items():\n for rect in rects:\n assert rect.spec == spec\n yield spec, rect.schedule, pruning.StandardMemoryLimits(rect.caps)\n\n\n@contextlib.contextmanager\ndef persistent_cache(path: Optional[Union[str, pathlib.Path]], save: bool = True):\n if path is None:\n yield ScheduleCache()\n return\n\n if isinstance(path, str):\n path = pathlib.Path(path)\n\n if path.exists():\n if not path.is_file():\n # TODO: Is ValueError the correct exception here?\n raise ValueError(f\"Expected a path to a file; got: {str(path)}\")\n with path.open(mode=\"rb\") as fo:\n cache = pickle.load(fo)\n else:\n cache = ScheduleCache()\n\n try:\n yield cache\n finally:\n if save:\n with atomicwrites.atomic_write(path, mode=\"wb\", overwrite=True) as fo:\n pickle.dump(cache, fo)\n","sub_path":"morello/search_cache.py","file_name":"search_cache.py","file_ext":"py","file_size_in_byte":5757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"277327934","text":"from django.http import HttpRequest\nfrom django.shortcuts import render, redirect\n\nfrom Models.Grado.FormsG import FormularioGrado\nfrom Models.Grado.models import Grado\n\nclass FormularioGradoViews(HttpRequest):\n\n # GRADO\n\n def indexg(request):\n grado = FormularioGrado()\n return render(request, \"GradoIndex.html\", {\"form\": grado})\n\n def procesar_formulariog(request):\n grado 
= FormularioGrado(request.POST)\n if grado.is_valid():\n grado.save()\n grado = FormularioGrado()\n return render(request, \"GradoIndex.html\", {\"form\": grado, \"mensaje\": 'OK'})\n\n def listar_grados(request):\n grados = Grado.objects.all()\n return render(request, \"ListaGrados.html\", {\"lb_grados\": grados})\n\n def eliminarG(request, id):\n Grado.objects.filter(id_grado=id).delete()\n return redirect(to=\"listarGrados\")\n\n def modificarG(request, id):\n MODG = Grado.objects.get(id_grado=id)\n data = {\n 'form': FormularioGrado(instance=MODG)\n }\n if request.method == 'POST':\n formulario = FormularioGrado(data=request.POST, instance=MODG)\n if formulario.is_valid():\n formulario.save()\n data['mensaje'] = \"Se ha actualizado el registro.\"\n data['form'] = formulario\n return render(request, 'Modificar_grado.html', data)","sub_path":"Models/Grado/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"238666231","text":"\"\"\"A class for the Lemke Howson algorithm\"\"\"\nimport warnings\nfrom itertools import cycle\n\n\nimport numpy as np\n\nfrom typing import List, Tuple, Union, Dict, Sequence, Set, Any\n\nfrom nashpy.integer_pivoting import (\n make_tableau,\n non_basic_variables,\n pivot_tableau,\n)\n\n\ndef shift_tableau(tableau: np.ndarray, shape: Tuple[int, int, int]) -> np.ndarray:\n \"\"\"\n Shift a tableau to ensure labels of pairs of tableaux coincide\n\n Parameters\n ----------\n tableau : array\n a tableau corresponding to a vertex of a polytope.\n shape : tuple\n the required shape of the tableau\n\n Returns\n -------\n array\n The shifted tableau\n \"\"\"\n return np.append(\n np.roll(tableau[:, :-1], shape[0], axis=1),\n np.ones((shape[0], 1)),\n axis=1,\n )\n\n\ndef tableau_to_strategy(\n tableau: np.ndarray, basic_labels: Set[Any], strategy_labels: Set[Any]\n) -> np.ndarray:\n \"\"\"\n Return a strategy vector from a tableau\n\n Parameters\n ----------\n tableau : array\n a tableau corresponding to a vertex of a polytope.\n basic_labels : set\n the set of basic labels.\n strategy_labels : set\n the set of labels that correspond to strategies.\n\n Returns\n -------\n array\n A strategy.\n \"\"\"\n vertex = []\n for column in strategy_labels:\n if column in basic_labels:\n for i, row in enumerate(tableau[:, column]):\n if row != 0:\n vertex.append(tableau[i, -1] / row)\n else:\n vertex.append(0)\n strategy = np.array(vertex)\n return strategy / sum(strategy)\n\n\ndef lemke_howson(\n A: np.ndarray, B: np.ndarray, initial_dropped_label: int = 0\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Obtain the Nash equilibria using the Lemke Howson algorithm implemented\n using integer pivoting.\n\n Algorithm implemented here is Algorithm 3.6 of [Nisan2007]_.\n\n 1. Start at the artificial equilibrium (which is fully labeled)\n 2. Choose an initial label to drop and move in the polytope for which\n the vertex has that label to the edge\n that does not share that label. (This is implemented using integer\n pivoting)\n 3. A label will now be duplicated in the other polytope, drop it in a\n similar way.\n 4. 
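# tableau_to_strategy above reads each basic variable's value as b_i / a_ij
# from its pivot row, then renormalises into a probability vector. The
# renormalisation step in isolation:
import numpy as np

def normalise(vertex):
    v = np.asarray(vertex, dtype=float)
    return v / v.sum()

print(normalise([0.2, 0.0, 0.6]))  # -> [0.25 0.   0.75]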
Repeat steps 2 and 3 until have Nash Equilibrium.\n\n Parameters\n ----------\n A : array\n The row player payoff matrix\n B : array\n The column player payoff matrix\n initial_dropped_label: int\n The initial dropped label.\n\n Returns\n -------\n Tuple\n An equilibria\n \"\"\"\n\n if np.min(A) <= 0:\n A = A + abs(np.min(A)) + 1\n if np.min(B) <= 0:\n B = B + abs(np.min(B)) + 1\n\n # build tableaux\n col_tableau = make_tableau(A)\n col_tableau = shift_tableau(col_tableau, A.shape)\n row_tableau = make_tableau(B.transpose())\n full_labels = set(range(sum(A.shape)))\n\n if initial_dropped_label in non_basic_variables(row_tableau):\n tableux = cycle((row_tableau, col_tableau))\n else:\n tableux = cycle((col_tableau, row_tableau))\n\n # First pivot (to drop a label)\n entering_label = pivot_tableau(next(tableux), initial_dropped_label)\n while (\n non_basic_variables(row_tableau).union(non_basic_variables(col_tableau))\n != full_labels\n ):\n entering_label = pivot_tableau(next(tableux), next(iter(entering_label)))\n\n row_strategy = tableau_to_strategy(\n row_tableau, non_basic_variables(col_tableau), range(A.shape[0])\n )\n col_strategy = tableau_to_strategy(\n col_tableau,\n non_basic_variables(row_tableau),\n range(A.shape[0], sum(A.shape)),\n )\n\n if row_strategy.shape != (A.shape[0],) and col_strategy.shape != (A.shape[0],):\n msg = \"\"\"The Lemke Howson algorithm has returned probability vectors of \nincorrect shapes. This indicates an error. Your game could be degenerate.\"\"\"\n\n warnings.warn(msg, RuntimeWarning)\n return row_strategy, col_strategy\n","sub_path":"src/nashpy/algorithms/lemke_howson.py","file_name":"lemke_howson.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"454644393","text":"i = 1\n\nwhile i < 5:\n print('Loop Running : Value Of i Is : {}'.format(i))\n i += 1\n\nn = int(input('Enter A Natural Number : '))\nsumm, i = 0, 1\nwhile i <= n:\n summ = summ + i\n i += 1\nprint(summ)\n\nno = int(input(\"Enter A Number Of Multiple Digits : \"))\nrem, ad = 0, 0\nwhile no>0 :\n rem = no%10\n ad = ad+rem\n no= no//10\n\nprint(ad)\n\n##Printing Count Of Characters In A String\nwhile True : # Infinite Loop : CTRL + C To Break The Loop\n my_name = input('Enter Your Name : ')\n my_name = my_name.lower()\n name_length = len(my_name)\n i = 0\n temp = ''\n while i < name_length - 1:\n if my_name[i] not in temp:\n temp = temp + my_name[i]\n\n p = my_name.count(my_name[i])\n print(f\"Count Of {my_name[i]} Is : {p} \\n\")\n i += 1\n\n\ny=5\nwhile y in range(10) :\n print(y)\n y+=1","sub_path":"learn_codes/Basics/while_loop.py","file_name":"while_loop.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"269762516","text":"#class practice\r\n\r\n\r\nclass Parrot:\r\n\r\n species = 'bird'\r\n\r\n def __init__(self, name, age):\r\n self.name = name\r\n self.age = age\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n #instantiate the parrot class\r\n blue = Parrot(\"blue\", 10)\r\n woo = Parrot(\"woo\", 10)\r\n\r\n #access the class attributes\r\n print(\"blue is a {}\".format(blue.__class__.species))\r\n print(\"woo is a {}\".format(woo.__class__.species))\r\n\r\n #access the instance attributes\r\n print(\"{} is {} year old\".format(blue.name, blue.age))\r\n print(\"{} is {} year old\".format(woo.name, 
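# A usage sketch for lemke_howson above; the 2x2 payoff matrices are
# illustrative (a coordination game) and the call prints one equilibrium:
import numpy as np

A = np.array([[3, 1], [0, 2]])
B = np.array([[2, 1], [0, 3]])
row_strategy, col_strategy = lemke_howson(A, B, initial_dropped_label=0)
print(row_strategy, col_strategy)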
woo.age))\r\n","sub_path":"exercise2.py","file_name":"exercise2.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"339755895","text":"from _global_constants import *\n## Server Info\nKAFKA_SERVER_PORT=KAFKA_SERVER + ':' + str(KAFKA_PORT)\nWEBAPP_SERVER_PORT=WEBAPP_SERVER + ':' + str(WEBAPP_PORT)\n\n## Topic constants\nMQTT_TOPIC_NAME=TOPIC_PREFIX + '-mqtt-topic'\nKAFKA_TOPIC_NAME=TOPIC_PREFIX + '-kafka-topic'\n\n## Simulator constants\nGENERATE_TIME_INTERVAL_SECS=5\n\n## Webpage\nLAST_N_MESSAGES=5\n","sub_path":"_constants.py","file_name":"_constants.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"539033360","text":"\"\"\"This program tests the hyperlinks for a given page\"\"\"\n\nfrom selenium import webdriver\nimport requests\n\nwebsite = 'https://www.section508.gov/about-us'\n\ndriver = webdriver.Chrome()\ndriver.get(website)\n\nlist_links = driver.find_elements_by_tag_name('a')\n\nissue_links = [] \nfor i in list_links:\n\tprint(i.text) \t\t\t\t\t# print link text\n\tprint(i.get_attribute('href')) \t# get urls\n\ttry:\n\t\tr = requests.get(i.get_attribute('href'))\n\t\tprint(r.status_code)\t\t# print status code\n\t\tif r.status_code is not 200:\n\t\t\tissue_links.append(i.get_attribute('href'))\n\texcept Exception as e:\n\t\tprint(f\"Unable to get status code for url: {i.get_attribute('href')}; {e}\")\n\tprint(\"*\" * 50)\n\ndriver.quit()\n\n# print list of links with issues\nprint(issue_links)\n\n\n\n\n","sub_path":"Test_HyperLinks.py","file_name":"Test_HyperLinks.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"465366630","text":"#library: https://github.com/pimoroni/mlx90640-library \nimport tkinter as tk\nimport MLX90640 as mlx\nimport math\n\n\n\n\ndef draw_rectangle(x, y, r, g, b):\n c.create_rectangle(x*10, y*10, (x*10)+10, (y*10)+10, fill=_from_rgb(r, g, b), outline='')\n \ndef _from_rgb(r, g, b):\n return \"#%02x%02x%02x\" % (r, g, b)\n\n\ndef temp_color(x, y, temp, limt_temp):\n t = temp - limt_temp\n t *= (765/ 12)\n if t >= 765:\n t = 764\n else:\n t = int(t)\n # print(\"Mapping: \"+ str(t))\n r = 0\n g = 0\n b = 0\n if t < 255:\n g = t\n b = 255\n if t >= 255 and t < 510:\n r = 255\n g = 255 - (t-255)\n if t >= 510:\n r = 255\n b = (t-510)\n draw_rectangle(x ,y , int(r), int(g), int(b))\n\nmlx.setup(32)\n\n\nwindows = tk.Tk()\nwindows.title('test')\nwindows.geometry('500x500')\n\nc = tk.Canvas(windows, width=800 , height=800)\nc.grid(row = 1, column = 0)\n\n\nlabel_frame = tk.Frame(windows)\nlabel_frame.grid(row=0, column=0, sticky='w')\nlow_label = tk.Label(label_frame, text=\"Lowest Temp: \")\nlow_label.grid(row= 0, column=0, sticky='w')\n\nhigh_label = tk.Label(label_frame, text=\"Highest Temp: \")\nhigh_label.grid(row=1, column=0, sticky='w')\n\ncenter_label = tk.Label(label_frame)\ncenter_label.grid(row=2, column=0, sticky='w')\n\ndef loop_Process():\n print(\"Scanning\")\n try: \n frame = mlx.get_frame()\n except ValueError:\n print(\"ReadError\") \n x = 0\n y = 0\n low_temp = 1000.0\n high_temp = -100\n for i in frame:\n if float(i) < low_temp:\n low_temp = i\n if float(i) > high_temp:\n high_temp = i\n\n for t in range(0, len(frame)):\n # print(frame[t])\n if math.isnan(frame[t]) == True:\n frame[t] = low_temp\n temp_color(x, y, frame[t], low_temp)\n x += 1\n if x > 
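# The link checker above tests "r.status_code is not 200"; identity comparison
# on ints is unreliable and warns on Python 3.8+, so equality is the safe
# form. A compact, timeout-guarded sketch of the same check:
import requests

def broken_links(urls, timeout=10):
    bad = []
    for url in urls:
        try:
            if requests.get(url, timeout=timeout).status_code != 200:
                bad.append(url)
        except requests.RequestException as exc:
            print(f"could not fetch {url}: {exc}")
            bad.append(url)
    return bad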
31:\n y += 1\n x = 0\n low_label.configure(text=('Lowest Temp=' + str(\"%2f\" % low_temp)))\n high_label.configure(text=('Highest Temp= '+ str(\"%2f\" % high_temp)))\n center_label.configure(text=('Center Temp= '+ str(\"%2f\" % frame[384])))\n windows.after(5, loop_Process)\n \n\nwindows.after(5, loop_Process)\n\nwindows.mainloop()\nmlx.cleanup()\n","sub_path":"Lab4/xGui.py","file_name":"xGui.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"335773181","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 14 00:48:13 2020\n\n@author: bhargav Pandya\n\"\"\"\n\n\nimport cv2\n\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nleft = cv2.CascadeClassifier('left_eye.xml')\nright = cv2.CascadeClassifier('right_eye.xml')\n\nimg = cv2.imread('C:\\\\Users\\\\bhargav Pandya\\\\Desktop\\\\Capture.PNG')\n\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\nfaces = face_cascade.detectMultiScale(gray, 1.1, 10)\nlefteye = left.detectMultiScale(gray, 1.3, 5)\nrighteye = right.detectMultiScale(gray, 1.1, 10)\n\n\n\n\nfor (x,y,w,h) in faces:\n \n\n cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)\n\n roi = img[y:y+h, x:x+w]\n \n \n lefteye = left.detectMultiScale(img, 1.3, 5)\n righteye = right.detectMultiScale(img, 1.1, 10)\n \n for (ex,ey,ew,eh) in righteye:\n cv2.rectangle(img, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)\n\n for (lx,ly,lw,lh) in lefteye:\n cv2.rectangle(img, (lx, ly), (lx+lw, ly+lh), (0, 0, 255), 2)\n\n\n#cv2.imwrite('C:\\\\Users\\\\bhargav Pandya\\\\Desktop\\\\1.1_10.PNG',img)\ncv2.imshow('img',img)\ncv2.imshow('cropped', roi)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"Cafe Application App/Face_detection.py","file_name":"Face_detection.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"513485368","text":"from mido import (\n MidiFile,\n MidiTrack,\n Message\n)\nimport sys\n\nfrom configman import (\n Namespace,\n RequiredConfig\n)\n\nclass FileStream(RequiredConfig):\n required_config = Namespace()\n required_config.add_option(\n name=\"pathname\"\n )\n\n def __init__(self, config):\n self.config = config\n self.pathname = config.pathname\n\n\nclass FileInputStream(FileStream):\n def __init__(self, config):\n super(FileInputStream, self).__init__(config)\n self.midi_file = MidiFile(self.pathname)\n\n def __iter__(self):\n for message in self.midi_file.play():\n yield message\n\n\nclass FileOutputStream(FileStream):\n def __init__(self, config):\n super(FileOutputStream, self).__init__(config)\n self.output_file = MidiFile()\n self.track = MidiTrack()\n self.output_file.tracks.append(self.track)\n\n def send(self, message):\n self.track.append(message)\n\n def close(self):\n self.output_file.save(self.pathname)\n\n\nclass StdInStream(RequiredConfig):\n required_config = Namespace()\n\n def __init__(self, config):\n self.config = config\n\n def __iter__(self):\n for line in sys.stdin:\n line = line.strip().strip(\">\")\n parts = line.split(\" \")\n message_type = parts[1]\n kwargs = {}\n for key_value in parts[2:]:\n key, value = key_value.split(\"=\")\n if value[-1] == \">\":\n value = value[:-1]\n kwargs[key] = int(value)\n yield Message(message_type, **kwargs)\n\n\nclass StdOutStream(RequiredConfig):\n required_config = Namespace()\n\n def __init__(self, config):\n self.config = config\n\n def send(self, message):\n print (repr(message))\n\n 
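# temp_color above maps a temperature offset onto a 765-step colour ramp
# (blue -> green -> red), clamped at the hot end. The mapping in isolation;
# the 12-degree span mirrors the hard-coded 765/12 scale above:
def temp_to_rgb(temp, low, span=12.0):
    t = max(0, min(764, int((temp - low) * (765 / span))))
    if t < 255:
        return (0, t, 255)
    if t < 510:
        return (255, 510 - t, 0)
    return (255, 0, t - 510)

assert temp_to_rgb(0.0, 0.0) == (0, 0, 255)
assert temp_to_rgb(100.0, 0.0) == (255, 0, 254)  # clamped hot end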
def close(self):\n pass\n","sub_path":"iostreams.py","file_name":"iostreams.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"261139709","text":"#!/usr/bin/env python2.7\nimport g\nimport f\nimport c\nimport display\nimport forces\n\nrunning=True\ncompute_times=0\ncommand=''\ntime=0\nwhile running:\n\twhile compute_times>0:\n\t\tfor i in range(len(g.particles)):\n\t\t\tg.particles[i].apply_force(forces.gravitational(g.particles[i]))\n\t\t\tfor j in range(len(g.particles)):\n\t\t\t\tif not i==j:\n\t\t\t\t\tg.particles[i].apply_force(forces.electric(g.particles[i], g.particles[j]))\n\t\t\t\t\tg.particles[i].apply_force(forces.strong(g.particles[i], g.particles[j]))\n\t\tfor particle in g.particles:\n\t\t\tparticle.update()\n\t\tcompute_times-=1\n\t\ttime+=1\n\n\tdisplay.update()\n\n\tcmd = raw_input('>>> ')\n\tif cmd!='':\n\t\tcommand=cmd\n\n\tif command=='exit':\n\t\texit()\n\telif command=='clear':\n\t\ttime=0\n\t\tdisplay.move_position=(0,0)\n\t\tg.particles=[]\n\t\tprint('Time: 0')\n\telif command=='m':\n\t\tdisplay.move()\n\telif command=='e':\n\t\tdisplay.add_electron()\n\telif command=='p':\n\t\tdisplay.add_proton()\n\telif command=='n':\n\t\tdisplay.add_neutron()\n\telse:\n\t\ttry:\n\t\t\tcompute_times=int(command)\n\t\t\tprint('Time: '+str(time+compute_times))\n\t\texcept:\n\t\t\tpass\n","sub_path":"Eden/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"344550410","text":"import sqlite3\n\nfrom dotenv import dotenv_values\n\nconfig = dotenv_values(\".env\")\nDB_LOCATION = config[\"DB_LOCATION\"]\n\nif __name__ == \"__main__\":\n # create db\n with sqlite3.connect(DB_LOCATION) as conn:\n conn.execute(\"PRAGMA foreign_keys = ON;\")\n conn.execute(\"CREATE TABLE IF NOT EXISTS profiles (\"\n \"userid integer PRIMARY KEY,\"\n \"username text NOT NULL,\"\n \"full_name character);\")\n conn.execute(\"CREATE TABLE IF NOT EXISTS posts (\"\n \"mediaid integer PRIMARY KEY,\"\n \"userid integer NOT NULL,\"\n \"FOREIGN KEY (userid)\"\n \" REFERENCES profiles (userid)\"\n \" ON DELETE CASCADE);\")\n conn.execute(\"CREATE TABLE IF NOT EXISTS likes (\"\n \"mediaid integer NOT NULL,\"\n \"userid integer NOT NULL,\"\n \"scrape_datetime text,\"\n \"UNIQUE (mediaid, userid) ON CONFLICT REPLACE,\"\n \"FOREIGN KEY (mediaid)\"\n \" REFERENCES posts (mediaid)\"\n \" ON DELETE CASCADE,\"\n \"FOREIGN KEY (userid)\"\n \" REFERENCES profiles (userid)\"\n \" ON DELETE CASCADE);\")\n conn.execute(\"CREATE TABLE IF NOT EXISTS followers (\"\n \"userid integer NOT NULL,\"\n \"follower_id integer NOT NULL,\"\n \"UNIQUE (userid, follower_id),\"\n \"FOREIGN KEY (userid)\"\n \" REFERENCES profiles (userid)\"\n \" ON DELETE CASCADE,\"\n \"FOREIGN KEY (follower_id)\"\n \" REFERENCES profiles (userid)\"\n \" ON DELETE CASCADE);\")\n","sub_path":"init_database.py","file_name":"init_database.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"79476357","text":"import logging\nfrom decimal import Decimal\n\nfrom falcon.base.order import (lots_to_units, OrderSide, OrderType, OrderPositionFill, TimeInForce,\n OrderTriggerCondition, OrderState)\nfrom falcon.base.price import pip\nfrom v20.transaction import (StopLossDetails, ClientExtensions, TakeProfitDetails, TrailingStopLossDetails,\n 
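# init_database above turns on foreign-key enforcement with a PRAGMA; sqlite3
# requires that PRAGMA once per connection, not once per database. A minimal
# check that ON DELETE CASCADE actually fires:
import sqlite3

with sqlite3.connect(":memory:") as conn:
    conn.execute("PRAGMA foreign_keys = ON;")
    conn.execute("CREATE TABLE a (id INTEGER PRIMARY KEY);")
    conn.execute("CREATE TABLE b (a_id INTEGER REFERENCES a(id) ON DELETE CASCADE);")
    conn.execute("INSERT INTO a VALUES (1);")
    conn.execute("INSERT INTO b VALUES (1);")
    conn.execute("DELETE FROM a;")
    assert conn.execute("SELECT COUNT(*) FROM b;").fetchone()[0] == 0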
LimitOrderTransaction, StopOrderTransaction)\n\nfrom .base import api\nfrom .common.convertor import get_symbol\nfrom .common.logger import log_error\nfrom .common.prints import print_orders\nfrom .common.view import print_entity\nfrom ... import config\nfrom ...base.constants import TransactionName\nfrom ...base.models import OrderBase\n\nlogger = logging.getLogger(__name__)\n\n\nclass OANDAOrderMixin(OrderBase):\n\n # order list\n def _process_orders(self, response):\n if response.status < 200 or response.status > 299:\n log_error(logger, response, 'LIST_ORDER')\n return []\n\n orders = response.get(\"orders\", \"200\")\n if len(orders) == 0:\n logger.debug(\"Account {} has no pending Orders to cancel\".format(\n self.account_id\n ))\n for order in orders:\n self.orders[order.id] = order\n\n if config.DEBUG:\n print_orders(orders)\n return orders\n\n def list_order(self, ids=None, state=None, instrument=None, count=20, beforeID=None):\n data = {}\n if ids:\n data['ids'] = str(ids)\n if state:\n data['state'] = state\n if instrument:\n data['instrument'] = instrument\n if count:\n data['count'] = str(count)\n if beforeID:\n data['beforeID'] = str(beforeID)\n response = api.order.list(self.account_id, **data)\n return self._process_orders(response)\n\n def list_pending_order(self):\n response = api.order.list_pending(self.account_id)\n return self._process_orders(response)\n\n # order creation\n def _process_order_paramters(self, **kwargs):\n data = {}\n instrument = None\n pip_unit = None\n\n if kwargs.get('instrument'):\n instrument = get_symbol(kwargs['instrument'])\n\n data['instrument'] = instrument\n\n if kwargs.get('trade_id'):\n data['tradeID'] = str(kwargs['trade_id'])\n trade = self.trades.get(data['tradeID']) or self.get_trade(data['tradeID'])\n instrument = trade.instrument\n\n if instrument:\n pip_unit = pip(instrument)\n\n if kwargs.get('lots'):\n units = lots_to_units(kwargs['lots'], kwargs.get('side') or OrderSide.BUY)\n data['units'] = str(units)\n\n if kwargs.get('type'):\n data['type'] = kwargs['type']\n\n if kwargs.get('timeInForce'):\n data['timeInForce'] = kwargs['timeInForce'] or TimeInForce.FOK\n\n if kwargs.get('priceBound'):\n data['priceBound'] = str(kwargs['priceBound'])\n\n if kwargs.get('price'):\n data['price'] = str(kwargs['price'])\n\n if kwargs.get('positionFill'):\n data['positionFill'] = kwargs['positionFill'] or OrderPositionFill.DEFAULT\n\n # The Client Extensions to update for the Order. 
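# The parameter builder here converts pip-denominated stop distances into
# price distances through Decimal(str(...)) to avoid binary-float drift. The
# conversion in isolation, assuming a 4-decimal pip such as EURUSD's:
from decimal import Decimal

def pip_distance(pips, pip_unit=Decimal("0.0001")):
    return pip_unit * Decimal(str(pips))

assert str(pip_distance(12.5)) == "0.00125"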
Do not set, modify, or\n # delete clientExtensions if your account is associated with MT4.\n if kwargs.get('client_id') or kwargs.get('client_tag') or kwargs.get('client_comment'):\n data['clientExtensions'] = ClientExtensions(id=kwargs['client_id'],\n tag=kwargs['client_tag'],\n comment=kwargs['client_comment'])\n\n if kwargs.get('trade_client_id') or kwargs.get('trade_client_tag') or kwargs.get('trade_client_comment'):\n data['tradeClientExtensions'] = ClientExtensions(id=kwargs['trade_client_id'],\n tag=kwargs['trade_client_tag'],\n comment=kwargs['trade_client_comment'])\n\n if kwargs.get('take_profit_price'):\n data['takeProfitOnFill'] = TakeProfitDetails(\n price=str(kwargs['take_profit_price']),\n clientExtensions=data.get('clientExtensions')\n )\n\n if kwargs.get('stop_loss_pip') and pip_unit:\n stop_loss_price = pip_unit * Decimal(str(kwargs['stop_loss_pip']))\n data['stopLossOnFill'] = StopLossDetails(distance=str(stop_loss_price),\n clientExtensions=data.get('clientExtensions'))\n\n if kwargs.get('stop_loss_distance'):\n data['stopLossOnFill'] = StopLossDetails(distance=str(kwargs['stop_loss_distance']),\n clientExtensions=data.get('clientExtensions'))\n\n if kwargs.get('trailing_pip'):\n trailing_distance_price = pip_unit * Decimal(str(kwargs['trailing_pip']))\n data['trailingStopLossOnFill'] = TrailingStopLossDetails(distance=str(trailing_distance_price),\n clientExtensions=data.get('clientExtensions'))\n\n if kwargs.get('trigger_condition'):\n data['triggerCondition'] = kwargs['trigger_condition'] or OrderTriggerCondition.DEFAULT\n\n if kwargs.get('gtd_time'):\n # todo confirm gtdTime format\n data['gtdTime'] = str(kwargs['gtd_time'])\n\n if kwargs.get('client_trade_id'):\n data['clientTradeID'] = kwargs['client_trade_id']\n\n if kwargs.get('guaranteed'):\n data['guaranteed'] = kwargs['guaranteed']\n\n if kwargs.get('distance'):\n data['distance'] = kwargs['distance']\n\n return data\n\n def _process_order_response(self, response, func_name, response_status=\"200\"):\n if response.status < 200 or response.status > 299:\n log_error(logger, response, func_name)\n raise Exception(response.body.get('errorMessage'))\n\n transactions = []\n trade_ids = []\n order_ids = []\n for name in TransactionName.all():\n try:\n transaction = response.get(name, response_status)\n transactions.append(transaction)\n\n to = getattr(transaction, 'tradeOpened', None)\n if to:\n trade_ids.append(to.tradeID)\n\n if isinstance(transaction, LimitOrderTransaction) or isinstance(transaction, StopOrderTransaction):\n order_ids.append(transaction.id)\n except:\n pass\n\n if trade_ids or order_ids:\n self.pull()\n\n if config.DEBUG:\n for t in transactions:\n print_entity(t, title=t.__class__.__name__)\n print('')\n return transactions\n\n def get_order(self, order_id):\n response = api.order.get(self.account_id, str(order_id))\n if response.status < 200 or response.status > 299:\n log_error(logger, response, 'GET_ORDER')\n return None\n\n order = response.get(\"order\", \"200\")\n self.orders[order.id] = order\n\n if config.DEBUG:\n print_orders([order])\n return order\n\n def limit_order(self, instrument, side, price, lots, take_profit=None, stop_loss=None, trailing_pip=None,\n **kwargs):\n timeInForce = kwargs.get('timeInForce', TimeInForce.FOK)\n positionFill = kwargs.get('positionFill', OrderPositionFill.DEFAULT)\n trigger_condition = kwargs.get('trigger_condition', OrderTriggerCondition.DEFAULT)\n priceBound = kwargs.get('priceBound', None)\n gtd_time = kwargs.get('gtd_time', None)\n order_id = 
kwargs.get('order_id', None)\n client_id = kwargs.get('client_id', None)\n client_tag = kwargs.get('client_tag', None)\n client_comment = kwargs.get('client_comment', None)\n\n data = {'instrument': instrument, 'side': side, 'lots': lots, 'type': OrderType.LIMIT,\n 'timeInForce': timeInForce,\n 'price': price, 'positionFill': positionFill, 'take_profit_price': take_profit,\n 'trigger_condition': trigger_condition, 'gtd_time': gtd_time, 'priceBound': priceBound,\n 'stop_loss_pip': stop_loss, 'trailing_pip': trailing_pip, 'client_id': client_id,\n 'client_tag': client_tag, 'client_comment': client_comment}\n kwargs = self._process_order_paramters(**data)\n\n if order_id:\n response = self.api.order.limit_replace(self.account_id, str(order_id), **kwargs)\n else:\n response = self.api.order.limit(self.account_id, **kwargs)\n\n return self._process_order_response(response, 'LIMIT_ORDER', \"201\")\n\n def stop_order(self, instrument, side, price, lots, take_profit=None, stop_loss=None, trailing_pip=None,\n **kwargs):\n timeInForce = kwargs.get('timeInForce', TimeInForce.FOK)\n positionFill = kwargs.get('positionFill', OrderPositionFill.DEFAULT)\n trigger_condition = kwargs.get('trigger_condition', OrderTriggerCondition.DEFAULT)\n priceBound = kwargs.get('priceBound', None)\n gtd_time = kwargs.get('gtd_time', None)\n order_id = kwargs.get('order_id', None)\n client_id = kwargs.get('client_id', None)\n client_tag = kwargs.get('client_tag', None)\n client_comment = kwargs.get('client_comment', None)\n\n data = {'instrument': instrument, 'side': side, 'lots': lots, 'type': OrderType.STOP,\n 'timeInForce': timeInForce,\n 'price': price, 'positionFill': positionFill, 'take_profit_price': take_profit,\n 'trigger_condition': trigger_condition, 'gtd_time': gtd_time, 'priceBound': priceBound,\n 'stop_loss_pip': stop_loss, 'trailing_pip': trailing_pip, 'client_id': client_id,\n 'client_tag': client_tag, 'client_comment': client_comment}\n kwargs = self._process_order_paramters(**data)\n\n if order_id:\n response = self.api.order.stop_replace(self.account_id, str(order_id), **kwargs)\n else:\n response = self.api.order.stop(self.account_id, **kwargs)\n\n return self._process_order_response(response, 'STOP_ORDER', \"201\")\n\n def market_order(self, instrument, side, lots, take_profit=None, stop_loss=None, trailing_pip=None, **kwargs):\n timeInForce = kwargs.get('timeInForce', TimeInForce.FOK)\n positionFill = kwargs.get('positionFill', OrderPositionFill.DEFAULT)\n priceBound = kwargs.get('priceBound', None)\n client_id = kwargs.get('client_id', None)\n client_tag = kwargs.get('client_tag', None)\n client_comment = kwargs.get('client_comment', None)\n trade_client_id = kwargs.get('trade_client_id', None)\n trade_client_tag = kwargs.get('trade_client_tag', None)\n trade_client_comment = kwargs.get('trade_client_comment', None)\n\n data = {'instrument': instrument, 'side': side, 'lots': lots, 'type': OrderType.MARKET,\n 'timeInForce': timeInForce,\n 'priceBound': priceBound, 'positionFill': positionFill, 'take_profit_price': take_profit,\n 'stop_loss_pip': stop_loss, 'trailing_pip': trailing_pip, 'client_id': client_id,\n 'client_tag': client_tag, 'client_comment': client_comment, 'trade_client_id': trade_client_id,\n 'trade_client_tag': trade_client_tag, 'trade_client_comment': trade_client_comment}\n kwargs = self._process_order_paramters(**data)\n\n response = self.api.order.market(self.account_id, **kwargs)\n\n return self._process_order_response(response, 'MARKET_ORDER', \"201\")\n\n # todo fail: 
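# A usage sketch for the order helpers above; the symbol, prices and lot size
# are illustrative stand-ins, and `account` stands for an instance of a class
# that mixes in OANDAOrderMixin (not defined in this module):
from falcon.base.order import OrderSide

transactions = account.limit_order(
    instrument="EURUSD",
    side=OrderSide.BUY,
    price=1.0850,
    lots=0.1,
    take_profit=1.0950,   # absolute price
    stop_loss=20,         # pips; converted to a price distance via pip()
    trailing_pip=15,
)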
ORDER_CANCEL,MARKET_ORDER_REJECT\n # todo success:MARKET_ORDER + ORDER_FILL\n\n def market_if_touched(self, instrument, side, price, lots, **kwargs):\n timeInForce = kwargs.get('timeInForce', TimeInForce.FOK)\n positionFill = kwargs.get('positionFill', OrderPositionFill.DEFAULT)\n trigger_condition = kwargs.get('trigger_condition', OrderTriggerCondition.DEFAULT)\n gtd_time = kwargs.get('gtd_time', None)\n order_id = kwargs.get('order_id', None)\n priceBound = kwargs.get('priceBound', None)\n take_profit = kwargs.get('take_profit', None)\n stop_loss = kwargs.get('stop_loss', None)\n trailing_pip = kwargs.get('trailing_pip', None)\n client_id = kwargs.get('client_id', None)\n client_tag = kwargs.get('client_tag', None)\n client_comment = kwargs.get('client_comment', None)\n trade_client_id = kwargs.get('trade_client_id', None)\n trade_client_tag = kwargs.get('trade_client_tag', None)\n trade_client_comment = kwargs.get('trade_client_comment', None)\n\n data = {'instrument': instrument, 'side': side, 'lots': lots, 'type': OrderType.MARKET_IF_TOUCHED,\n 'timeInForce': timeInForce, 'gtd_time': gtd_time, 'trigger_condition': trigger_condition,\n 'priceBound': priceBound, 'positionFill': positionFill, 'take_profit_price': take_profit,\n 'stop_loss_pip': stop_loss, 'trailing_pip': trailing_pip, 'client_id': client_id,\n 'client_tag': client_tag, 'client_comment': client_comment, 'trade_client_id': trade_client_id,\n 'trade_client_tag': trade_client_tag, 'trade_client_comment': trade_client_comment, 'price': price}\n kwargs = self._process_order_paramters(**data)\n\n if order_id:\n response = self.api.order.market_if_touched_replace(self.account_id, str(order_id), **kwargs)\n else:\n response = self.api.order.market_if_touched(self.account_id, **kwargs)\n\n transactions = self._process_order_response(response, 'MARKET_IF_TOUCHED', \"201\")\n\n return transactions\n\n # TP , SL and trailing SL\n\n def take_profit(self, trade_id, price, **kwargs):\n timeInForce = kwargs.get('timeInForce', TimeInForce.FOK)\n trigger_condition = kwargs.get('trigger_condition', OrderTriggerCondition.DEFAULT)\n gtd_time = kwargs.get('gtd_time', None)\n order_id = kwargs.get('order_id', None)\n client_id = kwargs.get('client_id', None)\n client_tag = kwargs.get('client_tag', None)\n client_comment = kwargs.get('client_comment', None)\n client_trade_id = kwargs.get('client_trade_id', None)\n\n data = {'price': price, 'client_trade_id': client_trade_id, 'trade_id': trade_id,\n 'type': OrderType.TAKE_PROFIT, 'timeInForce': timeInForce,\n 'trigger_condition': trigger_condition, 'gtd_time': gtd_time,\n 'client_id': client_id, 'client_tag': client_tag, 'client_comment': client_comment}\n kwargs = self._process_order_paramters(**data)\n\n if order_id:\n response = self.api.order.take_profit_replace(self.account_id, str(order_id), **kwargs)\n else:\n response = self.api.order.take_profit(self.account_id, **kwargs)\n\n transactions = self._process_order_response(response, 'TAKE_PROFIT', \"201\")\n\n return transactions\n\n def stop_loss(self, trade_id, price=None, **kwargs):\n timeInForce = kwargs.get('timeInForce', TimeInForce.FOK)\n trigger_condition = kwargs.get('trigger_condition', OrderTriggerCondition.DEFAULT)\n gtd_time = kwargs.get('gtd_time', None)\n order_id = kwargs.get('order_id', None)\n guaranteed = kwargs.get('guaranteed', None)\n client_id = kwargs.get('client_id', None)\n client_tag = kwargs.get('client_tag', None)\n client_comment = kwargs.get('client_comment', None)\n client_trade_id = 
kwargs.get('client_trade_id', None)\n\n data = {'client_trade_id': client_trade_id, 'trade_id': trade_id,\n 'price': price,\n 'type': OrderType.STOP_LOSS, 'timeInForce': timeInForce, 'guaranteed': guaranteed,\n 'trigger_condition': trigger_condition, 'gtd_time': gtd_time,\n 'client_id': client_id, 'client_tag': client_tag, 'client_comment': client_comment}\n kwargs = self._process_order_paramters(**data)\n\n if order_id:\n response = self.api.order.stop_loss_replace(self.account_id, str(order_id), **kwargs)\n else:\n response = self.api.order.stop_loss(self.account_id, **kwargs)\n\n transactions = self._process_order_response(response, 'STOP_LOSS', \"201\")\n\n return transactions\n\n def trailing_stop_loss(self, trade_id, pips, **kwargs):\n timeInForce = kwargs.get('timeInForce', TimeInForce.FOK)\n trigger_condition = kwargs.get('trigger_condition', OrderTriggerCondition.DEFAULT)\n gtd_time = kwargs.get('gtd_time', None)\n order_id = kwargs.get('order_id', None)\n client_id = kwargs.get('client_id', None)\n client_tag = kwargs.get('client_tag', None)\n client_comment = kwargs.get('client_comment', None)\n client_trade_id = kwargs.get('client_trade_id', None)\n\n data = {'trade_id': trade_id, 'client_trade_id': client_trade_id,\n 'distance': pips,\n 'type': OrderType.TRAILING_STOP_LOSS, 'timeInForce': timeInForce,\n 'trigger_condition': trigger_condition, 'gtd_time': gtd_time,\n 'client_id': client_id, 'client_tag': client_tag, 'client_comment': client_comment}\n data = self._process_order_paramters(**data)\n\n if order_id:\n response = self.api.order.trailing_stop_loss_replace(self.account_id, str(order_id), **data)\n else:\n response = self.api.order.trailing_stop_loss(self.account_id, **data)\n\n transactions = self._process_order_response(response, 'TRAILING_STOP_LOSS', \"201\")\n\n return transactions\n\n # cancel & extensions\n def cancel_order(self, order_id, **kwargs):\n response = api.order.cancel(self.account_id, str(order_id))\n\n # print(\"Response: {} ({})\".format(response.status, response.reason))\n\n if response.status < 200 or response.status > 299:\n transaction = response.get('orderCancelRejectTransaction', \"404\")\n if config.DEBUG:\n print_entity(transaction, title='Order Cancel Reject')\n raise Exception('orderCancelRejectTransaction')\n\n transaction = response.get('orderCancelTransaction', \"200\")\n if config.DEBUG:\n print_entity(transaction, title='Order Canceled')\n\n order_id = transaction.orderID\n if order_id in self.orders:\n self.orders.pop(order_id)\n\n return transaction\n\n def cancel_pending_order(self):\n \"\"\"cancel all pending orders\"\"\"\n if not self.orders:\n self.list_order()\n\n ids = self.orders.keys()\n\n for id in ids:\n order = self.orders.get(id)\n if order.state == OrderState.PENDING:\n self.cancel_order(id)\n\n def order_client_extensions(self, order_id, client_id=None, client_tag=None, client_comment=None):\n data = {'client_id': client_id, 'client_tag': client_tag, 'client_comment': client_comment}\n kwargs = self._process_order_paramters(**data)\n response = api.order.set_client_extensions(self.account_id, str(order_id), **kwargs)\n transactions = self._process_order_response(response, 'ORDER_CLIENT_EXTENSIONS')\n\n return transactions\n","sub_path":"hulk/broker/oanda/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":19473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"525801039","text":"# Copyright 2019 Jetperch LLC\n#\n# Licensed under the Apache License, 
Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom PySide2 import QtGui, QtCore, QtWidgets\nimport pyqtgraph as pg\nimport logging\n\n\nlog = logging.getLogger(__name__)\n\n\nclass YAxisMenu(QtGui.QMenu):\n\n def __init__(self, log_enable=None):\n QtGui.QMenu.__init__(self)\n self.setTitle('Y Axis')\n\n # range\n self.range = QtGui.QMenu()\n self.range.setTitle('Range')\n self.range_group = QtGui.QActionGroup(self)\n self.range_group.setExclusive(True)\n self.range_auto = QtGui.QAction(\n '&Auto', self.range_group,\n checkable=True,\n toolTip='Automatically adjust the y-axis range to show all visible data.'\n )\n self.range_auto.setChecked(True)\n self.range.addAction(self.range_auto)\n self.range_group.addAction(self.range_auto)\n self.range_manual = QtGui.QAction(\n '&Manual', self.range_group,\n checkable=True,\n toolTip='Manually zoom and pan the y-axis range.'\n )\n self.range.addAction(self.range_manual)\n self.range_group.addAction(self.range_manual)\n self.addMenu(self.range)\n\n self.scale = QtGui.QMenu()\n self.scale.setTitle('Scale')\n self.scale_group = QtGui.QActionGroup(self)\n self.scale_group.setExclusive(True)\n\n self.scale_linear = QtGui.QAction(\n '&Linear', self.scale_group,\n checkable=True,\n toolTip='Use a \"normal\" linear y-axis scale.'\n )\n self.scale_linear.setChecked(True)\n self.scale.addAction(self.scale_linear)\n self.scale_group.addAction(self.scale_linear)\n\n self.scale_logarithmic = QtGui.QAction(\n 'Lo&garithmic', self.scale_group,\n checkable=True,\n toolTip='Use a logarithmic y-axis scale.'\n )\n self.scale.addAction(self.scale_logarithmic)\n self.scale_group.addAction(self.scale_logarithmic)\n if log_enable:\n self.addMenu(self.scale)\n\n self.hide_request = QtGui.QAction('&Hide', self)\n self.hide_request.setToolTip('Hide this signal.')\n self.addAction(self.hide_request)\n\n def range_set(self, value):\n if value == 'manual':\n self.range_auto.setChecked(False)\n self.range_manual.setChecked(True)\n else:\n self.range_auto.setChecked(True)\n self.range_manual.setChecked(False)\n\n def scale_set(self, value):\n if value == 'logarithmic':\n self.scale_linear.setChecked(False)\n self.scale_logarithmic.setChecked(True)\n else:\n self.scale_linear.setChecked(True)\n self.scale_logarithmic.setChecked(False)\n\n\nclass YAxis(pg.AxisItem):\n\n sigConfigEvent = QtCore.Signal(object)\n \"\"\"Indicate a potential configuration event change.\n \n :param configuration: The dict of parameter-value pairs which include:\n * autorange: True - automatically determine min/max extents to display.\n False - allow the user to manually pan and zoom.\n * scale: \n * 'linear': display in a \"normal\" linear scale\n * 'logarithmic': Display the y-axis in logarithmic scale.\n \"\"\"\n\n sigWheelZoomYEvent = QtCore.Signal(float, float)\n \"\"\"A scroll wheel zoom event.\n\n :param y: The y-axis location in axis coordinates. 
\n :param delta: The scroll wheel delta.\n \"\"\"\n\n sigPanYEvent = QtCore.Signal(object, float)\n \"\"\"A pan y event.\n\n :param command: One of ['start', 'drag', 'finish', 'abort']\n :param y: The y-axis delta from the start in axis coordinates. \n \"\"\"\n\n sigHideRequestEvent = QtCore.Signal(str)\n \"\"\"Request to hide this signal.\n \n :param name: The name of the signal to hide.\n \"\"\"\n\n def __init__(self, name, log_enable=None):\n pg.AxisItem.__init__(self, orientation='left')\n self._name = name\n self.log = logging.getLogger(__name__ + '.' + name)\n self._pan = None\n self.menu = YAxisMenu(log_enable=log_enable)\n self.config = {\n 'range': 'auto',\n 'scale': 'linear',\n }\n self.menu.range_auto.triggered.connect(lambda: self._config_update(range='auto'))\n self.menu.range_manual.triggered.connect(lambda: self._config_update(range='manual'))\n self.menu.scale_linear.triggered.connect(lambda: self._config_update(scale='linear'))\n self.menu.scale_logarithmic.triggered.connect(lambda: self._config_update(scale='logarithmic'))\n self.menu.hide_request.triggered.connect(lambda: self.sigHideRequestEvent.emit(self._name))\n self._markers = {}\n self._proxy = None\n self._popup_menu_pos = None\n\n def _config_update(self, **kwargs):\n log.info('config update: %s', str(kwargs))\n self.config.update(**kwargs)\n self.sigConfigEvent.emit(self.config.copy())\n\n def mouseClickEvent(self, event, axis=None):\n if self.linkedView() is None:\n return\n pos = event.scenePos()\n if self.geometry().contains(pos):\n self.log.info('mouseClickEvent(%s)', event)\n event.accept()\n if event.button() == QtCore.Qt.RightButton:\n self._popup_menu_pos = self.linkedView().mapSceneToView(pos)\n # self.scene().addParentContextMenus(self, self.menu, event)\n self.menu.popup(event.screenPos().toPoint())\n\n def mouseDragEvent(self, event, axis=None):\n vb = self.linkedView()\n if vb is None:\n return\n pos = event.scenePos()\n if self.geometry().contains(pos):\n self.log.info('mouseDragEvent(%s)', event)\n event.accept()\n if self.config['range'] == 'manual':\n [x_min, x_max], [y_min, y_max] = vb.viewRange()\n pmin = vb.mapViewToScene(pg.Point(x_min, y_min))\n pmax = vb.mapViewToScene(pg.Point(x_max, y_max))\n\n yview_range = y_max - y_min\n yscene_range = pmax.y() - pmin.y()\n pnow_y = event.scenePos().y()\n\n if self._pan is not None:\n dx = (pnow_y - self._pan[1]) * yview_range / yscene_range\n self._pan[0] += dx\n self._pan[1] = pnow_y\n\n if event.button() & QtCore.Qt.LeftButton:\n if event.isFinish():\n if self._pan is not None:\n pan_x, self._pan = self._pan[0], None\n self.sigPanYEvent.emit('finish', pan_x)\n elif self._pan is None:\n self._pan = [0.0, pnow_y]\n self.sigPanYEvent.emit('start', 0.0)\n else:\n self.sigPanYEvent.emit('drag', self._pan[0])\n\n def wheelEvent(self, event, axis=None):\n vb = self.linkedView()\n if vb is None:\n return\n pos = event.scenePos()\n if self.geometry().contains(pos):\n self.log.info('wheelEvent(%s)', event)\n event.accept()\n if self.config['range'] == 'manual':\n p = vb.mapSceneToView(event.scenePos())\n self.sigWheelZoomYEvent.emit(p.y(), event.delta())\n else:\n event.setAccepted(False)\n # p = self.mapSceneToView(ev.scenePos())\n # self.sigWheelZoomXEvent.emit(p.x(), ev.delta())\n","sub_path":"joulescope_ui/oscilloscope/yaxis.py","file_name":"yaxis.py","file_ext":"py","file_size_in_byte":7939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"45669106","text":"#!/usr/bin/env python\n\nfrom datetime 
import date, datetime, timedelta\nimport pytz\n\nimport ga\nimport sd\nimport st\n\n\nYESTERDAY = date.today() - timedelta(days=1)\n\n\ndef swipedeals_data(query_date=YESTERDAY):\n \"\"\"\n returns a dict of key totals from swipedeals db\n\n @param query_date : date|datetime -- defaults to yesterday's date\n \"\"\"\n # since the db is totally fucked, we need to convert yesterday to utc\n local_timezone = pytz.timezone('America/Chicago')\n naive_local_time = datetime(query_date.year, query_date.month, query_date.day, 0, 0, 0)\n tz_local_time = local_timezone.localize(naive_local_time)\n utc_local_time = tz_local_time.astimezone(pytz.utc)\n\n # format dates\n start = utc_local_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n stop = utc_local_time + timedelta(hours=23, minutes=59)\n stop = stop.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # get connection and set up params\n conn, cursor = sd.initialize_connection_and_cursor()\n params = (start, stop)\n vals = {}\n\n # clearance\n query = \"select count(*) \" \\\n \"from orders \" \\\n \"inner join deals on deals.id = orders.deal_id \" \\\n \"where clearance = 1 \" \\\n \"and state in ('purchased', 'partially refunded', 'refunded') \" \\\n \"and orders.created_at between %s and %s\"\n cursor.execute(query, params)\n clearance = cursor.fetchone()[0]\n vals['clearance_sales'] = clearance\n\n # transactions\n query = \"select count(*) \" \\\n \"from orders \" \\\n \"where state = 'cart purchased' \" \\\n \"and orders.created_at between %s and %s\"\n cursor.execute(query, params)\n transactions = cursor.fetchone()[0]\n vals['transactions'] = transactions\n\n # new customers\n query = \"select count(*) \" \\\n \"from orders \" \\\n \"where orders.first_purchase = 1 \" \\\n \"and state in ('purchased', 'partially refunded', 'refunded')\" \\\n \"and orders.created_at between %s and %s\"\n cursor.execute(query, params)\n new_customers = cursor.fetchone()[0]\n vals['new_customers'] = new_customers\n\n # sales\n query = \"select sum(charged + money_used) \" \\\n \"from orders \" \\\n \"where state in ('purchased', 'partially refunded', 'refunded')\" \\\n \"and orders.created_at between %s and %s\"\n cursor.execute(query, params)\n sales = cursor.fetchone()[0]\n vals['sales'] = sales\n\n # refunded\n query = \"select sum(charged) \" \\\n \"from orders \" \\\n \"where state = 'REFUND'\" \\\n \"and orders.created_at between %s and %s\"\n cursor.execute(query, params)\n refunded = cursor.fetchone()[0]\n vals['refunded'] = refunded or 0\n\n # quantity\n query = \"select count(*) \" \\\n \"from orders \" \\\n \"inner join coupons on coupons.order_id = orders.id \" \\\n \"where state in ('purchased', 'partially refunded', 'refunded') \" \\\n \"and orders.created_at between %s and %s\"\n cursor.execute(query, params)\n quantity = cursor.fetchone()[0]\n vals['quantity'] = quantity\n\n # shipping\n query = \"select sum(shipping_cost) \" \\\n \"from orders \" \\\n \"inner join deals on orders.deal_id = deals.id \" \\\n \"where state in ('purchased', 'partially refunded', 'refunded')\" \\\n \"and orders.created_at between %s and %s\"\n cursor.execute(query, params)\n shipping = cursor.fetchone()[0]\n vals['shipping'] = float(shipping) / vals['quantity']\n\n # warehousing cost\n query = \"select sum(substring(deals.warehousing_cost, 2)) \" \\\n \"from orders \" \\\n \"inner join deals on orders.deal_id = deals.id \" \\\n \"where state in ('purchased', 'partially refunded', 'refunded') \" \\\n \"and warehousing_cost is not null \" \\\n \"and redeem_type = 'Inventory' \" \\\n \"and 
orders.created_at between %s and %s \"\n cursor.execute(query, params)\n warehousing = cursor.fetchone()[0]\n vals['warehousing'] = float(warehousing) / vals['quantity']\n\n # dloot dollars\n query = \"select sum(money_used) \" \\\n \"from orders \" \\\n \"where state in ('purchased', 'partially refunded', 'refunded') \" \\\n \"and purchased_at between %s and %s\"\n cursor.execute(query, params)\n dloot_dollars = cursor.fetchone()[0]\n vals ['dloot'] = dloot_dollars\n\n # email type\n # \"select email_type, count(*) \" for counts -- tolu wants total sales\n query = \"select email_type, sum(charged + money_used) \" \\\n \"from coupons \" \\\n \"inner join orders on coupons.order_id = orders.id \" \\\n \"inner join deals on orders.deal_id = deals.id \" \\\n \"where state in ('purchased', 'partially refunded', 'refunded') \" \\\n \"and purchased_at between %s and %s \" \\\n \"group by deals.email_type\"\n cursor.execute(query, params)\n # email_types = [(email_type, count) for email_type, count in cursor.fetchall()]\n for email_type, count in cursor.fetchall():\n if email_type in ('', None):\n email_type = 'No Email'\n vals[email_type] = count\n\n # promo code savings\n query = \"select sum(((orders.charged - ifnull(orders.tax_amount, 0)) * (promo_codes.percent_discount / 100))) \" \\\n \"from orders \" \\\n \"inner join promo_codes on promo_codes.id = promo_code_id \" \\\n \"where orders.id in ( select id from orders where cart_order_id is null ) \" \\\n \"and orders.created_at between %s and %s\"\n cursor.execute(query, params)\n promo_savings = cursor.fetchone()[0]\n vals['promo_savings'] = promo_savings\n\n # avg product cost\n query = \"select sum(deals.product_cost)/count(*) \" \\\n \"from orders \" \\\n \"inner join deals on orders.deal_id = deals.id \" \\\n \"where state in ('purchased', 'partially refunded', 'refunded') \" \\\n \"and orders.created_at between %s and %s\"\n cursor.execute(query, params)\n avg_product_cost = cursor.fetchone()[0]\n vals['avg_product_cost'] = avg_product_cost\n\n # results\n sd.close_connection_and_cursor(conn, cursor)\n return vals\n\n\ndef sailthru_data(list_name='doggyloot', query_date=YESTERDAY):\n \"\"\"\n returns a dict of email stats for a given date -- total, engaged, active, sign up counts\n\n @param list_name : str -- defaults to 'doggyloot'\n @param query_date : date|datetime -- defaults to yesterday's date\n \"\"\"\n client = st.initialize_client()\n result = st.get_stats(client, list_name, query_date)\n result = result.json\n return {\n 'email_count': result.get('email_count', 0),\n 'engaged_count': result.get('engaged_count', 0),\n 'active_count': result.get('active_count', 0),\n 'lists_signup_count': result.get('lists_signup_count', 0),\n }\n\n\ndef google_analytics_data(\n metrics='ga:sessions,ga:bounces,ga:bounceRate,ga:avgTimeOnPage,ga:pageviews,ga:pageviewsPerSession',\n query_date='yesterday'):\n \"\"\"\n returns a dict of ga stats for a given date for given metrics\n\n @param metrics : str -- defaults to 'ga:sessions,ga:bounces,ga:avgTimeOnPage,ga:pageviews,ga:pageviewsPerSession'\n @param query_date : date|datetime -- defaults to 'yesterday', which is a special value for ga queries\n \"\"\"\n ids = ga.get_table_ids()\n service = ga.initialize_service()\n if isinstance(query_date, (date, datetime)):\n query_date = query_date.strftime(\"%Y-%m-%d\")\n result = ga.perform_query(service, ids, query_date, query_date, metrics)\n result = result['rows'][0]\n return dict(zip(\n [metric[3:] for metric in metrics.split(',')],\n [float(res) for 
res in result]\n ))\n\n\nif __name__ == '__main__':\n st_data = sailthru_data()\n ga_data = google_analytics_data()\n sd_data = swipedeals_data()\n\n values = {}\n values.update(st_data)\n values.update(ga_data)\n values.update(sd_data)\n\n from pprint import pprint\n pprint(values)\n","sub_path":"flash_report/v2/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":7980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"37568094","text":"import sys\nN=int(sys.stdin.readline())\nqueue=[]\nfor i in range(N):\n a=sys.stdin.readline().strip() # .strip() 개행문자 제거\n if('push' in a):\n x,y=a.split(\" \")\n queue.append(y)\n elif(a=='pop'):\n if(len(queue)==0):\n print(-1)\n else:\n print(queue[0])\n queue.pop(0)\n elif(a=='size'):\n print(len(queue))\n elif(a=='front'):\n if(len(queue)==0):\n print(-1)\n else:\n print(queue[0])\n elif(a=='back'):\n if(len(queue)==0):\n print(-1)\n else:\n print(queue[-1])\n elif(a=='empty'):\n if(len(queue)==0):\n print(1)\n else:\n print(0)\n\n","sub_path":"BOJ queue and graph/BOJ 10845.py","file_name":"BOJ 10845.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"405283862","text":"import sys\n\nfrom docopt import docopt\nfrom bluepy.btle import (\n Scanner, Peripheral, AssignedNumbers,\n ADDR_TYPE_RANDOM, UUID, BTLEException\n)\nfrom binascii import hexlify\nfrom tabulate import tabulate\n\n\ndef identity(x):\n return x\n\n\nif sys.version_info >= (3,):\n # In Python 3 we are already dealing with bytes,\n # so just return the original value.\n get_byte = identity\nelse:\n get_byte = ord\n\n# firmware == 2.99.15 (or higher?)\nSWITCHMATE_SERVICE = 'a22bd383-ebdd-49ac-b2e7-40eb55f5d0ab'\n\n\nORIGINAL_STATE_HANDLE = 0x2e\nBRIGHT_STATE_HANDLE = 0x30\n\nORIGINAL_MODEL_STRING_HANDLE = 0x14\n\nSERVICES_AD_TYPE = 0x07\nMANUFACTURER_DATA_AD_TYPE = 0xff\n\n\ndef get_switchmates(scan_entries, mac_address):\n switchmates = []\n for scan_entry in scan_entries:\n service_uuid = scan_entry.getValueText(SERVICES_AD_TYPE)\n is_switchmate = service_uuid == SWITCHMATE_SERVICE\n if not is_switchmate:\n continue\n if mac_address and scan_entry.addr == mac_address:\n return [scan_entry]\n if scan_entry not in switchmates:\n switchmates.append(scan_entry)\n switchmates.sort(key=lambda sw: sw.addr)\n return switchmates\n\n\ndef scan(\n start_msg, process_entry,\n timeout=None, mac_address=None, success_msg=None\n):\n print(start_msg)\n sys.stdout.flush()\n\n scanner = Scanner()\n\n try:\n switchmates = get_switchmates(scanner.scan(timeout), mac_address)\n except BTLEException as ex:\n print(\n 'ERROR: Could not complete scan.',\n 'Try running switchmate with sudo.',\n ex.message\n )\n return\n except OSError as ex:\n print(\n 'ERROR: Could not complete scan.',\n 'Try compiling the bluepy helper.',\n ex\n )\n return\n\n if len(switchmates):\n if success_msg:\n print(success_msg)\n for switchmate in switchmates:\n process_entry(switchmate)\n else:\n print('No Switchmate devices found')\n\n\ndef debug_helper(device):\n output = [['uuid', 'common name', 'handle', 'properties', 'value']]\n for char in device.getCharacteristics():\n if char.supportsRead():\n val = char.read()\n binary = False\n for c in val:\n if get_byte(c) < 32 or get_byte(c) > 126:\n binary = True\n if binary:\n val = hexlify(val)\n output.append([\n str(char.uuid),\n UUID(char.uuid).getCommonName(),\n '{0:x}'.format(char.getHandle()),\n 
char.propertiesToString(),\n str(val)\n ])\n print(tabulate(output, headers='firstrow'))\n\n\ndef is_original_device(device):\n # The handle for reading the model string on Bright devices is actually\n # different from Original devices, but using getCharacteristics to read\n # the model is much slower.\n #model = device.readCharacteristic(ORIGINAL_MODEL_STRING_HANDLE)\n #print(model)\n #return model == b'Original'\n return True\n\n\ndef get_state_handle(device):\n return ORIGINAL_STATE_HANDLE\n #if is_original_device(device):\n # return ORIGINAL_STATE_HANDLE\n #else:\n # return BRIGHT_STATE_HANDLE\n\n\ndef switch(device, val):\n state_handle = get_state_handle(device)\n #curr_val = device.readCharacteristic(state_handle)\n #if val is None:\n # val = b'\\x01' if curr_val == b'\\x00' else b'\\x00'\n val_num = get_byte(val[0])\n #val_text = ('off', 'on')[val_num]\n #if curr_val != val:\n # device.writeCharacteristic(state_handle, val, True)\n # print('Switched {}!'.format(val_text))\n #else:\n # print('Already {}!'.format(val_text))\n device.writeCharacteristic(state_handle, val, True)\n\ndef print_entry_state(entry, state_handle=None):\n service_data = entry.getValueText(MANUFACTURER_DATA_AD_TYPE)\n print(service_data)\n val = int(service_data[1])\n if val>0: val=1\n print(entry.addr, (\"off\", \"on\")[val])\n\n\ndef print_battery_level(device):\n battery_level = AssignedNumbers.batteryLevel\n level = device.getCharacteristics(uuid=battery_level)[0].read()\n print('Battery level: {}%'.format(ord(level)))\n\n\ndef print_exception(ex):\n if 'disconnected' in ex.message.lower():\n print('ERROR: Device disconnected.')\n else:\n print('ERROR: ' + ex.message)\n\n\nclass Switch(object):\n \"\"\" A switch class for switchmate switches \"\"\"\n \n def __init__(self, mac_address='c1:59:2c:b2:8d:33'):\n \"\"\" return a switch object with the right mac_address\"\"\"\n self.mac_address = mac_address\n try:\n self.device = Peripheral(mac_address, ADDR_TYPE_RANDOM)\n self.state_handle = get_state_handle(self.device)\n self.curr_val = self.device.readCharacteristic(self.state_handle)\n except BTLEException as ex:\n print('ERROR: ' + ex.message)\n except OSError as ex:\n print('ERROR: Failed to connect to device.')\n \n @property\n def status(self,):\n return 'off' if self.curr_val==b'\\x00' else 'on'\n \n def switch(self, state=None):\n \"\"\" Switch the switchmate on or off \n Usage: switch('on')\n \"\"\"\n if state=='on':\n val = b'\\x01'\n elif state=='off':\n val = b'\\x00'\n elif state==None:\n val = b'\\x01' if self.curr_val == b'\\x00' else b'\\x00'\n try:\n self.device.writeCharacteristic(self.state_handle, val, True)\n self.curr_val=val\n except BTLEException as ex:\n print_exception(ex)\n \n # Functions for a cleaner interface\n def turn_on(self):\n self.switch('on')\n \n def turn_off(self):\n self.switch('off')\n \n","sub_path":"lab/devices/switchmate.py","file_name":"switchmate.py","file_ext":"py","file_size_in_byte":5757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"416191829","text":"import pandas as pd\nimport plotly.express as px\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input,Output\napp=dash.Dash(__name__)\ndf=pd.read_csv('covidd.csv')\n\n#app layout\n\napp.layout=html.Div([\n html.H1(\"The Covid case in Each country\"),\n dcc.Dropdown(id='my_option',\n options=[{'label':i,'value':i}\n for i in df['Combined_Key'].unique()],\n value='Afghanistan',\n\n ),\n 
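# headings and Divs below are filled in by the update_graph callback\n    # for whichever country is selected in the dropdown above\n    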
html.H6(\"Confirmed\"),\n html.Div(id='confirmedid'),\n\n html.Br(),\n html.H6(\"Recovered\"),\n html.Div(id='recoveredid'),\n html.Br(),\n html.H6(\"Deaths\"),\n html.Div(id='deathid'),\n html.Br(),\n dcc.Graph(id='incidentrate')\n])\n\n#app callback\n\n@app.callback(\n [Output(component_id='confirmedid',component_property='children'),\n Output(component_id='recoveredid',component_property='children'),\n Output(component_id='deathid',component_property='children'),\n Output(component_id='incidentrate',component_property='figure')],\n [Input(component_id='my_option',component_property='value')]\n)\ndef update_graph(option_slctd):\n filtered_data=df[df['Combined_Key']==option_slctd]\n\n index=filtered_data.index.values\n print(index)\n\n strings=[str(integer) for integer in index]\n a_string = \"\".join(strings)\n keyvalue = int(a_string)\n print(keyvalue)\n\n\n confirmation= filtered_data['Confirmed']\n\n recovery=filtered_data['Recovered']\n death=filtered_data['Deaths']\n named=['Confirmed','Recovered','Deaths']\n\n confi=filtered_data.loc[keyvalue,\"Confirmed\"]\n reco=filtered_data.loc[keyvalue,\"Recovered\"]\n dea=filtered_data.loc[keyvalue,\"Deaths\"]\n valued = [confi, reco, dea]\n\n fig=px.pie(data_frame=filtered_data,names=named,values=valued,hole=0.3,title=\"Covid Data associated with it\")\n\n return confirmation,recovery,death,fig\n\nif __name__ == '__main__':\n app.run_server(debug=True)","sub_path":"Country.py","file_name":"Country.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"401078134","text":"import pandas as pd\r\nimport datetime # To handle dates\r\n#import calendar # To get month\r\n# import statsmodels.formula.api as sm\r\nimport matplotlib.pylab as plt\r\nimport numpy as np\r\nimport seaborn as sns\r\n# import sklearn.metrics as metrics # To get regression metrics\r\n# import scipy as sp\r\n# import time # To do time complexity analysis\r\n# import random\r\nimport copy\r\n# import profile\r\n# import cProfile\r\n# from sklearn.cluster import KMeans # perform clustering operation\r\nfrom datetime import datetime\r\n# from sklearn import metrics\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.tree import DecisionTreeRegressor\r\n# from sklearn.ensemble import AdaBoostRegressor\r\n# from sklearn.ensemble import GradientBoostingRegressor\r\n# from sklearn.svm import SVC, LinearSVC\r\n\r\n# =============================================================================\r\n# pd.set_option('display.max_rows', 500)\r\n# pd.set_option('display.max_columns', 500)\r\n# pd.set_option('display.width', 1000)\r\n# =============================================================================\r\n\r\n# Data preprocessing:\r\n#loading in raw data\r\nfeatures_df = pd.read_csv(\"features.csv\")\r\nstores_df = pd.read_csv(\"stores.csv\")\r\ntrain_df = pd.read_csv(\"train.csv\")\r\ntest_df = pd.read_csv(\"test.csv\")\r\n\r\nprint(features_df.head())\r\nprint(stores_df.head())\r\nprint(train_df)\r\nprint(test_df)\r\n\r\n\r\n\r\n# merging the data\r\n \r\n# =============================================================================\r\n# (train + Store + Feature)\r\n# (test + Stoee + Feature)\r\n# \r\n# =============================================================================\r\n\r\n# =============================================================================\r\n# train_bt = pd.merge(train_df,stores_df) \r\n# 
train_df = pd.merge(train_bt,features_df)\r\n# \r\n# test_bt = pd.merge(test_df,stores_df)\r\n# test_df= pd.merge(test_bt,features_df)\r\n# \r\n# =============================================================================\r\n# =============================================================================\r\n# print(features_df.head())\r\n# print(features_df.describe())\r\n# \r\n# print(train_df.head())\r\n# print(train_df.describe())\r\n# print(train_df.tail())\r\n# \r\n# =============================================================================\r\n# =============================================================================\r\n# print(test_df.head(2))\r\n# print(test_df.describe())\r\n# \r\n# print(train_df.info())\r\n# =============================================================================\r\n\r\n\r\n\r\n# Creating a custom season dictionary to identify the season in each month\r\nseasons_dict = {\r\n 1:\"Winter\",\r\n 2:\"Winter\",\r\n 3:\"Spring\",\r\n 4:\"Spring\",\r\n 5:\"Spring\",\r\n 6:\"Summer\", \r\n 7:\"Summer\",\r\n 8:\"Summer\",\r\n 9:\"Fall\",\r\n 10:\"Fall\",\r\n 11:\"Fall\",\r\n 12:\"Winter\"\r\n}\r\n\r\ntest_bt = pd.merge(test_df,stores_df)\r\ntest_df= pd.merge(test_bt,features_df)\r\n\r\n# Creating the master dataset ((train + Store + Feature))\r\nmaster_df = train_df.merge(stores_df, on='Store', how='left')\r\nmaster_df = master_df.merge(features_df, on=['Store', 'Date'], how='left')\r\n\r\nd = copy.deepcopy(master_df)\r\n\r\nd1 = d[\"Weekly_Sales\"]\r\n\r\nprint(d[\"Weekly_Sales\"].describe())\r\n\r\nprint(\"Percentile less than 3% provides only negative value : \",d[\"Weekly_Sales\"].quantile(0.003))\r\n\r\n\r\nx = np.concatenate((d1[d[\"Weekly_Sales\"] < 0], d1[d[\"Weekly_Sales\"] > 0]))\r\n\r\nplt.hist(x, density=True)\r\n\r\nplt.xlim([-70496, 200000])\r\nplt.xlabel('Weekly Sales Values')\r\nplt.ylabel('Normalized Sales Values')\r\nplt.title('Normalized distribution of sales values')\r\nplt.show()\r\n\r\nprint(master_df.head())\r\n\r\n# Filling empty markdown columns\r\nmaster_df['MarkDown1'] = master_df['MarkDown1'].fillna(0)\r\nmaster_df['MarkDown2'] = master_df['MarkDown2'].fillna(0)\r\nmaster_df['MarkDown3'] = master_df['MarkDown3'].fillna(0)\r\nmaster_df['MarkDown4'] = master_df['MarkDown4'].fillna(0)\r\nmaster_df['MarkDown5'] = master_df['MarkDown5'].fillna(0)\r\n\r\n# =============================================================================\r\n# # Cleaning holiday columns\r\nmaster_df['isHoliday'] = master_df['IsHoliday_x']\r\nmaster_df = master_df.drop(columns=['IsHoliday_x', 'IsHoliday_y'])\r\nmaster_df['Date'] = pd.to_datetime(master_df['Date'], format='%Y-%m-%d')\r\nmaster_df['Year'] = master_df['Date'].dt.year\r\n\r\n # store vs sales\r\nax= sns.barplot(x=\"Store\", y=\"Weekly_Sales\", data=master_df)\r\nax.set_xticklabels(ax.get_xticklabels(), fontsize=7)\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n# sales vs type : year wise\r\nsns.barplot(x=\"Year\", y=\"Weekly_Sales\", hue=\"Type\", data=master_df)\r\n \r\ndf_corr = master_df.corr()\r\nax=df_corr[['Weekly_Sales']].plot(kind='bar')\r\nplt.xlabel('Attribute')\r\nplt.ylabel('Correlation')\r\nplt.title('Correlation of Weekly sales with other variables')\r\nplt.tight_layout()\r\nplt.show()\r\n\r\nsns.heatmap(df_corr)\r\n\r\n\r\ntrain_corr=pd.DataFrame(master_df.corr())\r\n# train_corr.to_excel(writer,'Train_Data Corr',index=True)\r\nprint(train_corr.head())\r\n\r\ntest_corr=pd.DataFrame(test_df.corr())\r\n# train_corr.to_excel(writer,'Train_Data 
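Corr',index=True)  # note: 'writer' appears undefined in this script,\r\n#   which is presumably why these to_excel calls stay commented out\r\n# train_corr.to_excel(writer,'Train_Data 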
Corr',index=True)\r\nprint(train_corr.head())\r\n\r\n# line graph of store vs sales \r\nmaster_df.plot(kind='line', x='Weekly_Sales', y='Store', alpha=0.5)\r\nplt.show()\r\n\r\n# bar graph of store vs sales\r\nmaster_df['Store'].value_counts(normalize=True).plot(kind = 'bar',fig=(4,5))\r\n\r\n\r\n\r\n# Sales vs Deptartment\r\n\r\nmaster_df.plot(kind='line', x='Dept', y='Weekly_Sales', alpha=1.5,fig=(4,5))\r\nplt.show()\r\n\r\n# Missing Value Treatment\r\nprint(master_df.isnull().sum())\r\nprint(\"*\"*30)\r\nprint(test_df.isnull().sum())\r\n\r\n\r\ntest_df['CPI']=test_df.groupby(['Dept'])['CPI'].transform(lambda x: x.fillna(x.mean()))\r\ntest_df['Unemployment']=test_df.groupby(['Dept'])['Unemployment'].transform(lambda x: x.fillna(x.mean()))\r\n\r\ntest_df=test_df.fillna(0)\r\n\r\n\r\nprint(master_df.isnull().sum())\r\nprint(\"*\"*30)\r\nprint(test_df.isnull().sum())\r\n\r\n# Outlier Treatment\r\nmaster_df.Weekly_Sales=np.where(master_df.Weekly_Sales>100000, 100000,master_df.Weekly_Sales)\r\nmaster_df.Weekly_Sales.plot.hist(bins=25)\r\nplt.show()\r\n\r\nmaster_df.info()\r\n\r\nmaster_df['Date'] = pd.to_datetime(master_df['Date'])\r\ntest_df['Date'] = pd.to_datetime(test_df['Date'])\r\n\r\n\r\n# Extract date features\r\nmaster_df['Date_dayofweek'] =master_df['Date'].dt.dayofweek\r\nmaster_df['Date_month'] =master_df['Date'].dt.month \r\nmaster_df['Date_year'] =master_df['Date'].dt.year\r\nmaster_df['Date_day'] =master_df['Date'].dt.day \r\nmaster_df['IsHoliday'] =master_df['isHoliday'] \r\n\r\ntest_df['Date_dayofweek'] =test_df['Date'].dt.dayofweek \r\ntest_df['Date_month'] =test_df['Date'].dt.month \r\ntest_df['Date_year'] =test_df['Date'].dt.year\r\ntest_df['Date_day'] =test_df['Date'].dt.day\r\n\r\n\r\nprint(master_df.Type.value_counts())\r\nprint(\"*\"*30)\r\nprint(test_df.Type.value_counts())\r\n\r\nprint(train_df.IsHoliday.value_counts())\r\nprint(\"*\"*30)\r\nprint(test_df.IsHoliday.value_counts())\r\n\r\ntrain_test_data = [master_df, test_df]\r\n\r\ntype_mapping = {\"A\": 1, \"B\": 2, \"C\": 3}\r\nfor dataset in train_test_data:\r\n dataset['Type'] = dataset['Type'].map(type_mapping)\r\n \r\n \r\n# Converting Categorical Variable 'IsHoliday' into Numerical Variable \r\n \r\ntype_mapping = {False: 0, True: 1}\r\nfor dataset in train_test_data:\r\n dataset['IsHoliday'] = dataset['IsHoliday'].map(type_mapping)\r\n \r\n \r\n# Creating Extra Holiday Variable.\r\n# If that week comes under extra holiday then 1(=Yes) else 2(=No)\r\n \r\nmaster_df['Super_Bowl'] = np.where((master_df['Date']==datetime(2010, 2, 12)) | (master_df['Date']==datetime(2011, 2, 11)) | (master_df['Date']==datetime(2012, 2, 10)) | (master_df['Date']==datetime(2013, 2, 8)),1,0)\r\nmaster_df['Labour_Day'] = np.where((master_df['Date']==datetime(2010, 9, 10)) | (master_df['Date']==datetime(2011, 9, 9)) | (master_df['Date']==datetime(2012, 9, 7)) | (master_df['Date']==datetime(2013, 9, 6)),1,0)\r\nmaster_df['Thanksgiving'] = np.where((master_df['Date']==datetime(2010, 11, 26)) | (master_df['Date']==datetime(2011, 11, 25)) | (master_df['Date']==datetime(2012, 11, 23)) | (master_df['Date']==datetime(2013, 11, 29)),1,0)\r\nmaster_df['Christmas'] = np.where((master_df['Date']==datetime(2010, 12, 31)) | (master_df['Date']==datetime(2011, 12, 30)) | (master_df['Date']==datetime(2012, 12, 28)) | (master_df['Date']==datetime(2013, 12, 27)),1,0)\r\n#........................................................................\r\ntest_df['Super_Bowl'] = np.where((test_df['Date']==datetime(2010, 2, 12)) | (test_df['Date']==datetime(2011, 
2, 11)) | (test_df['Date']==datetime(2012, 2, 10)) | (test_df['Date']==datetime(2013, 2, 8)),1,0)\r\ntest_df['Labour_Day'] = np.where((test_df['Date']==datetime(2010, 9, 10)) | (test_df['Date']==datetime(2011, 9, 9)) | (test_df['Date']==datetime(2012, 9, 7)) | (test_df['Date']==datetime(2013, 9, 6)),1,0)\r\ntest_df['Thanksgiving'] = np.where((test_df['Date']==datetime(2010, 11, 26)) | (test_df['Date']==datetime(2011, 11, 25)) | (test_df['Date']==datetime(2012, 11, 23)) | (test_df['Date']==datetime(2013, 11, 29)),1,0)\r\ntest_df['Christmas'] = np.where((test_df['Date']==datetime(2010, 12, 31)) | (test_df['Date']==datetime(2011, 12, 30)) | (test_df['Date']==datetime(2012, 12, 28)) | (test_df['Date']==datetime(2013, 12, 27)),1,0)\r\n\r\n# Altering the isHoliday value depending on these new holidays...\r\nmaster_df['IsHoliday']=master_df['IsHoliday']|master_df['Super_Bowl']|master_df['Labour_Day']|master_df['Thanksgiving']|master_df['Christmas']\r\ntest_df['IsHoliday']=test_df['IsHoliday']|test_df['Super_Bowl']|test_df['Labour_Day']|test_df['Thanksgiving']|test_df['Christmas']\r\n\r\n\r\nprint(master_df.Christmas.value_counts())\r\nprint(master_df.Super_Bowl.value_counts())\r\nprint(master_df.Thanksgiving.value_counts())\r\nprint(master_df.Labour_Day.value_counts())\r\n\r\nprint(test_df.Christmas.value_counts())\r\nprint(test_df.Super_Bowl.value_counts())\r\nprint(test_df.Thanksgiving.value_counts())\r\nprint(test_df.Labour_Day.value_counts())\r\n\r\n# Since we have Imputed IsHoliday according to Extra holidays..These extra holiday variable has redundant..\r\n# Droping the Extra holiday variables because its redundant..\r\ndp=['Super_Bowl','Labour_Day','Thanksgiving','Christmas']\r\nmaster_df.drop(dp,axis=1,inplace=True)\r\ntest_df.drop(dp,axis=1,inplace=True)\r\n\r\nprint(master_df.info())\r\nmaster_df.head(2)\r\n# Since we have imputed markdown variables therefore we will not be removing the all markdown variables.\r\n# -Removing MarkDown5 because its Highly Skewed\r\npd.set_option('display.max_columns', 10)\r\nfeatures_drop=['Unemployment','CPI','MarkDown5','isHoliday','Year']\r\nfeatures_drop_test=['Unemployment','CPI','MarkDown5']\r\nmaster_df=master_df.drop(features_drop, axis=1)\r\ntest_df=test_df.drop(features_drop_test, axis=1)\r\n\r\nprint(master_df.head(2))\r\nprint(test_df.head(2))\r\n\r\n# Classification & Accuracy\r\n# Define training and testing set\r\n\r\n# Converting all float var int integer..\r\nfor var in master_df:\r\n if master_df[var].dtypes == float:\r\n master_df[var]=master_df[var].astype(int)\r\n \r\nfor var in test_df:\r\n if test_df[var].dtypes == float:\r\n test_df[var]=test_df[var].astype(int)\r\n\r\n#### train X= Exery thing except Weekly_Sales\r\nmaster_df_X=master_df.drop(['Weekly_Sales','Date'], axis=1)\r\n\r\n#### train Y= Only Weekly_Sales \r\nmaster_df_y=master_df['Weekly_Sales'] \r\ntest_df_X=test_df.drop('Date',axis=1).copy()\r\n\r\nprint(master_df_X.shape, master_df_y.shape, test_df_X.shape)\r\n\r\n# Building models & comparing their RMSE values\r\n# 1.Linear Regression\r\n# print(master_df_X)\r\n## Methood 1..\r\nclf = LinearRegression()\r\nclf.fit(master_df_X, master_df_y)\r\ny_pred_linear=clf.predict(test_df_X)\r\nacc_linear=round( clf.score(master_df_X, master_df_y) * 100, 2)\r\nprint ('scorbe:'+str(acc_linear) + ' percent')\r\n\r\n\r\n# 2. 
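Random Forest comes next. Aside (illustrative sketch, not from the source):\r\n# the accuracy scores here are computed on the training data, so a\r\n# hold-out estimate would be a fairer comparison, e.g.:\r\n#   from sklearn.model_selection import cross_val_score\r\n#   cv = cross_val_score(RandomForestRegressor(n_estimators=100),\r\n#                        master_df_X, master_df_y, cv=3)\r\n#   print('CV R^2: %.3f' % cv.mean())\r\n# 2. 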
Random Forest\r\nclf = RandomForestRegressor(n_estimators=100)\r\nclf.fit(master_df_X, master_df_y)\r\ny_pred_rf=clf.predict(test_df_X)\r\nacc_rf= round(clf.score(master_df_X, master_df_y) * 100, 2)\r\nprint (\"Accuracy: %i %% \\n\"%acc_rf)\r\n\r\n# 3. Decision tree\r\nclf=DecisionTreeRegressor()\r\nclf.fit(master_df_X, master_df_y)\r\ny_pred_dt= clf.predict(test_df_X)\r\nacc_dt = round( clf.score(master_df_X, master_df_y) * 100, 2)\r\nprint (str(acc_dt) + ' percent')\r\n\r\n# =============================================================================\r\n# \r\n# Comparing Models\r\n# Let's compare the accuracy score of all the regression models used above.\r\n# =============================================================================\r\n\r\n\r\nmodels = pd.DataFrame({\r\n 'Model': ['Linear Regression','Random Forest','Decision Tree'],\r\n \r\n 'Score': [acc_linear, acc_rf,acc_dt]\r\n })\r\n\r\nprint(models.sort_values(by='Score', ascending=False))\r\n\r\n# Prediction value using Random Forest model..\r\nsubmission = pd.DataFrame({\r\n \"Store_Dept_Date\": test_df.Store.astype(str)+'_'+test_df.Dept.astype(str)+'_'+test_df.Date.astype(str),\r\n \"Weekly_Sales\": y_pred_rf\r\n })\r\n\r\n# =============================================================================\r\n# submission.to_csv('weekly_sales predicted.csv', index=False)\r\n# submission.to_excel(pd.writer,'Weekly_sales Pred',index=False)\r\n# \r\n# =============================================================================\r\nprint(submission.head()) ","sub_path":"Walmart.py","file_name":"Walmart.py","file_ext":"py","file_size_in_byte":13356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"203053527","text":"\n\nimport collections\n\ndef group_items(items, key, reverse_sort=True):\n groups = collections.defaultdict(list)\n for item in items:\n groups[key(item)].append(item)\n \n sorted_groups = sorted(groups.items(), key=lambda groups: len(groups[1]), reverse=reverse_sort)\n \n return [(group, items_list) for group, items_list in sorted_groups]\n\n\ndef print_items_count(items, item_key):\n item_group = group_items(items, key=item_key)\n\n for item, items_list in item_group:\n print(\"{}: {}\".format(item, len(items_list)))\n ","sub_path":"govhack2018/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"582744747","text":"import numpy as np\nimport pyqtgraph as pg\nimport time\n\nfrom PyQt5.Qsci import QsciScintilla, QsciLexerPython\n\nfrom spyre import Spyrelet, Task, Element\nfrom spyre.widgets.task import TaskWidget\nfrom spyre.plotting import LinePlotWidget\nfrom spyre.widgets.rangespace import Rangespace\nfrom spyre.widgets.param_widget import ParamWidget\nfrom spyre.widgets.repository_widget import RepositoryWidget\n\nfrom lantz import Q_\n\nfrom lantz.drivers.keysight import Keysight_33622A\nfrom lantz.drivers.tektronix import TDS2024C\n\nclass laser_calibration(Spyrelet):\n\n requires = {\n 'fungen': Keysight_33622A,\n 'osc': TDS2024C\n }\n\n @Task()\n def calibrate_amplitude(self, **kwargs):\n self.dataset.clear()\n params = self.sweep_parameters.widget.get()\n chn = params['channel']\n current = params['start']\n while current <= params['stop']:\n self.fungen.offset[chn] = current\n print('Changed to {}'.format(current))\n\n time.sleep(params['wait'])\n\n values = {\n 'x': self.fungen.offset[chn],\n 'y': self.osc.measure_mean(chn),\n 
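# ('y' is the mean level the scope reports at this DC offset)\n                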
}\n self.calibrate_amplitude.acquire(values)\n\n #time.sleep(params['wait'])\n current += params['step']\n\n @calibrate_amplitude.initializer\n def initialize(self):\n params = self.sweep_parameters.widget.get()\n chn = params['channel']\n self.fungen.waveform[chn] = 'DC'\n self.fungen.output[chn] = 'ON'\n return\n\n @calibrate_amplitude.finalizer\n def finalize(self):\n params = self.sweep_parameters.widget.get()\n chn = params['channel']\n self.fungen.output[chn] = 'OFF'\n return\n\n @Task()\n def calibrate_frequency(self, **kwargs):\n self.dataset.clear()\n params = self.sweep_parameters.widget.get()\n chn = params['channel']\n current = params['start']\n while current <= params['stop']:\n self.fungen.offset[chn] = current\n print('Changed to {}'.format(current))\n\n time.sleep(params['wait'])\n\n values = {\n 'x': self.fungen.offset[chn],\n 'y': self.osc.measure_frequency(chn),\n }\n self.calibrate_frequency.acquire(values)\n\n #time.sleep(params['wait'])\n current += params['step']\n\n @calibrate_frequency.initializer\n def initialize(self):\n params = self.sweep_parameters.widget.get()\n chn = params['channel']\n self.fungen.waveform[chn] = 'DC'\n self.fungen.output[chn] = 'ON'\n return\n\n @calibrate_frequency.finalizer\n def finalize(self):\n params = self.sweep_parameters.widget.get()\n chn = params['channel']\n self.fungen.output[chn] = 'OFF'\n return\n\n @Element(name='Sweep parameters')\n def sweep_parameters(self):\n params = [\n ('channel', {'type': dict, 'items': {'1': 1, '2': 2}}),\n ('start', {'type': float, 'default': 0, 'units':'V'}),\n ('stop', {'type': float, 'default': 5, 'units':'V'}),\n ('step', {'type': float, 'default': 1, 'units': 'V', 'positive': True}),\n ('wait', {'type': float, 'default': 1, 'nonnegative': True})\n ]\n w = ParamWidget(params)\n return w\n\n @Element(name='Calibration')\n def latest(self):\n p = LinePlotWidget()\n p.plot('Amplitude')\n p.plot('Frequency')\n return p\n\n @latest.on(calibrate_amplitude.acquired)\n def latest_update(self, ev):\n w = ev.widget\n data = self.data\n w.set('Amplitude', xs=data.x, ys=data.y)\n return\n\n @latest.on(calibrate_frequency.acquired)\n def latest_update(self, ev):\n w = ev.widget\n data = self.data\n w.set('Frequency', xs=data.x, ys=data.y)\n return\n \n @Element()\n def save(self):\n w = RepositoryWidget(self)\n return w","sub_path":"laser_calibration.py","file_name":"laser_calibration.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"179398710","text":"import time\nfrom concurrent.futures import ThreadPoolExecutor\nfrom typing import Any, Iterable, List, Tuple\n\nimport pandas as pd\n\nfrom snowshu.adapters import BaseSQLAdapter\nfrom snowshu.configs import MAX_ALLOWED_DATABASES, MAX_ALLOWED_ROWS\nfrom snowshu.core.models import DataType, Relation\nfrom snowshu.core.models.relation import at_least_one_full_pattern_match\nfrom snowshu.core.utils import correct_case\nfrom snowshu.logger import Logger, duration\n\nlogger = Logger().logger\n\n\nclass BaseSourceAdapter(BaseSQLAdapter):\n name = ''\n MAX_ALLOWED_DATABASES = MAX_ALLOWED_DATABASES\n MAX_ALLOWED_ROWS = MAX_ALLOWED_ROWS\n DEFAULT_CASE = 'lower'\n SUPPORTS_CROSS_DATABASE = False\n SUPPORTED_FUNCTIONS = set()\n\n class _DatabaseObject:\n \"\"\" An internal class to allow for preserving name casing when needed\n\n Ex: When querying information_schema, an object name may need to\n passed as a varchar for a case-sensitive match\n\n Args:\n 
case_sensitive_name (str): The name of the object in the original case\n            full_relation (Relation): An object that represents the database object\n                to allow for convenient pattern matching\n        \"\"\"\n\n        def __init__(self, case_sensitive_name: str, full_relation: Relation):\n            self.case_sensitive_name = case_sensitive_name\n            self.full_relation = full_relation\n\n    def __init__(self, preserve_case: bool = False):\n        self.preserve_case = preserve_case\n        super().__init__()\n        for attr in ('DATA_TYPE_MAPPINGS', 'SUPPORTED_SAMPLE_METHODS',):\n            if not hasattr(self, attr):\n                raise NotImplementedError(\n                    f'Source adapter requires attribute {attr} but was not set.')\n\n    def build_catalog(self, patterns: Iterable[dict], thread_workers: int = 1) -> Tuple[Relation]:\n        \"\"\" This function is expected to return all of the relations that satisfy the filters \n\n        Args:\n            patterns (Iterable[dict]): Filter dictionaries to apply to the source databases\n                requires \"database\", \"schema\", and \"name\" keys\n            thread_workers (int): The number of workers to use when building the catalog\n\n        Returns:\n            Tuple[Relation]: All of the relations from the source adapter that pass the filters\n        \"\"\"\n        filtered_schemas = self._get_filtered_schemas(patterns)\n\n        def accumulate_relations(schema_obj: BaseSourceAdapter._DatabaseObject, accumulator):\n            try:\n                relations = self._get_relations_from_database(schema_obj)\n                accumulator += [\n                    r for r in relations if at_least_one_full_pattern_match(r, patterns)]\n            except Exception as exc:\n                logger.critical(exc)\n                raise exc\n\n        # get all columns for filtered db/schema\n        catalog = []\n        logger.info('Building filtered catalog...')\n        start_time = time.time()\n        with ThreadPoolExecutor(max_workers=thread_workers) as executor:\n            for f_schema in filtered_schemas:\n                executor.submit(accumulate_relations, f_schema, catalog)\n\n        logger.info(f'Done building catalog. Found a total of {len(catalog)} relations '\n                    f'from the source in {duration(start_time)}.')\n        return tuple(catalog)\n\n    def _get_all_databases(self) -> List[str]:\n        raise NotImplementedError()\n\n    def _get_all_schemas(self, database: str) -> List[str]:\n        \"\"\" Returns the raw names of the schemas in the given database (raw case) \"\"\"\n        raise NotImplementedError()\n\n    def _get_filtered_schemas(self, filters: Iterable[dict]) -> List[_DatabaseObject]:\n        \"\"\" Get all of the filtered schema structures based on the provided filters.
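\n\n        Each filter keeps its 'database' pattern while 'name' (and then also\n        'schema') is relaxed to '.*', producing the schema- and database-level\n        filters built below.\n        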
\"\"\"\n db_filters = []\n schema_filters = []\n for _filter in filters:\n new_filter = _filter.copy()\n new_filter[\"name\"] = \".*\"\n if schema_filters.count(new_filter) == 0:\n schema_filters.append(new_filter)\n for s_filter in schema_filters:\n new_filter = s_filter.copy()\n new_filter[\"schema\"] = \".*\"\n if db_filters.count(new_filter) == 0:\n db_filters.append(new_filter)\n\n databases = self._get_all_databases()\n database_relations = [Relation(self._correct_case(\n database), \"\", \"\", None, None) for database in databases]\n filtered_databases = [\n rel for rel in database_relations if at_least_one_full_pattern_match(rel, db_filters)]\n\n # get all schemas in all databases\n filtered_schemas = []\n for db_rel in filtered_databases:\n schemas = self._get_all_schemas(\n database=db_rel.quoted(db_rel.database))\n schema_objs = [BaseSourceAdapter._DatabaseObject(schema,\n Relation(db_rel.database,\n self._correct_case(\n schema),\n \"\", None, None))\n for schema in schemas]\n filtered_schemas += [d for d in schema_objs if at_least_one_full_pattern_match(\n d.full_relation, schema_filters)]\n\n return filtered_schemas\n\n def _get_relations_from_database(self, schema_obj: _DatabaseObject):\n raise NotImplementedError()\n\n def _safe_query(self, query_sql: str) -> pd.DataFrame:\n \"\"\"runs the query and closes the connection.\"\"\"\n logger.debug('Beginning query execution...')\n start = time.time()\n conn = None\n cursor = None\n try:\n conn = self.get_connection()\n cursor = conn.connect()\n # we make the STRONG assumption that all responses will be small enough\n # to live in-memory (because sampling engine).\n # further safety added by the constraints in snowshu.configs\n # this allows the connection to return to the pool\n logger.debug(f'Executed query in {time.time()-start} seconds.')\n frame = pd.read_sql_query(query_sql, conn)\n logger.debug(\"Dataframe datatypes: %s\", str(frame.dtypes).replace('\\n', ' | '))\n if len(frame) > 0:\n for col in frame.columns:\n logger.debug(\"Pandas loaded element 0 of column %s as %s\", col, type(frame[col][0]))\n else:\n logger.debug(\"Dataframe is empty\")\n finally:\n if cursor:\n cursor.close()\n if conn:\n conn.dispose()\n return frame\n\n def _correct_case(self, val: str) -> str:\n \"\"\"The base case correction method for a source adapter.\n \"\"\"\n return val if self.preserve_case else correct_case(val, self.DEFAULT_CASE == 'upper')\n\n def _count_query(self, query: str) -> int:\n \"\"\"wraps any query in a COUNT statement, returns that integer.\"\"\"\n raise NotImplementedError()\n\n def check_count_and_query(self, query: str, max_count: int, unsampled: bool) -> pd.DataFrame:\n \"\"\"checks the count, if count passes returns results as a dataframe.\"\"\"\n raise NotImplementedError()\n\n def scalar_query(self, query: str) -> Any:\n \"\"\"Returns only a single value.\n\n When the database is expected to return a single row with a single column, \n this method will return the raw value from that cell. 
Will throw a :class:`TooManyRecords\n ` exception.\n\n Args:\n query: the query to execute.\n\n Returns:\n the raw value from cell [0][0]\n \"\"\"\n return self.check_count_and_query(query, 1, False).iloc[0][0]\n\n def _get_data_type(self, source_type: str) -> DataType:\n try:\n return self.DATA_TYPE_MAPPINGS[source_type.lower()]\n except KeyError as err:\n logger.error(\n '%s adapter does not support data type %s.', self.CLASSNAME, source_type)\n raise err\n","sub_path":"snowshu/adapters/source_adapters/base_source_adapter.py","file_name":"base_source_adapter.py","file_ext":"py","file_size_in_byte":8241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"371904515","text":"#!/usr/bin/env python\n# coding:utf-8\nimport _env # noqa\nfrom _route import route\nfrom _base import View, LoginView\nfrom app.pspm.model.role import Role\nfrom solo.config import HOST\nfrom app.web.model.session import Session\n\n\n@route('/')\nclass Index(View):\n def get(self):\n if self.get_cookie('S'):\n self.redirect('/html/pspm/base.html#/index/report')\n else:\n self.redirect('/html/pspm/base.html')\n\n\n@route('/logout')\nclass Logout(View):\n def get(self):\n if self.current_user_id:\n self.clear_cookie('S', domain=\".\" + HOST)\n Session.rm(self.current_user_id)\n self.redirect('/html/pspm/base.html')\n","sub_path":"pspm/view/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"239113319","text":"#from writer import *\nfrom inputs import *\nfrom numpy import linalg as LA\n\nimport pickle\nimport copy\nimport random\n#import matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\nnp.set_printoptions(suppress = True)\nnp.set_printoptions(precision = 6)\n\ndef plot_graph(G, filename):\n plt.figure()\n plt.title(\"Nodes: \" + str(len(G.nodes())) + \" Edges: \" + str(len(G.edges())))\n nx.draw(G, with_labels=True)\n # plt.xlabel('Degree')\n # plt.ylabel('Number of nodes')\n plt.draw()\n plt.savefig(filename + \"_\" + str(len(G.nodes())) + \".png\")\n plt.close()\n\ndef plot_deg_dist(G, filename):\n md = max([G.degree(u) for u in G.nodes()])\n\n Y = [0.0 for i in range(md + 1)]\n X = [i for i in range(md + 1)]\n\n for u in G.nodes():\n Y[G.degree(u)] += 1\n\n plt.figure()\n plt.plot(X, Y)\n plt.savefig(filename + '_' + str(len(G.nodes())) + \".png\")\n plt.close()\n\ndef supplement(GBD,G2,t1_G2):\n D = list(set(G2.nodes()) - set(GBD.nodes()))\n GBD.add_nodes_from(D)\n D.extend(nx.isolates(GBD))\n print(\"Isolated nodes:\", len(D))\n\n for u in D:\n for v in t1_G2:\n if nx.has_path(G2,u,v):\n p = nx.shortest_path(G2,source = u,target = v)\n for i in range(1,len(p)):\n GBD.add_edge(p[i - 1],p[i])\n break\n return GBD\n\ndef prep(_hG,_sG,_nG,hD,sD,nD,sim,mNO):\n\n hFG = []\n sFG = []\n nFG = []\n\n hFD = []\n sFD = []\n nFD = []\n\n outer = False\n mapped = 0\n sim = np.array(sim)\n for i in range(len(sim)):\n for j in range(len(sim)):\n\n pos = np.argwhere(np.amax(sim) == sim)\n #print pos,mapped\n #input('hi')\n uG = pos[0][0]\n uD = pos[0][1]\n flag = 0\n if mNO[uG] in _hG and uD in hD:\n hFG.append(mNO[uG])\n hFD.append(uD)\n flag = 1\n\n elif mNO[uG] in _sG and uD in sD:\n sFG.append(mNO[uG])\n sFD.append(uD)\n flag = 1\n\n elif mNO[uG] in _nG and uD in nD:\n nFG.append(mNO[uG])\n nFD.append(uD)\n flag = 1\n\n if flag == 1:\n sim[uG,:] = [-1.0 for i in range(len(sim))]\n 
sim[:,uD] = [-1.0 for i in range(len(sim))]\n\n                mapped += 1\n\n            sim[uG,uD] = -1.0\n\n            if mapped >= len(sim):\n                outer = True\n                break\n\n        if outer:\n            break\n    return hFG,sFG,nFG,hFD,sFD,nFD\n\n\ndef check(A,B,e):\n\n    for eachA in range(len(A)):\n        for eachB in range(len(A[0])):\n            if abs(A[eachA][eachB] - B[eachA][eachB]) > e:\n                return False\n\n    return True\n\n\ndef similarity(G1,G2):\n\n    #G1 = nx.gnp_random_graph(10,p = 0.4,directed = True)\n    #G2 = nx.gnp_random_graph(10,p = 0.3,directed = True)\n\n    #print (len(G1))\n    #print (len(G2))\n\n    inG1 = [G1.predecessors(u) for u in G1.nodes()]\n    inG2 = [G2.predecessors(u) for u in G2.nodes()]\n\n    outG1 = [G1.successors(u) for u in G1.nodes()]\n    outG2 = [G2.successors(u) for u in G2.nodes()]\n\n    #print (inG1)\n    #print (inG2)\n    #print (outG1)\n    #print (outG2)\n\n    oS = [[-1.0 for i in range(len(G2))] for j in range(len(G1))]\n    S = [[0.1 for i in range(len(G2))] for j in range(len(G1))]\n    #print (S)\n    e = 0.00001\n    Iterate = 10000\n    counter = 0\n\n    while(counter < Iterate):\n\n        #print (counter)\n        _S = [[0.0 for i in range(len(G2))] for j in range(len(G1))]\n        for i in G1.nodes():\n\n            for j in G2.nodes():\n                if len(outG1[i]) == 0 or len(outG2[j]) == 0:\n                    continue\n\n                for p in outG1[i]:\n                    for q in outG2[j]:\n                        _S[i][j] += S[p][q]\n\n        for i in G1.nodes():\n            for j in G2.nodes():\n\n                if len(inG1[i]) == 0 or len(inG2[j]) == 0:\n                    continue\n\n                for p in inG1[i]:\n                    for q in inG2[j]:\n                        _S[i][j] += S[p][q]\n\n\n        #f = lambda x: x\n        #sq = np.sum([[f(_S[u][v]) for v in range(len(_S[0]))] for u in range(len(_S))])\n\n        # normalize by the largest singular value of _S\n        PSD = np.dot(_S,np.transpose(_S))\n        w,v = LA.eig(PSD)\n        sq = math.sqrt(max(w))\n\n        _S = [[float(_S[j][i])/float(sq) for i in range(len(G2))] for j in range(len(G1))]\n\n        if check(S,_S,e) or check(oS,_S,e):\n            return S\n\n        oS = copy.copy(S)\n        S = copy.copy(_S)\n        #print (np.array(S))\n        counter += 1\n\n    # if the iteration cap is reached without converging, fall back to the latest estimate\n    return S\n\ndef dist(u, v):\n    d = 0.0\n    for i in range(len(u)):\n        d = d + math.pow((u[i] - v[i]), 2)\n\n    return math.sqrt(d)\n\ndef mapping(hD,sD,nD,hG,sG,nG):\n\n    #One to one mapping: DRN --> GRN\n    m = {}\n    for i in range(len(hD)):\n        m[hD[i]] = hG[i]\n\n    for i in range(len(sD)):\n        m[sD[i]] = sG[i]\n\n    for i in range(len(nD)):\n        m[nD[i]] = nG[i]\n\n    return m\n\ndef create(n,Coor,TRan):\n\n    G = nx.DiGraph()\n    G.add_nodes_from([i for i in range(n)])\n    for u in G.nodes():\n        for v in G.nodes():\n\n            if u == v:\n                continue\n\n            if dist(Coor[u], Coor[v]) <= TRan:\n                #r = random.randint(0, 2)\n                #if r == 0:\n                    #G.add_edge(u, v)\n                #if r == 1:\n                    #G.add_edge(v, u)\n                #else:\n                G.add_edge(u, v)\n                G.add_edge(v, u)\n\n\n\n    return G\n\n# NOTE: this place() is shadowed by a near-identical redefinition further down;\n# the later definition is the one Python actually uses\ndef place(Xlim, Ylim, TRan, N):\n\n    G = nx.DiGraph()\n    G.add_nodes_from([u for u in range(N)])\n\n    Coor = []\n\n    for i in range(N):\n        x = random.uniform(0, Xlim)\n        y = random.uniform(0, Ylim)\n\n        Coor.append([x, y])\n\n    print (Coor)\n    #plt.scatter([pt[0] for pt in Coor], [pt[1] for pt in Coor], s=3, c='blue')\n    #plt.show()\n\n    G = create(N,Coor,TRan)\n\n    return G, Coor\n\ndef correctness(gG,test,m):\n#Checking Correctness\n\n    for e in test.edges():\n        try:\n            if not nx.has_path(gG,m[e[0]],m[e[1]]):\n                return False\n        except:\n            continue\n\n    return True\n\ndef mapToGRN(rG,gD,hG,sG,nG,hD,sD,nD):\n\n    mgD = nx.DiGraph()\n    mgD.add_nodes_from(gD.nodes())\n    for e in gD.edges():\n\n        try:\n            if e[0] in hD and e[1] in sD:\n                u = hD.index(e[0])\n                v = sD.index(e[1])\n                if nx.has_path(rG,hG[u],sG[v]):\n                    mgD.add_edge(e[0],e[1])\n\n            elif e[0] in hD and e[1] in nD:\n                u = hD.index(e[0])\n                v = nD.index(e[1])\n                if nx.has_path(rG,hG[u],nG[v]):\n                    mgD.add_edge(e[0],e[1])\n\n\n            elif e[0] in sD 
and e[1] in sD:\n if e[0] == e[1]:\n continue\n\n u = sD.index(e[0])\n v = sD.index(e[1])\n if nx.has_path(rG,sG[u],sG[v]):\n mgD.add_edge(e[0],e[1])\n\n\n elif e[0] in sD and e[1] in nD:\n u = sD.index(e[0])\n v = nD.index(e[1])\n if nx.has_path(rG,sG[u],nG[v]):\n mgD.add_edge(e[0],e[1])\n\n except:\n continue\n\n return mgD\n\ndef refGRN(G,rH,rS,rN,hCount,sCount,nCount,kh,kn,mC):\n\n rG = nx.DiGraph()\n\n rG.add_nodes_from(rS[:sCount])\n #print \"This:\",[mC[i] for i in rS[:sCount]]\n hubFlag = False\n\n toBeMapped = rH + rN\n mapCount = 0\n\n for node in toBeMapped:\n\n count = 0\n for exist in rG.nodes():\n\n if not hubFlag:\n if G.has_edge(node, exist) or G.has_edge(exist, node):\n count += 1\n elif G.has_edge(exist, node):\n count += 1\n\n if not hubFlag and count >= kh:\n break\n if count >= kn:\n break\n if not hubFlag and count >= kh:\n rG.add_node(node)\n mapCount += 1\n elif count >= kn:\n rG.add_node(node)\n mapCount += 1\n\n #print mapCount\n\n if mapCount >= hCount and not hubFlag:\n toBeMapped = toBeMapped[len(rH):]\n mapCount = 0\n hubFlag = True\n continue\n\n elif mapCount >= nCount and hubFlag:\n break\n\n for e in G.edges():\n if e[0] in rG.nodes() and e[1] in rG.nodes():\n rG.add_edge(e[0],e[1])\n\n return rG\n\ndef reassign(r,hD,sD,nD,gD,Coor,Xlim,Ylim):\n\n #print \"gD:\",gD\n for i in range(int(r)):\n\n u = random.choice(nD)\n\n x = random.uniform(0,Xlim)\n y = random.uniform(0,Ylim)\n Coor[u] = (x,y)\n\n\n return hD,sD,nD,Coor\n\ndef saver(fileG,fileR,mgD,gD,m,cnt,Coor):\n\n\n for u in gD.nodes():\n\n s = str(cnt * 3600) + ' ' + str(u) + ' ' + str(Coor[u][0]) + ' ' + str(Coor[u][1])\n with open(fileR, \"a\") as myfile:\n myfile.write(s)\n myfile.write('\\n')\n\n for u in mgD.nodes():\n s = str(cnt * 3600) + ' ' + str(u) + ' ' + str(Coor[u][0]) + ' ' + str(Coor[u][1])\n with open(fileG, \"a\") as myfile:\n myfile.write(s)\n myfile.write('\\n')\n\ndef findSinks(G,D,K,t,gSink,dSink):\n\n\n allcontenders = [G.out_degree(u) for u in G.nodes()]\n contenders = sorted(range(len(allcontenders)), key = lambda x: allcontenders[x])\n contenders = contenders[- int(K):]\n contenders = [allcontenders[each] for each in contenders]\n print (contenders)\n\n with open(gSink, \"a\") as myfile:\n myfile.write(str(contenders) + '\\n')\n\n\n contenders = [D.out_degree(u) for u in D.nodes()]\n contenders = sorted(range(len(contenders)), key = lambda x: contenders[x])\n contenders = contenders[- int(K):]\n\n #print contenders\n\n with open(dSink, \"a\") as myfile:\n myfile.write(str(contenders) + '\\n')\n\n\ndef mCount(mList,G,hG, sG, nG):\n\n '''\n G = G.to_undirected()\n mList = []\n for i in G.nodes():\n #print (i)\n for j in G.nodes():\n if j <= i:\n continue\n for k in G.nodes():\n if k <= j:\n continue\n\n if G.has_edge(i,j) and G.has_edge(j,k) and G.has_edge(i,k):\n mList.append([i,j,k])\n\n\n '''\n C = [0.0 for i in range(len(G))]\n for motif in mList:\n for node in motif:\n C[node] += 1.0\n\n rankList = sorted(range(len(C)), key = lambda x: C[x])[::-1]\n\n rH = [i for i in rankList if i in hG]\n rS = [i for i in rankList if i in sG]\n rN = [i for i in rankList if i in nG]\n\n return C,rankList,rH,rS,rN\n\ndef tiers(G):\n\n hub = []\n sub = []\n non = []\n for i in G.nodes():\n\n if G.out_degree(i) == 0:\n non.append(i)\n elif G.in_degree(i) == 0:\n hub.append(i)\n else:\n sub.append(i)\n\n return hub,sub,non\n\ndef place(Xlim, Ylim, TRan, N):\n\n G = nx.DiGraph()\n G.add_nodes_from([u for u in range(N)])\n\n Coor = []\n\n for i in range(N):\n x = random.uniform(0, Xlim)\n y = 
random.uniform(0, Ylim)\n\n        Coor.append([x, y])\n\n    #print Coor\n    #plt.scatter([pt[0] for pt in Coor], [pt[1] for pt in Coor], s=3, c='blue')\n    #plt.show()\n\n    G = create(N,Coor,TRan)\n\n    return G, Coor\n\n\n#Main starts here\nisFirst = True\nhD = []\nsD = []\nnD = []\ncnt = -1\n\n#fileR = str(ND) + 'R.txt'\n#fileG = str(ND) + 'G.txt'\n\nyeast_directory = \"../Yeast_Data/\"\ndata_directory = '../Bhaktapur/Data/'\ngG = nx.read_gml(yeast_directory + 'Yeast_Ordered.gml')\ngG = nx.convert_node_labels_to_integers(gG)\n#print \"Number of nodes in GRN graph:\",len(gG)\n#print \"Number of edges in GRN graph:\",len(gG.edges())\nhG , sG, nG = tiers(gG)\n#print len(nG),len(sG),len(hG)\n\n\n#Count motif list and motif centrality in GRN graph gG\nmList = pickle.load( open(yeast_directory + \"Motif_Yeast.p\", \"rb\" ) )\n#mList = []\nmC,rmC,hG,sG,nG = mCount(mList,gG,hG,sG,nG)\n#print (mList)\n\n#pickle.dump(mList, open( \"mList.p\", \"wb\" ))\nprint (len(hG))\n\n'''\ngD,coor = place(Xlim,Ylim,TRan,ND)\n\n# Distribution of hub, sub and non nodes in DRN graph gD\nhCount = int(hR * float(ND))\nsCount = int(sR * float(ND))\nnCount = ND - hCount - sCount\n\n# List of hub, sub and non nodes in DRN graph gD and GRN graph gG\nhD = [i for i in range(hCount)]\nsD = [(i + hCount) for i in range(sCount)]\nnD = [(i + hCount + sCount) for i in range(nCount)]\n'''\n\ngD = nx.read_gml('../Bhaktapur/Orig_NepalDRN_0.gml')\ncoor = pickle.load(open('../Bhaktapur/Data/CC_locs.p', 'rb'))\n# each pickle holds a list of coordinates, so extend the flat list\n# (append would nest a whole list as a single element)\ncoor.extend(pickle.load(open('../Bhaktapur/Data/PoI_locs.p', 'rb')))\ncoor.extend(pickle.load(open('../Bhaktapur/Data/Vol_locs.p', 'rb')))\ncoor.extend(pickle.load(open('../Bhaktapur/Data/S_locs.p', 'rb')))\n\n#plot_deg_dist(gD, 'Plots/Orig_NepalDRN_degree')\n#plot_graph(gD, \"Plots/Orig_NepalDRN\")\n\nprint(\"Number of nodes in DRN graph:\", len(gD))\nprint(\"Number of edges in DRN graph:\", len(gD.edges()))\nprint(\"Density: \", (float(len(gD.edges()))/ (len(gD) * (len(gD) - 1))))\n\nhD = pickle.load(open(data_directory + \"HO.p\", \"rb\" ))\nsD = pickle.load(open(data_directory + \"SO.p\", \"rb\" ))\nnD = pickle.load(open(data_directory + \"NO.p\", \"rb\" ))\n\nhCount = len(hD)\nsCount = len(sD)\nnCount = len(nD)\n\nprint(\"hD\", hD, sD)\n\n# Find reference GRN\nrG = refGRN(gG, hG, sG, nG, hCount, sCount, nCount, kh, kn, mC)\n#print \"Number of nodes in Reference GRN graph:\", len(rG)\n#print \"Number of edges in Reference GRN graph:\", len(rG.edges())\n\n_rG = nx.convert_node_labels_to_integers(rG, first_label=0, label_attribute='old_label')\n\nmNO = {}\nmON = {}\nfor u in _rG.nodes():\n    v = _rG.node[u]['old_label']\n    mNO[u] = v\n    mON[v] = u\n\nsim = similarity(_rG, gD)\n\n_hG, _sG, _nG, hD, sD, nD = prep(hG[:hCount], sG[:sCount], nG[:nCount], hD, sD, nD, sim, mNO)\n\nmgD = mapToGRN(rG, gD, _hG, _sG, _nG, hD, sD, nD)\n\nprint(\"Number of nodes in BIO-DRN graph:\", len(mgD))\nprint(\"Number of edges in BIO-DRN graph:\", len(mgD.edges()))\nprint(\"Is Bio-DRN connected?\", nx.is_connected(mgD.to_undirected()))\n#print(\"Largest connected component\", len(list(nx.connected_component_subgraphs(mgD.to_undirected()))))\n\n#plot_deg_dist(mgD, 'Plots/notFinal_bio_degree')\n#plot_graph(mgD, \"Plots/notFinal_bio\")\n\nmgD = supplement(mgD, gD, hD)\nprint (\"FINAL NODE COUNT:\", len(mgD))\nprint (\"FINAL EDGE COUNT:\", len(mgD.edges()))\nprint(\"Is Bio-DRN connected after supplementary step?\", nx.is_connected(mgD.to_undirected()))\n\n#print(mgD.nodes())\n\n#plot_deg_dist(mgD, 'Plots/bio_final_degree')\n#plot_graph(mgD, 
\"Plots/bio_final\")\n\n'''\nwhile(True):\n\n if cnt >= iter:\n break\n\n cnt += 1\n\n if isFirst:\n gD,coor = place(Xlim,Ylim,TRan,ND)\n isFirst = False\n\n else:\n hD,sD,nD,coor = reassign(r,hD,sD,nD,gD,coor,Xlim,Ylim)\n gD = create(ND,coor,TRan)\n\n\n #gD = nx.erdos_renyi_graph(ND, p, seed=None, directed = True)\n #gD = nx.convert_node_labels_to_integers(gD,first_label = 0)\n print \"Number of nodes in DRN graph:\",len(gD)\n print \"Number of edges in DRN graph:\",len(gD.edges())\n\n #Distribution of hub, sub and non nodes in DRN graph gD\n hCount = int(hR * float(ND))\n sCount = int(sR * float(ND))\n nCount = ND - hCount - sCount\n\n #List of hub, sub and non nodes in DRN graph gD and GRN graph gG\n hD = [i for i in range(hCount)]\n sD = [(i + hCount) for i in range(sCount)]\n nD = [(i + hCount + sCount) for i in range(nCount)]\n #print \"Number of hub, sub and non nodes in DRN graph gD\",len(hD),len(sD),len(nD)\n #print \"Number of hub, sub and non nodes in GRN graph gD\",len(hG),len(sG), len(nG)\n\n #Find reference GRN\n rG = refGRN(gG,hG,sG,nG,hCount,sCount,nCount,kh,kn,mC)\n print \"Number of nodes in Reference GRN graph:\",len(rG)\n print \"Number of edges in Reference GRN graph:\",len(rG.edges())\n\n\n _rG = nx.convert_node_labels_to_integers(rG,first_label = 0,label_attribute = 'old_label')\n\n mNO = {}\n mON = {}\n for u in _rG.nodes():\n v = _rG.node[u]['old_label']\n mNO[u] = v\n mON[v] = u\n\n sim = similarity(_rG,gD)\n\n _hG,_sG,_nG,hD,sD,nD = prep(hG[:hCount],sG[:sCount],nG[:nCount],hD,sD,nD,sim,mNO)\n\n mgD = mapToGRN(rG,gD,_hG,_sG,_nG,hD,sD,nD)\n print \"Number of nodes in BIO-DRN graph:\",len(mgD)\n print \"Number of edges in BIO-DRN graph:\",len(mgD.edges())\n\n m = mapping(hD,sD,nD,_hG,_sG,_nG)\n print m\n print (\"THIS IS:\",str([mgD.out_degree(mON[each]) for each in _hG]))\n\n if not correctness(gG,mgD,m):\n print \"ALARM!\"\n else:\n print \"WELL MAPPED!\"\n\n neighbor(gD,mgD,m,hD,cnt)\n writers(gD,coor,cnt)\n #fname_o = str(ND) + 'o.gml'\n #nx.write_gml(gD,fname_o)\n\n #fname_m = str(ND) + 'm.gml'\n #nx.write_gml(mgD,fname_m)\n\n #input('check')\n #findSinks(mgD,gD,sink,cnt,gsink,dsink)\n\n #E = set(gD.edges()) # optimization\n #G_st = nx.DiGraph()\n #G_st.add_edges_from([e for e in gD.edges() if e in E])\n G_st = nx.minimum_spanning_tree(gD.to_undirected())\n\n s_st = ''\n for u in G_st.nodes():\n s_st += str(cnt * 600) + ' ' + str(u)\n\n for v in G_st.neighbors(u):\n if v < u:\n s_st += ' ' + str(v)\n s_st += '\\n'\n\n f_st = str(len(gD)) + '_st.txt'\n with open(f_st, \"a\") as myfile:\n myfile.write(s_st)\n\n\n G_os = gD.copy()\n diff = len(gD.edges()) - len(mgD.edges())\n for i in range(diff):\n e = random.choice(G_os.edges())\n G_os.remove_edge(e[0],e[1])\n\n\n s_os = ''\n for u in G_os.nodes():\n s_os += str(cnt * 600) + ' ' + str(u)\n for v in G_os.successors(u):\n s_os += ' ' + str(v)\n\n s_os += '\\n'\n\n f_os = str(len(gD)) + '_os.txt'\n with open(f_os, \"a\") as myfile:\n myfile.write(s_os)\n\n'''\n","sub_path":"LCN/referenceGRN4.py","file_name":"referenceGRN4.py","file_ext":"py","file_size_in_byte":17597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"421612761","text":"import matplotlib.pyplot as mt\r\nimport numpy as np\r\n#2 collmn plot\r\nx=np.array([0,1,2,3])\r\ny=np.array([3,8,1,10])\r\n#plot1\r\nmt.subplot(1,2,1)#the figure has 1 row, 2 columns, and this plot is the first 
plot.\r\nmt.title(\"Sales\")\r\nmt.plot(x,y)\r\n\r\n#plot2\r\nx1=np.array([7,8,9,10])\r\ny1=np.array([10,20,30,40])\r\nmt.subplot(1,2,2)\r\nmt.title(\"Income\")\r\nmt.plot(x1,y1)\r\nmt.suptitle(\"My Shop\")\r\nmt.show()\r\n\r\n#2 row plot\r\nx2=np.array([0,1,2,3])\r\ny2=np.array([3,7,4,8])\r\n#plot1\r\nmt.subplot(2,1,1)\r\nmt.plot(x2,y2)\r\n#plot2\r\nmt.subplot(2,1,2)\r\nmt.plot(x2,y2)\r\n\r\nmt.show()\r\n","sub_path":"python_special/subplotMat.py","file_name":"subplotMat.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"349886292","text":"import tinder_api_sms\nimport random\n\nmessages = [\"If you were a transformer you would be optimus fine\",\n            \"Guess what you and my dog have in common?\",\n            \"you remind me of a hot tub on a winter day.\",\n            \"Hey good lookin\",\n            \"How much does a polar bear weigh?\"]\n#things = [\" sc\",\" SC\",\" Sc\",\" insta\",\" Insta\",\" Snap\",\" snap\",\" snapchat\",\" Snapchat\", \"@\"]\n#things = [\"boat\",\"Boat\"]\n\nmatches = tinder_api_sms.all_matches()['data']['matches']\njew = 0\ndaybe = 0\nsam = 0\nMrs_cat=0\nblake = 0\n\nfor match in matches:\n    match_id = match['_id']\n    match_name = match['person']['name']\n    try:\n        match_bio = match['person']['bio']\n    except:\n        match_bio = \"No Bio\"\n\n    message = random.choice(messages)\n    if message == messages[0]:\n        jew += 1\n    if message == messages[1]:\n        daybe += 1\n    if message == messages[2]:\n        sam += 1\n    if message == messages[3]:\n        blake += 1\n    if message == messages[4]:\n        Mrs_cat += 1\n    try:\n        tinder_api_sms.send_msg(match_id,message)\n    except:\n        print(\"not sent\")\n\nprint(\"Justin:\"+ str(jew) + \"\\nDaybe:\" + str(daybe) + \"\\nSam:\" + str(sam) + \"\\nBlake:\" + str(blake) + \"\\nMrs.Cat:\" + str(Mrs_cat))\n","sub_path":"tinder_bot/Match_stuff.py","file_name":"Match_stuff.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"67859142","text":"#-*-coding:utf-8 -*-\nimport pandas as pd\nimport numpy as np\nimport matplotlib as mpl\nimport string\nimport datetime\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import plot\nfrom matplotlib.pyplot import show\nmovie=pd.read_csv('ans020173.csv',delimiter=',',usecols=[1,4],header=0,encoding='utf-8',names=['电影','票房'])\nmoviename=movie['电影']\nmoney=movie['票房']\nname_list=[]\nmoney_list=[]\nfor i in moviename:\n    name_list.append(i)\nfor i in money:\n    money_list.append(i)\nx=range(len(name_list))\ny=money_list\n# plt.plot(x,y,'yo-')\nrect = plt.bar(x,y,align=\"center\")\nplt.xlabel(u'Movie title')\nplt.ylabel(u'Box office')\nplt.title(u'Real-time box office')\nplt.yticks(y,money_list,rotation=45)\nplt.xticks(x, name_list, rotation=45)\nplt.margins(0.08)\nplt.subplots_adjust(bottom=0.15)\nplt.legend('best')\nplt.show()\n","sub_path":"Test/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"229250051","text":"\"\"\"\nClassic cart-pole system implemented by Rich Sutton et al.\nCopied from http://incompleteideas.net/sutton/book/code/pole.c\npermalink: https://perma.cc/C9ZM-652R\n\nPymunk version by Ian Danforth\n\"\"\"\n\nimport math\nimport gym\nimport pygame\nimport pymunk\nimport pymunk.pygame_util\nimport numpy as np\nfrom gym import spaces, logger\nfrom gym.utils import seeding\n\nfrom . 
import cartpole_utils as utils\n\n\"\"\"\nTODO\n----\n\n - Observation space should include force output of springs and their len\n - Ask Will if a muscle can apply an elongation force\n (even if only mechanically)\n - Action space should be function of added muscles\n - Observation space should include arrays which are a function of added\n muscles\n - Add hill-type muscle variant\n\"\"\"\n\n\ndef addAnchors(space, screen_width, track_pos_y, cart_shape):\n anchor_dim = 10\n anchors = getAnchors(cart_shape, anchor_dim)\n (left_anchor_body,\n left_anchor_shape,\n right_anchor_body,\n right_anchor_shape) = anchors\n cart_width, cart_height = utils.getShapeWidthHeight(cart_shape)\n anchor_pos_y = track_pos_y + (cart_height / 2) - (anchor_dim / 2)\n left_anchor_body.position = (0, anchor_pos_y)\n right_anchor_body.position = (screen_width - anchor_dim, anchor_pos_y)\n\n anchors = (\n left_anchor_body,\n left_anchor_shape,\n right_anchor_body,\n right_anchor_shape\n )\n space.add(*anchors)\n return anchors\n\n\ndef addSpringConstraints(\n space,\n cart_shape,\n left_anchor_shape,\n right_anchor_shape,\n rest_len,\n stiffness,\n damping):\n springs = getSpringConstraints(\n cart_shape,\n left_anchor_shape,\n right_anchor_shape,\n rest_len,\n stiffness,\n damping\n )\n space.add(*springs)\n return springs\n\n\ndef getAnchors(cart_shape, anchor_dim):\n anchor_width = anchor_height = anchor_dim\n cart_width, cart_height = utils.getShapeWidthHeight(cart_shape)\n\n left_anchor_body = pymunk.Body(body_type=pymunk.Body.STATIC)\n\n left_anchor_shape = utils.getPymunkRect(\n left_anchor_body,\n anchor_width,\n anchor_height\n )\n left_anchor_shape.sensor = True\n\n right_anchor_body = pymunk.Body(body_type=pymunk.Body.STATIC)\n right_anchor_shape = utils.getPymunkRect(\n right_anchor_body,\n anchor_width,\n anchor_height\n )\n right_anchor_shape.sensor = True\n\n return (\n left_anchor_body,\n left_anchor_shape,\n right_anchor_body,\n right_anchor_shape\n )\n\n\ndef getSpringConstraints(\n cart_shape,\n left_anchor_shape,\n right_anchor_shape,\n rest_len,\n stiffness,\n damping):\n cart_width, cart_height = utils.getShapeWidthHeight(cart_shape)\n anchor_width, anchor_height = utils.getShapeWidthHeight(left_anchor_shape)\n\n left_spring = pymunk.constraint.DampedSpring(\n cart_shape.body,\n left_anchor_shape.body,\n (0, cart_height / 2),\n (anchor_width / 2, anchor_height / 2), # Center of anchor\n rest_len,\n stiffness,\n damping\n )\n\n right_spring = pymunk.constraint.DampedSpring(\n right_anchor_shape.body,\n cart_shape.body,\n (anchor_width / 2, anchor_height / 2),\n (cart_width, cart_height / 2),\n rest_len,\n stiffness,\n damping\n )\n\n return left_spring, right_spring\n\n\ndef getSpringLength(spring):\n\n # Body positions are in world coords\n # Anchor's are positions in body coords.\n anchor_a_world = spring.a.position + spring.anchor_a\n anchor_b_world = spring.b.position + spring.anchor_b\n\n length = np.linalg.norm(anchor_a_world - anchor_b_world)\n\n return length\n\n\nclass MuscleCartPoleEnv(gym.Env):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 50\n }\n\n def __init__(self):\n # Pygame and display setup\n pygame.init()\n self.screen_width = 600\n self.screen_height = 400\n self.screen = pygame.display.set_mode(\n (self.screen_width, self.screen_height)\n )\n pygame.display.set_caption(\"muscled_cartpole.py\")\n self.clock = pygame.time.Clock()\n self.timesteps_per_second = 50\n self.timestep = 1 / self.timesteps_per_second\n\n self._initPymunk()\n\n 
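# Note: the action does not apply a force to the cart directly; it retunes the\n        # two damped springs (the 'muscles') by changing their stiffness and rest length.\n        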
# Action is four floats\n # [ left_stiffness, left_rest_len, right_stiffness, right_rest_len ]\n low_stiffness = 1.0\n high_stiffness = 20.0\n low_rest_len = 20.0\n high_rest_len = (self.screen_width / 2) - 30 - 5\n self.action_space = spaces.Box(\n low=np.array([\n low_stiffness,\n low_rest_len,\n low_stiffness,\n low_rest_len]),\n high=np.array([\n high_stiffness,\n high_rest_len,\n high_stiffness,\n high_rest_len])\n )\n\n # Observation Space\n # Angle at which to fail the episode\n self.theta_threshold_radians = 12 * 2 * math.pi / 360\n self.x_threshold = 2.4\n\n # Angle limit set to 2 * theta_threshold_radians so failing observation\n # is still within bounds\n high = np.array([\n self.x_threshold * 2,\n np.finfo(np.float32).max,\n self.theta_threshold_radians * 2,\n np.finfo(np.float32).max])\n\n self.observation_space = spaces.Box(-high, high)\n\n self.steps_beyond_done = None\n\n def _initPymunk(self):\n # Simulation space\n self.space = pymunk.Space()\n self.space.gravity = (0.0, -900.0)\n self.space.iterations = 20 # Double default\n\n # Debug draw setup (called in render())\n self.draw_options = pymunk.pygame_util.DrawOptions(self.screen)\n self.draw_options.flags = 3\n\n # Track\n track_pos_y = 100\n # Track outside of view area\n padding = 400\n self.track_body, self.track_shape = utils.addTrack(\n self.screen_width,\n self.space,\n track_pos_y,\n padding\n )\n\n # Cart\n cart_width = 60\n cart_height = 30\n cart_mass = 1.0\n self.cart_body, self.cart_shape = utils.addCart(\n self.screen_width,\n self.space,\n cart_width,\n cart_height,\n cart_mass,\n track_pos_y\n )\n\n # Pole\n pole_length = 100\n pole_mass = 0.1\n self.pole_body, self.pole_shape = utils.addPole(\n self.screen_width,\n self.space,\n pole_length,\n pole_mass,\n track_pos_y,\n cart_height\n )\n\n # Constraints\n self.constraints = utils.addConstraints(\n self.space,\n self.cart_shape,\n self.track_shape,\n self.pole_shape\n )\n\n # Spring Anchors\n spring_anchors = addAnchors(\n self.space,\n self.screen_width,\n track_pos_y,\n self.cart_shape\n )\n (self.left_anchor_body,\n self.left_anchor_shape,\n self.right_anchor_body,\n self.right_anchor_shape) = spring_anchors\n\n # Spring Constraints\n spring_initial_rest_len = 100\n spring_initial_stiffness = 1\n spring_damping = 1\n self.left_spring, self.right_spring = addSpringConstraints(\n self.space,\n self.cart_shape,\n self.left_anchor_shape,\n self.right_anchor_shape,\n spring_initial_rest_len,\n spring_initial_stiffness,\n spring_damping\n )\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, action):\n \"\"\"\n - Take action\n - Step the physics of the world\n - Check for 'done' conditions\n - Return reward as appropriate\n\n Note: render() must be called at least once before\n this method is called otherwise pymunk breaks.\n # e.g. 
OverflowError: Python int too large to convert to C long\n \"\"\"\n\n # Update spring states\n left_stiffness, left_rest_len, right_stiffness, right_rest_len = action\n self.left_spring.stiffness = left_stiffness\n self.left_spring.rest_length = left_rest_len\n self.right_spring.stiffness = right_stiffness\n self.right_spring.rest_length = right_rest_len\n\n # The most recent forces springs applied\n # Convert to a force, divide by the timestep passed to space.step()\n # TODO: Understand why these are uniformly negative\n left_spring_force = self.left_spring.impulse / self.timestep\n right_spring_force = self.right_spring.impulse / self.timestep\n\n left_spring_len = getSpringLength(self.left_spring)\n right_spring_len = getSpringLength(self.right_spring)\n\n theta = self.pole_body.angle % (math.pi * 2)\n x = self.cart_body.position[0]\n done = x < 0.0 \\\n or x > self.screen_width \\\n or theta < -self.theta_threshold_radians \\\n or theta > self.theta_threshold_radians\n done = bool(done)\n\n if not done:\n reward = 1.0\n elif self.steps_beyond_done is None:\n # Pole just fell!\n self.steps_beyond_done = 0\n reward = 1.0\n else:\n if self.steps_beyond_done == 0:\n logger.warn(\"\"\"\nYou are calling 'step()' even though this environment has already returned\ndone = True. You should always call 'reset()' once you receive 'done = True'\nAny further steps are undefined behavior.\n \"\"\")\n self.steps_beyond_done += 1\n reward = 0.0\n\n self.space.step(self.timestep)\n\n obs = [\n x,\n theta,\n left_spring_force,\n left_spring_len,\n right_spring_force,\n right_spring_len\n ]\n\n return obs, reward, done, {}\n\n def render(self, mode='human'):\n utils.handlePygameEvents()\n\n # Redraw all objects\n self.screen.fill((255, 255, 255))\n self.space.debug_draw(self.draw_options)\n pygame.display.flip()\n self.clock.tick(self.timesteps_per_second)\n\n def reset(self):\n if self.space:\n del self.space\n self._initPymunk()\n","sub_path":"envs/muscle_cartpole.py","file_name":"muscle_cartpole.py","file_ext":"py","file_size_in_byte":10420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"654487951","text":"from modeller import *\nfrom modeller.scripts import complete_pdb\n\nlog.verbose()\nenv = environ()\nenv.libs.topology.read(file='$(LIB)/top_heav.lib')\nenv.libs.parameters.read(file='$(LIB)/par.lib')\nenv.io.atom_files_directory = './:../atom_files/'\n\naln = alignment(env)\nfor (code) in (('TvLDH.BL00080001'), ('TvLDH.B99990001')):\n mdl = complete_pdb(env, code)\n aln.append_model(mdl, atom_files=code, align_codes=code)\naln.write(file='bmodel-loop.ali', alignment_format='PIR')\n","sub_path":"resource/align_seq.py","file_name":"align_seq.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"325862154","text":"#Cameron Kramr\n#10/09/2020\n#EENG 350\n#Section A \n#Computer Vision\n#NOTE, this module requires pygame to be installed in order to run\n#This file contains the subroutines that are used in the aruco marker detection scheme.\n\nimport multiprocessing as mp\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport time\nimport cv2\nimport numpy as np\nimport pygame\nimport math\n\n#Calculates the FPS\ndef calc_fps(start_time, end_time):\n return 1/(end_time-start_time)\n\n#Display the aruco markers onto the pigame display\ndef pygame_aruco_display_manager(input_pipe, debug = False):\n #Initialize pygame objects\n 
pygame.init()\n gameDisplay = pygame.display.set_mode((800, 600))\n gameDisplay.fill((0,0,0))\n font = pygame.font.SysFont(None, 20)\n\n inputs = []\n\n gain = 1000\n width, height = pygame.display.get_surface().get_size()\n\n #count = 0\n\n #Infinite loop to handle drawing new frames of the locations of markers\n while(True):\n\n #expect data of this form:\n #[(id, rvecs, tvecs), ...])\n\n #Blocking wait for input into the pipe\n inputs = input_pipe.recv()\n \n #record the time\n start_time = time.time()\n \n #Clear the display\n gameDisplay.fill((0,0,0))\n \n #Loop over all the inputs of the form described above\n for i in inputs:\n #print(\"Pygame processing: \" + str(i[0]))\n #create text image for showing the marker ID\n img = font.render(\"ID: \" + str(i[0]), True, (255, 0, 0))\n\n #Find the position the marker should appear on the display\n px = int(i[1][0] * gain + width/2)\n py = int(height - i[1][2] * gain)\n #Draw the circle and blit the text onto the display\n pygame.draw.circle(gameDisplay, (255, 255, 255), (px, py), 10)\n gameDisplay.blit(img, (px, py - 10))\n\n #Record the end time \n end_time = time.time()\n \n #Calculate the FPS the code runs at if in debugging mode\n if(debug):\n print(\"Pygame FPS: \" + str(int(1/(end_time- start_time))))\n \n #Update the display with the new images and clear the input\n pygame.display.update()\n inputs = []\n\n#Estimates the pose/position of a marker from the pipes\ndef cv2_estimate_pose(input_pipe, side_length, cam_mtx, dis_coefs, debug = False, offset_mat = np.zeros((3))):\n output = []\n input = []\n #input_pipe.set_blocking(False)\n #count = 0\n #print(\"DETECTING\")\n #Infinite loop runs subroutine until terminated by parent thread\n while(True):\n #Expect data of shape:\n #([id, corners], ...)\n #blocking wait on data to appear at input pipe\n #print(\"CV_Thread getting pipe\")\n inputs = input_pipe.recv()\n #print(\"CV_Thread got pipe\")\n\n #Record the start time\n start_time = time.time()\n\n #Loop over the inputs received from the pipe\n for i in inputs:\n #Find the translation and rotation vectors. 
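Each rvec is a Rodrigues rotation vector and each tvec is a translation in camera coordinates. 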
Object points less important\n            rvecs, tvecs, _objPoints = cv2.aruco.estimatePoseSingleMarkers(i[1], side_length, cam_mtx, dis_coefs)\n            #Offset can be used to set the center position of the marker within itself, if it is in a cube for example\n\n            dst, _ = cv2.Rodrigues(rvecs)\n\n            tvecs = tvecs + dst@offset_mat\n            output.append([i[0][0], tvecs.reshape(3), rvecs.reshape(3)])\n\n        #Send output back to main thread for further processing\n        #print(\"sending out pipe\")\n        input_pipe.send(output)\n        #print(\"sent out pipe\")\n\n        #Clear output\n        output = []\n\n        #Record end time and print out if in debugging mode\n        end_time = time.time()\n        if(debug):\n            print(\"Cv2 Pose FPS: \" + str(int(1/(end_time - start_time))))\n\n\n#Detects the aruco markers from image pipes\ndef cv2_detect_aruco_routine(input_pipe, aruco_dict, parameters, debug = False):\n\n    output = []\n\n    while(True):\n        #Grab a frame\n        frame_grab_start = time.time()\n        frame_grey = input_pipe.recv()\n        frame_grab_end = time.time()\n        \n        #Frame arrives already converted to grey scale by the grabbler\n\n        #debug info\n        if(debug):\n            print(\"CV2 grab Frame FPS: \" + str(int(calc_fps(frame_grab_start, frame_grab_end))))\n        #Find the aruco markers\n        frame_det_start = time.time()\n        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(frame_grey, aruco_dict, parameters = parameters)\n        frame_det_end = time.time()\n\n        if(debug):\n            print(\"CV2 Detect FPS: \" + str(int(calc_fps(frame_det_start, frame_det_end))))\n\n\n        #if any are present, send the data out\n        if(len(corners) != 0):\n            #construct & send output\n            output = [(item, corners[iter]) for iter, item in enumerate(ids)]\n            #print(\"Found: \" + str(output))\n            input_pipe.send(output)\n\n#Grabs the images and stuffs them into appropriate pipes\ndef picam_image_grabbler(inputpipe, image_pipes, resolution, frame_rate, debug = False, format = \"bgr\"):\n    #Pi camera setup\n    camera = PiCamera()\n    camera.resolution = resolution\n    camera.framerate = frame_rate\n    camera.ISO = 1600\n    camera.sensor_mode = 7\n\n    rawCapture = PiRGBArray(camera, size = resolution)\n    #If there is only one pipe, make it into an array to not break future code\n    if(not isinstance(image_pipes, (list,tuple))):\n        image_pipes = [image_pipes]\n\n    out_pipe_count = len(image_pipes)\n    output_counter = 0\n\n    #Capture the frames continuously from the camera\n    for frame in camera.capture_continuous(rawCapture, format = format, use_video_port = True):\n        #print(\"Grabbled Frame!\")\n        #send image down appropriate pipes\n        start_time = time.time()\n        #print(\"PI Cam sending image into pipe:\")\n        frame_grey = cv2.cvtColor(frame.array, cv2.COLOR_BGR2GRAY)\n\n        image_pipes[output_counter].send(frame_grey)\n        output_counter += 1\n        #print(\"Pi cam done sending image\")\n        if(debug):\n            #key = cv2.waitKey(1)\n            #print(\"Grabbled at: \" + str(int(calc_fps(start_time, time.time()))))\n            #cv2.imshow(\"Image\", frame.array)\n            pass\n        #Clear the pipe counter if necessary\n        if(output_counter >= out_pipe_count):\n            output_counter = 0\n\n        #Clear this mmal buffer so it won't overflow\n        rawCapture.truncate(0)\n\n        end = time.time()\n        if(debug):\n            print(\"Pi_CAM_FPS: \" + str(int(calc_fps(start_time, end))))\n","sub_path":"Demo_2/Raspberry_Pi/Threading/Aruco_Multi_Threading.py","file_name":"Aruco_Multi_Threading.py","file_ext":"py","file_size_in_byte":6576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"44814369","text":"\"\"\"This is the main module for the online database search of LIPIDMAPS, LipidHome and HMDB.\nEach database is searched separately and with 
different parameter settings, taken from the\nwebsearch_parameters.csv file. \nThe input file is a .csv format and is generated from SIEVE or XCMS and processed through \nAmalgamator (if combining positive and negative mode data). \nIf a database search does not complete successfully, data up to that point is saved and \nthe program moves to the next database search. At the end of the run, there is a listing on\nscreen to indicate which database searches (if any) did not complete successfully.\n\nAttributes:\n endTime (float): Time the program ended running\n failTaghm (int): If equal to 1, the HMDB database search did not complete successfully\n failTaglh (int): If equal to 1, the LipidHome database search did not complete successfully\n failTaglm (int): If equal to 1, the LIPIDMAPS database search did not complete successfully\n inputFile (dataframe): The data from the input file (entered by the user)\n startTime (float): Time the program started running\n\"\"\"\nfrom __future__ import print_function\nfrom future import standard_library\nstandard_library.install_aliases()\nimport time\nimport pandas as pd\nimport io\n\nimport getFile as gif\nimport lipidMapsWebSearch as lms\nimport lipidhomeWebSearch as lhs\nimport hmdbWebSearch as hs\n\n\n# Disable chained assignment warning\npd.options.mode.chained_assignment = None\n\n# gets input file as a DF\ninputFile = gif.readFile()\nlipidmapsTol, lipidhomeTol, hmdbTol, searchCriteria = gif.readParameters()\n\nstartTime = time.time()\n# At the end each of these flags will tell us if a database search completed successfully (=1)\n# Initialised to zero here\nfailTaglm = 0\nfailTaglh = 0\nfailTaghm = 0\n\n# new option ot search by 'computational', 'curated' or both\n# set this option in the websearch_parameters.csv\n\nif searchCriteria == 'ALL':\n LMstatus = 'all'\n # comment out whichever website you DON'T want to search\n failTaglm = lms.lipmapSearch(inputFile, lipidmapsTol, LMstatus)\n failTaglh = lhs.liphomeSearch(inputFile, lipidhomeTol)\n failTaghm = hs.hmdbSearch(inputFile, hmdbTol)\n\nif searchCriteria == 'COM':\n LMstatus = 'computational'\n # comment out whichever website you DON'T want to search\n failTaglm = lms.lipmapSearch(inputFile, lipidmapsTol, LMstatus)\n failTaglh = lhs.liphomeSearch(inputFile, lipidhomeTol)\n failTaghm = hs.hmdbSearch(inputFile, hmdbTol)\n\nif searchCriteria == 'CUR':\n LMstatus = 'curated'\n failTaglm = lms.lipmapSearch(inputFile, lipidmapsTol, LMstatus)\n\nendTime = time.time()\n\nprint(\"\\n\\nSUMMARY:\")\n\nprint(\"Processing complete in\", int((endTime - startTime)), \"seconds\\n\")\n\nif failTaglm != 0:\n print('LIPIDMAPS search did not complete successfully\\n')\nif failTaglh != 0:\n print('LipidHome search did not complete successfully\\n')\nif failTaghm != 0:\n print('HMDB search did not complete successfully\\n')\n","sub_path":"Web search/webSearch.py","file_name":"webSearch.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"470825824","text":"#!/usr/bin/env python\n#\n# Electrum - lightweight Bitcoin client\n# Copyright (C) 2012 thomasv@gitorious\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without 
even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\nimport sys, time, datetime, re, threading\nfrom electrum.i18n import _, set_language\nfrom electrum.util import print_error, print_msg, parse_url\nfrom electrum.plugins import run_hook\nimport os.path, json, ast, traceback\nimport shutil\n\n\ntry:\n import PyQt4\nexcept Exception:\n sys.exit(\"Error: Could not import PyQt4 on Linux systems, you may try 'sudo apt-get install python-qt4'\")\n\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nimport PyQt4.QtCore as QtCore\n\nfrom electrum import WalletStorage, Wallet\nfrom electrum.i18n import _\nfrom electrum.bitcoin import MIN_RELAY_TX_FEE\n\ntry:\n import icons_rc\nexcept Exception:\n sys.exit(\"Error: Could not import icons_rc.py, please generate it with: 'pyrcc4 icons.qrc -o gui/qt/icons_rc.py'\")\n\nfrom util import *\nfrom main_window import ElectrumWindow\nfrom electrum.plugins import init_plugins\n\nclass Timer(QtCore.QThread):\n def run(self):\n while True:\n self.emit(QtCore.SIGNAL('timersignal'))\n time.sleep(0.5)\n\nclass OpenFileEventFilter(QObject):\n def __init__(self, windows):\n self.windows = windows\n super(OpenFileEventFilter, self).__init__()\n\n def eventFilter(self, obj, event):\n if event.type() == QtCore.QEvent.FileOpen:\n if len(self.windows) >= 1:\n self.windows[0].set_url(event.url().toString())\n return True\n return False\n\n\nclass ElectrumGui:\n\n def __init__(self, config, network, app=None):\n self.network = network\n self.config = config\n self.windows = []\n self.efilter = OpenFileEventFilter(self.windows)\n if app is None:\n self.app = QApplication(sys.argv)\n self.app.installEventFilter(self.efilter)\n\n init_plugins(self)\n\n\n def main(self, url):\n\n storage = WalletStorage(self.config)\n if not storage.file_exists:\n import installwizard\n wizard = installwizard.InstallWizard(self.config, self.network, storage)\n wallet = wizard.run()\n if not wallet: \n exit()\n else:\n wallet = Wallet(storage)\n wallet.start_threads(self.network)\n \n self.main_window = w = ElectrumWindow(self.config, self.network)\n\n # plugins that need to change the GUI do it here\n run_hook('init')\n\n w.load_wallet(wallet)\n\n s = Timer()\n s.start()\n\n self.windows.append(w)\n if url: w.set_url(url)\n w.app = self.app\n w.connect_slots(s)\n w.update_wallet()\n\n self.app.exec_()\n\n wallet.stop_threads()\n\n\n","sub_path":"gui/qt/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"410268108","text":"import tensorflow as tf\nimport numpy as np\n\n\ndef _blur2d(x, f=[1, 2, 1], normalize=True, flip=False, stride=1):\n\n # Finalize filter kernel.\n f = np.array(f, dtype=np.float32)\n if f.ndim == 1:\n f = f[:, np.newaxis] * f[np.newaxis, :]\n assert f.ndim == 2\n if normalize:\n f /= np.sum(f)\n if flip:\n f = f[::-1, ::-1]\n f = f[:, :, np.newaxis, np.newaxis]\n f = np.tile(f, [1, 1, int(x.shape[1]), 1])\n\n # No-op => early exit.\n if f.shape == (1, 1) and f[0, 0] == 1:\n return x\n\n # Convolve using depthwise_conv2d.\n orig_dtype = x.dtype\n x = tf.cast(x, tf.float32) # tf.nn.depthwise_conv2d() doesn't support fp16\n f = tf.constant(f, dtype=x.dtype, name='filter')\n strides = [1, 1, stride, stride]\n x = tf.nn.depthwise_conv2d(x,\n 
f,\n strides=strides,\n padding='SAME',\n data_format='NCHW')\n x = tf.cast(x, orig_dtype)\n return x\n\n\ndef _upscale2d(x, factor=2, gain=1):\n assert x.shape.ndims == 4 and all(dim.value is not None\n for dim in x.shape[1:])\n assert isinstance(factor, int) and factor >= 1\n\n # Apply gain.\n if gain != 1:\n x *= gain\n\n # No-op => early exit.\n if factor == 1:\n return x\n\n # Upscale using tf.tile().\n s = x.shape\n x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])\n x = tf.tile(x, [1, 1, 1, factor, 1, factor])\n x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])\n return x\n\n\ndef _downscale2d(x, factor=2, gain=1):\n\n # 2x2, float32 => downscale using _blur2d().\n if factor == 2 and x.dtype == tf.float32:\n f = [np.sqrt(gain) / factor] * factor\n return _blur2d(x, f=f, normalize=False, stride=factor)\n\n # Apply gain.\n if gain != 1:\n x *= gain\n\n # No-op => early exit.\n if factor == 1:\n return x\n\n # Large factor => downscale using tf.nn.avg_pool().\n # NOTE: Requires tf_config['graph_options.place_pruned_graph']=True to work.\n ksize = [1, 1, factor, factor]\n return tf.nn.avg_pool(x,\n ksize=ksize,\n strides=ksize,\n padding='VALID',\n data_format='NCHW')\n\n\ndef blur2d(x, f=[1, 2, 1], normalize=True):\n with tf.variable_scope('Blur2D'):\n\n @tf.custom_gradient\n def func(x):\n y = _blur2d(x, f, normalize)\n\n @tf.custom_gradient\n def grad(dy):\n dx = _blur2d(dy, f, normalize, flip=True)\n return dx, lambda ddx: _blur2d(ddx, f, normalize)\n\n return y, grad\n\n return func(x)\n\n\ndef upscale2d(x, factor=2):\n with tf.variable_scope('Upscale2D'):\n\n @tf.custom_gradient\n def func(x):\n y = _upscale2d(x, factor)\n\n @tf.custom_gradient\n def grad(dy):\n dx = _downscale2d(dy, factor, gain=factor**2)\n return dx, lambda ddx: _upscale2d(ddx, factor)\n\n return y, grad\n\n return func(x)\n\n\ndef downscale2d(x, factor=2):\n with tf.variable_scope('Downscale2D'):\n\n @tf.custom_gradient\n def func(x):\n y = _downscale2d(x, factor)\n\n @tf.custom_gradient\n def grad(dy):\n dx = _upscale2d(dy, factor, gain=1 / factor**2)\n return dx, lambda ddx: _downscale2d(ddx, factor)\n\n return y, grad\n\n return func(x)\n\n\ndef nf(stage, fmap_base=8192, fmap_decay=1.0, fmap_max=512):\n return min(int(fmap_base / (2.0**(stage * fmap_decay))), fmap_max)\n\n\ndef instance_norm(x, epsilon=1e-8):\n assert len(x.shape) == 4 # NCHW\n x -= tf.math.reduce_mean(x, axis=[2, 3], keepdims=True)\n epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon')\n x *= tf.math.rsqrt(\n tf.reduce_mean(tf.square(x), axis=[2, 3], keepdims=True) + epsilon)\n return x\n\n\ndef apply_noise(x, weight, noise_var=None, randomize_noise=True):\n assert len(x.shape) == 4 # NCHW\n if noise_var is None or randomize_noise:\n noise = tf.random.normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]],\n dtype=tf.float32)\n else:\n noise = tf.cast(noise_var, x.dtype)\n\n return x + noise * tf.reshape(tf.cast(weight, x.dtype), [1, -1, 1, 1])\n\n\ndef pixel_norm(x, epsilon=1e-8):\n epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon')\n return x * tf.math.rsqrt(\n tf.reduce_mean(tf.square(x), axis=-1, keepdims=True) + epsilon)\n\n\ndef style_mod(x, dlatent, bias, weights, **kwargs):\n style = tf.matmul(dlatent, weights) + bias\n style = tf.reshape(style, [-1, 2, x.shape[1]] + [1] * (len(x.shape) - 2))\n return x * (style[:, 0] + 1) + style[:, 
1]\n","sub_path":"customlayers/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"71972974","text":"import spy #spy.py acts hear as a module\n\nprint(\"Enter two numbers:\")\na=int(input())\nb=int(input())\n\nprint(\"Enter the choice: 1.add 2.sub 3.mul 4.div\")\nch=int(input())\n\nif ch==1:\n\tspy.add(a,b) #can access add function in spy.py i.e.,module\nif ch==2: #spy module contains add sub mul div functions\n\tspy.sub(a,b) #thus now spy module is user defined that contains built-in functions\nif ch==3:\n\tspy.mul(a,b)\nif ch==4:\n\tspy.div(a,b)\n\n'''OUTPUT:\nstud@HP-246-Notebook-PC:~$ python abi_modules.py\nEnter two numbers:\n5\n4\nEnter the choice: 1.add 2.sub 3.mul 4.div\n3\nResult is 20\nstud@HP-246-Notebook-PC:~$ python abi_modules.py\nEnter two numbers:\n3\n2\nEnter the choice: 1.add 2.sub 3.mul 4.div\n4\nResult is 1.5\n'''","sub_path":"abi_modules.py","file_name":"abi_modules.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"584897427","text":"import logging\n\nfrom sklearn.manifold import TSNE\n\nimport matplotlib.pyplot as plt\n\nfrom crits.vectorizer import TFIDF\nfrom crits.dataset import CriticalTextDataset\n\n\ndef visualize_data():\n criticaltext_dataset = CriticalTextDataset()\n tweets, labels = criticaltext_dataset.load_dataset()\n\n logging.info(\"Initializing vectorizer\")\n vectorizer = TFIDF()\n vectorizer.load_idf_values()\n tsne_object = TSNE()\n\n training_vectors = vectorizer.convert_corpus_to_vectors(\n tweets)\n\n reduced_vectors = tsne_object.fit_transform(training_vectors)\n\n critical_tweet_vectors = []\n non_critical_tweet_vectors = []\n\n for index, vector in enumerate(reduced_vectors):\n if labels[index]:\n critical_tweet_vectors.append(vector)\n else:\n non_critical_tweet_vectors.append(vector)\n\n plt.scatter(*list(zip(*critical_tweet_vectors)), c=\"r\")\n plt.scatter(*list(zip(*non_critical_tweet_vectors)), c=\"b\")\n plt.savefig(\"data_visualization.png\")\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)\n logger = logging.getLogger(__name__)\n visualize_data()\n","sub_path":"visualize_training_data.py","file_name":"visualize_training_data.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"589235333","text":"# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #\n# Date: 14.10.2016 #\n# Author: Ole-Johan Skrede #\n# #\n# Solution proposal as part of the exercise program in #\n# INF4300 - Digital image analysis at the University of Oslo #\n# #\n# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #\n\"\"\"\nSolution proposal for exercise 8, task 1.\n\"\"\"\n\n# pylint: disable=expression-not-assigned\n# pylint: disable=bad-indentation\n\nimport os\nimport sys\n\nimport numpy as np\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport seaborn as sns # pylint: disable=import-error,unused-import\n\n# Import module from week_07\npath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../week_07'))\nif not path in sys.path:\n sys.path.insert(1, path)\ndel path\nimport 
multivariate_gauss_classifier#pylint:disable=import-error,wrong-import-position\n\ndef plot_image(image, fig_num, colormap='gray'):\n \"\"\"Plot image\"\"\"\n #plt.figure(fig_num)\n _, ax = plt.subplots()\n im = ax.imshow(image, cmap=colormap, interpolation='none')\n #ax.set_xticks(np.arange(0, image.shape[1]))\n ax.set_xticks(np.arange(0.5, image.shape[1] + 0.5), minor=True)\n ax.get_xaxis().set_ticks([]) # Hide tick labels\n ax.set_xlabel(r'$x_1$', fontsize=16) # Latex rendering\n #ax.set_yticks(np.arange(0, image.shape[0]))\n ax.set_yticks(np.arange(0.5, image.shape[0] + 0.5), minor=True)\n ax.get_yaxis().set_ticks([]) # Hide tick labels\n ax.set_ylabel(r'$x_2$', fontsize=16)\n ax.grid(b=False, which='major')\n ax.grid(b=True, which='minor', linestyle='-', color='gray')\n plt.xticks(np.arange(0, 21), np.arange(-10, 11))\n plt.yticks(np.arange(0, 21), np.arange(10, -11, -1))\n plt.colorbar(im)\n\n plt.tight_layout()\n\n fig_num += 1\n return fig_num\n\ndef plot_contours(fig_num, matrix, xmin, xmax, ymin, ymax, levels, vmin, vmax, emph_zero=False):\n \"\"\"Contour plot\"\"\"\n plt.figure(fig_num)\n matplotlib.rcParams['contour.negative_linestyle'] = 'solid'\n im = plt.imshow(matrix,\n interpolation='bilinear', origin='lower',\n cmap='magma', extent=[xmin, xmax, ymin, ymax],\n vmin=vmin, vmax=vmax)\n CS = plt.contour(matrix, levels, origin='lower', linewidths=2,\n colors='white', extent=[xmin, xmax, ymin, ymax])\n\n if emph_zero:\n # Thicken and dash zero contour (which is the decision boundary)\n zc = CS.collections[10] # You must know the index (in levels) of the zero contour\n plt.setp(zc, linewidth=4, linestyle='--')\n\n plt.clabel(CS, levels[0::2], inline=1, fmt='%1.0f', fontsize=14)\n plt.colorbar(im)\n plt.xlabel(r'$x_1$', fontsize=16)\n plt.ylabel(r'$x_2$', fontsize=16)\n plt.grid('off')\n plt.tight_layout()\n\n fig_num += 1\n return fig_num\n\ndef plot_class_image(image, fig_num, classes=None, name=None, colormap='gray',\n write_file=None, pixelated=False):\n \"\"\"Plot class image\"\"\"\n if classes is None:\n classes = np.unique(image)\n\n if not isinstance(classes, list):\n classes = list(classes)\n\n cmap = plt.cm.get_cmap(colormap)\n\n patches = []\n for c in classes:\n color = cmap(c/np.max(classes))\n classname = 'C '+str(c)\n patches.append(mpatches.Patch(color=color, label=classname))\n\n if pixelated: # Display pixel boundaries\n _, ax = plt.subplots()\n ax.imshow(image, cmap=colormap, interpolation='none', vmin=0,\n vmax=classes[-1])\n #ax.set_xticks(np.arange(0, image.shape[1]))\n ax.set_xticks(np.arange(0.5, image.shape[1] + 0.5), minor=True)\n ax.get_xaxis().set_ticks([]) # Hide tick labels\n ax.set_xlabel(r'$x_1$', fontsize=16)\n #ax.set_yticks(np.arange(0, image.shape[0]))\n ax.set_yticks(np.arange(0.5, image.shape[0] + 0.5), minor=True)\n ax.get_yaxis().set_ticks([]) # Hide tick labels\n ax.set_ylabel(r'$x_2$', fontsize=16)\n ax.grid(b=False, which='major')\n ax.grid(b=True, which='minor', linestyle='-', color='gray')\n plt.xticks(np.arange(0, 21), np.arange(-10, 11))\n plt.yticks(np.arange(0, 21), np.arange(10, -11, -1))\n plt.legend(bbox_to_anchor=(0, 1.02, 1., .102), loc=3, ncol=len(classes),\n mode=\"expand\", borderaxespad=0, handles=patches)\n\n else:\n plt.figure(fig_num)\n plt.imshow(image, cmap=colormap, interpolation='none', vmin=0, vmax=classes[-1])\n if name is not None:\n plt.title(name)\n plt.xticks([]), plt.yticks([])\n plt.legend(bbox_to_anchor=(0, 1.02, 1., .102), loc=3, ncol=len(classes),\n mode=\"expand\", borderaxespad=0, handles=patches)\n\n 
if write_file:\n        cv2.imwrite(write_file, image)\n    fig_num += 1\n    return fig_num\n\ndef main():\n    \"\"\"main\"\"\"\n\n    print('='*80)\n    print('Solution to weekly exercises in INF4300')\n    print('Exercise 8')\n    print('Task 1')\n    print('-'*80)\n\n    subtasks = ['a', 'c', 'd', 'e']\n    fig_num = 0\n\n    classes = np.array([1, 2])\n    prior = np.array([0.5, 0.5])\n    class_means = np.array([[3, 6], [3, -2]])\n    class_covs = np.array([[[0.5, 0], [0, 2]], [[2, 0], [0, 2]]])\n    class_inv_covs = np.array([[[2, 0], [0, 0.5]], [[0.5, 0], [0, 0.5]]])\n\n\n    # Task 1 a)\n    if 'a' in subtasks:\n        # Eigenvalues and eigenvectors of the covariance matrix\n        for c in range(2):\n            eig_vals, eig_vecs = np.linalg.eig(class_covs[c, :, :])\n            print(\"Eigenvalues for class {}: {}\".format(classes[c], eig_vals))\n            print(\"Eigenvectors for class {}:\".format(classes[c]))\n            print(eig_vecs)\n\n        delta = 0.1\n        xmin = -8\n        xmax = 10\n        ymin = -8\n        ymax = 10\n        vmin = -130\n        vmax = 40\n        x_vals = np.arange(xmin, xmax, delta)\n        y_vals = np.arange(ymin, ymax, delta)\n        xx, _ = np.meshgrid(x_vals, y_vals)\n        discr = np.zeros((2, xx.shape[0], xx.shape[1]))\n        for i, x in enumerate(x_vals):\n            for j, y in enumerate(y_vals):\n                for c in range(2):\n                    mean_diff = np.array([x, y]) - class_means[c, :]\n                    # Note j, i because of pyplot imshow convention\n                    discr[c, j, i] = (np.log(prior[c]) -\n                                      1/2*np.log(np.linalg.det(class_covs[c, :, :])) -\n                                      1/2*np.dot(mean_diff.T, np.dot(class_inv_covs[c, :, :], mean_diff)))\n\n\n        # Separate\n        levels = [np.arange(-100, 0, 10), np.arange(-60, 0, 10)]\n        for c in range(2):\n            fig_num = plot_contours(fig_num, discr[c, :, :], xmin, xmax, ymin, ymax,\n                                    levels[c], vmin, vmax)\n            plt.plot(class_means[c, 0], class_means[c, 1], 'ko')\n            plt.xlim([xmin, xmax])\n            plt.ylim([ymin, ymax])\n\n        # Both (all points < 0 belongs to class 2)\n        fig_num = plot_contours(fig_num, discr[0, :, :] - discr[1, :, :], xmin, xmax,\n                                ymin, ymax, np.arange(-100, 40, 10), vmin, vmax, emph_zero=True)\n        plt.plot(class_means[0, 0], class_means[0, 1], 'ko')\n        plt.plot(class_means[1, 0], class_means[1, 1], 'ko')\n        plt.xlim([xmin, xmax])\n        plt.ylim([ymin, ymax])\n\n\n    # Task 1 c)\n    if 'c' in subtasks and 'a' in subtasks:\n        # Plot decision boundary on top of existing contour-plot. 
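The quadratic below comes from setting the two class discriminants equal, g1(x) = g2(x). 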
This line should\n        # lie on top of the zero-contour.\n        x1 = np.linspace(-3.60233769, 9.60233769, 1000)\n        x2 = 59/16 - np.log(2)/4 - 9/8*x1 + 3/16*x1*x1\n        plt.plot(x1, x2, c=\"k\")\n        plt.xlim([xmin, xmax])\n        plt.ylim([ymin, ymax])\n\n    # Task 1 d)\n    if 'd' in subtasks:\n        feature_images = np.zeros((2, 21, 21))\n        for i in range(21):\n            for j in range(21):\n                x = -10 + i\n                y = -10 + j\n\n                feature_images[0, i, j] = y\n                feature_images[1, i, j] = x\n\n\n        fig_num = plot_image(np.flipud(feature_images[0, :, :]), fig_num,\n                             colormap='inferno')\n        fig_num = plot_image(np.flipud(feature_images[1, :, :]), fig_num,\n                             colormap='inferno')\n\n    # Task 1 e)\n    if 'd' in subtasks and 'e' in subtasks:\n        # Classify an image based on the two feature images from 'd', both with the\n        # analytically computed decision boundary, the multivariate gaussian\n        # classifier from last week, and the k-nearest-neighbour classifier which\n        # we implemented in Task 2 this week.\n\n        classes = np.array([1, 2])\n\n        # Analytical\n        classified_anal = np.zeros((21, 21))\n        for i in range(21):\n            for j in range(21):\n                x1 = feature_images[0, i, j]\n                x2 = feature_images[1, i, j]\n                if x2 > 59/16 - np.log(2)/4 - 9/8*x1 + 3/16*x1*x1:\n                    classified_anal[i, j] = classes[0]\n                else:\n                    classified_anal[i, j] = classes[1]\n\n        fig_num = plot_class_image(np.flipud(classified_anal), fig_num, classes=classes,\n                                   colormap='rainbow', pixelated=True)\n\n        # Multivariate gauss\n        mvgc = multivariate_gauss_classifier.Classification()\n        mvgc.classify([feature_images[0, :, :], feature_images[1, :, :]],\n                      class_means, class_covs, classes)\n        classified_gauss = mvgc.prop_classified_image\n        fig_num = plot_class_image(np.flipud(classified_gauss), fig_num, classes=classes,\n                                   colormap='rainbow', pixelated=True)\n\n\n\n\n    plt.show()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"solutions/week_08/inf4300_h16_ex08_t01.py","file_name":"inf4300_h16_ex08_t01.py","file_ext":"py","file_size_in_byte":9301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"87453940","text":"## -*- coding: utf-8 -*-\n\n__author__ = \"sylvain@infrae.com\"\n__format__ = \"plaintext\"\n__version__ = \"$Id: test_impl.py 31670 2008-10-31 10:21:14Z sylvain $\"\n\nimport doctest\nimport os\nimport os.path\nimport sys\nimport unittest\nfrom doctest import DocFileSuite\n\nimport py\nimport svnhelper.testing\nimport svnhelper.tests\nimport zc.buildout.testing\nfrom svnhelper.core import helper\n\nimport infrae.subversion\n\n\ndef setUp(test):\n    test_package = os.path.dirname(svnhelper.tests.__file__)\n    test_package = os.path.join(test_package, 'tests', 'my.testing')\n    tested_package = os.path.dirname(infrae.subversion.__file__)\n\n    zc.buildout.testing.buildoutSetUp(test)\n    zc.buildout.testing.install('py', test)\n    zc.buildout.testing.install_develop('infrae.subversion', test)\n    svnhelper.testing.setUpRepository(test)\n    test.globs['init_test_package'](test_package)\n    helper.import_to(test_package,\n                     test.globs['repository'])\n    helper.import_to(tested_package,\n                     test.globs['repository'] + '/infrae.subversion/trunk/infrae.subversion')\n\ndef tearDown(test):\n    svnhelper.testing.tearDownRepository(test)\n    zc.buildout.testing.buildoutTearDown(test)\n\nflags = (doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE |\n         doctest.REPORT_ONLY_FIRST_FAILURE | doctest.REPORT_NDIFF)\n\ndef have_pysvn():\n    impl = os.getenv('INFRAE_SUBVERSION_IMPL', 'PYSVN')\n    if impl == 'PYSVN':\n        try:\n            import pysvn\n            return True\n        except:\n            pass\n    return False\n\ndef test_file(name):\n    return 
os.path.join(os.path.dirname(__file__), name)\n\ndef test_suite():\n tests = [DocFileSuite(test_file('IMPL.txt'),\n optionflags=flags,\n globs=globals(),\n setUp=setUp,\n tearDown=tearDown,\n module_relative=False)]\n if have_pysvn():\n tests += [DocFileSuite(test_file('EXPORT.txt'),\n optionflags=flags,\n globs=globals(),\n setUp=setUp,\n tearDown=tearDown,\n module_relative=False)]\n return unittest.TestSuite(tests)\n\nif __name__ == '__main__':\n unittest.main(defaultTest='test_suite')\n","sub_path":"eggs/infrae.subversion-1.4.5-py2.6.egg/infrae/subversion/tests/test_impl.py","file_name":"test_impl.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"624692877","text":"'''\nCreated on Aug 21, 2012\n\n@author: tle\n'''\nimport importlib\n\nimport Controllers\nimport Views\n\nclass ModelManager(object):\n '''\n '''\n MODEL_ID = 0\n\n def __init__(self):\n self.Reset()\n\n def Reset(self):\n self.controllers = {}\n self.views = {}\n self.models = {}\n\n def Add(self, model, name):\n name = name\n \n self.models[name] = model['json']\n\n #\n # Import packages for the game object's controller and view\n #\n\n importlib.import_module('MVC.Controllers.' + model['name'] + 'Controller')\n\n mStr = \"Controllers.\" + \\\n model[\"name\"] + \"Controller.\" + \\\n model[\"name\"] + \"Controller(model['json'])\"\n self.controllers[name] = eval(mStr)\n\n importlib.import_module('MVC.Views.' + model['name'] + 'View')\n\n mStr = \"Views.\" + \\\n model[\"name\"] + \"View.\" + \\\n model[\"name\"] + \"View(model['json'])\"\n self.views[name] = eval(mStr)\n\n ModelManager.MODEL_ID += 1\n\n def Get(self, name):\n for k in self.models.keys():\n if name == k:\n return self.models[k]\n\n def Size(self):\n return len(self.models.keys())\n\ng_modelManager = ModelManager()","sub_path":"GameSource/MVC/BModelManager.py","file_name":"BModelManager.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"572113516","text":"#\n# Copyright 2012, Piston Cloud Computing, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport hashlib\nimport time\nimport uuid\n\nfrom burrow.common import exc\nfrom burrow.queue import base as queue\nfrom burrow.queue.sqlite import db\n\n\nqueue_table_create = \"\"\"\nCREATE TABLE queues (uuid CHAR(36),\n owner CHAR(36),\n tenant CHAR(36),\n name VARCHAR(80),\n UNIQUE (tenant, name));\n\nCREATE TABLE roles (uuid CHAR(36),\n queue CHAR(36),\n owner CHAR(36),\n tenant CHAR(36),\n user CHAR(36),\n action CHAR(60),\n name VARCHAR(80),\n UNIQUE (tenant, name));\n\"\"\"\n\nmessages_table_create = \"\"\"\nCREATE TABLE messages (uuid CHAR(36),\n queue CHAR(36),\n tenant CHAR(36),\n checksum CHAR(32),\n timestamp TIMESTAMP,\n first_accessed TIMESTAMP,\n payload TEXT);\n\"\"\"\n\n\ndef get_queue_uuid(obj, context, queue_name):\n query = \"\"\"SELECT uuid FROM queues\n WHERE name = 
:name\n AND tenant = :tenant\"\"\"\n args = {'name': queue_name,\n 'tenant': context.tenant,}\n\n _q = obj.execute((query, args)).fetchone()\n if not _q:\n raise exc.DatabaseError(\"No queue %s for tenant %s\" % (args['name'],\n args['tenant']))\n return _q['uuid']\n\n\n@db.lazy_init(queue_table_create)\nclass Backend(queue.Backend, db.BaseModel):\n def __init__(self):\n queue.Backend.__init__(self)\n db.BaseModel.__init__(self)\n\n def create(self, context, queue_name):\n q_uuid = str(uuid.uuid4())\n try:\n self.execute((\"\"\"INSERT\n INTO queues (uuid, owner, tenant, name)\n VALUES (?, ?, ?, ?)\"\"\", (q_uuid,\n context.user,\n context.tenant,\n queue_name)))\n except exc.DatabaseError:\n # If the queue exists, return it anyway.\n pass\n\n return self.get_url(context, queue_name)\n\n def delete(self, context, queue_name):\n self.execute((\"\"\"DELETE FROM queues\n WHERE uuid = ?\n AND tenant = ?\"\"\",\n (get_queue_uuid(self, context, queue_name),\n context.tenant)))\n\n def list(self, context, prefix=None):\n query = \"SELECT tenant, name FROM queues WHERE tenant = ?\"\n bindings = (context.tenant,)\n if prefix:\n query += \" AND name LIKE '?%'\"\n bindings = (context.tenant, prefix)\n\n queues = self.execute((query, bindings)).fetchall()\n return [queue.url(q) for q in queues]\n\n def get_url(self, context, queue_name):\n _queue = self.execute((\"\"\"SELECT *\n FROM queues\n WHERE tenant=?\n AND name=?\"\"\", (context.tenant, queue_name)\n )).fetchone()\n\n return queue.url(_queue)\n\n def create_role(self, context, queue_name, role_name, account, action):\n params = {'uuid': uuid.uuid4(),\n 'queue': get_queue_uuid(self, context, queue_name),\n 'name': role_name,\n 'owner': context.user,\n 'tenant': context.tenant,\n 'user': account,\n 'action': action}\n\n try:\n self.execute(('''INSERT INTO roles (uuid, queue, owner, tenant,\n user, action, name)\n VALUES (:uuid, :queue, :owner, :tenant,\n :user, :action, :name)''', params))\n return True\n except exc.DatabaseError:\n return False\n\n def delete_role(self, context, queue_name, role_name):\n try:\n self.execute((\"\"\"DELETE FROM roles\n WHERE queue = :queue AND name = :name\"\"\",\n {'queue': get_queue_uuid(self, context, queue_name),\n 'name': role_name}))\n return True\n except exc.DatabaseError:\n return False\n\n\n@db.lazy_init(messages_table_create)\nclass MessageBroker(queue.MessageBroker, db.BaseModel):\n def __init__(self):\n queue.MessageBroker.__init__(self)\n db.BaseModel.__init__(self)\n\n def send(self, context, queue_name, payload):\n args = {'uuid': str(uuid.uuid4()),\n 'queue': get_queue_uuid(self, context, queue_name),\n 'tenant': context.tenant,\n 'timestamp': time.time(),\n 'checksum': hashlib.md5(payload).hexdigest(),\n 'payload': payload,}\n\n query = \"\"\"INSERT INTO messages\n (uuid, queue, tenant, timestamp, checksum, payload)\n VALUES (:uuid, :queue, :tenant, :timestamp, \n :checksum, :payload)\"\"\"\n\n self.execute((query, args))\n\n return {'message_id': args['uuid'],\n 'checksum': args['checksum'],}\n\n def receive(self, context, queue_name, limit=1):\n cursor = self.execute((\"\"\"SELECT uuid AS message_id, \n payload,\n checksum, \n timestamp\n FROM messages\n WHERE tenant = ?\n AND queue = ?;\n \"\"\", (context.tenant,\n get_queue_uuid(self, context, queue_name)\n )))\n\n result = []\n for msg in cursor.fetchall():\n msg.update({'receipt': queue.receipt(msg['message_id'],\n msg['checksum'])})\n result.append({'message': msg})\n return result\n\n def delete(self, context, queue_name, uuid, checksum, 
received_at):\n self.execute((\"\"\"DELETE FROM messages\n WHERE tenant= ?\n AND queue= ?\n AND uuid = ?\n AND checksum = ?\"\"\",\n (context.tenant,\n get_queue_uuid(self, context, queue_name),\n uuid,\n checksum)))\n","sub_path":"burrow/queue/sqlite/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"491001200","text":"import tensorflow as tf\nimport config as cfg\n\ndef fc(input_, shape_, act=None):\n w = tf.Variable(tf.truncated_normal(shape_, stddev=0.1))\n b = tf.Variable(tf.constant(0.0, shape=[shape_[1]]))\n if act is not None:\n return act(tf.matmul(input_, w) + b)\n else:\n return tf.matmul(input_, w) + b\n\ndef conv(input_, shape_, act=None):\n w = tf.Variable(tf.truncated_normal(shape_, stddev=0.1))\n b = tf.Variable(tf.constant(0.1, shape=[shape_[3]]))\n if act is not None:\n return act(tf.nn.conv2d(input_, w, strides=[1, 1, 1, 1], padding='SAME') + b)\n else:\n return tf.nn.conv2d(input_, w, strides=[1, 1, 1, 1], padding='SAME') + b\n\ndef pool(input_):\n return tf.nn.max_pool(input_, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\nclass FeatureExtractor():\n def __init__(self):\n self.image = tf.placeholder(tf.float32, [None, 784])\n\n fc1 = fc(self.image, [784, 256], tf.nn.relu)\n #fc2 = fc(fc1, [512, 256], tf.nn.relu)\n fc3 = fc(fc1, [256, 128], tf.nn.relu)\n\n self.flat = fc3\n\n fc11 = fc(self.flat, [128, 256], tf.nn.relu)\n #fc22 = fc(fc11, [256, 512], tf.nn.relu)\n fc33 = fc(fc11, [256, 784], tf.nn.sigmoid)\n \n self.reconstruct = tf.reshape(fc33, [-1, 28, 28])\n\n self.loss = tf.reduce_mean(tf.square(self.image - fc33))\n trainer = tf.train.AdamOptimizer(cfg.learning_rate)\n self.train_step = trainer.minimize(self.loss)\n\n self.saver = tf.train.Saver()\n \n def init(self, sess):\n sess.run(tf.global_variables_initializer())\n print('Variables initialized')\n \n def load(self, sess):\n ckpt = tf.train.get_checkpoint_state('.')\n self.saver.restore(sess, ckpt.model_checkpoint_path)\n print('Model loaded from', ckpt.model_checkpoint_path, '.')\n \n def save(self, sess):\n self.saver.save(sess, './model.ckpt')\n print('Model saved.')\n \n def train(self, sess, images):\n loss, _ = sess.run([self.loss, self.train_step], feed_dict={self.image:images})\n return loss\n \n def test(self, sess, images):\n images_ = sess.run(self.reconstruct, feed_dict={self.image:images})\n return images_\n\n def get_feature(self, sess, images):\n feature = sess.run(self.flat, feed_dict={self.image:images})\n return feature\n ","sub_path":"hw6/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"560160786","text":"## Nathan Harris\n## 4-23-15\n## Assignment 4.1\n\n## import math for \"fabs\" instead of abs() for float values\nfrom math import fabs\n\n## global dictionary references\n## symbol : name\nstockNames = {'AAPL' : \"Apple\", 'TEX' : \"Terex\", 'GOOGL' : \"Google\", 'MSFT' : \"Microsoft\" }\n## symbol : [buy price , current price]\nstockPrices = { 'AAPL' : [65.04, 130.08], 'TEX' : [24, 26.93], 'GOOGL' : [550, 576.85], 'MSFT' : [21.67, 43.34] }\n## symbol : [risk value , share count]\nstockExposure = { 'AAPL' : [.05, 2], 'TEX' : [.35, 3], 'GOOGL' : [.22, 3], 'MSFT' : [.15, 10] }\n\n\n## asks the user for input, stores it, and returns the stockSymbol for use\n## in AddPrice() and AddExposure()\ndef GetNewStockInput(): \n 
## pretty menu\n print(\"\\n-- 1. ADDING STOCK --\\n\")\n\n ## continue looping until a valid symbol is provided and returned\n while True:\n ## get input\n stockSymbol = str(input(\"What is the symbol of this stock? (ie. AAPL for Apple)\\n\"))\n\n if stockSymbol in stockNames:\n print(\"\\nError! The stock you entered is already on file!\\n\\nTry again.\\n\")\n\n else:\n stockName = str(input(\"What is the full name of this stock?\\n\"))\n\n ## add key : value pair\n stockNames[stockSymbol] = stockName\n\n ## return the stockSymbol to use in the other functions\n return stockSymbol\n\n## gets the buy price & current price of stock and stores the values.\ndef AddPrice(stockSymbol):\n ## get input\n buyPrice = float(input(\"What is the buy price of this stock?\\n\"))\n currentPrice = float(input(\"What is the current price of this stock?\\n\"))\n\n ## add dictionary key : value pair\n stockPrices[stockSymbol] = [buyPrice, currentPrice]\n\n## gets the risk value and total shares, then stores the values\ndef AddExposure(stockSymbol):\n ## get input\n riskValue = float(input(\"What is the risk of the stock? (ie. .5 for 50%)\\n\"))\n totalShares = int(input(\"How many shares are there?\\n\"))\n\n ## add dictionary key : value pair\n stockExposure[stockSymbol] = [riskValue, totalShares]\n\n #pretty formatting\n print(\"\")\n\n## menu function to recommend a specific stock to purchase.\ndef GetRecommendedSale():\n print(\"\\n-- 2. RECOMMENDING STOCK --\\n\")\n\n ## create dictionary\n expectedSaleValues = {}\n\n ## iterate through stockNames\n for symbol, name in stockNames.items():\n ## get references to the prices & exposure matching the current iteration symbol\n currentSymbolPrices = stockPrices[symbol]\n currentSymbolExposure = stockExposure[symbol]\n\n ## get the expected sale value ( ( Current Price - Buy Price ) - Risk * Current Price ) * Shares\n expectedSaleValue = (( currentSymbolPrices[1] - currentSymbolPrices[0] ) - currentSymbolExposure[0] * currentSymbolPrices[1]) * currentSymbolExposure[1]\n\n ## add the symbol : expected value pair to the dictionary\n expectedSaleValues[symbol] = expectedSaleValue\n\n ## variables to display later\n sellNow = ['', 0]\n sellNext = ['', 0]\n sellSoon = ['', 0]\n\n ## iterate through the expected values\n for symbol, value in expectedSaleValues.items():\n ## if the absolute (float) of the current item's value is greater than the absolute value of the top stored\n ## or the top stored hasn't be assigned yet...\n if fabs(value) > fabs(sellNow[1]) or sellNow == ['', 0]:\n ## move the list down\n sellSoon = sellNext\n sellNext = sellNow\n ## assign it\n sellNow = [symbol, value]\n \n elif fabs(value) > fabs(sellNext[1]) or sellNext == ['', 0]:\n sellSoon = sellNext\n sellNext = [symbol, value]\n \n elif fabs(value) > fabs(sellSoon[1]) or sellSoon == ['', 0]:\n sellSoon = [symbol, value]\n\n print(\"\\nRECOMMENDATIONS:\\n\")\n\n if sellSoon[1] < 0 and sellNext[1] < 0 and sellNow[1] < 0:\n print(\"Cut your losses! 
You're bleeding money!\\n\")\n elif sellNow[1] < 0:\n print(\"Your biggest issue is you're losing money in at least one stock.\\n\")\n\n print(\"Sell NOW: \" + sellNow[0] + \" \" + stockNames[sellNow[0]] + \" \" + \"${:,.2f}\".format(sellNow[1]))\n print(\"Sell NEXT: \" + sellNext[0] + \" \" + stockNames[sellNext[0]] + \" \" + \"${:,.2f}\".format(sellNext[1]))\n print(\"Sell SOON: \" + sellSoon[0] + \" \" + stockNames[sellSoon[0]] + \" \" + \"${:,.2f}\".format(sellSoon[1]) + \"\\n\")\n \n\n## menu function to start populating the lists.\ndef AddNewStock():\n symbol = GetNewStockInput()\n AddPrice(symbol)\n AddExposure(symbol)\n\n Main()\n\n\n## Main function\ndef Main():\n ## display fancy stuff\n print(\"---- MAIN MENU ----\\n\")\n print(\"Options:\")\n print(\"1. Add Stock\")\n print(\"2. Recommend Sale\")\n print(\"3. Exit\\n\")\n\n while True:\n userSelection = int(input(\"Please enter the number of your choice.\\n\"))\n\n if userSelection == 1:\n AddNewStock()\n \n elif userSelection == 2:\n GetRecommendedSale()\n break\n \n elif userSelection == 3:\n exit()\n \n Main()\n\n\n## starts program\nMain()\n","sub_path":"Week4_Assignment_4.1.py","file_name":"Week4_Assignment_4.1.py","file_ext":"py","file_size_in_byte":5153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"507487278","text":"##############################################################################################\n## Company: FSAE Lafayette College \n## Engineers:Irwin Frimpong,Harrison Walker,Lia Chrysanthopoulos, Mithil Shah, Adam Tunnell \n## Last Updated : 05/10/2021 02:32:17 PM \n## Project Name: SCADA FSAE 2021 \n## Module Name: i2c_driver.py \n## Description: I2C Driver moduule with read and write methods \n#############################################################################################\nimport sys, os\nimport smbus\nimport redis\nimport time\nfrom datetime import datetime\n\n#CONFIG PATH\nlib_path = '/usr/etc/scada'\nconfig_path = '/usr/etc/scada/config'\n\nsys.path.append(lib_path)\nsys.path.append(config_path)\n\nimport config\n\n##########Declariing i2C Bus##############\nbus = smbus.SMBus(3) \n##########################################\n\n# read method for I2C Configured sensor \n# @param Sensor - Sensor of Interest\n# @return data - Value stored in the secondary register addresses defined in config\n\ndef read(Sensor):\n try:\n # Retrieving sensor address from Configuration Yaml File\n sensor_address = config.get('Sensors').get(str(Sensor)).get('primary_address') \n #Use RTC read method if primary address is 0x68 -> RTC\n if( sensor_address == 0x68):\n return read_rtc(Sensor)\n else:\n data = 0\n reg_address = config.get('Sensors').get(str(Sensor)).get('secondary_address')\n bit_length = config.get('Sensors').get(str(Sensor)).get('bit_length')\n if(bit_length == 8): #Read Byte\n if (type(reg_address) == list): \n #adds the values for each byte of the sensor together to get the overall result of the sensor\n for i in range(len(reg_address)):\n data = data|bus.read_byte_data(sensor_address,reg_address[i]) << (8 * i)\n else: \n data = bus.read_byte_data(sensor_address,reg_address) \n else: # Read Word\n data = bus.read_word_data(sensor_address,reg_address)\n \n return data\n except IOError:\n time.sleep(.0001)\n\n# write method for I2C driver writes specifed value to the sensor \n# @param Sensor - Sensor of Interest\n# @param Value - Value to written to the sensor\n\ndef write(Sensor, Value):\n try:\n #Use RTC write method if primary address 
is 0x68 -> RTC\n sensor_address = config.get('Sensors').get(str(Sensor)).get('primary_address')\n if(sensor_address == 0x68):\n return write_rtc(Sensor,Value)\n else:\n #Obtaining reg_adress list from Config YAML file\n reg_address = config.get('Sensors').get(str(Sensor)).get('secondary_address')\n numofBits = countTotalBits(Value)\n\n if(numofBits <= 8): #Use write_byte_data to write 8 bits\n bus.write_byte_data(sensor_address,reg_address,Value)\n else: #Use write_word_data to write value in 16 bits\n bus.write_word_data(sensor_address,reg_address,Value)\n\n except IOError:\n time.sleep(.0001)\n\n# read_rtc method for PCF-8523 RTC which reads the (month,day,year,hour,minutes,seconds) registers of the \n# rtc and returns unix time \n# @param Sensor - Sensor input of interest\n# @return Unix Time of RTC\n\ndef read_rtc(Sensor):\n data = \"\"\n seconds_data = \"\"\n mins_data = \"\"\n hours_data= \"\"\n\n try:\n sensor_address = config.get('Sensors').get(str(Sensor)).get('primary_address') \n reg_address = config.get('Sensors').get(str(Sensor)).get('secondary_address')\n FMT = '%Y-%m-%d %H:%M:%S'\n\n for i in range(len(config.get('Sensors').get(str(Sensor)).get('secondary_address'))):\n busval = bus.read_byte_data(sensor_address,reg_address[i])\n if (i == 0):\n seconds_data = str(hex(((busval & 0xF0)>> 4))) + str(hex((busval & 0xF))) \n elif (i == 1):\n mins_data = str(hex(((busval & 0xF0)>> 4))) + str(hex((busval & 0xF)))\n elif (i == 2):\n hours_data = str(hex(((busval & 0xF0)>> 4))) + str(hex((busval & 0xF)))\n elif (i == 3):\n days_data = str(hex(((busval & 0xF0)>> 4))) + str(hex((busval & 0xF))) \n elif (i == 4):\n months_data = str(hex(((busval & 0xF0)>> 4))) + str(hex((busval & 0xF)))\n elif (i == 5):\n years_data = str(hex(((busval & 0xF0)>> 4))) + str(hex((busval & 0xF)))\n\n time_str = (\"20\"+ years_data + \"-\" + months_data + \"-\" + days_data + \" \" + hours_data + \":\" + mins_data + \":\" + seconds_data).replace(\"0x\",\"\")\n return datetime.strptime(time_str, FMT).timestamp()\n #return (hours_data + \":\" + mins_data + \":\" + seconds_data).replace(\"0x\",\"\")\n\n except IOError:\n time.sleep(.0001)\n\n\n# write_rtc method for PCF-8523 RTC which writes in year,month,day,hour,minutes,seconds into their respective registers \n# @param Sensor - Sensor input of interest\n# @param Value - 'YR:MO:DD:HR:MI:SS' How we want value to be inputted\ndef write_rtc(Sensor,Value):\n val=Value.split(\":\")\n \n #Obtaining Primary and Secondary Addresses from Config YAML\n sensor_address = config.get('Sensors').get(str(Sensor)).get('primary_address') \n reg_address = config.get('Sensors').get(str(Sensor)).get('secondary_address')\n try:\n bus.write_byte_data(sensor_address,reg_address[0],int(val[0],16)) #Year Resgiter Address\n bus.write_byte_data(sensor_address,reg_address[1],int(val[1],16)) #Month Register Address\n bus.write_byte_data(sensor_address,reg_address[2],int(val[2],16)) #Day Register Address\n bus.write_byte_data(sensor_address,reg_address[3],int(val[3],16)) #Hours Register Address\n bus.write_byte_data(sensor_address,reg_address[4],int(val[4],16)) #Minutes Register Address \n bus.write_byte_data(sensor_address,reg_address[5],int(val[5],16)) #Second Register Address\n \n except IOError:\n time.sleep(.0001)\n\n#countTotalBits method finds the number of bits used to represent a number. 
This function to be used in the write i2c write method\n# @param num - Number of interest\n# @return Number of bits needed to represent the num input in binary\ndef countTotalBits(num):\n #convert number into it's binary and remove first two characters 0b.\n binary = bin(num)[2:]\n return len(binary)\n\n\n \n \n\n\n \n\n","sub_path":"drivers/i2c_driver.py","file_name":"i2c_driver.py","file_ext":"py","file_size_in_byte":6651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"78155574","text":"import torch.nn as nn\n\nfrom models.feedforwardnn import FeedForwardNN\nfrom models.multihead_attention import MultiHeadAttention\n\n\nclass Decoder(nn.Module):\n \"\"\"\n Decoder object/sublayer.\n\n Attributes (in alphabetical order)\n ----------------------------------\n dropout: Dropout from https://jmlr.org/papers/v15/srivastava14a.html.\n ffnn: Two-layer feedforward neural network with ReLU activation in between layers.\n layernorm: Layer normalization from https://arxiv.org/abs/1607.06450.\n masked_multihead_attention: Multihead Attention with mask after QK operation.\n multihead_attention: Multihead Attention to perform self-attention.\n \"\"\"\n\n def __init__(self, args):\n \"\"\"\n Basic initialization of Decoder.\n\n Arguments\n ---------\n args: Arguments used for overall process.\n \"\"\"\n super().__init__()\n\n self.ffnn = FeedForwardNN(args)\n self.layernorm = nn.LayerNorm(normalized_shape=args.d_model)\n self.multihead_attention = MultiHeadAttention(args=args)\n self.masked_multihead_attention = MultiHeadAttention(args=args, mask=True)\n self.dropout = nn.Dropout(p=0.1)\n\n def forward(self, x, enc_x):\n \"\"\"\n Forward pass for decoding.\n\n Arguments\n ---------\n x: Target data.\n enc_x: Output from the Encoder stack.\n\n Returns\n -------\n Output after one layer of decoding.\n \"\"\"\n attn_output = self.masked_multihead_attention(x, x, x)\n output1 = self.layernorm(x + attn_output)\n\n attn_output2 = self.multihead_attention(x, enc_x, enc_x)\n output2 = self.layernorm(attn_output2 + output1)\n\n output3 = self.layernorm(output2 + self.dropout(self.ffnn(output2)))\n\n return output3\n","sub_path":"src/models/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"514312576","text":"# Define a procedure, measure_udacity,\n# that takes as its input a list of strings,\n# and returns a number that is a count\n# of the number of elements in the input\n# list that start with the uppercase \n# letter 'U'.\n\ndef measure_udacity(x):\n result = 0\n for N in x:\n if N[0] == \"U\":\n result = result+1\n else:\n result\n return result\n\nprint (measure_udacity(['Dave','Sebastian','Katy']))\n#>>> 0\n\nprint (measure_udacity(['Umika','Umberto']))\n#>>> 2","sub_path":"Unit_3/Unit3_Quiz3.py","file_name":"Unit3_Quiz3.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"39599409","text":"import os\nimport cv2\nimport numpy as np\nimport torch\nfrom torchvision import transforms\nfrom tqdm import tqdm\nfrom models.unet import UNet\n\nimg_path = r\"I:\\dataset\\GID\\Large-scale Classification_5classes\\temp\"\n\n\ndef predict():\n img_path = r\"I:\\dataset\\GID\\Large-scale Classification_5classes\\temp\"\n\n im_w, im_h = 256, 256\n crop_w, crop_h = 2, 2\n stride_w, stride_h = im_w // crop_w, im_h // crop_h\n check_point = 
r\".\\saved\\unet.pth\"\n\n classes = [0, 1, 2, 3, 4]\n\n transform = transforms.Compose(\n [\n transforms.ToPILImage(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ]\n )\n\n model = UNet(len(classes))\n model.load_state_dict(torch.load(check_point))\n model.cuda()\n model.eval()\n\n for im in os.listdir(img_path):\n image_path = os.path.join(img_path, im)\n\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n h, w, _ = image.shape\n padding_h = (h//im_h + 1) * im_h\n padding_w = (w//im_w + 1) * im_w\n padding_image = np.zeros((padding_h, padding_w, 3), dtype=np.uint8) + 4\n padding_image[0:h, 0:w, :] = image[:, :, :]\n mask_image = np.zeros((padding_image.shape[0], padding_image.shape[1]), dtype=np.uint8) + 4\n padding_image = np.asarray(padding_image, dtype=np.uint8)\n\n with torch.no_grad():\n for i in tqdm(range(padding_h//im_h)):\n for j in range(padding_w//im_w):\n crop_image = padding_image[i*im_h:(i+1)*im_h,j*im_w:(j+1)*im_w, :]\n crop_image = transform(crop_image).cuda()\n crop_image = crop_image.reshape(1, 3, im_h, im_w)\n\n output = model(crop_image)\n\n _, pred = output.max(1)\n pred = pred.view(256, 256)\n\n start_h, start_w = i*im_h, j*im_w\n for si in range(stride_h):\n for sj in range(stride_w):\n #temp = pred.cpu().numpy()[si*crop_h : (si+1)*crop_h, sj*crop_w : (sj+1)*crop_w]\n mask_image[start_h+si*crop_h : start_h+(si+1)*crop_h,\n start_w+sj*crop_w : start_w+(sj+1)*crop_w] = \\\n pred.cpu().numpy()[si*crop_h : (si+1)*crop_h, sj*crop_w : (sj+1)*crop_w]\n save_label = os.path.join(r\".\\data\\GID_15classes\\predict_label\", im)\n # cv2.imshow('show', mask_image[0:h, 0:w])\n # cv2.waitKey(0)\n cv2.imwrite(save_label, mask_image[0:h, 0:w])\n save_visual = os.path.join(r\".\\data\\GID_15classes\\predict_visual\", im.split('.')[0] + 'visual.tif')\n translabeltovisual(save_label, save_visual)\n\n\ndef translabeltovisual(save_label, path):\n num_classes5 = [[255, 0, 0], [0, 255, 0], [0, 255, 255], [255, 255, 0], [0, 0, 255]]\n num_classes = [\n [200, 0, 0],\n [250, 0, 150],\n [200, 150, 150],\n [250, 150, 150],\n\n [0, 200, 0],\n [150, 250, 0],\n [150, 200, 150],\n\n [200, 0, 200],\n [150, 0, 250],\n [150, 150, 250],\n\n [250, 200, 0],\n [200, 200, 0],\n\n [0, 0, 200],\n [0, 150, 200],\n [0, 200, 250]\n ]\n im = cv2.imread(save_label)\n for i in tqdm(range(im.shape[0])):\n for j in range(im.shape[1]):\n rgb = [im[i][j][0], im[i][j][1], im[i][j][2]]\n if rgb == [0, 0, 0]:\n im[i][j] = num_classes5[0]\n elif rgb == [1, 1, 1]:\n im[i][j] = num_classes5[1]\n elif rgb == [2, 2, 2]:\n im[i][j] = num_classes5[2]\n elif rgb == [3, 3, 3]:\n im[i][j] = num_classes5[3]\n elif rgb == [4, 4, 4]:\n im[i][j] = num_classes5[4]\n\n cv2.imwrite(path, im)\nif __name__ == '__main__':\n # im = \"GF2_PMS1__L1A0000564539-MSS1.tif\"\n # save_label = os.path.join(r\"I:\\learn\\remote_sensing_semantic_segmentation\\data\\GID_5classes\\predict_label\", im)\n # save_visual = os.path.join(r\"I:\\learn\\remote_sensing_semantic_segmentation\\data\\GID_5classes\\predict_label\",\n # im.split('.')[0] + 'visual.tif')\n # translabeltovisual(save_label, save_visual)\n predict()\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"15786495","text":"# coding: utf-8\n\n\"\"\"\n Investor API\n\n All decimal values are accepted and returned with 2 decimal place precision, e.g., 
150.21. All date fields are sent in ISO 8601 format YYYY-MM-DD, e.g., 2016-11-30. # noqa: E501\n\n OpenAPI spec version: 1.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass StrategyModel(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'active': 'bool',\n 'already_invested': 'bool',\n 'balance': 'float',\n 'countries': 'list[int]',\n 'extension_opt_in': 'bool',\n 'id': 'int',\n 'loan_ratings': 'list[str]',\n 'loan_statuses': 'list[str]',\n 'loan_types': 'list[str]',\n 'max_interest_rate': 'float',\n 'max_ltv': 'float',\n 'max_note_amount': 'float',\n 'max_remaining_loan_term_months': 'int',\n 'min_interest_rate': 'float',\n 'min_remaining_loan_term_months': 'int',\n 'name': 'str',\n 'originators': 'list[int]',\n 'total_size': 'float'\n }\n\n attribute_map = {\n 'active': 'active',\n 'already_invested': 'already_invested',\n 'balance': 'balance',\n 'countries': 'countries',\n 'extension_opt_in': 'extension_opt_in',\n 'id': 'id',\n 'loan_ratings': 'loan_ratings',\n 'loan_statuses': 'loan_statuses',\n 'loan_types': 'loan_types',\n 'max_interest_rate': 'max_interest_rate',\n 'max_ltv': 'max_ltv',\n 'max_note_amount': 'max_note_amount',\n 'max_remaining_loan_term_months': 'max_remaining_loan_term_months',\n 'min_interest_rate': 'min_interest_rate',\n 'min_remaining_loan_term_months': 'min_remaining_loan_term_months',\n 'name': 'name',\n 'originators': 'originators',\n 'total_size': 'total_size'\n }\n\n def __init__(self, active=None, already_invested=None, balance=None, countries=None, extension_opt_in=None, id=None, loan_ratings=None, loan_statuses=None, loan_types=None, max_interest_rate=None, max_ltv=None, max_note_amount=None, max_remaining_loan_term_months=None, min_interest_rate=None, min_remaining_loan_term_months=None, name=None, originators=None, total_size=None): # noqa: E501\n \"\"\"StrategyModel - a model defined in Swagger\"\"\" # noqa: E501\n\n self._active = None\n self._already_invested = None\n self._balance = None\n self._countries = None\n self._extension_opt_in = None\n self._id = None\n self._loan_ratings = None\n self._loan_statuses = None\n self._loan_types = None\n self._max_interest_rate = None\n self._max_ltv = None\n self._max_note_amount = None\n self._max_remaining_loan_term_months = None\n self._min_interest_rate = None\n self._min_remaining_loan_term_months = None\n self._name = None\n self._originators = None\n self._total_size = None\n self.discriminator = None\n\n if active is not None:\n self.active = active\n if already_invested is not None:\n self.already_invested = already_invested\n if balance is not None:\n self.balance = balance\n if countries is not None:\n self.countries = countries\n if extension_opt_in is not None:\n self.extension_opt_in = extension_opt_in\n if id is not None:\n self.id = id\n if loan_ratings is not None:\n self.loan_ratings = loan_ratings\n if loan_statuses is not None:\n self.loan_statuses = loan_statuses\n if loan_types is not None:\n self.loan_types = loan_types\n if max_interest_rate is not None:\n self.max_interest_rate = max_interest_rate\n if max_ltv is not None:\n self.max_ltv = max_ltv\n if max_note_amount is 
not None:\n self.max_note_amount = max_note_amount\n if max_remaining_loan_term_months is not None:\n self.max_remaining_loan_term_months = max_remaining_loan_term_months\n if min_interest_rate is not None:\n self.min_interest_rate = min_interest_rate\n if min_remaining_loan_term_months is not None:\n self.min_remaining_loan_term_months = min_remaining_loan_term_months\n if name is not None:\n self.name = name\n if originators is not None:\n self.originators = originators\n if total_size is not None:\n self.total_size = total_size\n\n @property\n def active(self):\n \"\"\"Gets the active of this StrategyModel. # noqa: E501\n\n\n :return: The active of this StrategyModel. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._active\n\n @active.setter\n def active(self, active):\n \"\"\"Sets the active of this StrategyModel.\n\n\n :param active: The active of this StrategyModel. # noqa: E501\n :type: bool\n \"\"\"\n\n self._active = active\n\n @property\n def already_invested(self):\n \"\"\"Gets the already_invested of this StrategyModel. # noqa: E501\n\n\n :return: The already_invested of this StrategyModel. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._already_invested\n\n @already_invested.setter\n def already_invested(self, already_invested):\n \"\"\"Sets the already_invested of this StrategyModel.\n\n\n :param already_invested: The already_invested of this StrategyModel. # noqa: E501\n :type: bool\n \"\"\"\n\n self._already_invested = already_invested\n\n @property\n def balance(self):\n \"\"\"Gets the balance of this StrategyModel. # noqa: E501\n\n\n :return: The balance of this StrategyModel. # noqa: E501\n :rtype: float\n \"\"\"\n return self._balance\n\n @balance.setter\n def balance(self, balance):\n \"\"\"Sets the balance of this StrategyModel.\n\n\n :param balance: The balance of this StrategyModel. # noqa: E501\n :type: float\n \"\"\"\n\n self._balance = balance\n\n @property\n def countries(self):\n \"\"\"Gets the countries of this StrategyModel. # noqa: E501\n\n\n :return: The countries of this StrategyModel. # noqa: E501\n :rtype: list[int]\n \"\"\"\n return self._countries\n\n @countries.setter\n def countries(self, countries):\n \"\"\"Sets the countries of this StrategyModel.\n\n\n :param countries: The countries of this StrategyModel. # noqa: E501\n :type: list[int]\n \"\"\"\n\n self._countries = countries\n\n @property\n def extension_opt_in(self):\n \"\"\"Gets the extension_opt_in of this StrategyModel. # noqa: E501\n\n\n :return: The extension_opt_in of this StrategyModel. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._extension_opt_in\n\n @extension_opt_in.setter\n def extension_opt_in(self, extension_opt_in):\n \"\"\"Sets the extension_opt_in of this StrategyModel.\n\n\n :param extension_opt_in: The extension_opt_in of this StrategyModel. # noqa: E501\n :type: bool\n \"\"\"\n\n self._extension_opt_in = extension_opt_in\n\n @property\n def id(self):\n \"\"\"Gets the id of this StrategyModel. # noqa: E501\n\n\n :return: The id of this StrategyModel. # noqa: E501\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this StrategyModel.\n\n\n :param id: The id of this StrategyModel. # noqa: E501\n :type: int\n \"\"\"\n\n self._id = id\n\n @property\n def loan_ratings(self):\n \"\"\"Gets the loan_ratings of this StrategyModel. # noqa: E501\n\n\n :return: The loan_ratings of this StrategyModel. 
# noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._loan_ratings\n\n @loan_ratings.setter\n def loan_ratings(self, loan_ratings):\n \"\"\"Sets the loan_ratings of this StrategyModel.\n\n\n :param loan_ratings: The loan_ratings of this StrategyModel. # noqa: E501\n :type: list[str]\n \"\"\"\n allowed_values = [\"NO_GUARANTEE\", \"BUYBACK\", \"PAYMENT_GUARANTEE\", \"BANK_GUARANTEE\"] # noqa: E501\n if not set(loan_ratings).issubset(set(allowed_values)):\n raise ValueError(\n \"Invalid values for `loan_ratings` [{0}], must be a subset of [{1}]\" # noqa: E501\n .format(\", \".join(map(str, set(loan_ratings) - set(allowed_values))), # noqa: E501\n \", \".join(map(str, allowed_values)))\n )\n\n self._loan_ratings = loan_ratings\n\n @property\n def loan_statuses(self):\n \"\"\"Gets the loan_statuses of this StrategyModel. # noqa: E501\n\n\n :return: The loan_statuses of this StrategyModel. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._loan_statuses\n\n @loan_statuses.setter\n def loan_statuses(self, loan_statuses):\n \"\"\"Sets the loan_statuses of this StrategyModel.\n\n\n :param loan_statuses: The loan_statuses of this StrategyModel. # noqa: E501\n :type: list[str]\n \"\"\"\n allowed_values = [\"CURRENT\", \"DELAYED_1_30\", \"DELAYED_31_60\", \"DELAYED_61_PLUS\", \"CLOSED\"] # noqa: E501\n if not set(loan_statuses).issubset(set(allowed_values)):\n raise ValueError(\n \"Invalid values for `loan_statuses` [{0}], must be a subset of [{1}]\" # noqa: E501\n .format(\", \".join(map(str, set(loan_statuses) - set(allowed_values))), # noqa: E501\n \", \".join(map(str, allowed_values)))\n )\n\n self._loan_statuses = loan_statuses\n\n @property\n def loan_types(self):\n \"\"\"Gets the loan_types of this StrategyModel. # noqa: E501\n\n\n :return: The loan_types of this StrategyModel. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._loan_types\n\n @loan_types.setter\n def loan_types(self, loan_types):\n \"\"\"Sets the loan_types of this StrategyModel.\n\n\n :param loan_types: The loan_types of this StrategyModel. # noqa: E501\n :type: list[str]\n \"\"\"\n allowed_values = [\"MORTGAGE\", \"CONSUMER\", \"INVOICE_FINANCING\", \"BUSINESS\", \"LINE_OF_CREDIT\", \"PAWNBROKING\"] # noqa: E501\n if not set(loan_types).issubset(set(allowed_values)):\n raise ValueError(\n \"Invalid values for `loan_types` [{0}], must be a subset of [{1}]\" # noqa: E501\n .format(\", \".join(map(str, set(loan_types) - set(allowed_values))), # noqa: E501\n \", \".join(map(str, allowed_values)))\n )\n\n self._loan_types = loan_types\n\n @property\n def max_interest_rate(self):\n \"\"\"Gets the max_interest_rate of this StrategyModel. # noqa: E501\n\n\n :return: The max_interest_rate of this StrategyModel. # noqa: E501\n :rtype: float\n \"\"\"\n return self._max_interest_rate\n\n @max_interest_rate.setter\n def max_interest_rate(self, max_interest_rate):\n \"\"\"Sets the max_interest_rate of this StrategyModel.\n\n\n :param max_interest_rate: The max_interest_rate of this StrategyModel. # noqa: E501\n :type: float\n \"\"\"\n\n self._max_interest_rate = max_interest_rate\n\n @property\n def max_ltv(self):\n \"\"\"Gets the max_ltv of this StrategyModel. # noqa: E501\n\n\n :return: The max_ltv of this StrategyModel. # noqa: E501\n :rtype: float\n \"\"\"\n return self._max_ltv\n\n @max_ltv.setter\n def max_ltv(self, max_ltv):\n \"\"\"Sets the max_ltv of this StrategyModel.\n\n\n :param max_ltv: The max_ltv of this StrategyModel. 
# noqa: E501\n :type: float\n \"\"\"\n\n self._max_ltv = max_ltv\n\n @property\n def max_note_amount(self):\n \"\"\"Gets the max_note_amount of this StrategyModel. # noqa: E501\n\n\n :return: The max_note_amount of this StrategyModel. # noqa: E501\n :rtype: float\n \"\"\"\n return self._max_note_amount\n\n @max_note_amount.setter\n def max_note_amount(self, max_note_amount):\n \"\"\"Sets the max_note_amount of this StrategyModel.\n\n\n :param max_note_amount: The max_note_amount of this StrategyModel. # noqa: E501\n :type: float\n \"\"\"\n\n self._max_note_amount = max_note_amount\n\n @property\n def max_remaining_loan_term_months(self):\n \"\"\"Gets the max_remaining_loan_term_months of this StrategyModel. # noqa: E501\n\n\n :return: The max_remaining_loan_term_months of this StrategyModel. # noqa: E501\n :rtype: int\n \"\"\"\n return self._max_remaining_loan_term_months\n\n @max_remaining_loan_term_months.setter\n def max_remaining_loan_term_months(self, max_remaining_loan_term_months):\n \"\"\"Sets the max_remaining_loan_term_months of this StrategyModel.\n\n\n :param max_remaining_loan_term_months: The max_remaining_loan_term_months of this StrategyModel. # noqa: E501\n :type: int\n \"\"\"\n\n self._max_remaining_loan_term_months = max_remaining_loan_term_months\n\n @property\n def min_interest_rate(self):\n \"\"\"Gets the min_interest_rate of this StrategyModel. # noqa: E501\n\n\n :return: The min_interest_rate of this StrategyModel. # noqa: E501\n :rtype: float\n \"\"\"\n return self._min_interest_rate\n\n @min_interest_rate.setter\n def min_interest_rate(self, min_interest_rate):\n \"\"\"Sets the min_interest_rate of this StrategyModel.\n\n\n :param min_interest_rate: The min_interest_rate of this StrategyModel. # noqa: E501\n :type: float\n \"\"\"\n\n self._min_interest_rate = min_interest_rate\n\n @property\n def min_remaining_loan_term_months(self):\n \"\"\"Gets the min_remaining_loan_term_months of this StrategyModel. # noqa: E501\n\n\n :return: The min_remaining_loan_term_months of this StrategyModel. # noqa: E501\n :rtype: int\n \"\"\"\n return self._min_remaining_loan_term_months\n\n @min_remaining_loan_term_months.setter\n def min_remaining_loan_term_months(self, min_remaining_loan_term_months):\n \"\"\"Sets the min_remaining_loan_term_months of this StrategyModel.\n\n\n :param min_remaining_loan_term_months: The min_remaining_loan_term_months of this StrategyModel. # noqa: E501\n :type: int\n \"\"\"\n\n self._min_remaining_loan_term_months = min_remaining_loan_term_months\n\n @property\n def name(self):\n \"\"\"Gets the name of this StrategyModel. # noqa: E501\n\n\n :return: The name of this StrategyModel. # noqa: E501\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this StrategyModel.\n\n\n :param name: The name of this StrategyModel. # noqa: E501\n :type: str\n \"\"\"\n\n self._name = name\n\n @property\n def originators(self):\n \"\"\"Gets the originators of this StrategyModel. # noqa: E501\n\n\n :return: The originators of this StrategyModel. # noqa: E501\n :rtype: list[int]\n \"\"\"\n return self._originators\n\n @originators.setter\n def originators(self, originators):\n \"\"\"Sets the originators of this StrategyModel.\n\n\n :param originators: The originators of this StrategyModel. # noqa: E501\n :type: list[int]\n \"\"\"\n\n self._originators = originators\n\n @property\n def total_size(self):\n \"\"\"Gets the total_size of this StrategyModel. 
# noqa: E501\n\n\n :return: The total_size of this StrategyModel. # noqa: E501\n :rtype: float\n \"\"\"\n return self._total_size\n\n @total_size.setter\n def total_size(self, total_size):\n \"\"\"Sets the total_size of this StrategyModel.\n\n\n :param total_size: The total_size of this StrategyModel. # noqa: E501\n :type: float\n \"\"\"\n\n self._total_size = total_size\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(StrategyModel, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, StrategyModel):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"swagger_client/models/strategy_model.py","file_name":"strategy_model.py","file_ext":"py","file_size_in_byte":17753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"461568617","text":"#!/usr/bin/env python\n\"\"\" Evaluator Class and builder \"\"\"\nfrom __future__ import print_function\nimport os\n\n\ndef build_evaluator(opt, logger=None):\n evaluator = Evaluator.from_opt(\n opt,\n logger=logger\n )\n\n return evaluator\n\n\nclass Evaluator(object):\n \"\"\"Evaluates translations.\n\n Args:\n report_rouge (bool): Print/log Rouge metric.\n report_bleu (bool): Print/log Bleu metric.\n report_sari (bool): Print/log Sari metric.\n report_flesch_reading_ease (bool): Print/log flesch reading ease metric.\n report_flesch_kincaid_grade_level (bool): Print/log flesch kincaid grade level metric.\n logger (logging.Logger or NoneType): Logger.\n \"\"\"\n\n def __init__(\n self,\n report_rouge=False,\n report_bleu=False,\n report_sari=False,\n report_flesch_reading_ease=False,\n report_flesch_kincaid_grade_level=False,\n logger=None):\n self.report_rouge = report_rouge\n self.report_bleu = report_bleu\n self.report_rouge = report_rouge\n self.report_sari = report_sari\n self.report_flesch_reading_ease = report_flesch_reading_ease\n self.report_flesch_kincaid_grade_level = report_flesch_kincaid_grade_level\n\n self.logger = logger\n\n @classmethod\n def from_opt(\n cls,\n opt,\n logger=None):\n \"\"\"Alternate constructor.\n\n Args:\n opt (argparse.Namespace): Command line options.\n logger (logging.Logger or NoneType): See :func:`__init__()`.\n \"\"\"\n\n return cls(\n report_rouge=opt.report_rouge,\n report_bleu=opt.report_bleu,\n report_sari=opt.report_sari,\n report_flesch_reading_ease=opt.report_flesch_reading_ease,\n report_flesch_kincaid_grade_level=opt.report_flesch_kincaid_grade_level,\n logger=logger)\n\n def _log(self, msg):\n if self.logger:\n self.logger.info(msg)\n else:\n 
print(msg)\n\n def evaluate(\n self,\n src_path,\n tgt_path,\n pred_path):\n \"\"\"Evaluates content of src, tgt and pred.\n\n Args:\n\n Returns:\n\n \"\"\"\n\n if self.report_rouge:\n msg = self._report_rouge(tgt_path, pred_path)\n self._log(msg)\n if self.report_bleu:\n msg = self._report_bleu(tgt_path, pred_path)\n self._log(msg)\n if self.report_sari:\n msg = self._report_sari(src_path, tgt_path, pred_path)\n self._log(msg)\n if self.report_flesch_reading_ease:\n msg = self._report_flesch_reading_ease(pred_path)\n self._log(msg)\n if self.report_flesch_kincaid_grade_level:\n msg = self._report_flesch_kincaid_grade_level(pred_path)\n self._log(msg)\n\n def _report_rouge(self, tgt_path, pred_path):\n import subprocess\n path = os.path.split(os.path.realpath(__file__))[0]\n msg = subprocess.check_output(\n \"python %s/tools/test_rouge.py -r %s -c STDIN\" % (path, tgt_path),\n shell=True, stdin=open(pred_path, \"r\")\n ).decode(\"utf-8\").strip()\n return msg\n\n def _report_bleu(self, tgt_path, pred_path):\n import subprocess\n base_dir = os.path.abspath(__file__ + \"/../../..\")\n\n res = subprocess.check_output(\n \"perl %s/tools/multi-bleu.perl %s\" % (base_dir, os.path.abspath(tgt_path)),\n stdin=open(pred_path, \"r\"), shell=True\n ).decode(\"utf-8\")\n\n msg = \">> \" + res.strip()\n return msg\n\n def _report_sari(self, src_path, tgt_path, pred_path):\n import subprocess\n base_dir = os.path.abspath(__file__ + \"/../../..\")\n\n res = subprocess.check_output(\n \"python %s/tools/sari.py %s %s\" % (\n base_dir, os.path.abspath(src_path), os.path.abspath(tgt_path)),\n stdin=open(pred_path, \"r\"), shell=True\n ).decode(\"utf-8\")\n\n msg = \">> \" + res.strip()\n return msg\n\n def _report_flesch_reading_ease(self, pred_path):\n import subprocess\n base_dir = os.path.abspath(__file__ + \"/../../..\")\n\n res = subprocess.check_output(\n \"python %s/tools/readability/readability.py \\\"Flesch Reading Ease\\\"\" % base_dir,\n stdin=open(pred_path, \"r\"), shell=True\n ).decode(\"utf-8\")\n\n msg = \">> \" + res.strip()\n return msg\n\n def _report_flesch_kincaid_grade_level(self, pred_path):\n import subprocess\n base_dir = os.path.abspath(__file__ + \"/../../..\")\n\n res = subprocess.check_output(\n \"python %s/tools/readability/readability.py \\\"Flesch-Kincaid Grade Level\\\"\" % base_dir,\n stdin=open(pred_path, \"r\"), shell=True\n ).decode(\"utf-8\")\n\n msg = \">> \" + res.strip()\n return msg\n","sub_path":"onmt/evaluate/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"122235804","text":"#!/usr/bin/env python\n# coding:utf8\n\"\"\"\n@Time : 2019/09/15\n@Author : fls\n@Contact : fls@darkripples.com\n@Desc : darkripples总平台相关-应用内utils\n\n@Modify Time @Author @Version @Desciption\n------------ ------- -------- -----------\n2019/09/15 08:43 fls 1.0 create\n\"\"\"\n\nimport re\nimport uuid\n\nfrom django.conf import settings\nfrom django.utils import timezone\n\nfrom conf import REQ_HEADER_PWD\nfrom ez_utils import connection, after_seconds, is_internal_ip, get_ip, fls_log\nfrom .models import SQL_DIC_VISITOR, DrVisitorInfo\n\nflog = fls_log(handler_name=\"app_dr.dr_utils\")\n\n\ndef add_visitor(ip, app_type, visitor_type, link_id):\n \"\"\"\n 添加到访客记录\n :param ip: 同ip,5分钟内,只记录一次\n :param app_type:\n :param visitor_type:\n :param link_id: 关联的id,可以不关联到业务表,用于记录访客随便输入的id\n :return:\n \"\"\"\n with connection() as con:\n rs = con.execute_sql(\n 
\"SELECT count(1) as cnt FROM {table1} \" \\\n \"WHERE {table1_ip} = %(ip)s and {table1_atime}>=%(time)s and {table1_linkid}=%(link_id)s\".format(\n **SQL_DIC_VISITOR), {'ip': ip, 'time': after_seconds(seconds=-1 * 5 * 60), 'link_id': link_id})\n if not rs or rs[0].cnt == 0:\n # 若登记过该ip,则获取它的信息\n cnt_ipstack = 0\n visitor_lat = ''\n visitor_lng = ''\n if is_internal_ip(ip) or '127.0.0.1' == ip:\n # 内网ip,免解析\n cnt_ipstack = -1\n else:\n rs_over = con.execute_sql(\n \"SELECT {table1_lat},{table1_lng} FROM {table1} \" \\\n \"WHERE {table1_ip} = %(ip)s and {table1_lat} is not null and {table1_lat}!='' limit 1\".format(\n **SQL_DIC_VISITOR), {'ip': ip}, hump=False)\n\n if rs_over and rs_over[0]:\n cnt_ipstack = -1\n visitor_lat = rs_over[0][SQL_DIC_VISITOR[\"table1_lat\"]]\n visitor_lng = rs_over[0][SQL_DIC_VISITOR[\"table1_lng\"]]\n\n # 登记信息\n DrVisitorInfo.objects.create(id=str(uuid.uuid1()).replace('-', ''), visitor_ip=ip,\n add_time=timezone.now(), app_name=app_type,\n visitor_type=visitor_type, link_id=link_id,\n cnt_ipstack=cnt_ipstack, visitor_lat=visitor_lat,\n visitor_lng=visitor_lng)\n\n return 1\n return 0\n\n\ndef req_invalid_check(req):\n \"\"\"\n request合法性校验\n :param req:\n :return: 校验通过return '',否则return错误信息\n \"\"\"\n flag = \"\"\n if req.META.get(\"HTTP_DR_DEBUG\") == REQ_HEADER_PWD:\n # 测试时,传递该密参,不进行校验\n return flag\n\n # 其他情况,校验header中的参数\n ip = get_ip(req)\n if req.META.get(\"HTTP_TOKEN\") is None:\n flag = 1\n\n allowed_hosts = settings.ALLOWED_HOSTS\n\n referer_rule = r'//(.*?)/'\n referer_rs = re.findall(referer_rule, req.META.get(\"HTTP_REFERER\", \"\"))\n if (not referer_rs) or (referer_rs[0] not in allowed_hosts):\n flag = 1\n # origin_rs = req.META.get(\"HTTP_ORIGIN\", \"\").split(\"//\")\n # if (len(origin_rs) < 2) or (origin_rs[1] not in allowed_hosts):\n # flag = 1\n\n if flag:\n flag = f\"当前客户端外网IP:{ip}.请斟酌调取本接口\"\n return flag\n","sub_path":"app_dr/dr_utils.py","file_name":"dr_utils.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"146722056","text":"#test linearity of du/dc\nif __name__ == \"__main__\":\n\tfrom sys import path\n\tpath.append(\"../src\")\n\nimport domain\nimport fastsweeping\nimport wavespeed\nimport basis\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\n\ndef test1():\n\t# create domain\n\tM = 1.0\n\tll = np.array([-M,0])\n\tn = 200\n\tgrid = domain.Rectangle(-M, M, 0, M, n, debug=True)\n\n\tsources = np.array([grid.convert_to_flattened_indices(grid.nx/2, 0)], dtype = np.int)\n\tfeasible_receivers = grid.boundary()\n\n\tc_true = wavespeed.constant\n\tc = c_true(grid.X, grid.Y)\n\n\t#compute u\n\tu_infty = 1e3\n\tu0 = u_infty*np.ones(grid.shape)\n\tu0.ravel()[sources[0]] = 0.0\n\tu, H, U = fastsweeping.first_arrivals(grid, u0, c, maxiter = 40, tol = 1e-10, memory=True)\n\n\tdc1 = np.random.rand(grid.nx, grid.ny)\n\tdc2 = np.random.rand(grid.nx, grid.ny)\n\n\tplt.imshow(u.T, origin=\"lower\")\n\tplt.title(\"u\")\n\tplt.colorbar()\n\tplt.show()\n\n#\tcompute du\n\tv1 = fastsweeping.compute_du(grid, c, dc1, H, U, debug=False)\n\tv2 = fastsweeping.compute_du(grid, c, dc2, H, U, debug=False)\n\tv3 = fastsweeping.compute_du(grid, c, dc1 + dc2, H, U, debug=False)\n\n\tfig = plt.figure(1)\n\tax1 = fig.add_subplot(221)\n\tplot1 = ax1.imshow(v1.T, origin=\"lower\")\n\tax1.set_title(\"dA(dc1)\")\n\tplt.colorbar(plot1)\n\n\tax2 = fig.add_subplot(222)\n\tplot2 = ax2.imshow(v2.T, 
origin=\"lower\")\n\tax2.set_title(\"dA(dc2)\")\n\tplt.colorbar(plot2)\n\n\tax3 = fig.add_subplot(223)\n\tplot3 = ax3.imshow(v3.T, origin=\"lower\")\n\tax3.set_title(\"dA(dc1 + dc2)\")\n\tplt.colorbar(plot3)\n\n\terror = np.abs(v3 - v1 - v2)/np.linalg.norm(v3)\n\tax4 = fig.add_subplot(224)\n\tplot4 = ax4.imshow(error.T, origin=\"lower\")\n\tax4.set_title(\"|dA(dc1 + dc2) - dA(dc1) + dA(dc2)|\")\n\tplt.colorbar(plot4)\n\tplt.show()\n\n\nif __name__==\"__main__\":\n\ttest1()\n","sub_path":"tests/dulinearitytest.py","file_name":"dulinearitytest.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"59523382","text":"#!/usr/bin/env python3\nimport argparse\nimport gym\nimport numpy as np\nimport scipy.stats\nfrom itertools import count\n\n# make it possible to import from ../../utils/\nimport os.path, sys\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))\nfrom utils.optim import adam\n\nparser = argparse.ArgumentParser(description='Numpy REINFORCE')\nparser.add_argument('--gamma', type=float, default=0.99, metavar='G',\n help='discount factor (default: 0.99)')\nparser.add_argument('--seed', type=int, default=42, metavar='N',\n help='random seed (default: 42)')\nparser.add_argument('--log_interval', type=int, default=100, metavar='N',\n help='interval between training status logs (default: 100)')\nparser.add_argument('--render_interval', type=int, default=-1, metavar='N',\n help='interval between rendering (default: -1)')\nparser.add_argument('--env_id', type=str, default='LunarLander-v2',\n help='gym environment to load')\nargs = parser.parse_args()\n\n\"\"\"\n Glossary:\n (w.r.t.) = with respect to (as in taking gradient with respect to a variable)\n\"\"\"\n\nclass PolicyNetworkContinuous(object):\n \"\"\"\n Neural network policy. Takes in observations and returns mean and\n standard deviations for actions\n\n ARCHITECTURE:\n {affine - relu } x (L - 1) - affine - softmax \n\n \"\"\"\n def __init__(self, ob_n, ac_n, hidden_dim=500, dtype=np.float32):\n \"\"\"\n Initialize a neural network to choose actions\n\n Inputs:\n - ob_n: Length of observation vector\n - ac_n: Number of possible actions\n - hidden_dims: List of size of hidden layer sizes\n - dtype: A numpy datatype object; all computations will be performed using\n this datatype. 
float32 is faster but less accurate, so you should use\n float64 for numeric gradient checking.\n \"\"\"\n self.ob_n = ob_n\n self.ac_n = ac_n\n self.hidden_dim = H = hidden_dim\n self.dtype = dtype\n self.out_n = ac_n * 2 # for mean and standard deviation\n\n # Initialize all weights (model params) with \"Xavier Initialization\" \n # weight matrix init = uniform(-1, 1) / sqrt(layer_input)\n # bias init = zeros()\n self.params = {}\n self.params['W1'] = (-1 + 2*np.random.rand(ob_n, H)) / np.sqrt(ob_n)\n self.params['b1'] = np.zeros(H)\n self.params['W2'] = (-1 + 2*np.random.rand(H, self.out_n)) / np.sqrt(H)\n self.params['b2'] = np.zeros(self.out_n)\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(self.dtype)\n\n # Neural net bookkeeping \n self.cache = {}\n self.grads = {}\n # Configuration for Adam optimization\n self.optimization_config = {'learning_rate': 1e-3}\n self.adam_configs = {}\n for p in self.params:\n d = {k: v for k, v in self.optimization_config.items()}\n self.adam_configs[p] = d\n\n # RL specific bookkeeping\n self.saved_action_gradients = []\n self.rewards = []\n\n ### HELPER FUNCTIONS\n def _zero_grads(self):\n \"\"\"Reset gradients to 0. This should be called during optimization steps\"\"\"\n for g in self.grads:\n self.grads[g] = np.zeros_like(self.grads[g])\n\n def _add_to_cache(self, name, val):\n \"\"\"Helper function to add a parameter to the cache without having to do checks\"\"\"\n if name in self.cache:\n self.cache[name].append(val)\n else:\n self.cache[name] = [val]\n\n def _update_grad(self, name, val):\n \"\"\"Helper fucntion to set gradient without having to do checks\"\"\"\n if name in self.grads:\n self.grads[name] += val\n else:\n self.grads[name] = val\n\n def _softmax(self, x):\n shifted_logits = x - np.max(x, axis=1, keepdims=True)\n Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)\n log_probs = shifted_logits - np.log(Z)\n probs = np.exp(log_probs)\n return probs\n\n ### MAIN NEURAL NETWORK STUFF \n def forward(self, x):\n \"\"\"\n Forward pass observations (x) through network to get probabilities \n of taking each action \n\n [input] --> affine --> relu --> affine --> softmax/output\n\n \"\"\"\n p = self.params\n W1, b1, W2, b2 = p['W1'], p['b1'], p['W2'], p['b2']\n\n # forward computations\n affine1 = x.dot(W1) + b1\n relu1 = np.maximum(0, affine1)\n affine2 = relu1.dot(W2) + b2 \n means, stds = np.split(affine2, 2, axis=1)\n relu_stds = np.maximum(0, stds)\n\n # TODO need to do better handling here, like switching to sigmoid\n # if the range is not symmetric, or multiplying by a constant if it\n # is not ranged [-1,1]\n\n\n # cache values for backward (based on what is needed for analytic gradient calc)\n self._add_to_cache('affine1', x) \n self._add_to_cache('relu1', affine1) \n self._add_to_cache('affine2', relu1) \n self._add_to_cache('relu_stds', stds) \n return means.squeeze(), relu_stds.squeeze()\n \n def backward(self, dout):\n \"\"\"\n Backwards pass of the network.\n\n affine <-- relu <-- affine <-- [gradient signal of softmax/output]\n\n Params:\n dout: gradient signal for backpropagation\n \n\n Chain rule the derivatives backward through all network computations \n to compute gradients of output probabilities w.r.t. 
each network weight.\n (to be used in stochastic gradient descent optimization (adam))\n \"\"\"\n p = self.params\n W1, b1, W2, b2 = p['W1'], p['b1'], p['W2'], p['b2']\n\n # get values from network forward passes (for analytic gradient computations)\n fwd_relu1 = np.concatenate(self.cache['affine2'])\n fwd_affine1 = np.concatenate(self.cache['relu1'])\n fwd_x = np.concatenate(self.cache['affine1'])\n fwd_relu_stds = np.concatenate(self.cache['relu_stds'])\n\n dout[:,2:] = np.where(fwd_relu_stds > 0, dout[:,2:], 0)\n\n # Analytic gradient of last layer for backprop \n # affine2 = W2*relu1 + b2\n # drelu1 = W2 * dout\n # dW2 = relu1 * dout\n # db2 = dout\n drelu1 = dout.dot(W2.T)\n dW2 = fwd_relu1.T.dot(dout)\n db2 = np.sum(dout, axis=0)\n\n # gradient of relu (non-negative for values that were above 0 in forward)\n daffine1 = np.where(fwd_affine1 > 0, drelu1, 0)\n\n # affine1 = W1*x + b1\n dW1 = fwd_x.T.dot(daffine1)\n db1 = np.sum(daffine1)\n\n # update gradients \n self._update_grad('W1', dW1)\n self._update_grad('b1', db1)\n self._update_grad('W2', dW2)\n self._update_grad('b2', db2)\n\n # reset cache for next backward pass\n self.cache = {}\n\nclass REINFORCE(object):\n \"\"\"\n Object to handle running the algorithm. Uses a PolicyNetwork\n \"\"\"\n def __init__(self, env):\n ob_n = env.observation_space.shape[0]\n if type(env.action_space) == gym.spaces.box.Box:\n ac_n = env.action_space.shape[0]\n else:\n raise Exception(\"this only supports continuous envs\")\n\n self.policy = PolicyNetworkContinuous(ob_n, ac_n)\n\n def select_action(self, obs):\n \"\"\"\n Pass observations through network and sample an action to take. Keep track\n of dh to use to update weights\n \"\"\"\n obs = np.reshape(obs, [1, -1])\n means, stds = self.policy.forward(obs)\n stds += 1e-5\n\n action = np.random.normal(means, stds)\n action = action.clip(-1,1)\n\n dmeans = (action - means)/stds**2\n dstds = (-1/stds + ((action - means)**2 / (np.power(stds, 3))))\n print(means, stds)\n\n # I think the analytic gradient is undefined because we are clipping. It should be\n #dh = (action - netout)/std**2\n # Instead, we have to compute it numerically\n #normal_dist = scipy.stats.norm(means, stds)\n #dmeans = normal_dist.logpdf(action) # TODO: does this need scaling by std?\n #dstds = 1e-1 * normal_dist.entropy() # Add cross entropy cost to encourage exploration\n dh = np.hstack([dmeans, dstds])\n\n #dh = (1 - netout**2) * dnetout\n self.policy.saved_action_gradients.append(dh)\n \n return action\n\n\n def calculate_discounted_returns(self, rewards):\n \"\"\"\n Calculate discounted reward and then normalize it\n See Sutton book for definition \n Params:\n rewards: list of rewards for every episode\n \"\"\"\n returns = np.zeros(len(rewards))\n \n next_return = 0 # 0 because we start at the last timestep\n for t in reversed(range(0, len(rewards))):\n next_return = rewards[t] + args.gamma * next_return\n returns[t] = next_return\n # normalize for better statistical properties\n returns = (returns - returns.mean()) / (returns.std() + np.finfo(np.float32).eps)\n return returns\n \n def finish_episode(self):\n \"\"\"\n At the end of the episode, calculate the discounted return for each time step\n \"\"\"\n action_gradient = np.array(self.policy.saved_action_gradients)\n returns = self.calculate_discounted_returns(self.policy.rewards)\n # Multiply the signal that makes actions taken more probable by the discounted\n # return of that action. 
This will pull the weights in the direction that\n # makes *better* actions more probable.\n self.policy_gradient = np.zeros(action_gradient.shape)\n for t in range(0, len(returns)):\n self.policy_gradient[t] = action_gradient[t] * returns[t]\n \n # negate because we want gradient ascent, not descent\n self.policy.backward(-self.policy_gradient)\n \n # run an optimization step on all of the model parameters\n for p in self.policy.params:\n next_w, self.policy.adam_configs[p] = adam(self.policy.params[p], self.policy.grads[p], config=self.policy.adam_configs[p])\n self.policy.params[p] = next_w\n self.policy._zero_grads() # required every call to adam\n \n # reset stuff\n del self.policy.rewards[:]\n del self.policy.saved_action_gradients[:]\n\n\ndef main():\n \"\"\"Run REINFORCE algorithm to train on the environment\"\"\"\n avg_reward = []\n for i_episode in count(1):\n ep_reward = 0\n obs = env.reset()\n for t in range(10000): # Don't infinite loop while learning\n action = reinforce.select_action(obs)\n obs, reward, done, _ = env.step(action)\n ep_reward += reward\n reinforce.policy.rewards.append(reward)\n\n if args.render_interval != -1 and i_episode % args.render_interval == 0:\n env.render()\n\n if done:\n break\n\n reinforce.finish_episode()\n\n if i_episode % args.log_interval == 0:\n print(\"Ave reward: {}\".format(sum(avg_reward)/len(avg_reward)))\n avg_reward = []\n\n else:\n avg_reward.append(ep_reward)\n\nif __name__ == '__main__':\n env = gym.make(args.env_id)\n env.seed(args.seed)\n np.random.seed(args.seed)\n reinforce = REINFORCE(env)\n main()\n\n\n\n","sub_path":"numpy/rl/reinforce_continuous.py","file_name":"reinforce_continuous.py","file_ext":"py","file_size_in_byte":11363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"193421733","text":"#encoding=utf-8\n\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport random\nimport time\nimport logging\nimport logging.handlers\n\n# from sklearn.cross_validation import train_test_split\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.metrics import accuracy_score\n\nnp.set_printoptions(threshold='nan')\n\nlogger = logging.getLogger(\"naive\")\nlogger.setLevel(logging.DEBUG)\nfile_handler = logging.FileHandler(\"./log.txt\",mode='w')\nfile_handler.setLevel(logging.DEBUG)\nlogger.addHandler(file_handler)\n\n# 二值化\ndef binaryzation(img):\n cv_img = img.astype(np.uint8)\n cv2.threshold(cv_img,50,1,cv2.THRESH_BINARY_INV,cv_img) # 二值化处理,高于阈值50,设值为1\n return cv_img\n\ndef Train(trainset,train_labels):\n # 需要依据trian数据集,计算先验概率和条件概率\n # 先验概率\n prior_probability = np.zeros(class_num)\n # 条件概率: class_num:Cj(Y的划分空间); feature_len:维度p ; 2:p维度的Xi,每个Xi的取值范围(只有0,1)两种取值\n conditional_probability = np.zeros((class_num,feature_len,2))\n print(\"train_set shape = \", trainset.shape)# train_set shape = (67, 784)\n\n # 2.遍历train数据集,开始:计算先验概率及条件概率\n for i in range(len(train_labels)):\n img = binaryzation(trainset[i]) # 图片二值化 , 二值化处理之后,图片变成黑白2色,只有0,1两种值\n label = train_labels[i]\n # 2.1:统计到先验概率(以个数表示,没有换算成概率)\n prior_probability[label] += 1\n # 2.2:label与feature及值的数量统计\n for j in range(feature_len):\n conditional_probability[label][j][img[j]] += 1 # img[j]为0/1\n\n # 将概率归到[1.10001]\n for i in range(class_num):\n for j in range(feature_len):\n\n # 经过二值化后图像只有0,1两种取值\n pix_0 = conditional_probability[i][j][0]\n pix_1 = conditional_probability[i][j][1]\n\n # 计算0,1像素点对应的条件概率\n probalility_0 = (float(pix_0)/float(pix_0+pix_1))*magnification + 1 # 概率值:全部计算到1.xxxx\n probalility_1 
= (float(pix_1)/float(pix_0+pix_1))*magnification + 1 # 方便计算概率时,直接连乘\n\n conditional_probability[i][j][0] = probalility_0\n conditional_probability[i][j][1] = probalility_1\n\n return prior_probability,conditional_probability\n\n# 计算概率\ndef calculate_probability(img,label):\n # 1.1 类别为label(Ck)的数量,认为是(P(Y=Ck))\n probability = int(prior_probability[label])\n for i in range(len(img)):\n # 1.2 在y=label(Ck)条件下,对于每个testimg, X(i)=i的概率,由于概率值已经被归化到1.xxxx,所以可以直接连乘\n probability *= int(conditional_probability[label][i][img[i]]) # trick:img[i]只能为0或1, 最终的值,需要除以maga么?\n return probability\n\ndef Predict(testset,prior_probability,conditional_probability):\n predict = []\n for img in testset:\n # 图像二值化\n img = binaryzation(img)\n\n max_label = 0\n # 1.1 先计算第0个类别C0的概率\n max_probability = calculate_probability(img,0)\n # 1.2 然后计算第1-10个类别Ci的概率,跟P0比较,找到最大的概率\n for j in range(1,10):\n probability = calculate_probability(img,j)\n if max_probability < probability:\n max_label = j\n max_probability = probability\n\n predict.append(max_label)\n return np.array(predict)\n\nclass_num = 10\nfeature_len = 784\nmagnification = 100 # 1000000\ntrain_num = 3000\n\nif __name__ == '__main__':\n print('Start read data')\n time_1 = time.time()\n # 1.读取数据\n raw_data = pd.read_csv('../data/train.csv',header=0)\n data = raw_data.values\n # 2.切片数据:分img_raw和label\n imgs = data[0:train_num:,1::]\n labels = data[0:train_num:,0]\n # 3.选取 2/3 数据作为训练集, 1/3 数据作为测试集\n train_features, test_features, train_labels, test_labels = train_test_split(imgs, labels, test_size=0.33, random_state=23323)\n time_2 = time.time()\n print('read data cost ',time_2 - time_1,' second','\\n')\n print('Start training')\n # 4.Train: 计算先验概率和条件概率\n prior_probability,conditional_probability = Train(train_features,train_labels)\n time_3 = time.time()\n print('training cost ',time_3 - time_2,' second','\\n')\n\n print(\"prior = \", prior_probability.tobytes())\n print(\"condition = \", conditional_probability.tobytes())\n\n print('Start predicting')\n # 5.预测结果\n test_predict = Predict(test_features,prior_probability,conditional_probability)\n time_4 = time.time()\n print('predicting cost ',time_4 - time_3,' second','\\n')\n # 6.查看预测结果\n score = accuracy_score(test_labels,test_predict)\n print(\"The accruacy socre is \", score)","sub_path":"3.naive_bayes/naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"415562780","text":"import numpy as np\ndef scale_features(features, mode=1):\n # Max & min scaling\n if mode == 1:\n for c in range(len(features[0])):\n col_max = 0\n col_min = 9999999\n\n # Get the right min and max value for the column\n for f in features:\n if f[c] > col_max:\n col_max = f[c]\n elif f[c] < col_min:\n col_min = f[c]\n\n # Scale each feature value\n for f in features:\n f[c] = (f[c] - col_min) / (col_max - col_min)\n # Mean & stdev scaling\n else:\n for c in range(len(features[0])):\n col_my = sum([f[c] for f in features]) / len(features)\n col_sigma = np.std([f[c] for f in features])\n\n for f in features:\n f[c] = (f[c] - col_my) / col_sigma\n\n return features\n\n\nfeatures = [[1,1,1],[2,2,2],[3,20,211]]\n\n\ndef number_of_labels(labels):\n uniques = []\n for l in labels:\n if(int(l) not in uniques):\n uniques.append(int(l))\n return uniques\n\ndef int_to_one_hot(int, size, off_val=0, on_val=1, floats=False):\n if floats:\n off_val = float(off_val);\n on_val = float(on_val)\n if int < size:\n v = [off_val] 
* size\n v[int] = on_val\n return v\nprint(int_to_one_hot(6,7))","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"91766441","text":"from django.shortcuts import render\n\n# Create your views here.\ndef encode(msg, key):\n msgl = len(msg)\n keyl = len(key)\n res = \"\"\n counter = 0\n forward = True\n for i in range(msgl):\n msgc = ord(msg[i]) - 97\n keyc = ord(key[counter]) - 96\n res += chr((msgc + keyc) % 26 + 97) if msg[i].isalpha() else msg[i]\n counter += 1 if forward else -1\n if counter > keyl - 1:\n counter = keyl - 1\n forward = False\n elif counter < 0:\n counter = 0\n forward = True\n return res\n\ndef encode_view(request):\n if request.method == 'POST':\n context = {'result': None}\n\n context['msg'], context['k'] = request.POST['message'], request.POST['key']\n\n context['result'] = encode(context['msg'].lower(), context['k'].lower())\n\n return render(request, '../../crypto/templates/result.html', context=context)\n\n else:\n return render(request, 'encode.html')\n","sub_path":"assignments/003/anna-lou/crypto/encode/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"45355956","text":"from .models import Veh_activity, Veh_movement, Member, Vehicle, currSeason, Contact, StationEvent, \\\n EventAttendance, StationActivity, Memb_qualification\n\nfrom django.forms import ModelForm, Form, SelectDateWidget, TimeInput, DateInput\nfrom bootstrap_datepicker.widgets import DatePicker\nfrom django import forms\nimport datetime\n\n\nfrom django.core.urlresolvers import reverse\nfrom crispy_forms.bootstrap import Field, InlineRadios, TabHolder, Tab\nfrom crispy_forms.helper import FormHelper\nfrom django.db.models.fields import BLANK_CHOICE_DASH\n\nfrom crispy_forms.layout import Submit, Layout, Div, Fieldset, Button, HTML, ButtonHolder\nfrom django.contrib.admin.widgets import AdminDateWidget, AdminTimeWidget\n\n\nclass Veh_movementForm(ModelForm):\n DATE = forms.DateField(widget=forms.TextInput(attrs={'type': 'date'})) # AdminDateWidget()) # widget=DatePicker(options={\"format\": \"dd/mm/yyyy\",\"autoclose\": True}))\n DEPARTED = forms.TimeField(widget=forms.TextInput(attrs={'type': 'time'}))\n RETURNED = forms.TimeField(widget=TimeInput(format='%H:%M'))\n DRIVER = forms.ModelChoiceField(queryset=Member.all_drivers.all())\n OIC = forms.ModelChoiceField(queryset=Member.field_officers.all())\n\n cchoices = ((1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12))\n CREW = forms.TypedChoiceField(choices=cchoices, coerce=int)\n\n Response = forms.CheckboxInput()\n\n class Meta:\n model = Veh_movement\n fields = ['DATE', 'VEHICLE', 'PURPOSE', 'Response', 'LOCATION', 'DEPARTED', 'RETURNED',\n 'ODOSTART', 'ODOFINISH', 'KM', 'DRIVER', 'OIC', 'CREW', ]\n\n def __init__(self, *args, **kwargs):\n super(Veh_movementForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'id-veh_movement_form'\n self.helper.form_method = 'post'\n self.helper.add_input(Submit('submit', 'Submit'))\n self.helper.form_class = 'form-horizontal'\n\n if 'initial' in kwargs:\n for fieldname in ['ODOSTART', 'VEHICLE', ]: # Vehicle and ODOSTART already known\n self.fields[fieldname].disabled = True\n\n self.helper.layout = Layout(\n Fieldset('Start/Finish',\n Div(\n Div('DATE', 
css_class='col-md-2'),\n Div('DEPARTED', css_class='col-md-1 col-md-offset-1'),\n Div('RETURNED', css_class='col-md-1 col-md-offset-1'),\n Div('Response', css_class='col-md-1 col-md-offset-1'), # strong, color-red, ... (none work\n css_class='row',\n )),\n HTML(\"
    \"), # blank spacer\n Fieldset('Destination',\n Div(\n Div('VEHICLE', css_class='col-md-2'),\n Div('PURPOSE', css_class='col-md-2 col-md-offset-1'),\n Div('LOCATION', css_class='col-md-4 col-md-offset-1'),\n css_class='row',\n )),\n HTML(\"
    \"), # blank spacer\n Fieldset('Distance',\n Div(\n Div('ODOSTART', css_class='col-md-1 '),\n Div('ODOFINISH', css_class='col-md-1 col-md-offset-2'),\n # Div('KM', css_class='col-md-1 col-md-offset-1'),\n css_class='row',\n )),\n HTML(\"
    \"), # blank spacer\n Fieldset('Crew',\n Div(\n Div('DRIVER', css_class='col-md-2 '),\n Div('OIC', css_class='col-md-2 col-md-offset-1'),\n Div('CREW', css_class='col-md-1 col-md-offset-1'),\n css_class='row',\n )),\n HTML(\"
    \"), # blank spacer\n )\n\n\nclass VehicleForm(Form):\n\n def __init__(self, *args, **kwargs):\n super(VehicleForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper(form=self)\n self.helper.form_id = 'id-vehicle-form'\n self.helper.form_method = 'post'\n self.helper.form_class = 'form-horizontal'\n\n avehs = Vehicle.active_veh.all()\n i = 0\n for av in avehs:\n v1h = \" ' + av.FullName + \"\"\n self.helper.layout.append(Fieldset('', Div(Div(HTML(v2h)), css_class='input-lg col-xs-10 col-sm-10 col-md-6 col-lg-4',)))\n self.helper.layout.append(Fieldset('', Div(Div(HTML(\"
    \")), css_class='row',)))\n i = i + 1\n\n\nclass StationEventSignInForm(Form):\n\n def __init__(self, *args, **kwargs):\n super(StationEventSignInForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper(form=self)\n self.helper.form_id = 'id-stationev-form'\n self.helper.form_method = 'post'\n self.helper.form_class = 'form-horizontal'\n\n events_today = StationEvent.today.all()\n\n for ev in events_today: # Add one button for event scheduled for today. Clicking invokes checkin process\n evlabel = \"%s: %s (OIC:%s)\" % (format(ev.eventStart, \"%H:%M\"), ev.Activity.fullname, ev.OIC)\n v1h = \" ' + evlabel + \"\"\n self.helper.layout.append(Fieldset('', Div(Div(HTML(v2h)), css_class='input-lg col-xs-10 col-sm-10 col-md-6 col-lg-4',)))\n self.helper.layout.append(Fieldset('', Div(Div(HTML(\"
    \")), css_class='row',)))\n\n # Add button to allow new event to be created. Use bootstrap btn-primary to show as blue\n v1h = \" Create New Event \"\n self.helper.layout.append(Fieldset('', Div(Div(HTML(v1h)), css_class='input-lg col-xs-10 col-sm-10 col-md-6 col-lg-4', )))\n self.helper.layout.append(Fieldset('', Div(Div(HTML(\"
    \")), css_class='row', )))\n\n\n for ev in events_today: # Show who has checked in to each event, as well as in/out times\n self.helper.layout.append(Fieldset('', Div(Div(HTML(\"
    Signed In : %s, OIC:%s
    \"%(ev.Activity.fullname,ev.OIC))), css_class='row', )))\n\n attending = EventAttendance.objects.filter(event=ev).order_by('inTime')\n for att in attending:\n # check if multiple entries for same member. If so, remove duplicates, leaving latest only\n lastrec = attending.filter(member=att.member).latest('member')\n if (att != lastrec): # replace last entry for this member with this new one\n lastrec.delete()\n\n attending = EventAttendance.objects.filter(event=ev).order_by('inTime') # recalculate list for display\n for att in attending:\n\n # Prepare a button to allow this member to check out\n cout = \" ' + \"Sign Out\" + \"\"\n\n if att.outTime: # Show check out time if available\n outT = format(att.outTime, \"%H:%M\")\n v2h = \".\" # No button if already checked out\n else:\n outT = v2h # Show checkout button\n\n self.helper.layout.append(\n Fieldset('',\n Div(\n Div(HTML(format(att.inTime, \"%H:%M\")), css_class='col-md-1'),\n Div(HTML(att.member), css_class='col-md-2'),\n Div(HTML(outT), css_class='col-md-2'),\n # Div(HTML(v2h), css_class='col-md-2'),\n css_class='row',\n )),\n )\n\n\n\nclass StationEventForm(ModelForm):\n eventDate = forms.DateField(widget=DateInput()) # forms.TextInput(attrs={'type': 'date'})) # AdminDateWidget()) # widget=DatePicker(options={\"format\": \"dd/mm/yyyy\",\"autoclose\": True}))\n eventStart = forms.TimeField(widget=TimeInput(format='%H:%M')) # forms.TextInput(attrs={'type': 'time'}))\n eventEnd = forms.TimeField(widget=TimeInput(format='%H:%M'))\n\n OIC = forms.ModelChoiceField(queryset=Member.field_officers.all())\n IC2 = forms.ModelChoiceField(queryset=Member.active_members.all())\n DriverA = forms.ModelChoiceField(queryset=Member.all_drivers.all())\n DriverB = forms.ModelChoiceField(queryset=Member.all_drivers.all())\n\n Activity = forms.ModelChoiceField(queryset=StationActivity.objects.all().order_by('fullname'))\n# details = forms.CharField()\n\n class Meta:\n model = StationEvent\n fields = ['eventDate', 'eventStart', 'eventEnd', 'Activity', 'details', 'OIC', 'IC2', 'DriverA', 'DriverB', ]\n\n def __init__(self, *args, **kwargs):\n super(StationEventForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'id-station_event_form'\n self.helper.form_method = 'post'\n self.helper.add_input(Submit('submit', 'Submit'))\n self.helper.form_class = 'form-horizontal'\n\n self.fields['OIC'].label = 'OIC'\n self.fields['IC2'].label = 'OIC2'\n self.fields['DriverA'].label = 'Driver 1'\n self.fields['DriverB'].label = 'Driver 2'\n\n for fieldname in ['IC2', 'DriverA', 'DriverB', ]:\n self.fields[fieldname].required = False\n\n self.helper.layout = Layout(\n Fieldset('Start/Finish',\n Div(\n Div('eventDate', css_class='col-md-2'),\n Div('eventStart', css_class='col-md-1 col-md-offset-1'),\n Div('eventEnd', css_class='col-md-1 col-md-offset-1'),\n css_class='row',\n )),\n HTML(\"
    \"), # blank spacer\n Fieldset('Purpose',\n Div(\n Div('Activity', css_class='col-md-2'),\n Div('details', css_class='col-md-3 col-md-offset-1'),\n css_class='row',\n )),\n HTML(\"
    \"), # blank spacer\n Fieldset('Officer',\n Div(\n Div('OIC', css_class='col-md-2 '),\n Div('IC2', css_class='col-md-2 col-md-offset-1'),\n css_class='row',\n ),\n Div(\n Div('DriverA', css_class='col-md-2 '),\n Div('DriverB', css_class='col-md-2 col-md-offset-1'),\n css_class='row',\n )\n ),\n HTML(\"
    \"), # blank spacer\n )\n\n\n\nclass EventAttendanceForm(ModelForm):\n\n event = forms.ModelChoiceField(queryset=StationEvent.today)\n member = forms.ModelChoiceField(queryset=Member.active_members.all())\n inTime = forms.TimeField(widget=TimeInput(format='%H:%M')) # Default value lost using TextInput widget\n outTime = forms.TimeField(widget=forms.TextInput(attrs={'type': 'time'}))\n mdetails = forms.CharField()\n\n class Meta:\n model = EventAttendance\n fields = [ 'member', 'event', 'inTime', 'outTime', 'mdetails', ]\n\n def __init__(self, *args, **kwargs):\n super(EventAttendanceForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'id-event-att_form'\n self.helper.form_method = 'post'\n self.helper.add_input(Submit('submit', 'Submit'))\n self.helper.form_class = 'form-horizontal'\n\n evname = \".\"\n if 'initial' in kwargs:\n for fieldname in ['event', ]: # event and start time already known (allow time to be changed anyway)\n self.fields[fieldname].disabled = True\n eid = kwargs['initial']['event']\n evname = StationEvent.objects.get(id=eid).Activity.fullname\n for fieldname in ['outTime', 'mdetails', 'event', 'inTime', ]:\n self.fields[fieldname].required = False\n\n self.helper.layout = Layout(\n Fieldset('Attend Event : %s       Date: %s'% (evname, datetime.date.today().strftime('%d-%b-%Y')),\n Div(\n # Div('event__details', css_class='col-md-8'),\n # HTML(\"Date: %s\"%datetime.date.today()),\n HTML(\"
    \"), # blank spacer\n css_class='row',\n )),\n# HTML(\"
    \"), # blank spacer\n Fieldset('Member',\n Div(\n Div('member', css_class='col-md-2'),\n Div('inTime', css_class='col-md-1 col-md-offset-1'),\n Div('outTime', css_class='col-md-1 col-md-offset-1'),\n Div('mdetails', css_class='col-md-4 col-md-offset-1'),\n css_class='row',\n )),\n HTML(\"
    \"), # blank spacer\n )\n\n#\n# class MemberForm(ModelForm):\n# class Meta:\n# model = Member\n# fields = [ 'rfsID', 'mContact', 'joined', 'left', 'active', 'birthdate', 'curr_status',]\n#\n# def __init__(self, *args, **kwargs):\n# super(MemberForm, self).__init__(*args, **kwargs)\n# self.helper = FormHelper()\n# self.helper.form_id = 'id-member-form'\n# self.helper.form_method = 'post'\n# self.helper.add_input(Submit('submit', 'Submit'))\n# self.helper.form_class = 'form-horizontal'\n#\n# for fieldname in ['curr_status', 'active', 'joined', 'left', ]: # Calculated or editable only in admin\n# self.fields[fieldname].disabled = True\n#\n# mcx = self.instance.mContact.id\n#\n# mc1h = \"' \"'Contact Details'\"\n# mcs = \"href={% url \\\"Contact-update\\\" \" + \"%s \" % mcx + '%} />'\n#\n# v1h = \"' + \"Contact Details\"\n#\n# self.helper.layout = Layout(\n# Fieldset('Name',\n# Div(\n# Div(Submit(\"Contact Details\", mcs )),\n# Div('active', css_class='col-md-2 col-md-offset-1'),\n# Div('curr_status', css_class='col-md-2 col-md-offset-1'),\n# css_class='row',\n# )),\n# HTML(\"
    \"), # blank spacer\n# Fieldset('Dates',\n# Div(\n# Div('birthdate', css_class='col-md-2'),\n# Div('joined', css_class='col-md-2 col-md-offset-1'),\n# Div('left', css_class='col-md-2 col-md-offset-1'),\n# css_class='row',\n# )),\n# HTML(\"
    \"), # blank spacer\n# )\n#\n# self.helper.layout.append(Fieldset('', Div(Div(HTML(v2h)), css_class='input-lg col-xs-10 col-sm-10 col-md-6 col-lg-4', )))\n#\n\nclass ContactForm(ModelForm):\n class Meta:\n model = Contact\n fields = [ 'surname', 'firstname', 'pref_name', 'email',\n 'Home', 'Work', 'Mobile', 'homeaddress','homesuburb', 'homestate', 'homepostcode', ]\n\n def __init__(self, *args, **kwargs):\n super(ContactForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'id-contact-form'\n self.helper.form_method = 'post'\n self.helper.add_input(Submit('submit', 'Submit'))\n self.helper.form_class = 'form-horizontal'\n\n for fieldname in ['Home', 'Work', 'Mobile', 'pref_name']: # Avoid cluttering layout\n self.fields[fieldname].help_text = None #\n\n self.helper.layout = Layout(\n Fieldset('Name',\n Div(\n Div('surname', css_class='col-md-2'),\n Div('firstname', css_class='col-md-2 col-md-offset-1'),\n Div('pref_name', css_class='col-md-2 col-md-offset-1'),\n css_class='row',\n )),\n HTML(\"
    \"), # blank spacer\n Fieldset('Contact',\n Div(\n Div('Home', css_class='col-md-2'),\n Div('Work', css_class='col-md-2 col-md-offset-1'),\n Div('Mobile', css_class='col-md-2 col-md-offset-1'),\n Div('email', css_class='col-md-3 col-md-offset-1'),\n css_class='row',\n )),\n HTML(\"
    \"), # blank spacer\n Fieldset('Address',\n Div(\n Div('homeaddress', css_class='col-md-2'),\n Div('homesuburb', css_class='col-md-2 col-md-offset-1'),\n Div('homestate', css_class='col-md-1 col-md-offset-1'),\n Div('homepostcode', css_class='col-md-1 col-md-offset-1'),\n css_class='row',\n )),\n HTML(\"
    \"), # blank spacer\n )\n\n\nclass MemberForm(ModelForm):\n\n def __init__(self, *args, **kwargs):\n self.helper = FormHelper()\n self.helper.form_tag = False\n self.helper.layout = Layout(\n Field('mContact'),\n )\n super(MemberForm, self).__init__(*args, **kwargs)\n\n class Meta:\n model = Member\n fields = ['rfsID', 'mContact', 'joined', 'left', 'active', 'birthdate', 'curr_status', ]\n\n\nclass Memb_qualificationForm(ModelForm):\n def __init__(self, *args, **kwargs):\n self.helper = FormHelper()\n self.helper.form_tag = False\n self.helper.template = 'bootstrap/table_inline_formset.html'\n# self.helper.layout = Layout(\n# Field('qual_id'),\n# )\n super(Memb_qualificationForm, self).__init__(*args, **kwargs)\n\n class Meta:\n model = Memb_qualification\n fields = ['memb', 'qual_id', 'cert_date', 'expiry_date']\n\n","sub_path":"personnel/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":19541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"255453010","text":"\r\n'''\r\ntopframe = Frame(root)\r\ntopframe.pack(side=TOP)\r\nmidframe = Frame(root)\r\nmidframe.pack()\r\nbottomframe = Frame(root)\r\nbottomframe.pack(side = BOTTOM)\r\n\r\nbutton1 = Button(topframe, text = \"fuckyes\", fg = \"red\")\r\nbutton2 = Button(topframe, text = \"fuckno\", fg=\"yellow\")\r\nbutton3 = Button(topframe, text = \"button3\", fg=\"green\")\r\nbutton4 = Button(bottomframe, text = \"Refresh\", fg=\"blue\")\r\n\r\nbutton1.pack(side=LEFT)\r\nbutton2.pack(side=LEFT)\r\nbutton3.pack(side=LEFT)\r\nbutton4.pack(side=BOTTOM)\r\n\r\n\r\ntwo = Label(root, text=\"Time\", bg=\"Red\", fg=\"black\")\r\ntwo.pack(fill=X)\r\nthree = Label(root, text=\"Three\", bg=\"red\", fg=\"black\")\r\nthree.pack(side=LEFT,fill=Y)\r\n\r\n\r\ndef printkint(event):\r\n\tprint(\"Running some shit sooooonnnnn\")\r\n\r\n\r\nlabel_1 = Label(root, text = \"name\")\r\nlabel_2 = Label(root, text = \"passward\")\r\nentry_1 = Entry(root)\r\nentry_2 = Entry(root)\r\nbutton1 = Button(root, text=\"Login!\")\r\n\r\n\r\n#sticky sticks alignment of cells (North, South, East, West)\r\nlabel_1.grid(row=0,column=0,sticky=E)\r\nlabel_2.grid(row=1,column=0,sticky=E)\r\n\r\nentry_1.grid(row=0, column=1)\r\nentry_2.grid(row=1, column=1)\r\n\r\nchkbox = Checkbutton(root, text=\"Click if you suck!\" )\r\nchkbox.grid(columnspan=2)\r\n\r\nbutton1.grid(row=4, column=1)\r\nbutton1.bind(\"\", printkint)\r\n\r\n'''\r\n'''\r\n\r\nfrom tkinter import *\r\n\r\n\r\nclass Buttons:\r\n\r\n\tdef __init__(self, master):\r\n\t\tframe = Frame(master)\r\n\t\tframe.pack()\r\n\r\n\t\tself.printButton = Button(frame, text=\"Print Message\", command=self.printMessage)\r\n\t\tself.printButton.pack(side=LEFT)\r\n\r\n\t\tself.quitButton = Button(frame, text=\"QUIT!\", command=frame.quit)\r\n\t\tself.quitButton.pack(side=LEFT)\r\n\r\n\tdef printMessage(self):\r\n\t\tprint(\"Wowzers !!!\")\r\n\r\n\tdef contactaliens(self, url):\r\n\t\tprint(\"contactingaliens\")\r\n\r\n\r\nroot = Tk()\r\n\r\ndef leftClick(event):\r\n\tprint(\"detected ... Left\")\r\n\tprint(\"clicklocation: \")\r\n\r\n\r\ndef middleClick(event):\r\n\tprint(\"detected ... middle\")\r\n\tprint(\"clicklocation: \")\r\n\r\n\r\ndef rightClick(event):\r\n\tprint(\"detected ... 
Right\")\r\n\tprint(\"clicklocation: \")\r\n\r\n\r\n\r\nroot = Tk()\r\nb = Buttons(root)\r\nroot.mainloop()\r\n'''\r\n\r\n\r\n\r\nfrom tkinter import *\r\nimport sys\r\nimport json\r\nimport time\r\nfrom websocket import create_connection\r\n\r\n\r\nclass toolbar:\r\n\r\n\r\n\tdef __init__(self, master):\r\n\t\t\r\n\t\tself.master=master\r\n\r\n\t\tprint(\"init\")\r\n\t\tframe = Frame(master)\r\n\t\tframe.pack()\r\n\r\n\tdef filemenu(self, master):\r\n\t\t\r\n\r\n\t\tdropdown = Menu(master)\r\n\r\n\t\tmaster.config(menu=dropdown)\r\n\r\n\t\tfileMenu = Menu(dropdown)\r\n\t\tlabel_1 = Label(master, text = \"server: \")\r\n\t\tlabel_1.pack(side=LEFT)\r\n\t\tbutton1 = Button(master, text = \"Connect\", command = self.serverconf)\r\n\t\tbutton1.pack(side=LEFT)\r\n\t\t\r\n\t\tself.entry_1 = Entry(master)\r\n\t\tself.entry_1.pack(side=LEFT)\r\n\r\n\t\t\r\n\r\n\r\n\t\t#FILE\r\n\t\tdropdown.add_cascade(label=\"File\", menu=fileMenu)\r\n\t\t#ITEM1\r\n\t\tfileMenu.add_command(label = \"New Project\", command = self.doNothing)\r\n\t\t#ITEM2\r\n\t\tfileMenu.add_command(label = \"Do more of nothing\", command = self.doNothing)\r\n\t\t#ITEM3\r\n\t\tfileMenu.add_command(label = \"Quit\", command = self.quitit)\r\n\t\t#ITEM4\r\n\t\tfileMenu.add_separator()\r\n\t\t\r\n\t\t#EDIT\r\n\t\teditMenu = Menu(dropdown)\r\n\r\n\t\tdropdown.add_cascade(label=\"Edit\", menu=editMenu)\r\n\t\t#ITEM1\r\n\t\teditMenu.add_command(label=\"Copy\", command = self.doNothing)\r\n\r\n\tdef doNothing(self):\r\n\t\tprint(\"doing nothing\")\r\n\r\n\r\n\tdef mainloopinit(self):\r\n\t\tmaster.mainloop()\r\n\r\n\tdef serverconf(self):\r\n\t\turl = self.entry_1.get()\r\n\t\tself.app = servconnect(self.serverconf, url)\r\n\r\n\t\r\n\r\n\r\n\tdef quitit(self):\r\n\t\tprint(\"its quittin' time!!!\")\r\n\t\tsys.exit(0)\r\n\r\n\r\nclass servconnect:\r\n\r\n\r\n\tdef __init__(self, master, url):\r\n\t\tself.master = master\r\n\t\tprint(\"Connecting to : \", url)\r\n\t\tws = self.create_connections(url)\r\n\t\tself.subscribe(ws)\r\n\t\tr = self.listen(ws)\r\n\t\t\r\n\r\n\tdef unsubscribe(self, ws):\r\n\t\tws.send(json.dumps({\r\n\r\n\t\t\t#unsubscribe...\r\n\r\n\t\t\t\"command\": \"unsubscribe\",\r\n\t\t\t\"channel\": \"1002\"\r\n\r\n\r\n\t\t\t}))\r\n\t\tresponse = ws.recv()\r\n\t\tresponse = json.loads(response)\r\n\t\tprint(\"\\n unsubbed\")\r\n\r\n\tdef listen(self, ws):\r\n\t\tresponse = []\r\n\r\n\t\t\r\n\t\tr = ws.recv()\r\n\t\tresponse1 = json.loads(r)\r\n\t\tresponse2 = json.loads(r)\r\n\t\tresponse3 = json.loads(r)\r\n\t\tresponse4 = json.loads(r)\r\n\t\tresponse5 = json.loads(r)\r\n\r\n\t\tprint(response5)\r\n\r\n\r\n\r\n\t\treturn response\r\n\r\n\r\n\tdef subscribe(self, ws):\r\n\t\tws.send(json.dumps({\r\n\t\t\t#subscribe to stream\r\n\t\t\t### The USER channel requires to be logged in\r\n\t\t\t### Not sure how to do this but works on webpage\r\n\t\t\t# ws.send(json.dumps({\r\n\t\t\t# \"command\": \"subscribe\",\r\n\t\t\t# \"channel\": \"1000\"\r\n\t\t\t# \"userID\": \"\"\r\n\t\t\t# }))\r\n\t\t\t# currencypair, \r\n\t\t\t\r\n\t\t\t\"command\" : \"subscribe\",\r\n\t\t\t\"channel\" : \"USDT_ETH\",\r\n\t\t\t\"pair\" : \"ETH_USDT\"\r\n\t\t\t#\"userID\": \"\"\r\n\r\n\t\t\t}))\r\n\r\n\tdef create_connections(self, url):\r\n\t\t\tws = create_connection(url)\r\n\t\t\tresponse = ws.recv()\r\n\t\t\tprint(ws, \"\\n Connection established to \", url)\r\n\t\t\treturn ws\r\n\r\n\r\n### FIX THE FOLLOWING LINES:\r\n\r\n''' \r\n\tdef makedisconnect(self, master):\r\n\t\tself.button2 = Button(self.master, text = \"Disconnect\", command = 
self.disconnect)\r\n#### self.button2 is the error^^^^\r\n\r\n\r\n\t\tself.button2.pack(side=LEFT)\r\n\r\n\tdef disconnect(self):\r\n\t\tprint(\"totally disconnecting\")\r\n'''\r\n\r\n### AttributeError: 'function' object has no attribute 'tk'\r\n\r\ndef main():\r\n\r\n\t\r\n\troot = Tk()\r\n\r\n\ttool = toolbar(root)\r\n\ttool.filemenu(root)\r\n\r\n\troot.mainloop()\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"GUI_Beta.py","file_name":"GUI_Beta.py","file_ext":"py","file_size_in_byte":5302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"166670387","text":"from django.http import Http404\nfrom bs4 import BeautifulSoup\n\ndef check_perm(question, user):\n part = question.part\n order = part.order\n if order.teacher != user and not user.is_superuser:\n raise Http404\n return True\n\n\ndef check_expert_perm(question, user):\n part = question.part\n order = part.order\n if order.expert != user:\n raise Http404\n return True\n\n\ndef stripstyles(htmltext):\n soup = BeautifulSoup(htmltext)\n\n styles = soup.findAll('style') or []\n for style in styles:\n style = \"\"\n return soup.prettify()\n","sub_path":"orders/views/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"157078015","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/miph/Development/logdog/python-logdog/logdog/version.py\n# Compiled at: 2015-07-04 19:11:35\nfrom __future__ import unicode_literals\nMAJOR_VERSION = 0\nMINOR_VERSION = 2\nPATCH_VERSION = 11\nversion = (MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION)\n__version__ = (b'.').join(map(str, version))\n__author__ = b'Evgeny Lychkovsky'\n__email__ = b'miphreal@gamil.com'","sub_path":"pycfiles/logdog-0.2.11.tar/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"342256062","text":"#!/usr/bin/python\n\n\"\"\"\nThis is an Activity in the context of AWS Step Functions.\nhttps://docs.aws.amazon.com/step-functions/latest/dg/concepts-activities.html\nThis activity retrieves a task (specified by the ACTIVITY_ARN env variable)\nwhich has been scheduled for execution by a running state machine.\nImage paths or images are to be passed to this activity.\nThe images will be pushed through the model,\nand the output along with some image identifier\nwill be passed on to the next activity.\n\"\"\"\nimport boto3\nimport botocore\nimport json\nimport logging\nimport numpy\nimport os\nimport sys\n\nimport cv2 as cv\nfrom botocore.client import Config\n\nfrom db import loadRects\n\n\n\nclass Rect:\n def __init__(self, x, y, w, h):\n self.x = x\n self.y = y\n self.w = w\n self.h = h\n self.x2 = x + w\n self.y2 = y + h\n self.containedRects = []\n\n\ndef drawRects(image, rect):\n r = Rect(rect['x'], rect['y'], rect['w'], rect['h'])\n # roi = image[r.y:r.y2,r.x:r.x2]\n cv.rectangle(image,(r.x,r.y),(r.x+r.w,r.y+r.h),(0,255,0),2)\n cv.imshow('rects', image)\n cv.waitKey(0)\n\n\n\ndef main(filename, job, page):\n filename = './test-data/packet-1/2.png' if filename is None else filename\n job = '1' if job is None else job\n if (len(job) == 36):\n job_id = job\n else:\n end_id = job.zfill(len('000000000000'))\n job_id = 
f'00000000-0000-0000-0000-{end_id}'\n\n print(f'Job: {job_id}')\n print(f'File: {filename}')\n print(f'Page: {page}')\n\n if not os.path.isfile(filename):\n for path, dirs, files in os.walk('.'):\n for filename in files:\n print(os.path.join(path,filename))\n\n print(f'file does not exits {filename}')\n raise Exception('error file does not exist')\n\n base_filename = os.path.basename(filename);\n output_file = f'{filename}.out.png'\n rects = loadRects(job_id, page)\n print(len(rects))\n for i,r in enumerate(rects):\n print(r)\n image = cv.imread(filename)\n drawRects(image, (r))\n\n\n\ndef help():\n print('-h, --help | this list')\n print('* Set the image file to process:')\n print('-file=filename')\n print('--img, -i filename')\n print('* Set the job id used to save the records:')\n print('-job=jobId')\n print('--job, -j jobId | set the jobId')\n print('* set page number to work with inside the job')\n print('-page=pageNo')\n print('--page, -p pageNo | set the page number')\n print('argument at the end that matches nothing is taken as a image filename')\n\n\n\nif __name__ == \"__main__\":\n filename = './test-data/packet-1/2.png'\n job = 1\n testing = False\n piece = None\n page = 0\n # execute only if run as a script\n for arg in sys.argv[1:]:\n if (arg.startswith('-file=')):\n filename = arg[6:]\n elif (arg.startswith('-job=')):\n job = arg[5:]\n elif (arg.startswith('-page=')):\n page = int(arg[6:])\n elif (arg == '--job' or arg == '-j'):\n piece = 'jobid'\n elif (arg == '--img' or arg == '-i'):\n piece = 'filename'\n elif (arg == '--page' or arg == '-p'):\n piece = 'pageno'\n elif (piece == 'jobid'):\n piece = None\n job = arg\n elif (piece == 'filename'):\n piece = None\n filename = arg\n elif (piece == 'pageno'):\n piece = None\n page = int(arg)\n elif (arg == '-h' or arg == '--help'):\n help()\n exit()\n else:\n filename = arg\n\n cv.namedWindow('rects', cv.WINDOW_NORMAL)\n cv.resizeWindow('rects', 1000, 1000)\n\n main(filename, job, page)\n","sub_path":"select/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"489841025","text":"import os\nimport argparse\nfrom os import listdir\nfrom pydub import AudioSegment\nfrom pydub.utils import make_chunks\n\n\n# Parse the CLI arguments\n# -f split only one audio file\n# -d split all audio files in a direcotry\n# -o output directory where the splitted files will be saved\ndef parse_arguments():\n parser = argparse.ArgumentParser(description='Split wav file in to parts')\n parser.add_argument('-f', '--file')\n parser.add_argument('-d', '--dir', type=dir_path)\n parser.add_argument(\n '-o', '--output', help='Output directory', required=True)\n\n return parser.parse_args()\n\n\n# Check if the given path is valid\ndef dir_path(path):\n if os.path.isdir(path):\n return path\n else:\n raise argparse.ArgumentTypeError(\n f\"readable_dir:{path} is not a valid path\")\n\n\n# Split audio file in 2000 second parts\ndef split_audio_file(file, output):\n audio = AudioSegment.from_file(file, \"wav\")\n chunk_length_ms = 2000\n chunks = make_chunks(audio, chunk_length_ms)\n for i, chunk in enumerate(chunks):\n chunk_name = os.path.basename(file).replace(\n '.wav', '_') + \"{0}.wav\".format(i)\n print(\"exporting\", chunk_name)\n chunk.export(output + '/' + chunk_name, format=\"wav\")\n\n\n# Split all audio files a a direcotry\ndef split_audio_in_dir(directory, output):\n for file in listdir(directory):\n 
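# Clarifying note (added): chunk_length_ms = 2000 in split_audio_file above
# is 2000 milliseconds, i.e. each exported chunk is 2 seconds long, not
# "2000 second parts" as the earlier comment suggests -- pydub measures
# AudioSegment lengths in ms.
# Hypothetical usage sketch (paths are made up for illustration):
#   split_audio_in_dir('./wavs', './chunks')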
split_audio_file(directory + '/' + file, output)\n\n\n# Script start\n# parse all arguments and split the audio files\ndef main():\n args = parse_arguments()\n output_dir = dir_path(args.output)\n audio_file = args.file\n audio_dir = args.dir\n\n if audio_file != None:\n split_audio_file(audio_file, output_dir)\n elif audio_dir != None:\n split_audio_in_dir(dir_path(audio_dir), output_dir)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"utils/split-auido-files.py","file_name":"split-auido-files.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"260687603","text":"\"\"\" Unit tests for pipelines expressed via dask.delayed\n\n\n\"\"\"\nimport logging\nimport sys\nimport unittest\n\nimport numpy\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\n\nfrom data_models.polarisation import PolarisationFrame\nfrom processing_components.imaging.base import predict_skycomponent_visibility\nfrom processing_components.simulation.configurations import create_named_configuration\nfrom processing_components.simulation.testing_support import ingest_unittest_visibility, \\\n create_unittest_model, create_unittest_components\nfrom processing_components.skycomponent.operations import insert_skycomponent\nfrom processing_components.visibility.coalesce import convert_blockvisibility_to_visibility\n\ntry:\n import casacore\n from casacore.tables import table # pylint: disable=import-error\n from processing_components.visibility.base import create_blockvisibility, create_blockvisibility_from_ms\n from processing_components.visibility.base import export_blockvisibility_to_ms\n \n run_ms_tests = True\n# except ModuleNotFoundError:\nexcept:\n run_ms_tests = False\n\nlog = logging.getLogger(__name__)\n\nlog.setLevel(logging.DEBUG)\nlog.addHandler(logging.StreamHandler(sys.stdout))\nlog.addHandler(logging.StreamHandler(sys.stderr))\n\n\nclass TestImaging(unittest.TestCase):\n def setUp(self):\n \n from data_models.parameters import arl_path\n self.dir = arl_path('test_results')\n \n def actualSetUp(self, freqwin=1, block=True, dopol=False):\n \n self.npixel = 512\n self.low = create_named_configuration('LOWBD2', rmax=750.0)\n self.freqwin = freqwin\n self.vis = list()\n self.ntimes = 5\n self.times = numpy.linspace(-3.0, +3.0, self.ntimes) * numpy.pi / 12.0\n\n if dopol:\n self.vis_pol = PolarisationFrame('linear')\n self.image_pol = PolarisationFrame('stokesIQUV')\n f = numpy.array([100.0, 20.0, -10.0, 1.0])\n else:\n self.vis_pol = PolarisationFrame('stokesI')\n self.image_pol = PolarisationFrame('stokesI')\n f = numpy.array([100.0])\n\n if freqwin > 1:\n self.frequency = numpy.linspace(0.8e8, 1.2e8, self.freqwin)\n self.channelwidth = numpy.array(freqwin * [self.frequency[1] - self.frequency[0]])\n flux = numpy.array([f * numpy.power(freq / 1e8, -0.7) for freq in self.frequency])\n else:\n self.frequency = numpy.array([1e8])\n self.channelwidth = numpy.array([1e6])\n flux = numpy.array([f])\n\n self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')\n self.bvis = ingest_unittest_visibility(self.low,\n self.frequency,\n self.channelwidth,\n self.times,\n self.vis_pol,\n self.phasecentre, block=block)\n \n self.vis = convert_blockvisibility_to_visibility(self.bvis)\n \n self.model = create_unittest_model(self.vis, self.image_pol, npixel=self.npixel, nchan=freqwin)\n \n self.components = create_unittest_components(self.model, flux)\n \n self.model 
= insert_skycomponent(self.model, self.components)\n \n self.bvis = predict_skycomponent_visibility(self.bvis, self.components)\n \n \n @unittest.skipUnless(run_ms_tests, \"requires the 'casacore' module\")\n def test_export_ms(self):\n self.actualSetUp()\n msoutfile = \"%s/test_imaging_ms_%dfreqwin.ms\" % (self.dir, len(self.frequency))\n export_blockvisibility_to_ms(msoutfile, [self.bvis], source_name='M31')\n\n @unittest.skipUnless(run_ms_tests, \"requires the 'casacore' module\")\n def test_export_ms_7freqwin(self):\n self.actualSetUp(freqwin=7)\n msoutfile = \"%s/test_imaging_ms_%dfreqwin.ms\" % (self.dir, len(self.frequency))\n export_blockvisibility_to_ms(msoutfile, [self.bvis], source_name='M31')\n\n @unittest.skipUnless(run_ms_tests, \"requires the 'casacore' module\")\n def test_export_ms_pol(self):\n self.actualSetUp(dopol=True)\n msoutfile = \"%s/test_imaging_ms_pol_%dfreqwin.ms\" % (self.dir, len(self.frequency))\n export_blockvisibility_to_ms(msoutfile, [self.bvis], source_name='M31')\n\n @unittest.skipUnless(run_ms_tests, \"requires the 'casacore' module\")\n def test_export_ms_7freqwin_pol(self):\n self.actualSetUp(freqwin=7, dopol=True)\n msoutfile = \"%s/test_imaging_ms_pol_%dfreqwin.ms\" % (self.dir, len(self.frequency))\n export_blockvisibility_to_ms(msoutfile, [self.bvis], source_name='M31')\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/processing_components/test_imaging_ms.py","file_name":"test_imaging_ms.py","file_ext":"py","file_size_in_byte":4827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"207702644","text":"# <>\n# Copyright 2022, Lawrence Livermore National Security, LLC.\n# See the top-level COPYRIGHT file for details.\n# \n# SPDX-License-Identifier: BSD-3-Clause\n# <>\n\nfrom PoPs.chemicalElements import misc as chemicalElementMiscPoPsModule\nfrom PoPs.families import nuclide as nuclideModule\nfrom xData import enums as xDataEnumsModule\n\nfrom brownies.legacy.converting import endf_endl as endf_endlModule\n\nfrom fudge.covariances import covarianceSuite as covarianceSuiteModule\nfrom fudge.covariances import mixed as covarianceMixedModule\n\nfrom .. import endfFormats as endfFormatsModule\nfrom .. 
import gndsToENDF6 as gndsToENDF6Module\nfrom .modelParameters import averageParametersToENDF6\n\ndef toENDF6(self, endfMFList, flags, targetInfo, verbosityIndent=''):\n \"\"\"Convert to ENDF format.\"\"\"\n\n reactionSuite = targetInfo['reactionSuite']\n\n ZAM, AWT = targetInfo['ZA'], targetInfo['mass']\n NIS, ABN = 1, 1.0\n ZAI = ZAM # assuming one isotope/file\n MTL = 0 # mtl=1 sections are handled in lumpedCovariance\n\n if self.parameterCovariances:\n self.parameterCovariances.toENDF6(endfMFList, flags, targetInfo, verbosityIndent)\n\n sections = self.covarianceSections\n # sort covariances by MF/MT:\n mfmts = []\n for section_ in sections:\n mfmts.append( list( map( int, section_.rowData.ENDF_MFMT.split(',') ) ) )\n if len(mfmts)==0: return\n\n mfs, mts = list( zip( *mfmts ) )\n zipList = list( zip( mfs, mts, sections ) )\n idx = 0\n while idx P, \"S must be greater than P\"\n return (S**2 - P**2)**0.5 \n \ndef calcs(MW=None, Mvar=None, I=None, V=None):\n \"\"\" Calculates S from two out of MW, Mvar, I, and V\n \n If providing I, then V is compulsory\n If provding I, then either MW and Mvar is compulsory\n \n \"\"\"\n #run some checks\n try:\n if I is not None and V is None:\n raise Exception(\"Voltage required if current entered\")\n except Exception as e:\n print(e)\n raise\n\n inputs = 0\n #count number of None arguments we get\n if Mvar is not None:\n inputs += 1\n if MW is not None:\n inputs += 1\n\n try:\n if inputs < 2 and I is None:\n raise Exception(\"MW, Mvar are required if I not entered.\")\n except Exception as e:\n print(e)\n raise\n\n if MW is not None and Mvar is not None: \n return (MW**2 + Mvar**2)**0.5\n else:\n #calculate using I and V\n return (3**0.5 * V * I) \n \ndef calcir(init,final,startyear,endyear):\n \"\"\"Calculates growth rate based on a start value and end value and a start\n year and end year. Assumed annual compounding.\"\"\"\n return (math.exp(math.log(final/init)/(endyear-startyear))-1)\n\ndef calcshcs(Zpu, Vnom, Vpu=1.0):\n \"\"\"Calculates the short circuit current in Amperes.\n Vpu = Pre-fault voltage (optional) \n Vnom = Vnom in kV\n Zpu = Impedance in per unit on 100 MVA base\n \"\"\"\n Spu = Vpu / Zpu\n return 100 * Spu / (3**0.5 * Vnom) \n ","sub_path":"psatools/eemath.py","file_name":"eemath.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"13026612","text":"\"\"\"\nThis script will get the description for a dossiertype. It will take Aanleg into account while avoiding the\ncartesian product.\nIt works together with get_graph_aanleg, where the graph will be created.\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nfrom lib import neostore\nfrom lib import my_env\n\n\nif __name__ == \"__main__\":\n # Configure Command line arguments\n parser = argparse.ArgumentParser(\n description=\"Script to create a Confluence description for a Dossiertype in 'Omgevingsvergunning' Archief\"\n )\n parser.add_argument('-t', '--type', default='Aanvraag Omgevingsvergunning',\n help='Set the Dossiertype (exact naam!) to start with. 
Default is \"Aanvraag Vergunning\".')\n args = parser.parse_args()\n items = []\n cfg = my_env.init_env(\"convert_protege\", __file__)\n logging.info(\"Arguments: {a}\".format(a=args))\n outfile = os.path.join(cfg['Main']['reportdir'], \"{n}.txt\".format(n=args.type))\n ns = neostore.NeoStore(cfg)\n dosstype_name = args.type\n # Get Node for DossierType\n attribs = {\n \"naam\": dosstype_name\n }\n dosstype_node = ns.get_node('Dossiertype', 'naam', dosstype_name)\n with open(outfile, 'w') as fh:\n intro = \"\"\n # fh.write(\"h1. {n}\\n\".format(n=dosstype_node[\"naam\"]))\n if dosstype_node[\"commentaar\"]:\n intro += \"{{color:purple}}Noot:{{color}} {c}\\n\".format(c=dosstype_node[\"commentaar\"])\n if ns.get_reference(dosstype_node[\"protege_id\"]):\n intro += \"{}\".format(ns.get_reference(dosstype_node[\"protege_id\"]))\n if len(intro) > 5:\n fh.write(\"h1. Introductie\\n{}\".format(intro))\n dosstype_id = dosstype_node[\"protege_id\"]\n # Get Aanleg for Dossiertype\n aanleg_nodes = ns.get_aanleg4type(dosstype_id)\n for aanleg_node in aanleg_nodes:\n fh.write(\"h1. {n}\\n\".format(n=aanleg_node['naam']))\n # Get ProcedureStap for Aanleg\n stap_nodes = ns.get_stap(dosstype_id, aanleg_node['naam'])\n for stap in stap_nodes:\n fh.write(\"h2. {n}\\n\".format(n=stap[\"naam\"]))\n if stap[\"commentaar\"]:\n fh.write(\"{{color:purple}}Noot:{{color}} {c}\\n\".format(c=stap[\"commentaar\"]))\n if ns.get_reference(stap[\"protege_id\"]):\n fh.write(\"{}\".format(ns.get_reference(stap[\"protege_id\"])))\n # Get documenten for Procedure Stap\n doc_array = ns.get_start_nodes(stap, 'bij_procedurestap')\n for doc in doc_array:\n fh.write(\"{{color:blue}}{n}{{color}}\\n\".format(n=doc.start_node()[\"naam\"]))\n if doc.start_node()[\"commentaar\"]:\n fh.write(\"{{color:purple}}Noot:{{color}} {c}\\n\".format(c=doc.start_node()[\"commentaar\"]))\n if ns.get_reference(doc.start_node()[\"protege_id\"]):\n fh.write(\"{}\".format(ns.get_reference(doc.start_node()[\"protege_id\"])))\n logging.info('End Application')\n","sub_path":"Python/get_description_aanleg.py","file_name":"get_description_aanleg.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"177618014","text":"import time;\nimport sys;\nimport os;\nscriptsDir = os.environ.get(\"UTIL_SCRIPTS_DIR\");\nif (scriptsDir is None):\n raise Exception(\"Please set environment variable UTIL_SCRIPTS_DIR\");\nsys.path.insert(0,scriptsDir);\nimport pathSetter;\nimport util;\nimport multiprocessing;\nimport parallelProcessKickerOffer as ppko;\n\nclass ParalleliserInfo:\n \"\"\"\n contains all the information necessary to do the parallelisation.\n \"\"\"\n def __init__(self, functionToExecute, collectReturnValues=False, waitInterval=5):\n \"\"\"\n collectReturnValues: boolean indicating whether you want to collect return\n values after the function is executed\n \"\"\"\n self.queue = multiprocessing.Queue() if collectReturnValues else None;\n self.parallelProcessKickerOffer = ppko.ParallelProcessKickerOffer_Multiprocessing(functionToExecute, returnQueue=self.queue); \n self.waitInterval = waitInterval;\n\nclass ParalleliserFactory: #wow this class has minimal functionality why do I even use it?\n def __init__(self, paralleliserInfo):\n self.paralleliserInfo = paralleliserInfo;\n def getParalleliser(self, inputs):\n \"\"\"\n inputs: an array of FunctionInputs\n \"\"\"\n return Paralleliser(inputs, self.paralleliserInfo);\n\nParalleliserState = util.enum(\n NOT_STARTED = 
\"NOT_STARTED\" \n , STARTED = \"STARTED\"\n , DONE = \"DONE\");\n\nclass FunctionInputs(object):\n \"\"\"\n Stores the inputs that will be used to call some function\n \"\"\"\n def __init__(self, args=[], kwargs={}):\n self.args = args;\n self.kwargs = kwargs;\n\nclass Paralleliser(object):\n \"\"\"\n takes an instance of paralleliserInfo (which contains info on how to kick off the jobs) and\n a series of inputs, and executes the jobs in parallel.\n \"\"\" \n def __init__(self, inputs, paralleliserInfo):\n \"\"\"\n inputs: an array of FunctionInputs\n \"\"\"\n self.doneCheckers = [];\n self.paralleliserState = ParalleliserState.NOT_STARTED;\n self.inputs = inputs;\n self.paralleliserInfo = paralleliserInfo;\n\n def execute(self): #wait interval is in seconds \n if (self.paralleliserState != ParalleliserState.NOT_STARTED):\n raise RuntimeError(\"Paralleliser was already started!\");\n self.paralleliserState = ParalleliserState.STARTED\n for anInput in self.inputs:\n self.doneCheckers.append(self.paralleliserInfo.parallelProcessKickerOffer.execute(*anInput.args, **anInput.kwargs)) \n isDone = False;\n numRunningJobs = self._numRunningJobs();\n #while (numRunningJobs != 0):\n # time.sleep(self.paralleliserInfo.waitInterval); \n # numRunningJobs = self._numRunningJobs();\n # print(\"Sleeping; number of running jobs is \"+str(numRunningJobs));\n self.paralleliserState = ParalleliserState.DONE;\n return self.paralleliserInfo.queue;\n \n def finish(self):\n for doneChecker in self.doneCheckers:\n doneChecker.finish();\n \n def _numRunningJobs(self): #for private use\n numRunningJobs = 0;\n for doneChecker in self.doneCheckers:\n if (doneChecker.isDone() != True):\n numRunningJobs += 1;\n return numRunningJobs; \n\n \n\n","sub_path":"parallelProcessing/parallelProcessing.py","file_name":"parallelProcessing.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"381678233","text":"# -*- coding: utf-8 -*-\n''' Vamos a crear el juego de adivinar un número.\nEl programa selecciona de forma aleatoria un número del 1 al 100 que el jugador\ntendrá que adivinar. El programa irá preguntando un número al jugador.\nSi el jugador dice uno mayor, el programa le informará de que el número oculto\nes menor (y al revés si dice uno menor). El jugador tiene 10 oportunidades\npara adivinar el número.\nCrea un nuevo proyecto para este programa en tu repositorio.\nDale una interfaz gráfica con easygui. Incluye alguna imagen en las ventanas.\nEscribe comentarios en el código que ayuden a otro programador\na modificar el programa.\nPuedes crear diferentes niveles en el juego dando menos oportunidades,\ncontando el tiempo, etc\n\n'''\nfrom easygui import *\n\n# Generamos número aleatorio\nimport random\n\nx = random.randint(1,100)\n\n\n# Se pide el nivel de dificultad\ndificultad = int(enterbox('Elige la dificultad\\nFácil --> 10 intentos (1)\\nMedio --> 8 intentos (2)\\nDifícil --> 5 intentos (3)'))\n\n# Según el nivel elegido, se le asigna a intentos un número diferente y se muestra un mensaje\nif dificultad == 1:\n intentos = 10\n msgbox('Has elegido la dificultad fácil. Tienes %d intentos.' % intentos)\nelif dificultad == 2:\n intentos = 8\n msgbox('Has elegido la dificultad media. Tienes %d intentos.' % intentos)\nelse:\n intentos = 5\n msgbox('Has elegido la dificultad difícil. Tienes %d intentos.' 
% intentos)\n\n\n# Inicializamos el contador a 1\ncont = 1\n\n# bucle para contabilizar el número de oportunidades\nwhile cont <= intentos:\n # Pedimos al usuario que introduzca el número a adivinar\n numero = int(enterbox('Introduce el número: '))\n # Comprobamos si el número introducido por el usuario es igual al generado aleatoriamente\n if numero > x:\n # Evaluamos el valor del contador para mostrar un mensaje u otro\n if cont <= (intentos-1):\n msgbox('Te has pasado. El número es menor.\\nLlevas %d intentos.' % cont)\n else:\n msgbox('Has fallado. Has agotado el número de intentos y no has acertado el número.\\nEl número era el %d' % x) \n elif numero < x:\n # Evaluamos el valor del contador para mostrar un mensaje u otro\n if cont <= (intentos -1):\n msgbox ('No has llegado. El número es mayor.\\nLlevas %d intentos.' % cont)\n else:\n msgbox('Has fallado. Has agotado el número de intentos y no has acertado el número.\\nEl número era el %d' % x) \n else:\n msgbox ('Has acertado!')\n break # en caso de acertar, salimos del while\n # Sumamos 1 al contador de oportunidades\n cont += 1\n\n","sub_path":"control_flujo/adivinar_numero.py","file_name":"adivinar_numero.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"503480892","text":"from imworks import *\nfrom skimage.io import imread\nfrom skimage.draw import circle\nfrom morph import *\n\ndef iris_detect(fname):\n\timg = bnw(fname)\n\t\n\tiris = zeros([img.shape[0],img.shape[1]])\n\t\n\tfor eh in range(img.shape[0]):\n\t for ew in range(img.shape[1]):\n\t magnitude = img[eh,ew]\n\t if magnitude <= 0.4706 and magnitude > 0.1176:\n\t rr,cc = circle(eh,ew,1)\n\t iris[rr,cc] = 1\n\t\n\tiris = dilate(iris,15)\n\tiris = erode(iris,8)\n\t\n\treturn iris\n","sub_path":"future_work/iris2.py","file_name":"iris2.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"68352970","text":"import torch\nfrom torch.utils.tensorboard import SummaryWriter\nimport numpy as np\nimport math\n# pip install tensorboard\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\n\nclass RandomDataset(Dataset):\n def __init__(self, size, length):\n self.len = length\n self.data = torch.randn(length, size)\n\n def __getitem__(self, index):\n return self.data[index]\n\n def __len__(self):\n return self.len\n\nclass Model(nn.Module):\n # Our model\n def __init__(self, input_size, output_size):\n super(Model, self).__init__()\n self.fc = nn.Linear(input_size, output_size)\n\n def forward(self, input):\n output = self.fc(input)\n print(\"\\tIn Model: input size\", input.size(),\n \"output size\", output.size())\n return output\n\n\ndef main():\n # Parameters and DataLoaders\n input_size = 5\n output_size = 2\n batch_size = 3000000\n data_size = 1000000000\n\n # Writer will output to ./runs/ directory by default\n writer = SummaryWriter()\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n if torch.cuda.is_available() :#and GPU_ID > -1:\n if torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n MULTI_GPU = True\n else:\n device = torch.device('cuda:0') #{}'.format(args.gpu_id))\n print('device : ',device)\n MULTI_GPU = False\n else:\n device = torch.device('cpu')\n print('device : ',device)\n\n rand_loader = DataLoader(dataset=RandomDataset(input_size, data_size),\n batch_size=batch_size, 
shuffle=True)\n \n model = Model(input_size, output_size)\n\n if torch.cuda.device_count() > 1:\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs\n model = nn.DataParallel(model)\n\n model.to(device)\n\n print('before loop ')\n step = 1\n for data in rand_loader:\n input = data.to(device)\n output = model(input)\n print(\"Outside: input size\", input.size(), \"output_size\", output.size())\n\n angle_rad = step * math.pi / 180\n writer.add_scalars('loss and accuracy', \n {'loss': math.sin(angle_rad), 'accuracy': math.cos(angle_rad)}, step)\n writer.flush()\n step+= 1\n\n writer.close()\n\n # for n_iter in range(100):\n # writer.add_scalar('Loss/train_check', np.random.random(), n_iter)\n # # writer.add_scalar('Loss/test', np.random.random(), n_iter)\n # # writer.add_scalar('Accuracy/train', np.random.random(), n_iter)\n # # writer.add_scalar('Accuracy/test', np.random.random(), n_iter)\n\n # for step in range(-360, 360):\n # angle_rad = step * math.pi / 180\n # writer.add_scalar('sin', math.sin(angle_rad), step)\n # writer.add_scalar('cos', math.cos(angle_rad), step)\n # writer.add_scalars('sin and cos', {'sin': math.sin(angle_rad), 'cos': math.cos(angle_rad)}, step)\n # writer.close()\n\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"utils/misc_util.py","file_name":"misc_util.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"34448119","text":"# 7. Write a Python program to accept a filename from the user and print the extension of that. \n# Sample filename : abc.java\n# Output : java\n\nvalue = input(\"[*] Enter Filename: \")\nfilename = value.split(\".\")[0]\next = value.split(\".\")[-1]\n\nprint(\"[+] Filename:\", filename)\nprint(\"[+] Extension:\", ext)\n","sub_path":"python_learning/w3resource/basics/basic_1-7.py","file_name":"basic_1-7.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"186453812","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport tkinter as tk\nfrom tkinter import filedialog\nfrom PIL import Image, ImageTk\n\ndef creat_windows():\n win = tk.Tk() # 创建窗口\n sw = win.winfo_screenwidth()\n sh = win.winfo_screenheight()\n ww, wh = 400, 450\n x, y = (sw-ww)/2, (sh-wh)/2\n win.geometry(\"%dx%d+%d+%d\"%(ww, wh, x, y-40)) # 居中放置窗口\n\n win.title('Hello World!') # 窗口命名\n\n bg1_open = Image.open(\"data/picture.jpg\").resize((300, 300))\n bg1 = ImageTk.PhotoImage(bg1_open)\n canvas = tk.Label(win, image=bg1)\n canvas.pack()\n\n var = tk.StringVar() # 创建变量文字\n tk.Label(win, textvariable=var, bg='#C1FFC1', font=('Arial', 21), width=20, height=2).pack()\n tk.Button(win, text='choose one picture to show', width=20, height=2, bg='#FF8C00', command=lambda:main(var, canvas), font=('Arial', 10)).pack()\n \n win.mainloop()\n\ndef main(var, canvas):\n var.set('Don\\'t touch me!')\n file_path = filedialog.askopenfilename()\n bg1_open = Image.open(file_path)\n bg1_resize = bg1_open.resize((280, 280))\n bg1 = ImageTk.PhotoImage(bg1_resize)\n canvas.configure(image=bg1)\n canvas.image = bg1\n\nif __name__ == '__main__':\n creat_windows()","sub_path":"simple_Tk.py","file_name":"simple_Tk.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"118837999","text":"# This script contains shared functionality for 
filtering\n# stopWords, unwanted characters, etc. from tweets. It\n# is called during createUserIndex, createWordIndex, and\n# createIdfFile.\n#\n\nimport re\nfrom PorterStemmer import PorterStemmer\n\ndef filterTweet(tweet, stopWords, stemmer):\n\n tweetTokens = re.split('[^a-zA-Z#]+', tweet)\n outputTokens = []\n\n for token in tweetTokens:\n\n # check for empty\n if not token:\n continue\n\n # lower\n token = token.lower()\n\n # check stop words\n if token in stopWords:\n continue\n\n # strip leading hash tag only, otherwise discard\n if token.startswith('#'):\n token = token[1:]\n if '#' in token:\n continue\n\n # stem\n token = stemmer.stem(token, 0, len(token)-1)\n\n # check stop words after stem too\n if token in stopWords or token == \"\":\n continue\n\n if len(token) >= 26:\n continue\n\n outputTokens.append(token)\n \n return outputTokens\n","sub_path":"topic_modelling/tf_idf/filterTweet.py","file_name":"filterTweet.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"225939683","text":"\"\"\"\nxplanet runner\n\n@author: Lorinc Czegledi\n\"\"\"\nfrom subprocess import call\nfrom shutil import copyfile\n#import re\n\nimport os\nimport sys\nimport pathlib\nimport urllib.request\nimport urllib.parse\nimport urllib.error\n\nfrom datetime import date\n\nfrom skyfield.api import now, JulianDate\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\ntry:\n from Utils import ensure_dir\nexcept:\n from .Utils import ensure_dir\n\ntry:\n from Settings import Settings\nexcept:\n from .Settings import Settings\n\ntry:\n from Config import Config\nexcept:\n from .Config import Config\n\ntry:\n from Images import Images\nexcept:\n from .Images import Images\n\ntry:\n from MarkerGenerator import Markers\nexcept:\n from .MarkerGenerator import Markers\n\n\nclass Xplanet(object):\n\n @staticmethod\n def prep_xplanet_config():\n wave = Settings().wave\n xplanet_planets_markers_file = Config().xplanet_planets_markers_file(wave)\n xplanet_stars_markers_file = Config().xplanet_stars_markers_file(wave)\n\n if pathlib.Path(xplanet_planets_markers_file).exists():\n os.remove(xplanet_planets_markers_file)\n if pathlib.Path(Config().xplanet_planets_arcs_file(wave)).exists():\n os.remove(Config().xplanet_planets_arcs_file(wave))\n copyfile(\n Config().xplanet_config_original,\n Config().xplanet_config_file(wave))\n replace = {}\n if not (Settings().draw_night and Settings().draw_earth):\n if Settings().draw_constellations_markers:\n replace.update({\"map=earth.jpg\": \"map=\" +\n Config().image_constellations.replace(\"./\", \"\")})\n replace.update({\"night_map=night.jpg\": \"night_map=\" +\n Config().image_constellations.replace(\"./\", \"\")})\n if Settings().draw_earth:\n replace.update({\"map=earth.jpg\": \"map=\" +\n Config().image_earth(wave).replace(\"./\", \"\")})\n else:\n replace.update({\"map=earth.jpg\": \"map=\"})\n\n if Settings().draw_night:\n replace.update({\"night_map=night.jpg\": \"night_map=\" +\n Config().image_night(wave).replace(\"./\", \"\")})\n else:\n replace.update({\"night_map=night.jpg\": \"night_map=\"})\n\n if Settings().draw_planets_wings_as_arcs:\n replace.update({\"marker_file=earth\": \"marker_file=\" +\n xplanet_planets_markers_file.replace(\"./\", \"\")})\n replace.update({\"arc_file=earth\": \"arc_file=\" +\n Config().xplanet_planets_arcs_file(wave).replace(\"./\", \"\")})\n else:\n replace.update({\"marker_file=earth\": \"marker_file=\"})\n 
replace.update({\"arc_file=earth\": \"arc_file=\"})\n\n if Settings().draw_planets_poles:\n replace.update({\"arc_file=poles\": \"arc_file=\" +\n Config().xplanet_planets_poles_file(wave).replace(\"./\", \"\")})\n else:\n replace.update({\"arc_file=poles\": \"arc_file=\"})\n\n if Settings().draw_clouds:\n replace.update({\"cloud_map=clouds.jpg\": \"cloud_map=\" +\n Config().image_cloud(wave).replace(\"./\", \"\")})\n else:\n replace.update({\"cloud_map=clouds.jpg\": \"#cloud_map=\"})\n\n if Settings().draw_earthquakes:\n replace.update({\"marker_file=quakes\": \"marker_file=\" +\n Config().xplanet_earthquakes_markers_file(wave).replace(\"./\", \"\")})\n else:\n replace.update({\"marker_file=quakes\": \"\"})\n\n if Settings().draw_volcanoes:\n replace.update({\"marker_file=volcanos\": \"marker_file=\" +\n Config().xplanet_volcanoes_markers_file(wave).replace(\"./\", \"\")})\n else:\n replace.update({\"marker_file=volcanos\": \"\"})\n\n if Settings().draw_tropic_arcs:\n replace.update({\"arc_file=tropics\": \"arc_file=tropics\"})\n else:\n replace.update({\"arc_file=tropics\": \"\"})\n\n if Settings().draw_star_markers or Settings().draw_constellations_markers:\n replace.update({\"marker_file=stars\": \"marker_file=\" +\n xplanet_stars_markers_file.replace(\"./\", \"\")})\n replace.update({\"arc_file=stars\": \"\"})\n #replace.update({\"arc_file=stars\" : \"arc_file=tropics\"})\n # prepare(wave.time,xplanet_volcanos_file,\\\n # Config().marker_xplanet_volcanos,Config().url_quakes)\n else:\n replace.update({\"marker_file=stars\": \"\"})\n replace.update({\"arc_file=stars\": \"\"})\n\n file = Config().xplanet_config_file(wave)\n\n with open(file) as opened:\n words = opened.read().split('\\n')\n opened.close()\n\n text = '\\n'.join(replace.get(y, y) for y in words)\n\n with open(file, 'w') as opened:\n opened.write(text)\n opened.close()\n\n @staticmethod\n def run_xplanet():\n wave = Settings().wave\n color = \"Blue\"\n hue = \"\"\n\n #\" -projection ortographic\"\n #\" -latitude 89.999 -longitude 330\"\n # V\" -date \"+Config().xplanet_date(wave)+\" \"+\\\n xplanet_config = \" -config \" + Config().xplanet_config_file(wave) +\\\n \" -idlewait 30 -hibernate 600 -wait 120\" +\\\n \" -pango -font Terminus -fontsize 12 -label -utclabel\" +\\\n \" -radius 42% -geometry 2560x1440\" +\\\n \" -background ~/.xplanet/bg/xplanet_bg2560_\" + color + hue + \".png \" +\\\n \" -glare 40\" +\\\n \" -num_times 1\" +\\\n \" -jdate \" + Config().xplanet_jdate(wave) +\\\n \" -output \" + Config().image_output_file(wave) + \" \"\n if not Settings().xplanet_old_projection:\n xplanet_config = xplanet_config +\\\n \" -latitude 89.999 -longitude 270\" +\\\n \" -projection azimuthal -north body\"\n elif Settings().xplanet_face_projection:\n xplanet_config = xplanet_config +\\\n \" -latitude 89.999 -longitude 270\" +\\\n \" -north body\"\n elif Settings().xplanet_old_projection:\n xplanet_config = xplanet_config +\\\n \" -latitude 0 -longitude 90\" +\\\n \" -rotate 180\"\n\n this = \"xplanet\" + xplanet_config\n if Settings().xplanet_print_commandline:\n print(this)\n call(this, shell=True)\n # print(xplanet_config)\n\n @staticmethod\n def prepare(time, f, target, url):\n ensure_dir(f)\n ensure_dir(target)\n p = pathlib.Path(target)\n if p.exists():\n os.remove(target)\n if (int(time.utc_datetime().strftime(\"%Y%m%d\")) > 20150118) \\\n and (int(time.utc_datetime().strftime(\"%Y%m%d\")) <=\n int(now().utc_datetime().strftime(\"%Y%m%d\"))):\n p = pathlib.Path(f)\n if not p.exists():\n with urllib.request.urlopen(url) 
as item:\n item = item.read().decode('utf-8')\n this = open(f, 'w')\n this.write(item)\n this.close()\n if int(time.utc_datetime().strftime(\"%H%M\")) == 1800:\n os.symlink(f, target)\n\n @staticmethod\n def setbg():\n wave = Settings().wave\n call(\n \"feh --bg-scale \" +\n Config().image_output_file(wave) +\n \"\",\n shell=True)\n #call(\"feh \" + Config().image_output_file(wave) + \"\", shell=True)\n\n @staticmethod\n def cleanup():\n \"\"\"\n Remove temporary files used for generating xplanet images\n \"\"\"\n wave = Settings().wave\n remove = []\n if Settings().remove_tmp_files_after_run:\n remove.extend([\n Config().xplanet_config_file(wave),\n Config().xplanet_stars_markers_file(wave),\n Config().xplanet_planets_markers_file(wave),\n Config().xplanet_planets_arcs_file(wave),\n Config().xplanet_planets_poles_file(wave),\n Config().xplanet_earthquakes_markers_file(wave),\n Config().xplanet_volcanoes_markers_file(wave),\n Config().image_earth(wave),\n Config().image_cloud(wave),\n Config().image_night(wave)\n # Config().initial_star_markers(wave)\n ])\n\n for this in remove:\n p = pathlib.Path(this)\n if p.exists():\n os.remove(this)\n\n @staticmethod\n def cleanup_run():\n \"\"\"\n Remove temporary files used for generating xplanet images\n \"\"\"\n remove = []\n\n if Settings().remove_month_files_after_run:\n for i in range(13):\n if not i == 0:\n time = JulianDate(utc=date(2016, i, 1)).tt\n remove.extend([\n Config().image_earth_month(time),\n Config().image_night_month(time)\n ])\n for this in remove:\n p = pathlib.Path(this)\n if p.exists():\n os.remove(this)\n\n @classmethod\n def main(cls):\n if True:\n Images().run_convert_images()\n cls.prep_xplanet_config()\n Markers().draw_planet_arcs()\n Markers().draw_planet_poles()\n Markers().draw_planet_markers()\n Markers().draw_star_markers()\n Markers().draw_earthquake_markers()\n Markers().draw_volcano_markers()\n cls.run_xplanet()\n cls.setbg()\n cls.cleanup()\n","sub_path":"libs/Xplanet.py","file_name":"Xplanet.py","file_ext":"py","file_size_in_byte":9621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"23932374","text":"# encoding: utf-8\nimport modules\nimport re\n\ndef init():\n add_hook('message', message)\n add_hook('loaded', loaded)\n try:\n m('webserver').add_handler('GET', show_webpage)\n except ModuleNotLoaded:\n pass\n\ndef loaded(module):\n if module == 'webserver':\n m('webserver').add_handler('GET', show_webpage)\n\ndef message(irc, channel, origin, command, args):\n if command == \"help\":\n if len(args) == 0:\n try:\n m('irc_helpers').message(irc, channel, \"%shelp\" % m('webserver').get_root_address())\n except ModuleNotLoaded:\n m('irc_helpers').message(irc, channel, \"The help page is currently unavailable.\")\n return\n search = args[0]\n help = m('datastore').query(\"SELECT module, usage, description FROM help WHERE command = ?\", search)\n if not help:\n m('irc_helpers').message(irc, channel, \"%s does not exist.\" % search, tag=\"Help\")\n return\n module, usage, description = help[0]\n try:\n m(module)\n except ModuleNotLoaded:\n m('irc_helpers').message(irc, channel, \"%s will not work, as the %s module is not loaded.\" % (search, module), tag='Help')\n return\n \n try:\n access = m('security').get_command_access(search)\n except ModuleNotLoaded:\n access = 1\n \n if search[0:4] == \"kb3:\":\n prefix = \"KB3 \"\n else:\n try:\n prefix = m('core').get_primary_prefix()\n except ModuleNotLoaded:\n prefix = ''\n \n m('irc_helpers').message(irc, channel, 
\"~B~U%s%s~U~B\" % (prefix, usage), tag=\"Help\")\n m('irc_helpers').message(irc, channel, u\"Module: ~B%s~B · Required access: ~BLevel %s~B\" % (module, access), tag=\"Help\")\n m('irc_helpers').message(irc, channel, description, tag=\"Help\")\n\ndef show_webpage(request):\n parts = request.path[1:].split('/')\n if len(parts) < 3:\n if len(parts) == 2:\n mods = [parts[1]]\n else:\n mods = modules.mods.keys()\n \n help = m('datastore').query(\"SELECT command, module, usage, description FROM help WHERE module IN (%s) ORDER BY module, command\" % ', '.join([\"\\\"%s\\\"\" % x for x in mods]))\n \n return render_index(mods, help)\n\ndef render_index(mods, help):\n mods.sort()\n output = \"\"\"\n\n \n Bot help\n \n \n \"\"\"\n if len(mods) > 1:\n output += \"\"\"\n

    Bot help

    \n

    There are %s commands in %s modules

    \n

    Modules

    \n
      \"\"\" % (len(help), len(mods))\n for mod in mods:\n output += \"\"\"\n
    • %s
    • \"\"\" % (mod, mod)\n output += \"\"\"\n
    \"\"\"\n else:\n module = mods[0]\n output += \"\"\"\n

    %s help

    \"\"\" % module\n if len(help) == 0:\n output += \"\"\"\n

    There are no commands in %s

    \"\"\" % module\n for entry in help:\n command, module, usage, description = entry\n description = re.sub(r\"((?:\\[|<)[a-z0-9]+?(?:\\]|>))\", r'\\1', description.replace(\"<\", \"<\"))\n if command[0:4] == \"kb3:\":\n prefix = \"KB3 \"\n else:\n try:\n prefix = m('core').get_primary_prefix()\n except ModuleNotLoaded:\n prefix = ''\n output += \"\"\"\n
    \n

    %s

    \n

    %s%s

    \n

    %s

    \n
    \"\"\" % (command, prefix, usage.replace(\"<\", \"<\"), description.replace(\"\\n\", \"
    \\n\"))\n output += \"\"\"\n \n\"\"\"\n \n return output\n","sub_path":"modules/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"515137107","text":"from xicam.plugins import ProcessingPlugin, Input, Output, InOut\nfrom pyFAI.detectors import Detector\nimport numpy as np\nfrom pyqtgraph import ROI\nfrom pyqtgraph.parametertree import parameterTypes\nfrom typing import List, Tuple\nfrom matplotlib.path import Path\n\n\nclass PolygonMask(ProcessingPlugin):\n name = 'Polygon Mask'\n\n ai = Input(\n description='PyFAI detector instance; the geometry of the detector''s inactive area will be masked.',\n type=Detector)\n polygon = Input(description='Polygon shape to mask (interior is masked)', type=List[Tuple[float, float]])\n mask = InOut(description='Mask array (1 is masked).', type=np.ndarray)\n\n def evaluate(self):\n if self.polygon.value is not None:\n # create path\n path = Path(np.vstack([self.polygon.value, self.polygon.value[-1]]),\n [Path.MOVETO] + [Path.LINETO] * (len(self.polygon.value) - 1) + [Path.CLOSEPOLY])\n\n # create a grid\n ny, nx = self.ai.value.detector.shape\n x = np.linspace(0, nx, nx)\n y = np.linspace(0, ny, ny)\n xgrid, ygrid = np.meshgrid(x, y)\n pixel_coordinates = np.c_[xgrid.ravel(), ygrid.ravel()]\n\n # find points within path\n self.mask.value = np.logical_or(self.mask.value,\n np.flipud(path.contains_points(pixel_coordinates).reshape(ny, nx)))\n\n @property\n def parameter(self):\n if not (hasattr(self, '_param') and self._param):\n instructions = parameterTypes.TextParameter(name='Instructions',\n value='Use the mouse to draw the mask. Click [Finish Mask] below when complete, or [Clear Selection] to start over.',\n readonly=True)\n clearmask = parameterTypes.ActionParameter(name='Clear Selection')\n finishmask = parameterTypes.ActionParameter(name='Finish Mask')\n\n children = [instructions, clearmask, finishmask]\n self._param = parameterTypes.GroupParameter(name='Polygon Mask', children=children)\n return self._param\n","sub_path":"xicam/SAXS/masking/polygon.py","file_name":"polygon.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"319599711","text":"# Второе задание\n\n# Реализовать функцию, принимающую несколько параметров, описывающих \n# данные пользователя: имя, фамилия, год рождения, город проживания, \n# email, телефон. Функция должна принимать параметры как именованные аргументы. \n# Реализовать вывод данных о пользователе одной строкой.\n\ndef base(name, surname, b_year, city, email, p_number):\n total = f\"{name} {surname}, {b_year} года рождения, проживающий в городе {city}. 
Электронная почта {email}, номер телефона {p_number}.\"\n return total\n\nprint(\"Второе задание\")\nprint(base(input(\"Введите имя: \"), input(\"Введите фамилию: \"), int(input(\"Введите год рождения: \")), input(\"Введите город проживания: \"), input(\"Введите электронную почту: \"), input(\"Введите номер телефона: \")))\n","sub_path":"les3/hw2.py","file_name":"hw2.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"511590355","text":"# Write a program to find N largest elements from a list.\r\nlst = []\r\nn = int(input(\"Enter No: \"))\r\nprint(\"Enter Value: \")\r\n\r\nfor i in range(n):\r\n\ti = int(input(\"\\t\"))\r\n\tlst.append(i)\r\n\r\nprint(\"List data = \", lst)\r\n\r\ndef largest(lst):\r\n\tmaxval = lst[0]\r\n\tfor i in lst:\r\n\t\tif maxval < i:\r\n\t\t\tmaxval = i\r\n\tprint(\"Largest Value of list = \",maxval)\r\n\r\nlargest(lst)","sub_path":"p1_25.py","file_name":"p1_25.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"210802186","text":"def hasSingleCycle(array):\r\n numElementsVisited=0\r\n currentIdx=0\r\n while(numElementsVisited0 and currentIdx==0):\r\n return False\r\n numElementsVisited+=1\r\n currentIdx=getCurrentIdx(currentIdx,array)\r\n return currentIdx==0\r\ndef getCurrentIdx(currentIdx,array):\r\n jump=array[currentIdx]\r\n nextIdx=(currentIdx+jump)%len(array)\r\n if(nextIdx>=0):\r\n return nextIdx\r\n else:\r\n return nextIdx+len(array)\r\narray=[2,3,1,-4,-4,2]\r\nprint(hasSingleCycle(array))\r\n \r\n \r\n","sub_path":"single_cycle_check_graphs.py","file_name":"single_cycle_check_graphs.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"85085558","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 29 15:33:39 2018\n\n@author: cwalk\n\"\"\"\nfrom bs4 import BeautifulSoup \nimport requests\nimport time\n\nurl='https://bj.58.com/pingbandiannao/35353501847632x.shtml'\n\ndef get_links_from(who_sells=0):\n urls=[]\n list_view='https://bj.58.com/pbdn/{}/'.format(str(who_sells))\n wb_data=requests.get(list_view)\n soup=BeautifulSoup(wb_data.text,'lxml')\n for link in soup.select('td.t > a.t'):\n urls.append(link.get('href').split('?')[0]) \n #print(urls)\n return urls\n\ndef get_views_from(url):\n id=url.split('/')[-1].strip('x.shtml')\n #print(id)\n api='https://jst1.58.com/counter?infoid={}&userid=&uname=&sid=0&lid=0&px=0&cfpath='.format(id)\n js=requests.get(api)\n #js.text为返回结果 respones\n #print(js.text)\n views=js.text.split('=')[-1]\n #print(views)\n return views\n \n\ndef get_item_info(who_sells=1):\n \n urls=get_links_from(who_sells)\n for url in urls:\n wb_data=requests.get(url)\n \n soup=BeautifulSoup(wb_data.text,'lxml')\n data={\n 'title':soup.title.text,\n 'price':soup.select('.price')[0].text,\n 'area':list(soup.select('.c_25d')[0].stripped_strings),\n 'date':soup.select('.time')[0].text,\n 'cate':'个人' if who_sells==0 else '商家',\n 'views':get_views_from(url)\n \n \n }\n print(data)\n\n#print(soup)\n#get_links_from()\n#get_views_from(url)\n \nget_item_info()","sub_path":"2018第3季度/9/9-29/v1.0/crawler2.py","file_name":"crawler2.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"75995690","text":"tablica=[]\nx=[]\ny=0\ndl1=0\ntab1=0\ntab2=0\ndl2=0\nm=0\nm1=0\nmx=0\nm1x=0\nc = int(input(\"Jesli chcesz zakonczyc robienie tabeli wpisz 0\"'\\n'\"Podaj liczbe ktora mam dodac do listy: \"))\nwhile c!=0:\n while c!=0:\n tablica.append(c)\n c = int(input(\"Podaj liczbe ktora mam dodac do listy: \"))\n x+=[tablica]\n tablica = []\n c = int(input(\"Jesli chcesz zakonczyc robienie tabeli wpisz 0\"'\\n'\"Podaj liczbe ktora mam dodac do listy: \"))\n y+=1\n\nfor h in range(y):\n dl1=len(x[h])\n for g in range(dl1):\n tab1+=x[h][g]\n if h+1tab2:\n m=h\n mx=tab1\n if tab2>tab1:\n m1x=tab2\n tab2=0\n tab1=0\n m1=h+1\n else:\n tab2=0\n tab1=0\nif mx>m1x:\n print(x[m])\nelse:\n print(x[m1])","sub_path":"lab06/cw_05.py","file_name":"cw_05.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"359937196","text":"'''File that manage the registration of products on user asking'''\n\nimport sys\nimport mysql.connector\n\nfrom conf import SQL_LOG, SQL_PWD\n\n\ndef reg(reg_id):\n \"\"\"Register products the user want to conserve\"\"\"\n try:\n conn = mysql.connector.connect(host=\"localhost\", user=SQL_LOG,\\\n password=SQL_PWD, database=\"P5\")\n conn.autocommit = True\n cursor = conn.cursor()\n cursor.execute(\"SELECT name, Categories_id, description, shop, \\\n url, nutrition_grade FROM Products \\\n WHERE id = %d\" % (reg_id))\n data = cursor.fetchall()\n for row in data:\n name = row[0]\n cat_id = row[1]\n description = row[2]\n stores = row[3]\n url = row[4]\n nutrition_grade = row[5]\n atts = (name, reg_id, cat_id, description, stores, url, \\\n nutrition_grade)\n cursor.execute(\"INSERT IGNORE Registred_products (name, id, \\\n Categories_id, description, shop, url, \\\n nutrition_grade) VALUES (%s, %s, %s, %s, %s, %s, \\\n %s)\", (atts))\n except mysql.connector.errors.InterfaceError as error:\n print(\"Error %d: %s\" % (error.args[0], error.args[1]))\n sys.exit(1)\n\n\ndef display_reg():\n \"\"\"Display register products\"\"\"\n try:\n conn = mysql.connector.connect(host=\"localhost\", user=SQL_LOG,\\\n password=SQL_PWD, database=\"P5\")\n conn.autocommit = True\n cursor = conn.cursor()\n cursor.execute(\"SELECT name, shop, url, description FROM \\\n Registred_products\")\n data = cursor.fetchall()\n count = 1\n for row in data:\n print(\"\\nProduit \", count, \"\\nNom :\", row[0], \"\\nMagasin(s) :\", \\\n row[1], \"\\nURL :\", row[2], \"\\nDescription :\", row[3], \\\n \"\\n\\n\")\n count += 1\n except mysql.connector.errors.InterfaceError as error:\n print(\"Error %d: %s\" % (error.args[0], error.args[1]))\n sys.exit(1)\n","sub_path":"reg_products.py","file_name":"reg_products.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"188792352","text":"\"\"\"\nDRS De-Registration resource package.\nCopyright (c) 2018 Qualcomm Technologies, Inc.\n All rights reserved.\n Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the\n limitations in the disclaimer below) provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n disclaimer.\n * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following\n disclaimer in the documentation and/or other materials provided with 
the distribution.\n * Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote\n products derived from this software without specific prior written permission.\n NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY\n THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nimport json\nimport uuid\n\nfrom flask import Response, request\nfrom flask_restful import Resource\nfrom marshmallow import ValidationError\n\nfrom app import app, db\nfrom app.api.v1.helpers.error_handlers import DEREG_NOT_FOUND_MSG\nfrom app.api.v1.helpers.response import MIME_TYPES, CODES\nfrom app.api.v1.helpers.utilities import Utilities\nfrom app.api.v1.models.deregdetails import DeRegDetails\nfrom app.api.v1.models.deregdevice import DeRegDevice\nfrom app.api.v1.models.deregdocuments import DeRegDocuments\nfrom app.api.v1.models.status import Status\nfrom app.api.v1.schema.deregdetails import DeRegDetailsSchema\nfrom app.api.v1.schema.deregdetailsupdate import DeRegDetailsUpdateSchema\nfrom app.api.v1.schema.deregdevice import DeRegDeviceSchema\nfrom app.api.v1.schema.deregdocuments import DeRegDocumentsSchema\n\n\nclass DeRegistrationRoutes(Resource):\n \"\"\"Class for handling De-Registration Request Routes.\"\"\"\n\n @staticmethod\n def get(dereg_id=None):\n \"\"\"GET method handler,\n returns a deregistration request based on request id.\n \"\"\"\n schema = DeRegDetailsSchema()\n try:\n if dereg_id:\n if dereg_id.isdigit() and DeRegDetails.exists(dereg_id):\n response = DeRegDetails.get_by_id(dereg_id)\n response = schema.dump(response).data\n else:\n response = DEREG_NOT_FOUND_MSG\n\n else:\n response = DeRegDetails.get_all()\n response = schema.dump(response, many=True).data\n return Response(json.dumps(response), status=CODES.get(\"OK\"),\n mimetype=MIME_TYPES.get(\"APPLICATION_JSON\"))\n except Exception as e:\n app.logger.exception(e)\n error = {\n 'message': ['Failed to retrieve response, please try later']\n }\n return Response(json.dumps(error), status=CODES.get('INTERNAL_SERVER_ERROR'),\n mimetype=MIME_TYPES.get('APPLICATION_JSON'))\n finally:\n db.session.close()\n\n @staticmethod\n def post():\n \"\"\"POST method handler,\n Create/Submit a new De-Registration details.\n \"\"\"\n tracking_id = uuid.uuid4()\n try:\n schema = DeRegDetailsSchema()\n args = DeRegDetails.curate_args(request)\n file = request.files.get('file')\n validation_errors = schema.validate(args)\n if validation_errors:\n return Response(json.dumps(validation_errors), status=CODES.get(\"UNPROCESSABLE_ENTITY\"),\n mimetype=MIME_TYPES.get(\"APPLICATION_JSON\"))\n response = Utilities.store_file(file, tracking_id)\n if response:\n return Response(json.dumps(response), status=CODES.get(\"UNPROCESSABLE_ENTITY\"),\n 
mimetype=MIME_TYPES.get(\"APPLICATION_JSON\"))\n response = Utilities.process_de_reg_file(file.filename, tracking_id, args)\n errored = 'device_count' in response or 'invalid_imeis' in response or \\\n 'duplicate_imeis' in response or 'invalid_format' in response\n if not errored:\n gsma_response = Utilities.get_device_details_by_tac(response)\n response = DeRegDetails.create(args, tracking_id)\n db.session.commit()\n response = schema.dump(response, many=False).data\n response = {'request': response, 'devices': gsma_response}\n return Response(json.dumps(response), status=CODES.get(\"OK\"),\n mimetype=MIME_TYPES.get(\"APPLICATION_JSON\"))\n else:\n return Response(json.dumps(response), status=CODES.get(\"UNPROCESSABLE_ENTITY\"),\n mimetype=MIME_TYPES.get(\"APPLICATION_JSON\"))\n except Exception as e:\n db.session.rollback()\n Utilities.remove_directory(tracking_id)\n app.logger.exception(e)\n\n data = {\n 'message': ['Registration request failed, check upload path or database connection']\n }\n\n return Response(json.dumps(data), status=CODES.get('INTERNAL_SERVER_ERROR'),\n mimetype=MIME_TYPES.get('APPLICATION_JSON'))\n finally:\n db.session.close()\n\n @staticmethod\n def put():\n \"\"\"PUT method handler,\n updates existing de registration request.\n \"\"\"\n dereg_id = request.form.to_dict().get('dereg_id', None)\n try:\n schema = DeRegDetailsUpdateSchema()\n if dereg_id and dereg_id.isdigit() and DeRegDetails.exists(dereg_id):\n dreg_details = DeRegDetails.get_by_id(dereg_id)\n else:\n return Response(json.dumps(DEREG_NOT_FOUND_MSG), status=CODES.get(\"OK\"),\n mimetype=MIME_TYPES.get(\"APPLICATION_JSON\"))\n args = DeRegDetails.curate_args(request)\n file = request.files.get('file')\n tracking_id = dreg_details.tracking_id\n if dreg_details:\n args.update({'status': dreg_details.status, 'processing_status': dreg_details.processing_status,\n 'report_status': dreg_details.report_status})\n validation_errors = schema.validate(args)\n if validation_errors:\n return Response(json.dumps(validation_errors), status=CODES.get(\"UNPROCESSABLE_ENTITY\"),\n mimetype=MIME_TYPES.get(\"APPLICATION_JSON\"))\n if args.get('close_request', None) == 'True':\n response = DeRegDetails.close(dreg_details)\n if isinstance(response, dict):\n return Response(json.dumps(response), status=CODES.get(\"UNPROCESSABLE_ENTITY\"),\n mimetype=MIME_TYPES.get(\"APPLICATION_JSON\"))\n else:\n response = schema.dump(response, many=False).data\n return Response(json.dumps(response), status=CODES.get(\"OK\"),\n mimetype=MIME_TYPES.get(\"APPLICATION_JSON\"))\n if file:\n response = Utilities.store_file(file, tracking_id)\n if response:\n return Response(json.dumps(response), status=CODES.get(\"UNPROCESSABLE_ENTITY\"),\n mimetype=MIME_TYPES.get(\"APPLICATION_JSON\"))\n filename = file.filename\n elif dreg_details.status == Status.get_status_id('New Request'):\n filename = dreg_details.file\n args.update({'device_count': dreg_details.device_count})\n else:\n filename = None\n\n if filename:\n response = Utilities.process_de_reg_file(filename, tracking_id, args)\n errored = 'device_count' in response or 'invalid_imeis' in response or \\\n 'duplicate_imeis' in response or 'invalid_format' in response\n if not errored:\n gsma_response = Utilities.get_device_details_by_tac(response)\n response = DeRegDetails.update(args, dreg_details, file=True)\n response = schema.dump(response, many=False).data\n response = {'request': response, 'devices': gsma_response}\n return Response(json.dumps(response), status=CODES.get(\"OK\"),\n 
mimetype=MIME_TYPES.get(\"APPLICATION_JSON\"))\n else:\n return Response(json.dumps(response), status=CODES.get(\"UNPROCESSABLE_ENTITY\"),\n mimetype=MIME_TYPES.get(\"APPLICATION_JSON\"))\n else:\n response = DeRegDetails.update(args, dreg_details, file=False)\n response = schema.dump(response, many=False).data\n return Response(json.dumps(response), status=CODES.get(\"OK\"),\n mimetype=MIME_TYPES.get(\"APPLICATION_JSON\"))\n except Exception as e:\n db.session.rollback()\n app.logger.exception(e)\n\n data = {\n 'message': ['Registration request failed, check upload path or database connection']\n }\n\n return Response(json.dumps(data), status=CODES.get('INTERNAL_SERVER_ERROR'),\n mimetype=MIME_TYPES.get('APPLICATION_JSON'))\n finally:\n db.session.close()\n\n\nclass DeRegSectionRoutes(Resource):\n \"\"\"Class for handling De-Registration Section routes.\"\"\"\n\n @staticmethod\n def get(dereg_id):\n \"\"\"GET method handler, to return all section of a request.\"\"\"\n try:\n if not dereg_id.isdigit() or not DeRegDetails.exists(dereg_id):\n return Response(json.dumps(DEREG_NOT_FOUND_MSG), status=CODES.get(\"UNPROCESSABLE_ENTITY\"),\n mimetype=MIME_TYPES.get(\"APPLICATION_JSON\"))\n\n dereg_details = DeRegDetails.get_by_id(dereg_id)\n dereg_schema = DeRegDetailsSchema()\n doc_schema = DeRegDocumentsSchema()\n device_schema = DeRegDeviceSchema()\n\n dereg_devices = DeRegDevice.get_devices_by_dereg_id(dereg_id)\n dereg_documents = DeRegDocuments.get_by_reg_id(dereg_id)\n\n deregistration_data = dereg_schema.dump(dereg_details).data\n device_data = device_schema.dump(dereg_devices, many=True).data\n document_data = doc_schema.dump(dereg_documents, many=True).data\n\n response = {\n 'dereg_details': deregistration_data,\n 'dereg_device': device_data,\n 'dereg_docs': document_data\n }\n\n return Response(json.dumps(response), status=CODES.get(\"OK\"),\n mimetype=MIME_TYPES.get(\"APPLICATION_JSON\"))\n except Exception as e:\n app.logger.exception(e)\n\n data = {\n 'message': ['De-Registration request failed, check upload path or database connection']\n }\n\n return Response(json.dumps(data), status=CODES.get('INTERNAL_SERVER_ERROR'),\n mimetype=MIME_TYPES.get('APPLICATION_JSON'))\n finally:\n db.session.close()\n","sub_path":"app/api/v1/resources/deregdetails.py","file_name":"deregdetails.py","file_ext":"py","file_size_in_byte":12064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"283090111","text":"#!/usr/bin/env python\n#coding:utf-8\n\"\"\"\n Author: Wusf --\n Purpose: \n Created: 2016/1/13\n\"\"\"\n\n#----------------------------------------------------------------------\ndef Calc(cur,acctPeriods,p,s,date,stkCode):\n \"\"\"\"\"\"\n \"\"\"\n 计算Past EPS forecasts error\n cur:内存数据库cursor\n date:查询当日的日期和数据有效的最早日期\n stkCode:股票代码\n \"\"\" \n begDate = date[0]\n endDate = date[1]\n \n sql = \"\"\"\n SELECT AcctPeriod,NetProfits2Parent\n FROM FinancialPITData\n WHERE StkCode='{}'\n AND SUBSTR(AcctPeriod,5,4)='1231'\n AND DeclareDate<='{}'\n ORDER BY DeclareDate DESC LIMIT 1\n \"\"\"\n cur.execute(sql.format(stkCode,begDate,endDate))\n content = cur.fetchone()\n if content==None:\n return None\n if content[0]==None or content[1]==None:\n return None\n d = content[0]\n v = content[1]\n \n sql = \"\"\"\n SELECT DeclareDate,ForecastThisYearEPS\n FROM ForecastPITData\n WHERE StkCode='{}'\n AND AcctPeriod='{}'\n AND DeclareDate<='{}'\n ORDER BY DeclareDate DESC LIMIT 1\n \"\"\"\n cur.execute(sql.format(stkCode,d,d[0:4]+endDate[4:]))\n content = 
cur.fetchone()\n if content==None:\n return None\n if content[1]==None:\n return None\n d = content[0]\n f = content[1]\n \n sql = \"\"\"\n SELECT TotCap\n FROM MktCap\n WHERE StkCode='{}'\n AND Date<='{}'\n ORDER BY Date DESC LIMIT 1\n \"\"\"\n cur.execute(sql.format(stkCode,d))\n content = cur.fetchone()\n if content==None:\n return None \n s = content[0] \n \n \n \n #print v/s/10000,f,s,p\n return (v/s/10000-f)/p","sub_path":"FactorModel/backups/ComputeFundamentalFactors/FactorAlgos/Growth/PastForecastError.py","file_name":"PastForecastError.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"154759098","text":"from django.shortcuts import render, redirect\n\nfrom .models import *\nfrom django.contrib import messages\n# from datetime import datetime\nimport bcrypt\n\n# Create your views here.\ndef current_user(request):\n\treturn User.objects.get(id = request.session['user_id'])\n\ndef index(request):\n\treturn render(request, 'QuoteDash_app/index.html')\n\ndef register(request):\n\tcheck = User.objects.validateUser(request.POST)\n\tif request.method != 'POST':\n\t\treturn redirect('/')\n\tif check[0] == False:\n\t\tfor error in check[1]:\n\t\t\tmessages.add_message(request, messages.INFO, error, extra_tags=\"registration\")\n\t\t\treturn redirect('/')\n\tif check[0] == True:\n\t\t#has password\n\t\thashed_pw = bcrypt.hashpw(request.POST.get('password').encode(), bcrypt.gensalt())\n\n\t\t#create user\n\t\tuser = User.objects.create(\n\t\t\tname = request.POST.get('name'),\n\t\t\talias = request.POST.get('alias'),\n\t\t\temail = request.POST.get('email'),\t\n\t\t\tpassword = hashed_pw,\n\t\t\tbirthdate = request.POST.get('birthdate')\n\t\t)\n\n\t\t#add user to session, logging them in\n\t\trequest.session['user_id'] = user.id\n\t\t#route to quotes page\n\t\treturn redirect('/quotes')\n\ndef login(request):\n\tif request.method != 'POST':\n\t\treturn redirect('/')\n\t#find user\n\tuser = User.objects.filter(email = request.POST.get('email')).first()\n\n\t#Check user credentials\n\t#add them to session and log in or add error message and route to home page\n\tif user and bcrypt.checkpw(request.POST.get('password').encode(), user.password.encode()):\n\t\trequest.session['user_id'] = user.id\n\t\treturn redirect('/quotes')\n\telse: \n\t\tmessages.add_message(request, messages.INFO, 'invalid credentials', extra_tags=\"login\")\n\t\treturn redirect('/')\n\treturn redirect('/quotes')\n\ndef logout(request):\n\t\trequest.session.clear()\n\t\treturn redirect('/')\n\ndef quotes(request):\n\tuser = current_user(request)\n\n\tcontext = {\n\t\t'user': user,\n\t\t'quotable_quotes': Quote.objects.exclude(likes = user),\n\t\t'likes': user.likes.all()\n\t}\n\n\treturn render(request, 'QuoteDash_app/quotes.html', context)\n\n\ndef create(request):\n\tif request.method != 'POST':\n\t\treturn redirect('/')\n\t##adds item to quotes\n\tcheck = Quote.objects.validateQuote(request.POST)\n\tif request.method != 'POST':\n\t\treturn redirect('/quotes')\n\tif check[0] == False:\n\t\tfor error in check[1]:\n\t\t\tmessages.add_message(request, messages.INFO, error, extra_tags=\"add_item\")\n\t\t\treturn redirect('/quotes')\n\tif check[0] == True:\n\n\t\tquote = Quote.objects.create(\n\t\t\tcontent = request.POST.get('content'),\n\t\t\tposter = current_user(request),\n\t\t\tauthor = request.POST.get('author')\n\t\t\t)\n\n\t\treturn redirect('/quotes')\n\treturn redirect('/quotes')\n\ndef add_like(request, id):\n\n\tuser = 
current_user(request)\n\tlike = Quote.objects.get(id=id)\n\n\tuser.likes.add(like)\n\n\treturn redirect('/quotes')\n\ndef remove_like(request, id):\n\n\tuser = current_user(request)\n\tlike = Quote.objects.get(id=id)\n\n\tuser.likes.remove(like)\n\n\treturn redirect('/quotes')\n\ndef show_user(request, id):\n\n\tuser = User.objects.get(id = id)\n\tcontext = {\n\t\t'user': user,\n\t\t'likes': user.likes.all()\t\t\n\t}\n\treturn render(request, 'QuoteDash_app/user.html', context)\n","sub_path":"QuoteDash/apps/QuoteDash_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"420795820","text":"#!/usr/bin/env python3\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.ensemble import IsolationForest\nfrom joblib import dump\nfrom imblearn.over_sampling import ADASYN, SMOTE\n\n\ndef get_train_data():\n data = pd.read_csv(\"data/train_final.csv\");\n mat = data.values\n X = mat[:, 2:]\n y = mat[:, 1:2]\n return (X,np.ravel(y))\n\ndef get_test_data():\n data = pd.read_csv(\"data/test_final.csv\")\n mat = data.values\n X = mat[:, 1:]\n return X\n\ndef write_predictions(soft_predictions, model_name, train=False):\n data = pd.read_csv(\"data/sample-submission.csv\")\n if not train:\n submission = data.drop(columns=['Y'])\n submission.insert(1, 'Y', soft_predictions)\n submission.to_csv(path_or_buf=\"predictions/\" + model_name + \".csv\", index=False)\n else:\n df = pd.DataFrame(data={'Y' : np.ravel(soft_predictions)}, dtype='float64')\n df.to_csv(path_or_buf=\"predictions/\" + model_name + \"_train.csv\", index=True)\n\n\ndef write_model(model, model_name):\n dump(model, \"models/\" + model_name + \".joblib\")\n\ndef scale(X):\n return preprocessing.scale(X)\n\ndef outlier_removal(X, y):\n X_aug = np.append(X, y.reshape(y.shape[0], 1), axis=1)\n model = IsolationForest(behaviour='new', contamination=\"auto\")\n ones = np.empty((0, X.shape[1] + 1))\n zeros = np.empty((0, X.shape[1] + 1))\n for i in range(X.shape[0]):\n x_i = np.append(X[i], np.array([y[i]]), axis=0)\n if y[i] == 1:\n ones = np.append(ones, np.array([x_i]), axis=0)\n else:\n zeros = np.append(zeros, np.array([x_i]), axis=0)\n outlier_ones = model.fit_predict(ones)\n outlier_zeros = model.fit_predict(zeros)\n removed_ones = np.array([ones[i] for i in range(outlier_ones.shape[0]) if outlier_ones[i] == 1 ])\n removed_zeros = np.array([zeros[i] for i in range(outlier_zeros.shape[0]) if outlier_zeros[i] == 1])\n X_aug_removed = np.append(removed_ones, removed_zeros, axis=0)\n print(X_aug_removed.shape)\n X_new = X_aug_removed[:, :X_aug_removed.shape[1] - 1]\n y_new = np.ravel(X_aug_removed[:, X_aug_removed.shape[1] - 1:])\n print(X_new)\n print(y_new)\n return X_new, y_new\n\ndef undersampled_data(proportion):\n data = pd.read_csv(\"data/train_final.csv\")\n zeros = data[data.Y == 0].index\n ones = data[data.Y == 1].index\n print(len(zeros))\n print(len(ones))\n print(len(zeros) + len(ones))\n random_ones = np.random.choice(ones, round(proportion * len(zeros)), replace=False)\n mat = np.append(data.loc[zeros], data.loc[random_ones], axis=0)\n X = mat[:, 2:]\n y = mat[:, 1:2]\n return (X,np.ravel(y))\n\ndef oversampling_SMOTE():\n resampler = SMOTE(random_state=42, n_neighbors=10)\n X, y = get_train_data()\n return resampler.fit_resample(X, y)\n\ndef oversampling_ADASYN():\n resampler = ADASYN(n_neighbors=10)\n X, y = get_train_data()\n return 
resampler.fit_resample(X, y)\n\ndef one_hot_encoding(features):\n data = pd.read_csv(\"data/train_final.csv\")\n one_hot_data = pd.get_dummies(data, columns=features)\n mat = one_hot_data.values\n X = mat[:, 2:]\n y = mat[:, 1:2]\n return (X,np.ravel(y))\n","sub_path":"extract_data.py","file_name":"extract_data.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"88748870","text":"from odoo import api, fields, models\n\n\nclass ThemeProductBrand(models.Model):\n _name = 'theme.product.brand'\n _inherit = ['website.published.multi.mixin']\n _order = 'name'\n _description= 'Product Brand'\n\n name = fields.Char('Brand Name', required=True,translate=True)\n description = fields.Text('Description', translate=True)\n website_id = fields.Many2one(\"website\", string=\"Website\")\n partner_id = fields.Many2one(\n 'res.partner',\n string='Partner',\n help='Select a partner for this brand if any.',\n ondelete='restrict'\n )\n logo = fields.Binary('Logo File')\n product_ids = fields.One2many(\n 'product.template',\n 'product_brand_id',\n string='Brand Products',\n ) \n products_count = fields.Integer(\n string='Number of products',\n compute='_get_products_count',\n )\n brand_weight = fields.Integer(\n string='Brand Weight')\n \n \n @api.depends('product_ids')\n def _get_products_count(self):\n for brand in self:\n brand.products_count = len(brand.product_ids)","sub_path":"theme_outline/models/product_brands.py","file_name":"product_brands.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"353545950","text":"N = int(input())\nA = list(map(int, input().split()))\n\nleft = [10**18] * (N + 1)\nright = [10**18] * (N + 1)\n\nnow = 0\nleft[0] = 0\nfor i, a in enumerate(A, start=1):\n now += a\n left[i] = left[i - 1] + now\n now += 1\n\nnow = 0\nright[0] = 0\nfor i, a in enumerate(A[:: -1], start=1):\n now += a\n right[i] = right[i - 1] + now\n now += 1\nright = right[::-1]\n\nans = min(left[-1], right[0])\nfor mid in range(N + 1):\n if mid % 2 == 1:\n ans = min(ans, left[mid - 1] + right[mid])\nprint(ans)\n","sub_path":"AtCoder/other/code_festival_2015_あさぷろ_hard/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"246856220","text":"from ..base import *\nfrom .vert import MergedVertex\nfrom .edge import MergedEdge\n\n\nclass PolygonEditBase(BaseObject):\n\n def detach_polygons(self):\n\n selection_ids = self._selected_subobj_ids\n selected_poly_ids = selection_ids[\"poly\"]\n\n if not selected_poly_ids:\n return False\n\n selected_vert_ids = selection_ids[\"vert\"]\n merged_verts = self._merged_verts\n merged_edges = self._merged_edges\n edges_to_split = []\n new_merged_verts = {}\n polys = self._subobjs[\"poly\"]\n selected_polys = (polys[i] for i in selected_poly_ids)\n\n change = False\n update_verts_to_transf = False\n\n for poly in selected_polys:\n\n for vert_id in poly.get_vertex_ids():\n\n merged_vert = merged_verts[vert_id]\n\n if merged_vert in new_merged_verts:\n new_merged_vert = new_merged_verts[merged_vert]\n elif len(merged_vert) == 1:\n continue\n else:\n new_merged_vert = MergedVertex(self)\n new_merged_verts[merged_vert] = new_merged_vert\n\n merged_vert.remove(vert_id)\n new_merged_vert.append(vert_id)\n merged_verts[vert_id] = new_merged_vert\n\n change = True\n\n if vert_id in 
selected_vert_ids:\n update_verts_to_transf = True\n\n for edge_id in poly.get_edge_ids():\n\n merged_edge = merged_edges[edge_id]\n\n if merged_edge in edges_to_split:\n edges_to_split.remove(merged_edge)\n else:\n edges_to_split.append(merged_edge)\n\n if change:\n\n selected_edge_ids = selection_ids[\"edge\"]\n update_edges_to_transf = False\n\n for merged_edge in edges_to_split:\n\n edge_id = merged_edge[0]\n new_merged_edge = MergedEdge(self)\n new_merged_edge.append(edge_id)\n merged_edge.remove(edge_id)\n merged_edges[edge_id] = new_merged_edge\n\n if edge_id in selected_edge_ids:\n update_edges_to_transf = True\n\n if update_verts_to_transf:\n self._update_verts_to_transform(\"vert\")\n\n if update_edges_to_transf:\n self._update_verts_to_transform(\"edge\")\n\n self._update_verts_to_transform(\"poly\")\n\n return change\n\n\nclass PolygonEditManager(BaseObject):\n\n def setup(self):\n\n Mgr.add_interface_updater(\n \"uv_window\", \"poly_detach\", self.__detach_polygons)\n\n def __detach_polygons(self):\n\n selection = self._selections[self._uv_set_id][\"poly\"]\n uv_data_objs = selection.get_uv_data_objects()\n\n for data_obj in uv_data_objs:\n data_obj.detach_polygons()\n","sub_path":"src/core/uv_edit/data/poly_edit.py","file_name":"poly_edit.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"629681802","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Mar 21 11:47:19 2021\r\n\r\n@author: sukek\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport mpl_toolkits.mplot3d.art3d as art3d\r\n\r\n\r\n# classレンズ 機能 始点と入射ベクトルを入れた際に出射点と屈折ベクトルを出力\r\n\r\n# レンズ情報(2枚張り合わせまでを想定)\r\n# 境界面の数(最大3)、半径(最大3)、センタ位置(最大3)、屈折率(外、レンズ1、レンズ2) 交点計算は最低2回\r\n# 入出力はリストとし、内部はndarrayで計算\r\n\r\nclass Lens:\r\n def __init__(self, point_start, vec_in, r0, r1, point_sc0, point_sc1, n0, n1, \r\n num_planes=1, r2=None, point_sc2=[None,None,None], n2=None):\r\n # 変数初期化\r\n \r\n # レンズ情報\r\n self.r = [r0, r1, r2] #曲面の半径 入射面, 出射面, 出射面2(張り合わせ用)\r\n self.point_sc = np.stack((np.array(point_sc0), np.array(point_sc1), np.array(point_sc2, dtype = float)), \r\n axis = 0,) \r\n #半径中心の座標 入射面, 出射面, 出射面2(張り合わせ用)\r\n self.n = [n0, n1, n2] #屈折率 環境, レンズ1, レンズ2(張り合わせ用)\r\n self.num_planes = num_planes # 出口面の枚数\r\n\r\n # 屈折用計算情報\r\n # self.point_start = np.array(point_start, dtype = float)\r\n # self.vec_in = np.array(vec_in, dtype = float)\r\n\r\n self.point_start_calc = np.full((3,3), None, dtype = float)\r\n self.point_start_calc[0] = np.array(point_start )\r\n self.vec_in_calc = np.full((3,3), None, dtype = float)\r\n self.vec_in_calc[0] = np.array(vec_in )\r\n \r\n self.point_ref = np.full((3,3), None, dtype = float)\r\n self.vec_n = np.full((3,3), None, dtype = float)\r\n self.vec_ref = np.full((3,3), None, dtype = float)\r\n self.num = 0\r\n self.point_out = []\r\n self.vec_out = []\r\n\r\n # 関数 球との交点を求める\r\n # def calc_crosspoint_s(point_start, vec_in, point_sp_c, r):\r\n def calc_crosspoint_s(self):\r\n point_s = self.point_start_calc[self.num]\r\n vec_in = self.vec_in_calc[self.num]\r\n point_sp_c = self.point_sc[self.num]\r\n r = self.r[self.num]\r\n \r\n point_s_t = point_s - point_sp_c\r\n point_s_t_norm = np.linalg.norm(point_s_t, ord=2) \r\n \r\n vec_in = vec_in / np.linalg.norm(vec_in, ord=2) # vec_in normalize\r\n vec_in_norm = 1.0\r\n \r\n a = (vec_in_norm)**2\r\n b = 2 * np.dot(point_s_t, vec_in)\r\n c = (point_s_t_norm)**2 - r**2\r\n \r\n d = 
b**2 - 4 * a * c\r\n \r\n if abs(d) < 1.0E-12:\r\n d = 0.0\r\n \r\n if d < 0:\r\n print('線と球は交差しない: point = point_s として処理')\r\n # point_sp_0 = None\r\n vec_n = None\r\n point = point_s\r\n else:\r\n t1 = (-b + (d)**0.5) / (2 * a)\r\n t2 = (-b - (d)**0.5) / (2 * a)\r\n if (t1 > 0) and (t2 > 0): # どちらも正の場合\r\n if t1 < t2:\r\n t = t1\r\n else:\r\n t = t2\r\n elif(t1 < 0): # t1が負の場合\r\n t = t2\r\n elif(t2 <= 0): # t2が負/0の場合\r\n t = t1\r\n else:\r\n print('不明なエラー:t value')\r\n \r\n point_sp_0 = point_s_t + t * vec_in\r\n point = point_sp_0 + point_sp_c\r\n vec_n = point_sp_0 / r # 交点における球の法線(大きさ1)\r\n # if d == 0:\r\n # print('球の接線')\r\n # elif (t1 > 0) and (t2 > 0):\r\n # print('始点は球の外側')\r\n # else:\r\n # print('始点は球の内側')\r\n # print(point)\r\n \r\n self.point_ref[self.num] = point\r\n self.vec_n[self.num] = vec_n\r\n # return(point, vec_n)\r\n \r\n\r\n # 屈折計算\r\n # def calc_refracted(vec_in, vec_n, n1, n2, d):\r\n def calc_refracted(self):\r\n vec_in = self.vec_in_calc[self.num]\r\n vec_n = self.vec_n[self.num]\r\n \r\n n1 = self.n[self.num]\r\n if ((self.num_planes == 1) and (self.num == 1)):\r\n n2 = self.n[0]\r\n elif (self.num_planes == 2) and (self.num == 2):\r\n n2 = self.n[0]\r\n else:\r\n n2 = self.n[self.num + 1] \r\n \r\n ratio_n = n1 / n2 \r\n # if (vec_n is None) or (d == 0):\r\n if (vec_n is None):\r\n print('交点なし: vec_ref = vec_in として処理')\r\n vec_ref = vec_in\r\n else:\r\n vin_dot_vn = np.dot(vec_in, vec_n)\r\n \r\n if vin_dot_vn > 0: # 法線判定\r\n vec_n = -vec_n\r\n vin_dot_vn = np.dot(vec_in, vec_n)\r\n c_ref = 1 - ratio_n**2 * ( 1 - vin_dot_vn**2)\r\n \r\n if abs(c_ref) < 1.0E-12:\r\n c_ref = 0.0\r\n \r\n # 全反射判定\r\n # if (d <= 0):\r\n # print('接線')\r\n # vec_ref = vec_in\r\n if (c_ref) < 0:\r\n # print('反射')\r\n vec_ref = vec_in - 2 * vin_dot_vn * vec_n # 全反射の場合\r\n else:\r\n # print('屈折')\r\n vec_ref = ratio_n * (vec_in - vin_dot_vn * vec_n) - c_ref**0.5 * vec_n # 屈折\r\n vec_ref = vec_ref / np.linalg.norm(vec_ref, ord=2)\r\n \r\n self.vec_ref[self.num] = vec_ref\r\n # return vec_ref\r\n\r\n def calc_output(self):\r\n # num_planes の値で繰り返し\r\n for self.num in range(self.num_planes + 1):\r\n self.calc_crosspoint_s()\r\n self.calc_refracted()\r\n \r\n if self.num < 2:\r\n self.point_start_calc[self.num + 1] = self.point_ref[self.num]\r\n self.vec_in_calc[self.num + 1] = self.vec_ref[self.num]\r\n \r\n self.point_out = self.point_ref[self.num_planes].tolist()\r\n self.vec_out = self.vec_ref[self.num_planes].tolist()\r\n\r\n \r\n# 関数 描画用\r\ndef draw(point_sp_c, r, point_start, point, vec_ref):\r\n color_sp = 'skyblue'\r\n color_line = 'r'\r\n range_x = (-5, 5)\r\n range_y = range_x\r\n range_z = range_x\r\n \r\n u = np.linspace(0, 2 * np.pi, 100)\r\n v = np.linspace(0, np.pi, 100)\r\n x = point_sp_c[0] + r * np.outer(np.cos(u), np.sin(v))\r\n y = point_sp_c[1] + r * np.outer(np.sin(u), np.sin(v))\r\n z = point_sp_c[2] + r * np.outer(np.ones(np.size(u)), np.cos(v))\r\n \r\n # line\r\n p_s = point_start\r\n p_r = point\r\n p_e = p_r + vec_ref * 5\r\n lines = np.stack((p_s,p_r,p_e), 0).T\r\n # line_x = lines[0]\r\n # line_y = lines[1]\r\n # line_z = lines[2]\r\n \r\n fig = plt.figure(figsize=plt.figaspect(1.))\r\n \r\n # 3D\r\n ax = fig.add_subplot(222, projection='3d')\r\n ax.set_box_aspect((1,1,1))\r\n ax.set(xlabel='X', ylabel='Y', zlabel='Z')\r\n ax.set(xlim = range_x, ylim = range_x, zlim = range_z)\r\n # ax.set_zlim(-5, 5)\r\n \r\n # 球の描画\r\n # Plot the surface\r\n # ax.plot_surface(x, y, z, color=color_sp,rcount=100, ccount=100, antialiased=False)\r\n ax.plot_wireframe(x, y, z, 
color=color_sp, linewidth=0.5)\r\n # 線の描画\r\n line= art3d.Line3D(lines[0], lines[1], lines[2], color=color_line)\r\n ax.add_line(line)\r\n ax.plot(p_s[0], p_s[1], p_s[2], color=color_line, marker='o')\r\n ax.plot(p_r[0], p_r[1], p_r[2], color=color_line, marker='x')\r\n \r\n \r\n # 2D\r\n class Fig2d:\r\n def __init__(self, num_subplot, label0, label1, range0, range1, sph0, sph1, i, j):\r\n self.num_subplot = num_subplot\r\n self.label0 = label0\r\n self.label1 = label1\r\n self.range0 = range0\r\n self.range1 = range1\r\n self.sph0 = sph0\r\n self.sph1 = sph1\r\n self.i = i\r\n self.j = j\r\n \r\n f2d = [Fig2d(221, 'X', 'Y', range_x, range_y, sph0=x, sph1=y, i=0, j=1),\r\n Fig2d(223, 'X', 'Z', range_x, range_z, x, z, 0, 2),\r\n Fig2d(224, 'Y', 'Z', range_y, range_z, y, z, 1, 2)]\r\n \r\n for num in range(len(f2d)):\r\n ax = fig.add_subplot(f2d[num].num_subplot)\r\n ax.grid(True)\r\n ax.set(xlabel = f2d[num].label0, ylabel = f2d[num].label1)\r\n ax.set(xlim = f2d[num].range0, ylim = f2d[num].range1)\r\n ax.plot(f2d[num].sph0, f2d[num].sph1, color=color_sp, linewidth=0.5) #球\r\n ax.plot(lines[f2d[num].i], lines[f2d[num].j], color=color_line) #光線\r\n ax.plot(p_s[f2d[num].i], p_s[f2d[num].j], color=color_line, marker='o') #始点\r\n ax.plot(p_r[f2d[num].i], p_r[f2d[num].j], color=color_line, marker='x') #屈折点\r\n \r\n \r\n # # x-y\r\n # ax = fig.add_subplot(221)\r\n # ax.grid(True)\r\n # ax.set(xlabel='x', ylabel='y')\r\n # ax.set(xlim = xrange, ylim = yrange)\r\n # ax.plot(x, y, color=color_sp, linewidth=0.5)\r\n # ax.plot(line_x, line_y, color=color_line)\r\n # ax.plot(p_s[0], p_s[1], color=color_line, marker='o')\r\n # ax.plot(p_r[0], p_r[1], color=color_line, marker='x')\r\n \r\n # # x-z\r\n # ax = fig.add_subplot(223)\r\n # ax.grid(True)\r\n # ax.set(xlabel='x', ylabel='z')\r\n # ax.set(xlim = xrange, ylim = yrange)\r\n # ax.plot(x, z, color=color_sp, linewidth=0.5)\r\n # ax.plot(line_x, line_z, color=color_line)\r\n # ax.plot(p_s[0], p_s[2], color=color_line, marker='o')\r\n # ax.plot(p_r[0], p_r[2], color=color_line, marker='x')\r\n \r\n # # y-z\r\n # ax = fig.add_subplot(224)\r\n # ax.grid(True)\r\n # ax.set(xlabel='y', ylabel='z')\r\n # ax.set(xlim = xrange, ylim = yrange)\r\n # ax.plot(y, z, color=color_sp, linewidth=0.5)\r\n # ax.plot(line_y, line_z, color=color_line)\r\n # ax.plot(p_s[1], p_s[2], color=color_line, marker='o')\r\n # ax.plot(p_r[1], p_r[2], color=color_line, marker='x')\r\n \r\n # plt.savefig(\"3d_ball.jpg\",dpi=120)\r\n plt.show()\r\n\r\n\r\n\r\n# 始点ベクトル情報\r\npoint_start = [-2, -1, 0.0]\r\nvec_in = [1,0,0]\r\n# 球情報\r\n# point_sp_c = np.array([1,0,0])\r\n# r = 2.0\r\n# n1 = 1.0\r\n# n2 = 1.6\r\n\r\nlens1 = Lens(point_start=point_start, vec_in=vec_in, \r\n r0=2, r1=2, point_sc0=[1,0,0], point_sc1=[1,0,0], n0=1.0, n1=1.5)\r\n\r\n# lens1.point_start = np.array([-2, -1, 0.0])\r\n# lens1.vec_in = np.array([1,0,0])\r\nlens1.calc_output()\r\n\r\nprint('lens1.point_out =', lens1.point_out)\r\nprint('lens1.vec_out =', lens1.vec_out)\r\n\r\npoint_start = lens1.point_start_calc[1]\r\npoint = np.array(lens1.point_out)\r\nvec_ref = np.array(lens1.vec_out)\r\ndraw(lens1.point_sc[1], lens1.r[1], point_start, point, vec_ref)","sub_path":"lens_class.py","file_name":"lens_class.py","file_ext":"py","file_size_in_byte":10765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"481650094","text":"from collections import namedtuple, OrderedDict\nimport hashlib\nimport json\nimport time\n\nfrom django.core.urlresolvers import reverse\nfrom 
django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\n\n\nfrom cratis_shop_payment.common import log_payment\nfrom cratis_shop_payment.crypto import ssl_sign, load_pem_private_key, load_pem_public_key, ssl_verify\nfrom cratis_shop_payment.models import PaymentOrder\nfrom cratis_shop_payment.views import PaymentView\n\n\n__author__ = 'alex'\n\n\ndef sha1(string):\n return hashlib.sha1(string).hexdigest()\n\n\ndef md5(string):\n return hashlib.md5(string).hexdigest()\n\n\nclass PaytrailAccept(PaymentView):\n def get(self, request, *args, **kwargs):\n log_payment(request, 'paytrail', 'pay_accept')\n\n method = self.load_method(kwargs['method'])\n settings = method.behavior().config\n\n # skip validation\n \n\n order_id = request.GET['ORDER_NUMBER']\n timestamp = request.GET['TIMESTAMP']\n paid = request.GET['PAID']\n method = request.GET['METHOD']\n authcode = request.GET['RETURN_AUTHCODE']\n\n mac_data = '|'.join((order_id, timestamp, paid, method, settings['secret']))\n\n if md5(mac_data).upper() != authcode:\n return HttpResponse('Bad signature. Can not accpet payment.', None, 403)\n\n\n inject.instance('checkout_service').payment_success(PaymentOrder.objects.get(pk=order_id), amount=paid)\n\n return HttpResponseRedirect(reverse('cratis_profile__main'))\n\nclass PaytrailCancel(PaymentView):\n def post(self, request, *args, **kwargs):\n log_payment(request, 'paytrail', 'pay_cancel')\n\n # order_id = request.GET['orderid']\n # order = Order.objects.get(order_id)\n # order.mark_paid()\n\n return HttpResponseRedirect(reverse('orders_checkout'))\n\n\nclass PaytrailCallback(PaymentView):\n def post(self, request, *args, **kwargs):\n log_payment(request, 'paytrail', 'pay_callback')\n\n # skip validation\n order_id = request.GET['orderid']\n order = Order.objects.get(order_id)\n order.mark_paid()\n\n return HttpResponse('ok')\n\n\nclass PaytrailStart(PaymentView):\n def get(self, request, *args, **kwargs):\n method = self.load_method(kwargs['method'])\n settings = method.behavior().config\n\n order_id = request.session['order_id']\n\n\n po = PaymentOrder.objects.get(pk=order_id)\n order = json.loads(po.body)\n\n\n\n\n data = OrderedDict()\n data['MERCHANT_ID'] = settings['id']\n data['AMOUNT'] = str(po.sum)\n data['ORDER_NUMBER'] = str(order_id)\n data['REFERENCE_NUMBER'] = ''\n data['ORDER_DESCRIPTION'] = ''\n data['CURRENCY'] = order['currency']['code'].upper()\n data['RETURN_ADDRESS'] = request.build_absolute_uri(reverse('paytrail_payment_accept', kwargs={'method': method.slug}))\n data['CANCEL_ADDRESS'] = request.build_absolute_uri(reverse('paytrail_payment_cancel', kwargs={'method': method.slug}))\n data['PENDING_ADDRESS'] = ''\n data['NOTIFY_ADDRESS'] = request.build_absolute_uri(reverse('paytrail_payment_callback', kwargs={'method': method.slug}))\n\n data['TYPE'] = 'S1'\n data['CULTURE'] = 'en_US'\n data['PRESELECTED_METHOD'] = ''\n data['MODE'] = '1'\n data['VISIBLE_METHODS'] = ''\n data['GROUP'] = ''\n\n\n#6pKF4jkv97zmqBJ3ZL8gUw5DfT2NMQ|13466|99.90|123456||Testitilaus|EUR|http://www.esimerkki.fi/success|http://www.esimerkki.fi/cancel||http://www.esimerkki.fi/notify|S1|fi_FI|1||\n#6pKF4jkv97zmqBJ3ZL8gUw5DfT2NMQ|13466|99.90|123456||Testitilaus|EUR|http://www.esimerkki.fi/success|http://www.esimerkki.fi/cancel||http://www.esimerkki.fi/notify|S1|fi_FI||1||\n\n #\n #data['MERCHANT_ID'] = settings['id']\n #data['AMOUNT'] = '99.90'\n #data['ORDER_NUMBER'] = '123456'\n #data['REFERENCE_NUMBER'] = ''\n #data['ORDER_DESCRIPTION'] = 'Testitilaus'\n 
#data['CURRENCY'] = 'EUR'\n        #data['RETURN_ADDRESS'] = 'http://www.esimerkki.fi/success'\n        #data['CANCEL_ADDRESS'] = 'http://www.esimerkki.fi/cancel'\n        #data['PENDING_ADDRESS'] = ''\n        #data['NOTIFY_ADDRESS'] = 'http://www.esimerkki.fi/notify'\n        #\n        #data['TYPE'] = 'S1'\n        #data['CULTURE'] = 'fi_FI'\n        #data['PRESELECTED_METHOD'] = ''\n        #data['MODE'] = '1'\n        #data['VISIBLE_METHODS'] = ''\n        #data['GROUP'] = ''\n\n        mac_data = settings['secret'] + '|' + '|'.join(data.values())\n\n        mac = md5(mac_data).upper()\n\n        data['AUTHCODE'] = mac\n\n        log_payment(request, 'paytrail', 'pay_start', data)\n\n        url = 'https://payment.verkkomaksut.fi/'\n\n        return render(request, 'payment/post_redirect.html', {'url': url, 'fields': data.items()})\n\n","sub_path":"cratis_shop_payment/paytrail/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"384345741","text":"'''\nhttps://leetcode.com/problems/best-time-to-buy-and-sell-stock/description/\nInput: [7,1,5,3,6,4]\nOutput: 5\nExplanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = 6-1 = 5.\n             Not 7-1 = 6, as selling price needs to be larger than buying price.\n\n\nInput: [7,6,4,3,1]\nOutput: 0\nExplanation: In this case, no transaction is done, i.e. max profit = 0.\n'''\n\n'''\nBasic Idea : minPrice is the minimum price from day 0 to day i. And maxPro is the maximum profit we can get \nfrom day 0 to day i. How to get maxPro? Just get the larger one between current maxPro and prices[i] - minPrice.\n'''\ncost = [100,100,5,3,6,4]\nprofit = 0\nmin_price = cost[0]\nfor index in range(1,len(cost)):\n    profit = max(profit,cost[index]-min_price)\n    min_price = min(min_price,cost[index])\nprint(profit)\n","sub_path":"my_question/array-share-max-profit.py","file_name":"array-share-max-profit.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"249545409","text":"from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\n\nfrom portfolio.models import Project, ProjectImage\nfrom portfolio.forms import ProjectImageForm\n\n\nclass ProjectListService(object):\n    def __init__(self, request, params):\n        self.request = request\n        self.params = params\n        if not self.params:\n            self.params = {}\n\n    def getData(self):\n        last_item_id = self.params.get('last_item_id', None)\n        category = self.params.get('c', None)\n        filter = {}\n\n        if (last_item_id):\n            last_item = Project.objects.get(pk=last_item_id)\n            filter['start_date__lt'] = last_item.start_date\n\n        queryset = Project.objects.filter(**filter)\n        paginator = Paginator(queryset, self.page_size)\n        page = self.params.get('page', 1)\n\n        try:\n            data = paginator.page(page)\n        except PageNotAnInteger:\n            data = paginator.page(1)\n        except EmptyPage:\n            data = paginator.page(paginator.num_pages)\n\n        return [data, paginator]\n\n    @property\n    def page_size(self):\n        return self.params.get('page_size', 10)\n\n\nclass ProjectImageService(object):\n    def __init__(self, project):\n        self.project = project\n\n    def addimages(self, params, uploaded_files):\n        form = ProjectImageForm(params, uploaded_files)\n\n        images = []\n        if (form.is_valid()):\n            files = uploaded_files.getlist('path')\n            for file in files:\n                mime_type = file.content_type.split('/')[0]\n                size = file._size\n                image = ProjectImage(path=file, project=self.project,\n                                     mime_type=mime_type, size=size)\n                image.save()\n                images.append(image)\n        return 
images","sub_path":"apps/portfolio/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"642836171","text":"\"\"\"Add 'successful' flag to Application\n\nRevision ID: ca1d1ff9204d\nRevises: c46df9260505\nCreate Date: 2019-05-13 07:55:55.568227\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"ca1d1ff9204d\"\ndown_revision = \"c46df9260505\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\"application\", sa.Column(\"successful\", sa.Boolean(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(\"application\", \"successful\")\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/ca1d1ff9204d_add_successful_flag_to_application.py","file_name":"ca1d1ff9204d_add_successful_flag_to_application.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"262836752","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 29 11:01:32 2018\n\n@author: TE122613\ntutotial open cv\nhttps://docs.opencv.org/3.4.1/d0/d86/tutorial_py_image_arithmetics.html\n# Bitwise Operations\n\"\"\"\n\n# import libraries opencv and numpy\nimport cv2 as cv\nimport numpy as np\n\n# Load two images\nimg1 = cv.imread('TE connectivity.jpg')\nimg2 = cv.imread('logo.jpg')\n\n# logo position, shift position to coresponding area of interest\nX = 650\nY = 750\n\n# shape of the logo row, col, X,Y are position of the logo\nrows,cols,channels = img2.shape\nroi = img1[X:rows+X, Y:cols+Y ]\n\n# Now create a mask of logo and create its inverse mask also\nimg2gray = cv.cvtColor(img2,cv.COLOR_BGR2GRAY) #filter image color to grayscale\nret, mask = cv.threshold(img2gray, 100, 255, cv.THRESH_BINARY) #treshold image, create mask, try different intensities\nmask_inv = cv.bitwise_not(mask) #inverted masking\n\n# Now black-out the area of logo in ROI\nimg1_bg = cv.bitwise_and(roi,roi,mask = mask_inv) # masking foreground\n# Take only region of logo from logo image.\nimg2_fg = cv.bitwise_and(img2,img2,mask = mask) # masking background\n# Put logo in ROI and modify the main image\ndst = cv.add(img1_bg,img2_fg) # add images foreground and background\nimg1[X:rows+X, Y:cols+Y ] = dst # modify img1 in ROI\n\n# show images\ncv.imshow('bg',img1_bg)\ncv.imshow('fg',img2_fg)\ncv.imshow('mask',mask)\ncv.imshow('res',img1)\ncv.waitKey(0)\ncv.destroyAllWindows()","sub_path":"OpenCV_official/3. Core operations/2. 
Arithmetic Operations on Images/Arithmetic Operations on Images_3.py","file_name":"Arithmetic Operations on Images_3.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"354927330","text":"from pybricks.hubs import PrimeHub\nfrom pybricks.tools import wait\n\n# Initialize the hub.\nhub = PrimeHub()\n\nwhile True:\n    # Read the tilt values.\n    pitch, roll = hub.imu.tilt()\n\n    # Print the result.\n    print(pitch, roll)\n    wait(200)\n","sub_path":"examples/pup/hub_primehub/imu_tilt.py","file_name":"imu_tilt.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"525646187","text":"# Code to implement Horner's Method for the evaluation of a polynomial of degree n\n#Horner's Method allows the evaluation of a polynomial of degree n with only n multiplications and n additions.\n# Time complexity : O(n)\n\ndef hornerMethod(poly, x, degree):\n    result = poly[0]\n    for i in range(1, degree+1):\n        result = poly[i] + x*result\n    return result \n\n\nif __name__ == \"__main__\":\n    degree = int(input(\"Enter Degree of polynomial: \"))\n    print(\"Enter coefficients in the decreasing power of x (space separated input): \")\n    poly = list(map(float, input().split()))\n    x = float(input(\"Enter x : \")) \n    print(\"\\nValue is : \", hornerMethod(poly, x, degree))","sub_path":"07_Horner's Method/Code - Horner's_Method.py","file_name":"Code - Horner's_Method.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"395603150","text":"\"\"\"\nSampling the Warfarin dataset (without replacement) and feeding\ninto the training algorithm.\n\"\"\"\n\nimport logging\nimport os\nimport argparse\nfrom pdb import set_trace\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\nfrom utils.utils import normalize_df\nimport pandas as pd\n\ntest_run = True\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--refresh', default = True, help = 'Recreate the data from csv', type = bool)\nparser.add_argument('--raw_data_dir', default = 'data', help = 'Raw data directory', type = str)\n\n# create a dataset object whose iterator returns\n# one patient characteristic and correct dosage\nclass WarfarinDataset(Dataset):\n\n    def __init__(self, csv_file_name, raw_data_dir, cleaning_func = None, normalize = False):\n        \"\"\"\n        Args:\n            csv_file_name (string): Name of csv file.\n            raw_data_dir (string): Directory of raw data.\n        \"\"\"\n        self.warfarin = pd.read_csv(\n            os.path.join(raw_data_dir, csv_file_name)\n        )\n        if cleaning_func:\n            self.warfarin = cleaning_func(self.warfarin, normalize)\n\n    def __len__(self):\n        return len(self.warfarin)\n\n    def __getitem__(self, idx):\n        Y = self.warfarin.iloc[idx, 0]\n        X = self.warfarin.iloc[idx, 1:].values\n        return {'dosage' : Y, 'features' : X}\n\n    def get_choices(self):\n        choices = np.unique(self.warfarin['dosage'])\n        choices.sort()\n        return(choices)\n\n    def get_X_labels(self):\n        return self.warfarin.columns.values[1:] # first one is Y\n\n# customized cleaning for warfarin dataset\ndef warfarin_cleaning(warfarin, normalize = False):\n    \"\"\"\n    Args:\n        warfarin (pd.DataFrame): warfarin data.\n    Returns:\n        pd.DataFrame with cleaned data and reduced set of\n        features.\n    \"\"\"\n    # last line is not a valid record\n    # valid columns are 1 - 63\n    warfarin = warfarin.iloc[:-1, 0:62]\n\n    # drop patients with no 
known therapeutic \n    # level of Warfarin\n    warfarin = warfarin.loc[warfarin['Therapeutic Dose of Warfarin'].notnull(), :]\n\n    # discretize the dosage into bins\n    warfarin['dosage'] = discretize_dosage(warfarin['Therapeutic Dose of Warfarin'])\n\n    # age to number of decades\n    warfarin['age_decade'] = warfarin['Age'].astype('category').cat.codes + 1\n\n    # enzyme inducer status = 1 if patient taking carbamazepine, phenytoin, \n    # rifampin, or rifampicin, otherwise zero\n    enzyme_cols = warfarin.filter(regex = '(Carbamazepine)|(Phenytoin)|(Rifampin)|(Rifampicin)', axis = 1)\n    enzyme_yn = my_or(enzyme_cols)\n    warfarin['enzyme_yn'] = enzyme_yn\n\n    # amiodarone\n    amiodarone_cols = warfarin.filter(regex = 'Amiodarone', axis = 1)\n    amiodarone_yn = my_or(amiodarone_cols)\n    warfarin['amiodarone_yn'] = amiodarone_yn\n\n    # make dummy vars\n    age_dummies = pd.get_dummies(warfarin['Age'], prefix = 'age')\n    gender_dummies = pd.get_dummies(warfarin['Gender'], prefix = 'gender')\n    race_dummies = pd.get_dummies(warfarin['Race'], prefix = 'race')\n    warfarin = pd.concat([warfarin, age_dummies, gender_dummies, race_dummies], axis = 1)\n\n    # fill in missing values\n    warfarin.loc[warfarin['Height (cm)'].isnull(), 'Height (cm)'] = np.mean(warfarin['Height (cm)'])\n    warfarin.loc[warfarin['Weight (kg)'].isnull(), 'Weight (kg)'] = np.mean(warfarin['Weight (kg)'])\n\n    # keep only X and Y vars\n    filter_regex = ('(^age)|(^race)|(^gender)|(^dosage)'\n                    '|(^Height)|(^Weight)|(^enzyme_yn)'\n                    '|(^amiodarone_yn)')\n    warfarin = warfarin.filter(regex = filter_regex, axis = 1)\n\n    # rearrange columns to be Y, X1, X2 ...\n    new_cols = ['dosage'] + [col for col in warfarin if col != 'dosage']\n    warfarin = warfarin[new_cols]\n\n    # make sure there are no missing values\n    assert warfarin.notnull().all().all()\n\n    if normalize:\n        temp = warfarin['dosage'].copy()\n        warfarin = normalize_df(warfarin)\n        warfarin['dosage'] = temp\n\n    return warfarin\n\n# OR operation between columns of a pandas dataframe\ndef my_or(df):\n    x = df.iloc[:, 0].copy()\n    x[:] = False\n    for col in df:\n        x = x | df[col] == 1\n    return x * 1.\n\n# discretize dosage\ndef discretize_dosage(dosage):\n    return pd.cut(dosage, bins = [0, 21, 49, 1e10]).cat.codes\n\n\nif __name__ == '__main__':\n    args = parser.parse_args()\n\n    if args.refresh:\n        print('Building dataset.')\n        warfarin_dataset = WarfarinDataset('warfarin.csv', args.raw_data_dir, warfarin_cleaning)\n\n    if test_run:\n        # print out first 100 samples from Dataset object\n        for i in range(100):\n            sample = warfarin_dataset[i]\n            print('Sample dosage is %(dosage)s, features are %(features)s.' 
% sample)\n    \n    # define a custom dataloader\n    print('Building dataloader.')\n    dataloader = DataLoader(warfarin_dataset, batch_size = 4, shuffle = True, num_workers = 4)\n\n    if test_run:\n        # print out the first 100 batches\n        for i_batch, sampled_batch in enumerate(dataloader):\n            print(i_batch, sampled_batch)\n            if i_batch == 99:\n                break\n","sub_path":"build_dataset.py","file_name":"build_dataset.py","file_ext":"py","file_size_in_byte":5152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"630003730","text":"from graphviz import Digraph, nohtml\nimport uuid\n\nclass BinaryTree:\n    value = None\n    leftChild = None\n    rightChild = None\n\n    def __init__ (self, value):\n        self.value = value\n        self.leftChild = None\n        self.rightChild = None\n\n    def insertLeft(self, node):\n        if not isinstance(node, BinaryTree):\n            node = BinaryTree(node)\n\n        if self.leftChild is not None:\n            node.leftChild = self.leftChild\n\n        self.leftChild = node\n\n    def insertRight(self, node):\n        if not isinstance(node, BinaryTree):\n            node = BinaryTree(node)\n\n        if self.rightChild is not None:\n            node.rightChild = self.rightChild\n\n        self.rightChild = node\n\n    def height(self):\n        maxLeft = 0\n        maxRight = 0\n        if self.leftChild != None:\n            maxLeft = self.leftChild.height()\n        if self.rightChild != None:\n            maxRight = self.rightChild.height()\n        return 1 + max(maxLeft, maxRight)\n\n    def inOrder(self, func):\n        if self.leftChild != None:\n            self.leftChild.inOrder(func)\n        func(self.value)\n        if self.rightChild != None:\n            self.rightChild.inOrder(func)\n    \n    def preOrder(self, func):\n        func(self.value)\n        if self.leftChild != None:\n            self.leftChild.preOrder(func)\n        if self.rightChild != None:\n            self.rightChild.preOrder(func)\n    \n    def postOrder(self, func):\n        if self.leftChild != None:\n            self.leftChild.postOrder(func)\n        if self.rightChild != None:\n            self.rightChild.postOrder(func)\n        func(self.value)\n\n    def visualize(self, filename='tmp.gv'):\n        g = Digraph('g', filename=filename, node_attr={'shape': 'record', 'height': '.1'})\n        self._visualize(g)\n        return g\n\n    def _visualize(self, g):\n        nodeName = str(uuid.uuid1())\n        g.node(nodeName, nohtml('<l> |<v> ' + str(self.value) + '|<r>'))  # record label with l/v/r ports, matching the ':l'/':r' edges below\n        if self.leftChild != None:\n            leftUUID = self.leftChild._visualize(g)\n            g.edge(nodeName + ':l', leftUUID + ':n')\n        if self.rightChild != None:\n            rightUUID = self.rightChild._visualize(g)\n            g.edge(nodeName + ':r', rightUUID + ':n')\n        return nodeName\n\n    \nif __name__ == '__main__':\n    t = BinaryTree(0)\n    t.insertLeft(1)\n    t.insertLeft(2)\n    t.insertRight(3)\n    print (\"----- height -----\")\n    print (t.height())\n    print (\"----- inOrder -----\")\n    t.inOrder(print)\n    print (\"----- preOrder -----\")\n    t.preOrder(print)\n    print (\"----- postOrder -----\")\n    t.postOrder(print)\n    g = t.visualize()\n    g.view()\n    \n","sub_path":"algorithms/trees/binaryTree.py","file_name":"binaryTree.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"541136784","text":"import os\r\nimport time\r\n# the output file name uses the current date and time (YYYYMMDDhhmmss)\r\n\r\ndef getFile(path):\r\n    files=os.listdir(path)\r\n    for file in files:\r\n        filename=file\r\n        file=path+'\\\\'+file\r\n        if os.path.isfile(file):\r\n            attrs=os.stat(file)\r\n            modify_time=time.localtime(attrs.st_mtime)\r\n            s_latest=time.strftime('%Y%m%d%H%M%S',modify_time)\r\n            if s_latest>'20161008000000' and s_latest<'20161010000000' and (not file.endswith(\".keep\")):\r\n                print(s_latest)\r\n                file_list.append(filename+'\\n')\r\n        else:\r\n            
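# the entry is not a regular file, i.e. it is a directory: recurse into it\r\n            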
getFile(file)\r\nprint('Searching for files modified today...')\r\nfile_list=[]\r\npath=r'D://0100060928_view//workspace//CXLCS//Java Source//com//cathay'\r\ngetFile(path)\r\npath=r'D://0100060928_view//workspace//CXLCS//Web Content//html'\r\ngetFile(path)\r\n\r\ns_now=time.strftime('%Y%m%d%H%M%S')\r\nfile=open('D://logs//latest'+s_now+'.txt','w+')\r\nif len(file_list)==0:\r\n    file.write('empty set')\r\nelse:\r\n    i=0\r\n    for filename in file_list:\r\n        i+=1\r\n        print(str(i)+'\\n')\r\n        file.write(filename)\r\nfile.close()\r\nprint('end')\r\n\r\n \r\n","sub_path":"latestfile.py","file_name":"latestfile.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"577917310","text":"#!/usr/bin/env python3\nfrom lib515 import *\nfrom optparse import OptionParser\nfrom random import *\n\ndef readPList(file) :\n\tpList = []\n\titems = int(file.readline().rstrip())\n\tfor i in range(items) :\n\t\tl = file.readline().rstrip().split()\n\t\tprob = float(l[0])\n\t\tlabel = l[1]\n\t\tpList.append(tuple((prob, label)))\n\treturn pList\n\ndef readState(hmm, file) :\n\tstateTrans = readPList(file)\n\tprobabilities = readPList(file)\n\tstate = tuple((stateTrans, probabilities))\n\treturn state\n\ndef readHMM(file) :\n\tHMM = {}\n\tstates = int(file.readline().rstrip())\n\tfor s in range(states) :\n\t\tstateName = file.readline().rstrip()\n\t\tHMM[stateName] = readState(HMM, file) \n\treturn HMM\t\n\t\ndef pickState(hmm, current) :\n\tran = random()\n\tif current != None :\n\t\t(stateOdds, itemOdds) = hmm[current]\n\t\tsum = 0.0\n\t\tfor s in stateOdds :\n\t\t\t(p, state) = s\n\t\t\tsum += p\n\t\t\tif sum > ran :\n\t\t\t\treturn (p, state)\n\t\tprint(\"Pick State Error\")\n\telse :\n\t\tkeys = list(hmm.keys())\n\t\treturn (1.0 / len(keys), choice(keys))\n\t\ndef pickItem(hmm, current) :\n\tran = random()\n\t(stateOdds, itemOdds) = hmm[current]\n\tsum = 0.0\n\tfor i in itemOdds :\n\t\t(p, item) = i\n\t\tsum += p\n\t\tif sum > ran :\n\t\t\treturn i\n\tprint(\"Pick Item Error\")\n\ndef makeStrings(hmm, init, num) :\n\tcurrent = init\n\tstates = \"\"\n\titems = \"\"\n\tP = 1.0\n\tpState = 1.0\n\tfor x in range(num) :\n\t\tif current != None : \n\t\t\ti = pickItem(hmm, current)\n\t\telse : #in the event that the initial state is not defined\n\t\t\t(pState, current) = pickState(hmm, current)\n\t\t\ti = pickItem(hmm, current)\n\t\tstates += current\n\t\t(p, item) = i\n\t\titems += item\n\t\t#P *= p * pState this is where it should go logically for the odds to get to this state and select this item\n\t\t(pState, current) = pickState(hmm, current)\n\t\tP *= p * pState #this is where it matches the calculations on the submission page\n\treturn (states, items, P)\n\ndef main() :\n\tparser = OptionParser(usage=\"usage: %prog [options] file\")\n\tparser.add_option(\"-n\", \"--number\", dest=\"num\", default=\"20\", help=\"number of chars to generate\")\n\tparser.add_option(\"-i\", \"--init\", dest=\"init\", default=None, help=\"initial state for string\")\n\t(options, args) = parser.parse_args()\n\t\t\n\tif len(args) != 1 :\n\t\tparser.print_help()\n\t\texit(1)\n\tinit = options.init\n\tnum = int(options.num)\n\tfilename = args[0]\n\tfile = open(filename, \"r\")\n\tHMM = readHMM(file)\n\tprint(HMM)\n\t(states, items, P) = makeStrings(HMM, init, 
num)\n\tprint(states)\n\tprint(items)\n\tprint(P)\n\t\nmain()\n","sub_path":"hmmGen.py","file_name":"hmmGen.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"415285950","text":"from typing import List, Optional\nimport os\n\n# allowed_extensions is an array, like ['jpg', 'jpeg', 'png']\ndef file_paths_from_folder(\n    root_folder_path: str, \n    allowed_extensions: List[str] = None, \n    recursive: bool = True\n) -> List[str]:\n    import os\n\n    root_folder_path = os.path.abspath(root_folder_path)\n    file_paths = []\n\n    for (dir_path, _, file_names) in os.walk(root_folder_path):\n        abs_dir_path = os.path.abspath(dir_path)\n\n        for file_name in file_names:\n            if allowed_extensions is not None and len(allowed_extensions) > 0:\n                for extension in allowed_extensions:\n                    if file_name.lower().endswith(extension.lower()):\n                        file_paths.append(os.path.join(abs_dir_path, file_name))\n\n                        break\n            else:\n                file_paths.append(os.path.join(abs_dir_path, file_name))\n        \n        if not recursive:\n            break\n    \n    return file_paths\n\ndef path_of_file(f: str) -> str:\n    return os.path.abspath(f)\n\ndef folder_path_of_file(f: str) -> str:\n    return os.path.dirname(path_of_file(f))\n\ndef temp_path_for_path(_path: str) -> str:\n    import random, string\n\n    folder_path = folder_path_of_file(_path)\n    ext = extension(_path, include_dot=True)\n\n    while True:\n        proposed_path = os.path.join(\n            folder_path,\n            '.' + ''.join(random.choices(string.ascii_lowercase + string.digits, k=8)) + ext\n        )\n\n        if not os.path.exists(proposed_path):\n            return proposed_path\n\ndef file_name(_path: str, include_extension: bool = True) -> str:\n    basename = os.path.basename(_path)\n\n    if not include_extension:\n        basename = remove_extensions(basename)\n    \n    return basename\n\ndef extension(_path: str, include_dot: bool = False) -> Optional[str]:\n    path_comps = _path.replace('/.', '/').split(\".\")\n\n    if len(path_comps) == 1:\n        return None\n    \n    ext = path_comps[-1]\n\n    if include_dot:\n        ext = '.' + ext\n    \n    return ext\n\ndef replace_extension(_path: str, new_extension: str) -> str:\n    if not new_extension.startswith('.'):\n        new_extension = '.' + new_extension\n    \n    return _path.replace(extension(_path, include_dot=True), new_extension)\n\ndef remove_extensions(_path: str) -> str:\n    while True:\n        ext = extension(_path, include_dot=True)\n\n        if ext is None:\n            return _path\n        \n        _path = _path[:-len(ext)]  # slice off the suffix; rstrip() would strip characters, not a suffix\n\ndef remove(_path: str) -> bool:\n    try:\n        os.remove(_path)\n\n        return True\n    except:\n        return False","sub_path":"kov_utils/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"565040174","text":"from model.group import Group\n\n\ndef test_delete_first_group(app, db):\n    if len(db.get_group_list()) == 0:\n        app.group.create(Group(name=\"test\"))\n    old_groups = db.get_group_list()\n    app.group.delete_first_group()\n    assert len(old_groups) - 1 == app.group.count()\n    new_groups = db.get_group_list()\n    # only one element is removed, because the slice includes the left boundary\n    # but not the right one, i.e. 
the first element is deleted\n    assert old_groups == new_groups\n\n\n","sub_path":"test/test_groups_db/test_del_group.py","file_name":"test_del_group.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"287046550","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 25 13:19:24 2021\n\n@author: ronguy\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 22 20:09:58 2021\n\n@author: ronguy\n\"\"\"\nimport matplotlib\nimport numpy as np\nfrom sklearn.manifold import TSNE\nfrom scipy import integrate as int\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport time\nimport shelve\nimport guidata\nimport guidata.dataset.datatypes as dt\nimport guidata.dataset.dataitems as di\nimport swat\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom time import time\nfrom bootstrap_stat import datasets as d\nfrom bootstrap_stat import bootstrap_stat as bp\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import preprocessing\nfrom sklearn import datasets\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.linear_model import LinearRegression\nimport statsmodels.stats.weightstats as ws\nfrom sklearn.cluster import KMeans\nimport guidata\nimport guidata.dataset.datatypes as dt\nimport guidata.dataset.dataitems as di\nfrom tkinter import *\nfrom tkinter.filedialog import asksaveasfilename\nimport umap\nfrom lmfit import minimize, Parameters\n\nfrom sklearn.cluster import DBSCAN\nfrom sklearn import metrics\nfrom sklearn.datasets import make_blobs\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import MinMaxScaler\n\ndef dbscan_plot(data,eps=0.1,min_samples=50):\n    X=data\n    X = StandardScaler().fit_transform(X)\n    db = DBSCAN(eps=eps, min_samples=min_samples).fit(X)\n    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n    core_samples_mask[db.core_sample_indices_] = True\n    labels = db.labels_\n\n    # Number of clusters in labels, ignoring noise if present.\n    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n    n_noise_ = list(labels).count(-1)\n\n    print('Estimated number of clusters: %d' % n_clusters_)\n    print('Estimated number of noise points: %d' % n_noise_)\n    print(\"Silhouette Coefficient: %0.3f\"\n          % metrics.silhouette_score(X, labels))\n\n    # Black removed and is used for noise instead.\n    plt.figure(figsize=(10, 10))\n    unique_labels = set(labels)\n    colors = [plt.cm.Spectral(each)\n              for each in np.linspace(0, 1, len(unique_labels))]\n    for k, col in zip(unique_labels, colors):\n        if k == -1:\n            # Black used for noise.\n            col = [0, 0, 0, 1]\n\n        class_member_mask = (labels == k)\n    \n        xy = X[class_member_mask & core_samples_mask]\n        plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),label = k,\n                 markeredgecolor='k', markersize=14)\n    \n        xy = X[class_member_mask & ~core_samples_mask]\n        plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n                 markeredgecolor='k', markersize=6)\n    \n    plt.legend(fontsize=15, title_fontsize='40') \n    plt.title('Estimated number of clusters: %d' % n_clusters_)\n    plt.show()\n    return labels\n\n\n\ndef residual(params, x, data):\n    alpha = params['alpha']\n    beta = params['beta']\n    gam = params['gamma']\n    \n\n    avMarkers=x['H3.3']*alpha+x['H4']*beta+x['H3']*gam\n    
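# avMarkers is a weighted average of the three structural markers; the residual\n    # below is the summed spread of each marker around that average, so minimize()\n    # fits alpha/beta/gamma as per-marker normalization weights (a reading of the\n    # code, not an author-documented contract)\n    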
od=x.subtract(avMarkers,axis=0)\n return np.std(od['H3.3'])+np.std(od['H4'])+np.std(od['H3'])\n #(pow(od['H3']-avMarkers,2)+pow(od['H3.3']-avMarkers,2)+pow(od['H4']-avMarkers,2))\n\n\n\ndef draw_umap(data,n_neighbors=15, min_dist=0.1, n_components=2, metric='euclidean', title=''\n ,cc=0):\n fit = umap.UMAP(\n n_neighbors=n_neighbors,\n min_dist=min_dist,\n n_components=n_components,\n metric=metric\n )\n u = fit.fit_transform(data);\n plt.figure(figsize=(6, 5))\n if n_components == 2:\n plt.scatter(u[:,0], u[:,1], c=cc,s=3,cmap=plt.cm.jet)\n plt.clim(-5,5)\n plt.colorbar()\n plt.title(title, fontsize=18)\n return u;\n\n\nparams = {'axes.titlesize': 30,\n 'legend.fontsize': 20,\n 'figure.figsize': (16, 10),\n 'axes.labelsize': 20,\n 'axes.titlesize': 20,\n 'xtick.labelsize': 20,\n 'ytick.labelsize': 20,\n 'figure.titlesize': 30}\nplt.rcParams.update(params)\nplt.style.use('seaborn-whitegrid')\nsns.set_style(\"white\")\n\n#df=pd.read_csv(\"control.csv\")\n#dfmut=pd.read_csv(\"mutant.csv\")\ndir=\"/Users/ronguy/Dropbox/Work/CyTOF/Datasets/17321/\"\nC16=pd.read_csv(dir+\"C17.csv\")\n#C15=pd.read_csv(dir+\"C15.csv\")\n#C11=C11[(C11 != 0).all(1)]\n#C12=C12[(C12 != 0).all(1)]\n# threshold=5\n# lC15=len(C15)\n# print(\"C15\")\n# for col in C15.columns:\n# lG= np.count_nonzero(C15[col]>threshold) \n# print(\"%-30s %d %5.3f\" % (col,lG,lG/lC15*100.)) \n# print(\" \")\n# lC16=len(C16)\n# print(\"C16\")\n# for col in C16.columns:\n# lG= np.count_nonzero(C16[col]>threshold) \n# print(\"%-30s %d %5.3f\" % (col,lG,lG/lC16*100.)) \n\nNamesAll=['H3',\n 'IdU',\n 'MBP',\n 'H3K36me3',\n 'GFAP',\n 'EZH2',\n 'H3K4me3',\n 'H3K79me2',\n 'yH2A.X',\n 'H3K36me2',\n 'Sox2',\n 'SIRT1',\n 'H4K16ac',\n 'H2Aub',\n 'H3K4me1',\n 'H3.3',\n 'H3K64ac',\n 'BMI1',\n 'Cmyc',\n 'H4',\n 'H3K27ac',\n 'H4K20me3',\n 'DLL3',\n 'cleaved H3',\n 'H3K9ac',\n 'H1.0',\n 'CD24',\n 'H3K27me3',\n 'H3K27M',\n 'H3K9me3',\n 'CD44',\n 'Ki-67',\n 'CXCR4',\n 'pH3[S28]',\n 'H1.3/4/5']\n\n\n\nNames_UMAP=[\n 'H3',\n 'H3K36me3',\n 'H3K4me3',\n 'H3K79me2',\n 'yH2A.X',\n 'H3K36me2',\n 'H4K16ac',\n 'H2Aub',\n 'H3K4me1',\n 'H3.3',\n 'H3K64ac',\n 'H4',\n 'H3K27ac',\n 'H4K20me3',\n 'cleaved H3',\n 'H3K9ac',\n 'H1.0',\n 'H3K27me3',\n 'H3K27M',\n 'H3K9me3',\n 'pH3[S28]',\n]\n\n\n\nplt.figure(figsize=(6, 5))\n\nGateColumns=['H3.3','H4','H3']\n\n \nC16=C16[(C16[GateColumns]>5).all(axis=1)]\n\nGateColumns=[\n 'H3',\n 'H3K4me3',\n 'H3K79me2',\n 'yH2A.X',\n 'H3K36me2',\n 'H4K16ac',\n 'H2Aub',\n 'H3K4me1',\n 'H3.3',\n 'H3K64ac',\n 'H4',\n 'H3K27ac',\n 'H4K20me3',\n 'H3K9ac',\n 'H3K27me3',\n 'H3K9me3',]\n\n\n\nC16=C16[(C16[GateColumns]>0).all(axis=1)]\n\n\nthreshold=2\nlC16=len(C16)\nprint(\"C16\")\nfor col in C16.columns:\n lG= np.count_nonzero(C16[col]>threshold) \n print(\"%-30s %d %5.3f\" % (col,lG,lG/lC16*100.)) \nprint(\" \")\n\n\n\n\nC16mask=C16[NamesAll]0:\n N_Neurons=N_Neurons+1\nn_encoder1 = np.int(N_Neurons / 2) \nn_encoder2 = N_Neurons\nn_latent = 10\nn_decoder2 = N_Neurons \nn_decoder1 = np.int(N_Neurons / 2)\n\n# reg = MLPRegressor(hidden_layer_sizes = (n_encoder1, n_encoder2, n_latent, n_decoder2, n_decoder1), \n# activation = 'tanh', \n# solver = 'adam', \n# learning_rate_init = 0.0005, \n# max_iter = 2000, \n# tol = 0.0000001, \n# verbose = True)\n\n# train_x=data_scaled\n\n\n\n# reg.fit(train_x, train_x)\n\n\n# def encoder(data):\n# data = np.asmatrix(data)\n \n# encoder1 = data*reg.coefs_[0] + reg.intercepts_[0]\n# encoder1 = (np.exp(encoder1) - np.exp(-encoder1))/(np.exp(encoder1) + np.exp(-encoder1))\n \n# encoder2 = encoder1*reg.coefs_[1] + 
reg.intercepts_[1]\n# encoder2 = (np.exp(encoder2) - np.exp(-encoder2))/(np.exp(encoder2) + np.exp(-encoder2))\n \n# latent = encoder2*reg.coefs_[2] + reg.intercepts_[2]\n# latent = (np.exp(latent) - np.exp(-latent))/(np.exp(latent) + np.exp(-latent))\n \n# return np.asarray(latent)\n\n\n\n# eout=encoder(data_scaled)\n\n# X_2d=draw_umap(eout,cc=CAll['H3K27M'],min_dist=0.01)\n\n\nfor NN in NamesAll:\n Var=NN\n TSNEVar=NN\n cc=CAll[TSNEVar]#[mask]\n plt.figure(figsize=(6, 5))\n plt.scatter(X_2d[:,0],X_2d[:,1],s=2,\n c=cc, cmap=plt.cm.jet)\n cmap = matplotlib.cm.get_cmap('jet')\n plt.colorbar()\n plt.clim(-3.5,3.5)\n mask=CAllmask[TSNEVar]==True\n rgba = cmap(-10)\n plt.scatter(X_2d[mask][:,0],X_2d[mask][:,1],s=2,\n color=rgba) \n plt.title(TSNEVar+\" C16 Epigenetic\")\n\n\n\nCAll=CAll.assign(umap0=X_2d[:,0])\nCAll=CAll.assign(umap1=X_2d[:,1])\nlab=dbscan_plot(X_2d,eps=0.25,min_samples=50)\nCAll=CAll.assign(clust=lab)\n\n\n\nplt.figure(figsize=(6, 5))\nmask=CAll.clust==0\nplt.scatter(CAll[mask].umap0,CAll[mask].umap1,s=2,\n color='darkorange',label='Mutant (C16) - H3K27M High')\nmask=CAll.clust==1\nplt.scatter(CAll[mask].umap0,CAll[mask].umap1,s=2,\n color='dimgray',label='Mutant (C16) - H3K27M Low')\nplt.legend(markerscale=5,fontsize=15)\nplt.title(\"C16 AE + UMAP\")\nplt.show()\n","sub_path":"C15C16_Autoencoder copy.py","file_name":"C15C16_Autoencoder copy.py","file_ext":"py","file_size_in_byte":10155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"367909892","text":"from Store_app.models import Variation\nfrom .models import Cart, CartItem\nfrom django.http.response import HttpResponse\nfrom django.http import JsonResponse\n\ndef _get_session_id(request): # get the session id from the current session\n session_id = request.session.session_key\n return session_id\n\ndef get_cart(request):\n session_id = _get_session_id(request)\n if not session_id:\n return HttpResponse('You need to login first. 
The login page will be available soon')\n    try:\n        cart = Cart.objects.get(cart_id=session_id)\n    except Cart.DoesNotExist:\n        cart = Cart(cart_id=session_id)\n        cart.save()\n    return cart\n\ndef get_total_tax_grandTotal(cart):\n    cart_items = CartItem.objects.filter(cart=cart, is_active=True) \n    total = 0\n    for cart_item in cart_items:\n        total += cart_item.get_subTotal()\n    tax = get_tax(total)\n\n    return {\n        \"total\": total,\n        \"tax\": tax,\n        \"grand_total\": total + tax\n    }\n\ndef get_quantity(cart):\n    cart_items = CartItem.objects.filter(cart=cart, is_active=True) \n    quantity = 0\n    for cart_item in cart_items:\n        quantity += cart_item.quantity\n    return quantity\n\n\ndef get_tax(total):\n    return (2*total)/100\n\ndef response_to_CartPage(cart, cart_item):\n    # calculate total, tax, and grand total, then return them as JSON\n    total_tax_grandTotal = get_total_tax_grandTotal(cart)\n    response_body = {\n        'sub_total': cart_item.get_subTotal(),\n        'quantity': get_quantity(cart),\n        **total_tax_grandTotal\n    }\n    return JsonResponse(response_body)\n\ndef get_queryParameter(request):\n    queryParameter_dict ={}\n    for item in request.GET:\n        queryParameter_dict[item] = request.GET.get(item)\n    return queryParameter_dict\n\ndef check_existed_variationInCart(request, cart_items):\n    queryParameter_dict = get_queryParameter(request)\n    for cart_item in cart_items: \n        variation_dict = cart_item.get_variation_dict() # 1 for loop\n        if queryParameter_dict == variation_dict :\n            return cart_item\n    return\n\ndef create_VariationOfProduct(request,product, cart_item):\n    variation_dict = get_queryParameter(request)\n    variation_object_list = []\n    for key,value in variation_dict.items():\n        variation = Variation.objects.get(\n            product=product, \n            variation_category = key,\n            variation_value = value\n        )\n        variation_object_list.append(variation)\n    cart_item.variation.add(*variation_object_list)\n    return cart_item\n","sub_path":"Carts_app/helper_function.py","file_name":"helper_function.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"594186099","text":"#'////////////////////////////////////////////////////////////////////////////\r\n#' FILE: novelomics_mapping_00_lookups.py\r\n#' AUTHOR: David Ruvolo\r\n#' CREATED: 2021-05-05\r\n#' MODIFIED: 2021-05-06\r\n#' PURPOSE: process reference entities for Novel Omics data\r\n#' STATUS: in.progress\r\n#' PACKAGES: os, molgenis.client,\r\n#' COMMENTS: This file should be run before the main novelomics processing\r\n#'      script.\r\n#'////////////////////////////////////////////////////////////////////////////\r\n\r\nimport os # for local testing only\r\nimport json\r\nimport molgenis.client as molgenis\r\n\r\n# set token\r\n# os.environ['molgenisToken'] = ''\r\n\r\n# @title rd3_extra\r\n# @param rd3 molgenis session\r\nclass molgenis_extra(molgenis.Session):\r\n    def update_table(self, data, entity):\r\n        for d in range(0, len(data), 1000):\r\n            response = self._session.post(\r\n                url=self._url + 'v2/' + entity,\r\n                headers=self._get_token_header_with_content_type(),\r\n                data=json.dumps({'entities': data[d:d+1000]})\r\n            )\r\n            if response.status_code == 201:\r\n                print(\"Imported batch \" + str(d) +\r\n                      \" successfully (\" + str(response.status_code) + \")\")\r\n            else:\r\n                print(\"Failed to import batch \" + str(d) +\r\n                      \" (\" + str(response.status_code) + \")\")\r\n\r\n# @title flatten attribute\r\n# @description pull values from a specific attribute\r\n# @param data list of dict\r\n# @param attr name of the attribute 
to flatten\r\n# @param distinct if TRUE, return unique cases only\r\n# @return a list of values\r\ndef flatten_attr(data, attr, distinct=False):\r\n    out = []\r\n    for d in data:\r\n        tmp_attr = d.get(attr)\r\n        out.append(tmp_attr)\r\n    if distinct:\r\n        return list(set(out))\r\n    else:\r\n        return out\r\n\r\n# @title Identify new lookup values\r\n# @description using a list of new values, determine if there are new values to update\r\n# @param lookup RD3 lookup table\r\n# @param lookup_attr the attribute to look into\r\n# @param new a list of unique values\r\n# @return a list of dictionaries of new entries, e.g. {'id': value, 'label': value}\r\ndef identify_new_lookups(lookup, lookup_attr, new):\r\n    refs = flatten_attr(lookup, lookup_attr)\r\n    out = []\r\n    for n in new:\r\n        if (n in refs) == False:\r\n            out.append({'id': n, 'label': n})\r\n    return out\r\n\r\n# @title Prepare Reference Types for Import\r\n# @description prepare new reference data for import\r\n# @param data list containing one or more dictionaries of new references\r\n# @param id_name label to apply to the ID variable (id => identifier)\r\n# @param label_name label to map to 'label' key (i.e., name => label)\r\ndef prepare_new_lookups(data, id_name='identifier', label_name='label'):\r\n    out = []\r\n    for d in data:\r\n        new = {}\r\n        new[id_name] = d\r\n        new[label_name] = d\r\n        out.append(new)\r\n    return out\r\n\r\n# set tokens and host\r\nenv = 'dev'\r\napi = {\r\n    'host': {\r\n        'prod': 'https://solve-rd.gcc.rug.nl/api/',\r\n        'acc' : 'https://solve-rd-acc.gcc.rug.nl/api/',\r\n        'dev' : 'https://solve-rd-acc.gcc.rug.nl/api/',\r\n    },\r\n    'token': {\r\n        'prod': '${molgenisToken}',\r\n        'acc': '${molgenisToken}',\r\n        'dev': os.getenv('molgenisToken') if os.getenv('molgenisToken') is not None else None\r\n    }\r\n}\r\n\r\nrd3 = molgenis_extra(url=api['host'][env], token=api['token'][env])\r\n\r\n# fetch: novel omics data to process\r\nexperiment = rd3.get('rd3_portal_novelomics_experiment', batch_size=1000)\r\nshipment = rd3.get('rd3_portal_novelomics_shipment', batch_size=10000)\r\n\r\n# fetch: reference entities\r\nrd3_filetypes = rd3.get('rd3_typeFile')\r\nrd3_seqtypes = rd3.get('rd3_seqType')\r\nrd3_organisations = rd3.get('rd3_organisation')\r\nrd3_ERN = rd3.get('rd3_ERN')\r\n\r\n\r\n# process file types, if there are new records add to lookup table\r\nprint('References: Looking for new filetypes...')\r\nnovelomics_filetypes = flatten_attr(experiment, 'file_type', distinct=True)\r\nnew_rd3_filetypes = identify_new_lookups(rd3_filetypes, 'identifier', novelomics_filetypes)\r\n\r\nif len(new_rd3_filetypes):\r\n    print('Identified new file type references:', len(new_rd3_filetypes))\r\n    filetypes_to_upload = prepare_new_lookups(data = new_rd3_filetypes)\r\n    print('Importing new file type lookups. (update labels manually)')\r\n    print(filetypes_to_upload)\r\n    rd3.update_table(data=filetypes_to_upload, entity='rd3_typeFile')\r\nelse:\r\n    print('No new filetypes')\r\n\r\n\r\n# process: sequenceTypes\r\n# using the reference entity `seqType`, identify potential new cases in the\r\n# experiment staging entity. 
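A small illustrative\r\n# example (hypothetical values, not taken from the RD3 data): identify_new_lookups([{'identifier': 'WGS'}], 'identifier', ['WGS', 'WXS'])\r\n# returns [{'id': 'WXS', 'label': 'WXS'}], i.e. only the values missing from the lookup come back.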
\r\nprint('References: Looking for new sequencing types...')\r\nnovelomics_seqtypes = flatten_attr(experiment, 'library_strategy', distinct=True)\r\nnew_rd3_seqtypes = identify_new_lookups(rd3_seqtypes, 'identifier', novelomics_seqtypes)\r\n\r\nif len(new_rd3_seqtypes):\r\n    print('Identified new seqType references:', len(new_rd3_seqtypes))\r\n    seqtypes_to_upload = prepare_new_lookups(data=new_rd3_seqtypes)\r\n    print('Importing new seqType lookups. (Update labels manually)')\r\n    print(seqtypes_to_upload)\r\n    rd3.update_table(data=seqtypes_to_upload, entity='rd3_seqType')\r\nelse:\r\n    print('No new sequencing types identified')\r\n\r\n\r\n# process Organisation\r\nprint(\"References: Looking for new submitting organisations...\")\r\norganisation_types = flatten_attr(shipment, 'organisation', distinct=True)\r\nnew_organisation_types = identify_new_lookups(rd3_organisations, 'name', organisation_types)\r\n\r\nif len(new_organisation_types):\r\n    print('Identified new organisations', len(new_organisation_types))\r\n    orgs_to_upload = prepare_new_lookups(data=new_organisation_types, id_name='name', label_name='identifier')\r\n    print('Importing new `organisations` (change labels manually)')\r\n    print(orgs_to_upload)\r\n    rd3.update_table(data = orgs_to_upload, entity='rd3_organisation')\r\nelse:\r\n    print('No new organisations identified')\r\n\r\n\r\n# process: European Reference Networks\r\nprint(\"References: Looking for new ERNs\")\r\nern_types = flatten_attr(shipment, 'ERN', distinct=True)\r\nnew_ern_types = identify_new_lookups(rd3_ERN, 'identifier', ern_types)\r\n\r\nif len(new_ern_types):\r\n    print('Identified new ERNs', len(new_ern_types))\r\n    print(new_ern_types)\r\n    print('Please fix these before importing...')\r\nelse:\r\n    print('No new ERN types to add')","sub_path":"python/novelomics/novelomics_mapping_00_lookups.py","file_name":"novelomics_mapping_00_lookups.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"610244310","text":"# -*- coding: utf-8 -*-\r\n# by digiteng...08.2020\r\n# \r\nfrom Renderer import Renderer\r\nfrom enigma import ePixmap, eTimer, ePicLoad\r\nfrom Components.AVSwitch import AVSwitch\r\nfrom Components.Pixmap import Pixmap\r\nfrom Components.config import config\r\nimport re\r\nfrom Tools.Directories import fileExists\r\n\r\ntry:\r\n\tpathLoc = config.plugins.xtraEvent.loc.value\r\nexcept:\r\n\tpass\r\n\r\nclass xtraPoster(Renderer):\r\n\r\n\tdef __init__(self):\r\n\t\tRenderer.__init__(self)\r\n\t\tself.delayPicTime = 100\r\n\t\tself.timer = eTimer()\r\n\t\tself.timer.callback.append(self.showPicture)\r\n\t\t\r\n\tdef applySkin(self, desktop, parent):\r\n\t\tattribs = self.skinAttributes[:]\r\n\t\tfor attrib, value in self.skinAttributes:\r\n\t\t\tif attrib == 'delayPic': # delay time(ms) for poster showing...\r\n\t\t\t\tself.delayPicTime = int(value)\r\n\t\tself.skinAttributes = attribs\r\n\t\treturn Renderer.applySkin(self, desktop, parent)\r\n\r\n\tGUI_WIDGET = ePixmap\r\n\tdef changed(self, what):\r\n\t\tif not self.instance:\r\n\t\t\treturn\r\n\t\telse:\r\n\t\t\tif what[0] != self.CHANGED_CLEAR:\r\n\t\t\t\tself.timer.start(self.delayPicTime, True)\r\n\r\n\tdef showPicture(self):\r\n\t\tevnt = ''\r\n\t\tpstrNm = ''\r\n\t\tevntNm = ''\r\n\t\ttry:\r\n\t\t\tevent = self.source.event\r\n\t\t\tif event:\r\n\t\t\t\tevnt = event.getEventName()\r\n\t\t\t\tevntNm = re.sub(\"([\\(\\[]).*?([\\)\\]])|(: odc.\\d+)|(\\d+: odc.\\d+)|(\\d+ odc.\\d+)|(:)|( -(.*?).*)|(,)|!\", 
\"\", evnt).rstrip()\r\n\t\t\t\tpstrNm = \"{}xtraEvent/poster/{}.jpg\".format(pathLoc, evntNm)\r\n\t\t\t\tif fileExists(pstrNm):\r\n\t\t\t\t\tsize = self.instance.size()\r\n\t\t\t\t\tself.picload = ePicLoad()\r\n\t\t\t\t\tsc = AVSwitch().getFramebufferScale()\r\n\t\t\t\t\tif self.picload:\r\n\t\t\t\t\t\tself.picload.setPara((size.width(), size.height(), sc[0], sc[1], False, 1, '#00000000'))\r\n\t\t\t\t\tresult = self.picload.startDecode(pstrNm, 0, 0, False)\r\n\t\t\t\t\tif result == 0:\r\n\t\t\t\t\t\tptr = self.picload.getData()\r\n\t\t\t\t\t\tif ptr != None:\r\n\t\t\t\t\t\t\tself.instance.setPixmap(ptr)\r\n\t\t\t\t\t\t\tself.instance.show()\r\n\t\t\t\t\tdel self.picload\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.instance.hide()\r\n\t\t\telse:\r\n\t\t\t\tself.instance.hide()\r\n\t\t\treturn\r\n\t\texcept:\r\n\t\t\tpass\r\n","sub_path":"xtraEvent plugin/Renderer/xtraPoster.py","file_name":"xtraPoster.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"206016030","text":"import numpy as np\nimport torch\n\ndef train_data():\n\t\t\t\n context = open(\"./data/train/train.txt.source.txt\").readlines()\n for i in range(len(context)):\n context[i] = context[i].split()\n\n question = open('./data/train/train.txt.target.txt').readlines()\n for i in range(len(question)):\n question[i] = question[i].split()\n\n ans_pos = open('./data/train/train.txt.bio').readlines()\n answer = []\n answer_pointer = []\n for i in range(len(ans_pos)):\n answer.append([])\n answer_pointer.append([])\n ans_pos[i] = ans_pos[i].split()\n ans_s = -1\n ans_e = -1\n for j in range(len(ans_pos[i])):\n \n if ans_pos[i][j] != 'O' :\n answer[i].append(context[i][j])\n ans_e = j\n if ans_s == -1:\n ans_s = j\n answer_pointer[i].append(j)\n answer_pointer[i].append(ans_e) \n\n return context, question, answer, answer_pointer\n\ndef val_data():\n\t\t\t\n context = open(\"./data/dev/dev.txt.shuffle.dev.source.txt\").readlines()\n for i in range(len(context)):\n context[i] = context[i].split()\n\n question = open('./data/dev/dev.txt.shuffle.dev.target.txt').readlines()\n for i in range(len(question)):\n question[i] = question[i].split()\n\n ans_pos = open('./data/dev/dev.txt.shuffle.dev.bio').readlines()\n answer = []\n answer_pointer = []\n for i in range(len(ans_pos)):\n answer.append([])\n answer_pointer.append([])\n ans_pos[i] = ans_pos[i].split()\n ans_s = -1\n ans_e = -1\n for j in range(len(ans_pos[i])):\n \n if ans_pos[i][j] != 'O' :\n answer[i].append(context[i][j])\n ans_e = j\n if ans_s == -1:\n ans_s = j\n answer_pointer[i].append(j)\n answer_pointer[i].append(ans_e) \n\n return context, question, answer, answer_pointer\n\ndef embedding_weight():\n\tf = open('glove.6B.100d.txt').readlines()\n\tdim = len(f[0].split()) - 1\n\tweight = []\n\tword2id = {}\n\tid2word = []\n\n\tnp.random.seed(7)\n\t# pad, sos, eos, unk----------\n\n\tword2id['PAD'] = 0\n\tword2id['SOS'] = 1\n\tword2id['EOS'] = 2\n\tword2id['UNK'] = 3\n\tid2word.append('PAD')\n\tid2word.append('S0S')\n\tid2word.append('EOS')\n\tid2word.append('UNK')\n\n\tweight.append(np.random.randn(dim))\n\tweight.append(np.random.randn(dim))\n\tweight.append(np.random.randn(dim))\n\tweight.append(np.random.randn(dim))\n\t# pad, sos, eos, unk --------\n\n\n\t\n\tfor i in range(len(f)):\n\t\tf[i] = f[i].split()\n\t\tvector_element = np.array(f[i][1:]).astype(np.float32)\n\t\tword2id[f[i][0]] = int(i+4)\n\t\tid2word.append(f[i][0])\n\t\tweight.append(vector_element)\n\tweight = 
np.array(weight).reshape(-1, dim)\n\t#print(weight.shape)\n\t\n\tnp.save('weight.npy', weight)\n\tnp.save('word2id.npy', word2id)\n\tnp.save('id2word.npy', id2word)\n \ndef max_length(x):\n max_len = 0\n for i, sentence in enumerate(x):\n if len(sentence) > max_len:\n max_len = len(sentence)\n return max_len\n\ndef data_reduction(val=False):\n \n context, question, answer, answer_pointer = train_data()\n if val:\n context, question, answer, answer_pointer = val_data()\n count = 0\n index_c = []\n for i in range(len(context)):\n if len(context[i]) <= 100:\n count+=1\n index_c.append(i)\n #print(count/len(context)) \n count = 0\n index_q = []\n for i in range(len(question)):\n if len(question[i]) <= 20:\n count+=1\n index_q.append(i)\n #print(count/len(question))\n \n count = 0\n index_a = []\n for i in range(len(answer)):\n if len(answer[i]) <= 20:\n count+=1\n index_a.append(i)\n #print(count/len(answer))\n \n print(len(set(index_c)&set(index_q)&set(index_a))/len(context))\n set_i = set(index_c)&set(index_q)&set(index_a)\n index = [i for i in set_i]\n context_new = []\n question_new = []\n answer_new = []\n answer_pointer_new = []\n \n for i in range(len(index)):\n context_new.append(context[index[i]])\n question_new.append(question[index[i]])\n answer_new.append(answer[index[i]])\n answer_pointer_new.append(answer_pointer[index[i]])\n \n \n return context_new, question_new, answer_new, answer_pointer_new\ndef mask(sentences_ids):\n mask = np.zeros(sentences_ids.shape)\n for i in range(len(sentences_ids)):\n for j in range(len(sentences_ids[0])):\n if sentences_ids[i][j] != 0:\n mask[i][j] = 1\n return mask\n \ndef id_sentence(sentences, max_len, shortlist=False):\n ids= []\n if False == shortlist:\n \n\n word2id = np.load(\"word2id.npy\").item()\n\n for i, sentence in enumerate(sentences):\n ids.append(np.zeros(max_len))\n for j, word in enumerate(sentence):\n if word in word2id:\n ids[i][j] = word2id[word]\n else:\n ids[i][j] = word2id[\"UNK\"]\n ids[i][len(sentence)] = int(2)\n \n ids = np.array(ids).reshape(-1, max_len).astype(np.int32)\n else:\n \n\n word2id = np.load(\"word2id_shortlist.npy\").item()\n\n for i, sentence in enumerate(sentences):\n ids.append(np.zeros(max_len))\n for j, word in enumerate(sentence):\n if word in word2id:\n ids[i][j] = word2id[word]\n else:\n ids[i][j] = word2id[\"UNK\"]\n ids[i][len(sentence)] = int(2)\n\n ids = np.array(ids).reshape(-1, max_len).astype(np.int32)\n \n return ids\ndef dynamic_id_sentence(sentences, shortlist=False): # different length\n ids = []\n word2id = None\n if shortlist:\n word2id = np.load(\"word2id_shortlist.npy\").item()\n else:\n word2id = np.load(\"word2id.npy\").item()\n \n for i, sentence in enumerate(sentences):\n ids.append([])\n for j, word in enumerate(sentence):\n if word in word2id:\n ids[i].append(word2id[word])\n else:\n ids[i].append(word2id[\"UNK\"])\n ids[i].append(int(2)) \n return ids\n \ndef shortlist():\n \n context, question, answer, answer_pointer = train_data()\n \n count = {}\n\n for i, sentence in enumerate(question):\n for j , word in enumerate(sentence):\n if word not in count:\n count[word] = 0\n else:\n count[word] += 1\n \n x = sorted(count.items(), key = lambda item:item[1]) #(word, num)\n \n x_top2000 = x[-2000:]\n \n word2id = {}\n word2id['PAD'] = 0\n word2id['SOS'] = 1\n word2id['EOS'] = 2\n word2id['UNK'] = 3\n id2word=['PAD', 'SOS', 'EOS', 'UNK']\n \n weight = np.zeros((2004, 100))\n\n \n word2id_encoder = np.load(\"word2id.npy\").item()\n weight_encoder = np.load(\"weight.npy\")\n \n for i 
in range(4):\n weight[i] = weight_encoder[i]\n print(weight.shape)\n np.random.seed(0)\n i = 3\n for word, item in x_top2000:\n i+=1\n word2id[word] = i\n id2word.append(word)\n \n if word in word2id_encoder:\n i_encoder = word2id_encoder[word]\n weight[i] = weight_encoder[i_encoder]\n else:\n weight[i] = np.random.randn(100)\n \n np.save('word2id_shortlist.npy', word2id)\n np.save('weight_shortlist.npy', weight)\n np.save('id2word_shortlist.npy', id2word)","sub_path":"data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":7481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"162305602","text":"'''\n\nLink : https://leetcode.com/problems/letter-combinations-of-a-phone-number/description/\n\nQuestion 17. Letter Combinations of a Phone Number\n\nGiven a digit string, return all possible letter combinations that the number could represent.\n\nA mapping of digit to letters (just like on the telephone buttons) is given below.\n\n2 - abc, 3-def, 4- ghi, 5-jkl, 6-mno, 7-pqrs, 8-tuv, 9-wxyz\n\nInput:Digit string \"23\"\nOutput: [\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"].\nNote:\nAlthough the above answer is in lexicographical order, your answer could be in any order you want.\n'''\n\n\n'''\nSolution:\n1. Backtrack : \n - Time Complexity : O(n^2)\n - Run Time : 35ms\n'''\n\n\nclass Solution(object):\n def letterCombinations(self, digits):\n \"\"\"\n :type digits: str\n :rtype: List[str]\n \"\"\"\n\n ans = []\n l = ['', '', 'abc', 'def', 'ghi', 'jkl', 'mno', 'pqrs', 'tuv', 'wxyz']\n if ('#' in digits) or ('1' in digits) or ('*' in digits):\n return ans\n ans = self.get_combinations(digits, l, ans, [], 0)\n return ans\n\n def get_combinations(self, digits, l, ans, temp_ans, index):\n if len(temp_ans) == 0 and len(digits) == 0:\n return ans\n elif len(digits) == index and len(temp_ans) == len(digits):\n ans.append(''.join(temp_ans))\n return ans\n for i in xrange(index, len(digits)):\n string = l[int(digits[i])]\n for j in string:\n temp_ans.append(j)\n ans = self.get_combinations(digits, l, ans, temp_ans, i + 1)\n temp_ans.pop()\n return ans","sub_path":"Algorithms/LeetCode/17-LetterCombinationsofaPhoneNumber.py","file_name":"17-LetterCombinationsofaPhoneNumber.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"95721512","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2019 Chris Caron \n# All rights reserved.\n#\n# This code is licensed under the MIT License.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files(the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions :\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION 
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport re\nimport io\nimport os\nfrom .ConfigBase import ConfigBase\nfrom ..common import ConfigFormat\nfrom ..common import ConfigIncludeMode\nfrom ..AppriseLocale import gettext_lazy as _\n\n\nclass ConfigFile(ConfigBase):\n    \"\"\"\n    A wrapper for File based configuration sources\n    \"\"\"\n\n    # The default descriptive name associated with the service\n    service_name = _('Local File')\n\n    # The default protocol\n    protocol = 'file'\n\n    # Configuration file inclusion can only be of the same type\n    allow_cross_includes = ConfigIncludeMode.STRICT\n\n    def __init__(self, path, **kwargs):\n        \"\"\"\n        Initialize File Object\n\n        headers can be a dictionary of key/value pairs that you want to\n        additionally include as part of the server headers to post with\n\n        \"\"\"\n        super(ConfigFile, self).__init__(**kwargs)\n\n        # Store our file path as it was set\n        self.path = os.path.abspath(os.path.expanduser(path))\n\n        # Update the config path to be relative to our file we just loaded\n        self.config_path = os.path.dirname(self.path)\n\n        return\n\n    def url(self, privacy=False, *args, **kwargs):\n        \"\"\"\n        Returns the URL built dynamically based on specified arguments.\n        \"\"\"\n\n        # Prepare our cache value\n        if isinstance(self.cache, bool) or not self.cache:\n            cache = 'yes' if self.cache else 'no'\n\n        else:\n            cache = int(self.cache)\n\n        # Define any URL parameters\n        params = {\n            'encoding': self.encoding,\n            'cache': cache,\n        }\n\n        if self.config_format:\n            # A format was enforced; make sure it's passed back with the url\n            params['format'] = self.config_format\n\n        return 'file://{path}{params}'.format(\n            path=self.quote(self.path),\n            params='?{}'.format(self.urlencode(params)) if params else '',\n        )\n\n    def read(self, **kwargs):\n        \"\"\"\n        Perform retrieval of the configuration based on the specified request\n        \"\"\"\n\n        response = None\n\n        try:\n            if self.max_buffer_size > 0 and \\\n                    os.path.getsize(self.path) > self.max_buffer_size:\n\n                # Content exceeds maximum buffer size\n                self.logger.error(\n                    'File size exceeds maximum allowable buffer length'\n                    ' ({}KB).'.format(int(self.max_buffer_size / 1024)))\n                return None\n\n        except OSError:\n            # getsize() can throw this exception if the file is missing\n            # and/or simply isn't accessible\n            self.logger.error(\n                'File is not accessible: {}'.format(self.path))\n            return None\n\n        # Always call throttle before any server i/o is made\n        self.throttle()\n\n        try:\n            # Python 3 just supports open(), however to remain compatible with\n            # Python 2, we use the io module\n            with io.open(self.path, \"rt\", encoding=self.encoding) as f:\n                # Store our content for parsing\n                response = f.read()\n\n        except (ValueError, UnicodeDecodeError):\n            # A result of our strict encoding check; if we receive this\n            # then the file we're opening is not something we can\n            # understand the encoding of..\n\n            self.logger.error(\n                'File not using expected encoding ({}) : {}'.format(\n                    self.encoding, self.path))\n            return None\n\n        except (IOError, OSError):\n            # IOError is present for backwards compatibility with Python\n            # versions older than 3.3. 
>= 3.3 throw OSError now.\n\n # Could not open and/or read the file; this is not a problem since\n # we scan a lot of default paths.\n self.logger.error(\n 'File can not be opened for read: {}'.format(self.path))\n return None\n\n # Detect config format based on file extension if it isn't already\n # enforced\n if self.config_format is None and \\\n re.match(r'^.*\\.ya?ml\\s*$', self.path, re.I) is not None:\n\n # YAML Filename Detected\n self.default_config_format = ConfigFormat.YAML\n\n # Return our response object\n return response\n\n @staticmethod\n def parse_url(url):\n \"\"\"\n Parses the URL so that we can handle all different file paths\n and return it as our path object\n\n \"\"\"\n\n results = ConfigBase.parse_url(url, verify_host=False)\n if not results:\n # We're done early; it's not a good URL\n return results\n\n match = re.match(r'[a-z0-9]+://(?P[^?]+)(\\?.*)?', url, re.I)\n if not match:\n return None\n\n results['path'] = ConfigFile.unquote(match.group('path'))\n return results\n","sub_path":"apprise/config/ConfigFile.py","file_name":"ConfigFile.py","file_ext":"py","file_size_in_byte":6070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"503809301","text":"from django import template\nfrom django.urls import reverse\n\nregister = template.Library()\n\n\n@register.inclusion_tag('menu.html', takes_context=True)\ndef show_menu(context):\n menu_items = [\n {\n 'path': '/2017/',\n # 'icon': 'fa-home',\n 'title': 'Home',\n }, {\n 'path': reverse('about'),\n 'title': 'About'\n }, {\n 'path': reverse('team_list'),\n 'title': 'Team'\n },\n {\n 'path': reverse('sponsors_list'),\n 'title': 'Sponsors'\n }, {\n 'path': reverse('proposal_form', kwargs=dict(key='financial-aid')),\n 'title': 'Financial Aid'\n }, {\n 'path': reverse('about_code'),\n 'title': 'Code of Conduct'\n },\n\n # {\n # 'path': reverse('about'),\n # 'title': 'Event',\n # 'menu': [\n #\n # ]\n # }, #{\n # 'path': reverse('proposals'),\n # 'title': 'Call for Papers',\n # }\n ]\n\n path = context['request'].path\n\n for root in menu_items:\n if 'menu' not in root:\n continue\n\n for child in root['menu']:\n if child['path'] == path:\n root['selected'] = child['selected'] = True\n\n return {\n 'menu_items': menu_items\n }\n","sub_path":"pyconcz_2017/common/templatetags/menu_tags.py","file_name":"menu_tags.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"53057719","text":"from tkinter import *\r\nfrom tkinter import messagebox as mb\r\nfrom math import sqrt, fabs\r\n\r\npoints = []\r\npoints_normal = []\r\n\r\n\r\n\r\nCANVAS_WIDTH = 651\r\nCANVAS_HEIGHT = 551\r\nRECTANGLE = 1\r\nWORK_WIDTH = CANVAS_WIDTH - RECTANGLE\r\nWORK_HEIGHT = CANVAS_HEIGHT - RECTANGLE\r\nPREC = 2\r\n\r\ndef clearCanvas() -> None:\r\n points.clear()\r\n points_normal.clear()\r\n canvas.delete(\"all\")\r\n canvas.create_line(0, WORK_HEIGHT/2, WORK_WIDTH, WORK_HEIGHT/2, fill='black') # OX\r\n canvas.create_line(WORK_WIDTH/2, 0, WORK_WIDTH/2, WORK_HEIGHT, fill='black') # OY\r\n\r\n for i in range(5, WORK_WIDTH+1, 10):\r\n canvas.create_line(i, WORK_HEIGHT/2-2, i, WORK_HEIGHT/2+2, fill='black')\r\n\r\n for i in range(5, WORK_HEIGHT+1, 10):\r\n canvas.create_line(WORK_WIDTH/2-2, i, WORK_WIDTH/2+2, i, fill='black')\r\n\r\n canvas.create_text(WORK_WIDTH-10, WORK_HEIGHT/2+10, text='X')\r\n canvas.create_text(WORK_WIDTH/2-10, 10, text='Y')\r\n canvas.create_text(WORK_WIDTH/2-10, WORK_HEIGHT/2+10, 
text='O')\r\n canvas.create_rectangle(2, 2, CANVAS_WIDTH, CANVAS_HEIGHT, width=RECTANGLE)\r\n\r\n\r\ndef getLen(A: tuple, B: tuple) -> float:\r\n return sqrt((B[0]-A[0])*(B[0]-A[0]) + (B[1]-A[1])*(B[1]-A[1]))\r\n\r\n\r\ndef getLambda(A: tuple, B: tuple, C: tuple) -> float:\r\n return getLen(A, B) / getLen(A, C)\r\n\r\n\r\ndef getSquare(A: tuple, B: tuple, C: tuple) -> float:\r\n det = (A[0]-C[0])*(B[1]-C[1]) - (B[0]-C[0])*(A[1]-C[1])\r\n return fabs(det/2)\r\n\r\n\r\ndef getDiffSquare(A: tuple, B: tuple, C: tuple, L: tuple) -> float:\r\n return abs(getSquare(A, B, L) - getSquare(A, C, L))\r\n\r\n\r\ndef getCoord(A: tuple, B: tuple, lmbd: float) -> tuple:\r\n x = round((A[0] + lmbd * B[0]) / (1 + lmbd), 1)\r\n y = round((A[1] + lmbd * B[1]) / (1 + lmbd), 1)\r\n return (x, y,)\r\n\r\n\r\ndef checkError(x: str, y: str) -> bool:\r\n try:\r\n x = float(x)\r\n y = float(y)\r\n return True\r\n except:\r\n mb.showerror('Ошибка', 'Введены некорректные данные!')\r\n return False\r\n\r\n\r\ndef addPoint(event, src: str = 'entry', point: tuple = ()) -> None:\r\n global points\r\n\r\n if src == 'entry':\r\n x, y = eAddX.get(), eAddY.get()\r\n if checkError(x, y):\r\n point = denormalizeCoord(float(x), float(y))\r\n point_n = (float(x), float(y),)\r\n eAddX.delete(0, END)\r\n eAddY.delete(0, END)\r\n else:\r\n return\r\n else:\r\n point_n = normalizeCoord(point[0], point[1])\r\n\r\n if (0 <= point[0] <= WORK_WIDTH) and (0 <= point[1] <= WORK_HEIGHT):\r\n if point not in points:\r\n points.append(point)\r\n points_normal.append(point_n)\r\n else:\r\n mb.showwarning('Предупреждение', 'Данная точка уже была введена')\r\n return\r\n else:\r\n mb.showerror('Ошибка', 'Некорректный диапазон!')\r\n return\r\n\r\n if src == 'entry':\r\n canvas.create_line(point[0], point[1], point[0], point[1], width=3, fill='red',\r\n capstyle=ROUND, smooth=TRUE, splinesteps=1)\r\n\r\n eAmountPoints.configure(state=NORMAL)\r\n eAmountPoints.delete(0, END)\r\n eAmountPoints.insert(0, str(len(points)))\r\n eAmountPoints.configure(state=DISABLED)\r\n\r\n\r\ndef solution() -> None:\r\n ans_points = []\r\n ans_points_n = []\r\n ans_diff = -1\r\n ans_diff_n = -1\r\n L_best = tuple()\r\n L_best_n = tuple()\r\n \r\n\r\n if len(points) < 3:\r\n mb.showerror('Ошибка', 'Введите хотя бы три точки!')\r\n return\r\n\r\n for i in range(0, len(points)):\r\n for j in range(i+1, len(points)):\r\n for k in range(j+1, len(points)):\r\n curr_points = [points[i], points[j], points[k]]\r\n curr_points_n = [points_normal[i], points_normal[j], points_normal[k]]\r\n for p in range(3):\r\n A, B, C = curr_points[p], curr_points[(p + 1) % 3], curr_points[(p + 2) % 3]\r\n A_n, B_n, C_n = curr_points_n[p], curr_points_n[(p + 1) % 3], curr_points_n[(p + 2) % 3]\r\n if getSquare(A, B, C) < 1:\r\n break\r\n if ((getLen(A,B)) + getLen(A,C) - getLen(B,C) < PREC) or ((getLen(A,B)) + getLen(B,C) - getLen(A,C) < PREC) or\\\r\n ((getLen(B,C)) + getLen(A,C) - getLen(A,B) < PREC):\r\n break\r\n \r\n lmbd = getLambda(A, B, C)\r\n L = getCoord(B, C, lmbd)\r\n L_n = getCoord(B_n, C_n, lmbd)\r\n diff = getDiffSquare(A, B, C, L)\r\n diff_n = getDiffSquare(A_n, B_n, C_n, L_n)\r\n if (diff < ans_diff) or (ans_diff == -1):\r\n ans_points = [A, B, C]\r\n ans_points_n = [A_n, B_n, C_n]\r\n ans_diff = diff\r\n ans_diff_n = diff_n\r\n L_best = L\r\n L_best_n = L_n\r\n\r\n if len(ans_points) < 3:\r\n mb.showerror('Ошибка', 'Точки лежат на одной прямой!')\r\n return\r\n\r\n for q in (eBestPAX, eBestPAY, eBestPBY, eBestPCX, eBestPCY, eBestPBX, eDiff):\r\n 
q.configure(state=NORMAL)\r\n q.delete(0, END)\r\n\r\n Ax, Ay = ans_points[0][0], ans_points[0][1]\r\n Bx, By = ans_points[1][0], ans_points[1][1]\r\n Cx, Cy = ans_points[2][0], ans_points[2][1]\r\n Lx, Ly = L_best[0], L_best[1]\r\n\r\n canvas.create_line(Ax, Ay, Bx, By, width=1, fill='green')\r\n canvas.create_line(Cx, Cy, Bx, By, width=1, fill='green')\r\n canvas.create_line(Ax, Ay, Cx, Cy, width=1, fill='green')\r\n canvas.create_line(Ax, Ay, Lx, Ly, width=1, fill='blue')\r\n canvas.create_line(Ax, Ay, Ax, Ay, width=5, fill='red',\r\n capstyle=ROUND, smooth=TRUE, splinesteps=1)\r\n canvas.create_line(Bx, By, Bx, By, width=5, fill='red',\r\n capstyle=ROUND, smooth=TRUE, splinesteps=1)\r\n canvas.create_line(Cx, Cy, Cx, Cy, width=5, fill='red',\r\n capstyle=ROUND, smooth=TRUE, splinesteps=1)\r\n canvas.create_line(Lx, Ly, Lx, Ly, width=5, fill='red',\r\n capstyle=ROUND, smooth=TRUE, splinesteps=1)\r\n \r\n Ax_n, Ay_n = ans_points_n[0][0], ans_points_n[0][1]\r\n Bx_n, By_n = ans_points_n[1][0], ans_points_n[1][1]\r\n Cx_n, Cy_n = ans_points_n[2][0], ans_points_n[2][1]\r\n Lx_n, Ly_n = L_best_n[0], L_best_n[1]\r\n\r\n # Ax, Ay = normalizeCoord(Ax, Ay)\r\n # Bx, By = normalizeCoord(Bx, By)\r\n # Cx, Cy = normalizeCoord(Cx, Cy)\r\n # Lx, Ly = normalizeCoord(Lx, Ly)\r\n\r\n for i, j in zip((Ax_n, Ay_n, Bx_n, By_n, Cx_n, Cy_n), (eBestPAX, eBestPAY, eBestPBX, eBestPBY, eBestPCX, eBestPCY)):\r\n j.insert(0, i)\r\n\r\n ans_diff_n = getDiffSquare((Ax_n, Ay_n,), (Bx_n, By_n,), (Cx_n, Cy_n,), (Lx_n, Ly_n,))\r\n eDiff.insert(0, round(ans_diff_n, 1))\r\n\r\n for q in (eBestPAX, eBestPAY, eBestPBY, eBestPCX, eBestPCY, eBestPBX, eDiff):\r\n q.configure(state=DISABLED)\r\n\r\n\r\ndef normalizeCoord(x: int, y: int) -> (float, float,):\r\n x = round((x - WORK_WIDTH/2)*100/WORK_WIDTH, 1)\r\n y = round(-(y - WORK_HEIGHT/2)*100/WORK_HEIGHT, 1)\r\n return (x, y,)\r\n\r\n\r\ndef denormalizeCoord(x: float, y: float) -> (int, int,):\r\n x = round(x * WORK_WIDTH / 100 + WORK_WIDTH/2)\r\n y = round(-y * WORK_WIDTH / 100 + WORK_HEIGHT/2)\r\n\r\n return (x, y,)\r\n\r\n\r\ndef getPos(event):\r\n x, y = normalizeCoord(event.x, event.y)\r\n\r\n eXPos.configure(state=NORMAL)\r\n eYPos.configure(state=NORMAL)\r\n\r\n eXPos.delete(0, END)\r\n eYPos.delete(0, END)\r\n\r\n eXPos.insert(0, str(x))\r\n eYPos.insert(0, str(y))\r\n\r\n eXPos.configure(state=DISABLED)\r\n eYPos.configure(state=DISABLED)\r\n\r\n\r\ndef setPoint(event) -> None:\r\n x, y = event.x, event.y\r\n canvas.create_line(x, y, x, y, width=3, fill='red',\r\n capstyle=ROUND, smooth=TRUE, splinesteps=1)\r\n addPoint('', 'canvas', (x, y,))\r\n\r\n\r\nroot = Tk()\r\nroot.title(\"Лабораторная работа #4\")\r\nroot.geometry(\"1000x600\")\r\nroot.resizable(0, 0)\r\n\r\n\r\nfWorkDir = Frame(root)\r\nfWorkDir.pack(side=LEFT, anchor='n')\r\n\r\nfAddPoint = Frame(fWorkDir)\r\nlAddPoint = Label(fAddPoint, text='Введите точку:', font=\"Segoe 13 bold\")\r\nlAddX = Label(fAddPoint, text='X:', font=\"Segoe 12\")\r\nlAddY = Label(fAddPoint, text='Y:', font=\"Segoe 12\")\r\neAddX = Entry(fAddPoint, font=18, width=8)\r\neAddY = Entry(fAddPoint, font=18, width=8)\r\nbAddPoint = Button(fAddPoint, text='Add', command=lambda x='entry': addPoint(\"\", x), font='Cambria 12 bold',\r\n width=10, height=2, bg='black', activebackground=\"#0B5FA4\", fg='white')\r\n\r\nfAddPoint.pack(side=TOP, anchor='w', pady=20)\r\nlAddPoint.pack(side=TOP, pady=10)\r\nlAddX.pack(side=LEFT, padx=5)\r\neAddX.pack(side=LEFT, ipady=8)\r\nlAddY.pack(side=LEFT, padx=5)\r\neAddY.pack(side=LEFT, 
ipady=8)\r\nbAddPoint.pack(padx=10)\r\n\r\n\r\nfAmountPoints = Frame(fWorkDir)\r\nlAmountPoints = Label(\r\n fAmountPoints, text='Точек введено:', font=\"Segoe 13 bold\")\r\neAmountPoints = Entry(fAmountPoints, font=18, width=8)\r\neAmountPoints.insert(0, len(points))\r\neAmountPoints.configure(state=DISABLED)\r\n\r\n\r\nfAmountPoints.pack(pady=5)\r\nlAmountPoints.pack()\r\neAmountPoints.pack(ipady=8)\r\n\r\nbSearch = Button(fWorkDir, text='Search', command=solution, font='Cambria 12 bold',\r\n width=20, height=2, bg='black', activebackground=\"#0B5FA4\", fg='white')\r\nbSearch.pack(pady=20)\r\n\r\n\r\nfAnswer = Frame(fWorkDir)\r\nLabel(fAnswer, text='Answer:', font=\"Segoe 12 bold\").pack(pady=5)\r\nfBestPoints = Frame(fAnswer)\r\nfAnswer.pack(pady=5)\r\nfBestPoints.pack(pady=5)\r\n\r\nfBestAPoint = Frame(fBestPoints)\r\nlBestAPoint = Label(fBestAPoint, text='A:', font=\"Segoe 12 bold\")\r\n\r\nfBestPALabel = Frame(fBestAPoint)\r\nlBestPAX = Label(fBestPALabel, text='X:', font=\"Segoe 12 bold\")\r\nlBestPAY = Label(fBestPALabel, text='Y:', font=\"Segoe 12 bold\")\r\n\r\nfBestPAEntry = Frame(fBestAPoint)\r\neBestPAX = Entry(fBestPAEntry, font=18, width=5, state=DISABLED)\r\neBestPAY = Entry(fBestPAEntry, font=18, width=5, state=DISABLED)\r\n\r\nfBestAPoint.pack(side=LEFT)\r\nlBestAPoint.pack()\r\nfBestPALabel.pack()\r\nlBestPAX.pack(side=LEFT, padx=5)\r\nlBestPAY.pack(side=LEFT, padx=5)\r\nfBestPAEntry.pack()\r\neBestPAX.pack(side=LEFT, ipady=8)\r\neBestPAY.pack(side=LEFT, ipady=8)\r\n\r\n\r\nfBestBPoint = Frame(fBestPoints)\r\nlBestBPoint = Label(fBestBPoint, text='B:', font=\"Segoe 12 bold\")\r\n\r\nfBestPBLabel = Frame(fBestBPoint)\r\nlBestPBX = Label(fBestPBLabel, text='X:', font=\"Segoe 12 bold\")\r\nlBestPBY = Label(fBestPBLabel, text='Y:', font=\"Segoe 12 bold\")\r\n\r\nfBestPBEntry = Frame(fBestBPoint)\r\neBestPBX = Entry(fBestPBEntry, font=18, width=5, state=DISABLED)\r\neBestPBY = Entry(fBestPBEntry, font=18, width=5, state=DISABLED)\r\n\r\nfBestBPoint.pack(side=LEFT)\r\nlBestBPoint.pack()\r\nfBestPBLabel.pack()\r\nlBestPBX.pack(side=LEFT, padx=5)\r\nlBestPBY.pack(side=LEFT, padx=5)\r\nfBestPBEntry.pack()\r\neBestPBX.pack(side=LEFT, ipady=8)\r\neBestPBY.pack(side=LEFT, ipady=8)\r\n\r\n\r\nfBestCPoint = Frame(fBestPoints)\r\nlBestCPoint = Label(fBestCPoint, text='C:', font=\"Segoe 12 bold\")\r\n\r\nfBestPCLabel = Frame(fBestCPoint)\r\nlBestPCX = Label(fBestPCLabel, text='X:', font=\"Segoe 12 bold\")\r\nlBestPCY = Label(fBestPCLabel, text='Y:', font=\"Segoe 12 bold\")\r\n\r\nfBestPCEntry = Frame(fBestCPoint)\r\neBestPCX = Entry(fBestPCEntry, font=18, width=5, state=DISABLED)\r\neBestPCY = Entry(fBestPCEntry, font=18, width=5, state=DISABLED)\r\n\r\nfBestCPoint.pack(side=LEFT)\r\nlBestCPoint.pack()\r\nfBestPCLabel.pack()\r\nlBestPCX.pack(side=LEFT, padx=5)\r\nlBestPCY.pack(side=LEFT, padx=5)\r\nfBestPCEntry.pack()\r\neBestPCX.pack(side=LEFT, ipady=8)\r\neBestPCY.pack(side=LEFT, ipady=8)\r\n\r\n\r\nfDiff = Frame(fAnswer)\r\nlDiff = Label(fDiff, text='Best Difference: ', font=\"Segoe 12 bold\")\r\neDiff = Entry(fDiff, font=18, width=10, state=DISABLED)\r\n\r\nfDiff.pack(pady=15)\r\nlDiff.pack(side=LEFT)\r\neDiff.pack(side=LEFT, ipady=8)\r\n\r\n\r\nfCanvas = Frame(root)\r\nfCanvas.pack(side=LEFT)\r\n\r\ncanvas = Canvas(fCanvas, width=CANVAS_WIDTH, height=CANVAS_HEIGHT, bg='white')\r\ncanvas.bind(\"<Button-1>\", setPoint)\r\ncanvas.bind(\"<Motion>\", getPos)\r\ncanvas.pack()\r\n\r\nclearCanvas()\r\n\r\nfPosition = Frame(fCanvas)\r\n\r\nlXPos = Label(fPosition, text='X:', font=\"Segoe 7 bold\")\r\nlYPos = 
Label(fPosition, text='Y:', font=\"Segoe 7 bold\")\r\neXPos = Entry(fPosition, font=4, width=8, state=DISABLED)\r\neYPos = Entry(fPosition, font=4, width=8, state=DISABLED)\r\nbclearCanvas = Button(fPosition, text='Clear', command=clearCanvas, font='Cambria 12 bold',\r\n width=5, height=1, bg='black', activebackground=\"#0B5FA4\", fg='white')\r\n\r\n\r\n\r\nfPosition.pack()\r\nlXPos.pack(side=LEFT)\r\neXPos.pack(side=LEFT)\r\nlYPos.pack(side=LEFT)\r\neYPos.pack(side=LEFT)\r\nbclearCanvas.pack(side=RIGHT, anchor='e')\r\n\r\nroot.bind('<Return>', addPoint)\r\n\r\nroot.mainloop()\r\n","sub_path":"second_sem/lab_04/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"52698229","text":"\nimport glob\n\nid = 0\nfamDic = {}\ntypeDic = {}\n\nfor file in glob.glob('*.elem_sorted.csv'):\n\n with open(file,'r') as f:\n next(f)\n for line in f:\n if len(line) > 3 and '###' in line:\n sp = line.split('\\t')\n id += 1\n start = sp[5]\n stop = sp[6]\n chrm = sp[4]\n fam = sp[9]\n type = sp[10]\n if chrm not in famDic:\n famDic[chrm] = {}\n\n if fam not in famDic[chrm]:\n\n famDic[chrm][fam] = [int(start)] \n else:\n famDic[chrm][fam].append(int(start))\n\n if chrm not in typeDic:\n typeDic[chrm] = {}\n if type not in typeDic[chrm]:\n\n typeDic[chrm][type] = [int(start)]\n else:\n typeDic[chrm][type].append(int(start))\n\n\nperoGenes = {}\n\nwith open('/n/holylfs03/LABS/hoekstra_lab/Lab/PUBLIC/ANNOTATIONS/Pman2.1_chr_NCBI/Pman2.1_chr_NCBI.corrected.merged-with-Apollo.Aug19.sorted.gff3','r') as f:\n for line in f:\n if '#' not in line:\n sp = line.split('\\t')\n if 'gene' in sp[2]:\n \n if int(sp[4]) > int(sp[3]):\n if sp[0] not in peroGenes:\n peroGenes[sp[0]] = [[sp[-1].split(';')[1].split('=')[-1],int(sp[3]),int(sp[4])]] \n else:\n peroGenes[sp[0]].append([sp[-1].split(';')[1].split('=')[-1],int(sp[3]),int(sp[4])])\n else:\n if sp[0] not in peroGenes:\n peroGenes[sp[0]] = [[sp[-1].split(';')[1].split('=')[-1],int(sp[4]),int(sp[3])]]\n else:\n peroGenes[sp[0]].append([sp[-1].split(';')[1].split('=')[-1],int(sp[4]),int(sp[3])])\n\nwith open('population.txt','w') as f:\n for chrm in peroGenes:\n for gene in peroGenes[chrm]:\n f.write(gene[0])\n\noutput = 'TEGenicInsertions'\n\nprint(famDic)\n\nfor chrm in peroGenes:\n for gene in peroGenes[chrm]:\n for fam in famDic[chrm]:\n for i in famDic[chrm][fam]:\n if gene[1] < i and i < gene[2]:\n with open('{0}/allTEGenicInsertions.txt'.format(output),'a') as f:\n f.write(gene[0])\n with open('{0}/fam/{1}_GenicInsertions.txt'.format(output,fam.replace('/','_')),'a') as f:\n f.write(gene[0])\n \n for type in typeDic[chrm]:\n for i in typeDic[chrm][type]:\n\n if gene[1] < i and i < gene[2]:\n with open('{0}/type/{1}_GenicInsertions.txt'.format(output,type.replace('/','_')),'a') as f:\n f.write(gene[0])\n\n","sub_path":"associateTEs.py","file_name":"associateTEs.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"116221566","text":"from re import template\nfrom kivy.app import App\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.label import Label\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.lang import Builder\nfrom kivy.properties import StringProperty\nfrom 
kivy.core.window import Window\nfrom kivy.factory import Factory\nfrom kivy.properties import ObjectProperty\nfrom kivy.uix.popup import Popup\n\nimport os\nimport json\nfrom kivy.config import Config\nConfig.set('graphics', 'width', '1200')\nConfig.set('graphics', 'height', '700')\nConfig.write()\n\nBuilder.load_string('''\n<displayResult>:\n Label:\n text: root.text\n font_size: 15\n text_size: self.width, None\n size_hint_y: None\n height: self.texture_size[1]\n<LoadDialog>:\n BoxLayout:\n size: root.size\n pos: root.pos\n orientation: \"vertical\"\n FileChooserListView:\n id: filechooser\n path: \"C:/dissertation/causcumber/scenarios/compare_interventions/features\"\n\n BoxLayout:\n size_hint_y: None\n height: 30\n Button:\n text: \"Cancel\"\n on_release: root.cancel()\n\n Button:\n text: \"Load\"\n on_release: root.load(filechooser.path, filechooser.selection)\n \n''')\n\n\nclass displayResult(ScrollView):\n text = StringProperty('')\n\nclass LoadDialog(FloatLayout):\n load = ObjectProperty(None)\n cancel = ObjectProperty(None)\n\nclass main(App):\n\n created_file = []\n loadfile = ObjectProperty(None)\n\n def __init__(self,**kwargs):\n super(main,self).__init__(**kwargs)\n os.chdir('compare_interventions')\n\n def build(self):\n\n Window.bind(on_request_close=self.on_request_close)\n \n Layout = BoxLayout(orientation = 'vertical')\n banner = Label(text='Causcumber', size_hint=(1, 0.1))\n Layout.add_widget(banner)\n\n displayLayout = GridLayout(cols=2, width=\"600dp\")\n\n resultLayout = GridLayout(cols=1, width=\"600dp\")\n self.select_feature_file = Button(text='Select feature file', size_hint=(1, 0.1)) # Choose feature file to run\n self.select_feature_file.bind(on_press=self.show_load)\n #self.select_feature_file = selectFile()\n resultLayout.add_widget(self.select_feature_file) \n self.Result = Label(text='Result', size_hint=(1, 0.1)) # Title\n resultLayout.add_widget(self.Result)\n self.display_result = displayResult(text='') # Display result\n resultLayout.add_widget(self.display_result) \n displayLayout.add_widget(resultLayout) \n\n inputLayout = GridLayout(cols=1, width=\"600dp\")\n self.choose_input_title = Label(text='Choose different input', size_hint=(1, 0.1)) # Title\n inputLayout.add_widget(self.choose_input_title) \n self.paremeter1 = Label(text='Parameter 1', size_hint=(1, 0.1)) #modify parameter 1\n inputLayout.add_widget(self.paremeter1) \n self.input1 = TextInput(text='', size_hint=(1, 0.5), multiline=False) \n inputLayout.add_widget(self.input1)\n self.paremeter2 = Label(text='Parameter 2', size_hint=(1, 0.1)) #modify parameter 2\n inputLayout.add_widget(self.paremeter2) \n self.input2 = TextInput(text='', size_hint=(1, 0.5), multiline=False) \n inputLayout.add_widget(self.input2) \n\n self.paremeter2 = Label(text='quar_period', size_hint=(1, 0.1)) #modify parameter 2\n inputLayout.add_widget(self.paremeter2) \n self.input3 = TextInput(text='', size_hint=(1, 0.5), multiline=False) \n inputLayout.add_widget(self.input3) \n self.paremeter2 = Label(text='n_days', size_hint=(1, 0.1)) #modify parameter 2\n inputLayout.add_widget(self.paremeter2) \n self.input4 = TextInput(text='', size_hint=(1, 0.5), multiline=False) \n inputLayout.add_widget(self.input4) \n self.paremeter2 = Label(text='pop_type', size_hint=(1, 0.1)) #modify parameter 2\n inputLayout.add_widget(self.paremeter2) \n self.input5 = TextInput(text='', size_hint=(1, 0.5), multiline=False) \n inputLayout.add_widget(self.input5) \n self.paremeter2 = Label(text='pop_size', size_hint=(1, 0.1)) #modify parameter 2\n 
inputLayout.add_widget(self.paremeter2) \n self.input6 = TextInput(text='', size_hint=(1, 0.5), multiline=False) \n inputLayout.add_widget(self.input6) \n self.paremeter2 = Label(text='pop_infected', size_hint=(1, 0.1)) #modify parameter 2\n inputLayout.add_widget(self.paremeter2) \n self.input7 = TextInput(text='', size_hint=(1, 0.5), multiline=False) \n inputLayout.add_widget(self.input7) \n self.paremeter2 = Label(text='location', size_hint=(1, 0.1)) #modify parameter 2\n inputLayout.add_widget(self.paremeter2) \n self.input8 = TextInput(text='', size_hint=(1, 0.5), multiline=False) \n inputLayout.add_widget(self.input8) \n self.paremeter2 = Label(text='interventions', size_hint=(1, 0.1)) #modify parameter 2\n inputLayout.add_widget(self.paremeter2) \n self.input9 = TextInput(text='', size_hint=(1, 0.5), multiline=False) \n inputLayout.add_widget(self.input9) \n displayLayout.add_widget(inputLayout)\n \n\n runBehave = Button(text='Run behave', size_hint=(1, 0.1)) # Run update function \n runBehave.bind(on_press=self.update)\n displayLayout.add_widget(runBehave) \n\n saveInput = Button(text='Save input', size_hint=(1, 0.1)) # save input as new feature file\n saveInput.bind(on_press=self.save_file)\n displayLayout.add_widget(saveInput) \n\n Layout.add_widget(displayLayout)\n\n return Layout\n\n def update(self, userInput):\n if os.path.isfile(\"results.json\") == True:\n json_file = open(\"results.json\")\n outputs = json.load(json_file)\n json_file.close() \n # Convert json to string\n data = json.dumps(outputs)\n result = data\n split_data = data.split()\n result = \"\"\n word_count = 0\n for split_data in split_data:\n result += split_data + \" \"\n word_count += 1\n if word_count == 6 or \".\" in split_data:\n result += \"\\n\"\n word_count = 0\n\n self.display_result.text = result\n else:\n self.display_result.text = \"Please select a feature file\" \n\n def save_file(self, instance):\n parameter_input1 = self.input1.text\n parameter_input2 = self.input2.text\n parameter_input3 = self.input3.text\n parameter_input4 = self.input4.text\n parameter_input5 = self.input5.text\n parameter_input6 = self.input6.text\n parameter_input7 = self.input7.text\n parameter_input8 = self.input8.text\n parameter_input9 = self.input9.text\n feature_file_name = \"compare_\" + parameter_input1 + \"_\" + parameter_input2 + \".feature\" #generate file name based on input\n self.created_file.append(feature_file_name)\n os.chdir('features')\n file = open(\"feature_template.txt\",encoding=\"utf-8\")\n template = file.read()\n\n template = template.replace(\"[quar_period_place_holder]\", parameter_input3)\n template = template.replace(\"[n_days_place_holder]\", parameter_input4)\n template = template.replace(\"[pop_type_place_holder]\", \"hybrid\")\n template = template.replace(\"[pop_size_place_holder]\", parameter_input6)\n template = template.replace(\"[pop_infected_place_holder]\", parameter_input7)\n template = template.replace(\"[location_place_holder]\", \"UK\")\n template = template.replace(\"[interventions_place_holder]\", \"baseline\")\n\n file.close()\n f = open(feature_file_name, \"a\") #generate feature file with input file name\n #f.write(parameter_input1 + parameter_input2) \n f.write(template)\n f.close()\n os.chdir('..')\n\n def dismiss_popup(self):\n self._popup.dismiss()\n\n def show_load(self, instance):\n content = LoadDialog(load=self.load, cancel=self.dismiss_popup)\n self._popup = Popup(title=\"Load file\", content=content,\n size_hint=(0.9, 0.9))\n self._popup.open()\n\n def load(self, 
path, filename):\n filename = filename[0].replace('C:\\\\dissertation\\\\causcumber\\\\scenarios\\\\compare_interventions\\\\features\\\\', '')\n file = open(\"results.json\",\"w\")\n file.close()\n print(\"File cleaned\")\n behave_cmd = \"behave features/\"+ filename + \" --format json --outfile results.json\"\n #behave_cmd = \"behave features/\"+ filename + \" --format json --junit\"\n os.system(behave_cmd) \n self.dismiss_popup()\n \n def on_request_close(self, instance): #remove results.json and other feature file created when closing the program\n os.remove(\"results.json\")\n os.chdir('features')\n for self.created_file in self.created_file:\n os.remove(self.created_file)\n os.chdir('..')\n print(\"Closing\")\n\nFactory.register('LoadDialog', cls=LoadDialog)\nmain().run()","sub_path":"scenarios/beta.py","file_name":"beta.py","file_ext":"py","file_size_in_byte":9393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"427062130","text":"#!/usr/bin/python3\n#\n# Copyright (c) 2020 Inceptio\n#\nimport os\nimport launch\nfrom ament_index_python.packages import get_package_share_directory\nfrom launch.substitutions import EnvironmentVariable\nfrom launch.substitutions import LaunchConfiguration\nfrom launch import LaunchDescription\nfrom launch_ros.actions import Node\nfrom launch.actions import DeclareLaunchArgument\n\ndef generate_launch_description():\n scenario_runner_package = 'scenario_runner'\n scenario_runner_parameter = os.path.join(get_package_share_directory(scenario_runner_package), 'config', 'settings.yaml')\n\n trucksim_package = 'trucksim_bridge'\n trucksim_package_dir_share = get_package_share_directory(trucksim_package)\n trucksim_parameter = os.path.join(get_package_share_directory(trucksim_package), 'config', 'settings.yaml')\n\n carla_package = 'carla_bridge'\n carla_package_dir_share = get_package_share_directory(carla_package)\n carla_parameter = os.path.join(get_package_share_directory(carla_package), 'config', 'settings.yaml')\n\n hdmap_pkg = 'ad_map_server'\n hdmap_dir = get_package_share_directory(hdmap_pkg)\n\n return LaunchDescription([\n Node(\n package=scenario_runner_package,\n # node_namespace='fead',\n node_executable='run_jinan', \n node_name=scenario_runner_package,\n parameters=[scenario_runner_parameter],\n remappings=None,\n output='screen',\n emulate_tty=True,\n arguments=['--ros-args --log-level INFO']\n ), \n Node(\n package='ad_map_server',\n # node_namespace='fead',\n node_executable='ad_map_server', \n node_name='map_server' ,\n remappings=None,\n output='screen',\n cwd = hdmap_dir,\n emulate_tty=True,\n # arguments=['resource/jinan_config.txt']\n arguments=['resource/jinan_two_lanes_config.txt']\n ),\n Node(\n package='traffic_sim',\n # node_namespace='fead',\n node_executable='run_traffic_sim', \n node_name='traffic_sim' ,\n remappings=None,\n output='screen',\n emulate_tty=True,\n arguments=['--ros-args --log-level INFO']\n ),\n Node(\n package='traffic_sim',\n # node_namespace='fead',\n node_executable='run_traffic_list', \n node_name='traffic_sim' ,\n remappings=None,\n output='screen',\n emulate_tty=True,\n arguments=['--ros-args --log-level INFO']\n ),\n Node(\n package='trajectory_planner',\n # node_namespace='fead',\n node_executable='run_trajectory_planner', \n node_name='trajectory_planner' ,\n remappings=None,\n output='screen',\n emulate_tty=True,\n arguments=['--ros-args --log-level INFO']\n ),\n Node(\n package='trajectory_planner',\n # node_namespace='fead',\n 
node_executable='run_greedy_dm', \n node_name='greedy_dm' ,\n remappings=None,\n output='screen',\n emulate_tty=True,\n arguments=['--ros-args --log-level INFO']\n ),\n Node(\n package='trucksim_bridge',\n # node_namespace='fead',\n node_executable='trucksim_bridge_node', \n node_name='trucksim_bridge',\n parameters=[trucksim_parameter],\n remappings=None,\n cwd = trucksim_package_dir_share,\n output='screen',\n emulate_tty=True\n ),\n Node(\n package='pid_controller',\n # node_namespace='fead',\n node_executable='run_control', \n node_name='pid_controller' ,\n remappings=None,\n output='screen',\n emulate_tty=True,\n arguments=['--ros-args --log-level INFO']\n ),\n Node(\n package=carla_package,\n # node_namespace='fead',\n node_executable='run_carla_bridge', \n node_name=carla_package,\n parameters=[carla_parameter],\n remappings=None,\n output='screen',\n emulate_tty=True,\n cwd = carla_package_dir_share,\n arguments=['--ros-args --log-level INFO']\n ),\n ])\n","sub_path":"scenarios/scenario_runner/launch/run_jinan.py","file_name":"run_jinan.py","file_ext":"py","file_size_in_byte":4426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"76683717","text":"'''\r\nA Scrabble game\r\n'''\r\nfrom lexicon import Lexicon\r\nimport copy\r\n\r\nclass Scrabble():\r\n def __init__(self, rack, dictfile):\r\n '''\r\n The constructor initializes the following\r\n Two boards (regular and transposed)\r\n Two special square dictionaries (regular and transposed)\r\n Loads the trie into memory\r\n\r\n '''\r\n # Initialize board, dictionary special spots and points per letter\r\n self.board = []\r\n self.board_transp = []\r\n self.rack = rack[:]\r\n self.best_move = dict()\r\n self.best_score = 0\r\n self.best_word = \"\"\r\n self.longest_move = dict()\r\n self.longest_score = 0\r\n self.longest_length = 0\r\n self.longest_word = \"\"\r\n self.dictionary = Lexicon(dictfile)\r\n self.cross_checks = []\r\n self.cross_checks_transp = []\r\n print(\"Loaded dictionary!\")\r\n self.points = {\r\n 'A': 1, 'B': 3, 'C': 3, 'D': 2, 'E': 1, 'F': 4, 'G': 2, 'H': 4,\r\n 'I': 1, 'J': 8, 'K': 5, 'L': 1, 'M': 3, 'N': 1, 'O': 1, 'P': 3,\r\n 'Q': 10, 'R': 1, 'S': 1, 'T': 1, 'U': 1, 'V': 4, 'W': 4, 'X': 8,\r\n 'Y': 4, 'Z': 10\r\n }\r\n\r\n self.tile_dist = {\r\n 'A': 9, 'B': 2, 'C': 2, 'D': 4, 'E': 12, 'F': 2, 'G': 3, 'H': 2,\r\n 'I': 9, 'J': 1, 'K': 1, 'L': 4, 'M': 2, 'N': 6, 'O': 8, 'P': 2,\r\n 'Q': 1, 'R': 6, 'S': 4, 'T': 6, 'U': 4, 'V': 2, 'W': 2, 'X': 1,\r\n 'Y': 2, 'Z': 1, ' ':2\r\n }\r\n \r\n for i in range(15):\r\n row = []\r\n for j in range(15):\r\n row.append(None)\r\n self.board.append(row)\r\n self.board_transp = copy.deepcopy(self.board)\r\n\r\n self.specials = {\r\n (0, 0): \"TW\", (0, 7): \"TW\", (0, 14): \"TW\",\r\n (7, 0): \"TW\", (7, 7): \"TW\", (7, 14): \"TW\",\r\n (14, 0): \"TW\", (14, 7): \"TW\", (14, 14): \"TW\",\r\n (1, 1): \"DW\", (2, 2): \"DW\", (3, 3): \"DW\", (4, 4): \"DW\",\r\n (7, 7): \"DW\", (10, 10): \"DW\", (11, 11):\"DW\", (12, 12): \"DW\", (13, 13): \"DW\",\r\n (1, 13): \"DW\", (2, 12): \"DW\", (3, 11): \"DW\", (4, 10): \"DW\",\r\n (10, 4): \"DW\", (11, 3): \"DW\", (12, 2): \"DW\", (13, 1): \"DW\",\r\n (0, 3): \"DL\", (0, 11): \"DL\", (2, 6): \"DL\", (2, 8): \"DL\",\r\n (3, 0): \"DL\", (3, 7): \"DL\", (3, 14): \"DL\",\r\n (6, 2): \"DL\", (6, 6): \"DL\", (6, 8): \"DL\", (6, 12): \"DL\",\r\n (7, 3): \"DL\", (7, 11): \"DL\",\r\n (14, 3): \"DL\", (14, 11): \"DL\", (12, 6): \"DL\", (12, 8): \"DL\",\r\n (11, 0): \"DL\", (11, 7): \"DL\", (11, 
14): \"DL\",\r\n (8, 2): \"DL\", (8, 6): \"DL\", (8, 8): \"DL\", (8, 12): \"DL\",\r\n (1, 5): \"TL\", (1, 9): \"TL\",\r\n (5, 1): \"TL\", (5, 5): \"TL\", (5, 9): \"TL\", (5, 13): \"TL\",\r\n (13, 5): \"TL\", (13, 9): \"TL\",\r\n (9, 1): \"TL\", (9, 5): \"TL\", (9, 9): \"TL\", (9, 13): \"TL\",\r\n }\r\n\r\n self.specials_transp = dict()\r\n for special_sq in self.specials:\r\n self.specials_transp[special_sq[::-1]] = self.specials[special_sq]\r\n \r\n\r\n def load_board(self, filename):\r\n '''\r\n Load a certain scrabble state from a file 'filename'\r\n '''\r\n\r\n with open(filename) as f:\r\n lc = 0\r\n for line in f:\r\n if lc < 15:\r\n for i in range(len(line)-1):\r\n if line[i] != '.':\r\n self.board[lc][i] = line[i]\r\n self.board_transp[i][lc] = line[i]\r\n lc += 1\r\n self.cross_checks = [self.cross_checks_row(i,0) for i in range(15)]\r\n self.cross_checks_transp = [self.cross_checks_row(i,1) for i in range(15)]\r\n \r\n def set_rack(self, rack):\r\n self.rack = rack[:]\r\n self.best_move = dict()\r\n self.best_score = 0\r\n self.best_word = \"\"\r\n self.longest_move = dict()\r\n self.longest_score = 0\r\n self.longest_length = 0\r\n self.longest_word = \"\"\r\n \r\n def get_anchors(self, row, transp=0):\r\n '''\r\n Get the anchors from a given row.\r\n row is the index of the row on the board\r\n returns a list of numbers, where each number is the column of the anchor in the row\r\n '''\r\n anchors = set()\r\n if not transp:\r\n for i in range(15):\r\n if (i > 0 and self.board[row][i] is None and self.board[row][i-1] is not None or \r\n i < 14 and self.board[row][i] is None and self.board[row][i+1] is not None or\r\n row > 0 and self.board[row][i] is None and self.board[row-1][i] is not None or\r\n row < 14 and self.board[row][i] is None and self.board[row+1][i] is not None):\r\n anchors.add(i)\r\n else:\r\n for i in range(15):\r\n if (i > 0 and self.board_transp[row][i] is None and self.board_transp[row][i-1] is not None or \r\n i < 14 and self.board_transp[row][i] is None and self.board_transp[row][i+1] is not None or\r\n row > 0 and self.board_transp[row][i] is None and self.board_transp[row-1][i] is not None or\r\n row < 14 and self.board_transp[row][i] is None and self.board_transp[row+1][i] is not None):\r\n anchors.add(i)\r\n return anchors\r\n \r\n def cross_checks_row(self, row, transp):\r\n '''\r\n Cross checks every square in the row and returns a list containing the set of\r\n playable letters from the rack for each square in the row\r\n row is the index of the row in the board\r\n rack is a list of strings representing the rack\r\n '''\r\n if transp:\r\n board = self.board_transp\r\n else:\r\n board = self.board\r\n cross_checks = []\r\n for i in range(15):\r\n if board[row][i] is not None:\r\n cross_checks.append(set())\r\n continue\r\n up_count = row-1\r\n crossed_word = \"\"\r\n while up_count >= 0 and board[up_count][i] is not None:\r\n crossed_word += board[up_count][i]\r\n up_count -= 1\r\n crossed_word = crossed_word[::-1]\r\n\r\n place_letter_index = len(crossed_word)\r\n down_count = row+1\r\n while down_count < 15 and board[down_count][i] is not None:\r\n crossed_word += board[down_count][i]\r\n down_count += 1\r\n\r\n if crossed_word == \"\":\r\n cross_checks.append(set(self.rack))\r\n else:\r\n valid_letters = set()\r\n for letter in self.rack:\r\n candidate_word = crossed_word[:place_letter_index] + letter + crossed_word[place_letter_index:]\r\n if self.dictionary.check(candidate_word):\r\n valid_letters.add(letter)\r\n 
cross_checks.append(valid_letters)\r\n return cross_checks\r\n \r\n def get_points(self, placement, across=1):\r\n '''\r\n Returns the total points acquired after playing a move\r\n placement is a dictionary mapping coordinate on the board(tuple) to letter played(string)\r\n across is a boolean to denote whether the move was played across or not\r\n Function does not check for legality of the move\r\n '''\r\n points = 0\r\n row = None\r\n # If the word is played across, no transposition needed\r\n if across:\r\n board = self.board\r\n specials = self.specials\r\n corrected_placement = placement\r\n # If played down use transposed board, specials and placement coordinates\r\n else:\r\n board = self.board_transp\r\n specials = self.specials_transp\r\n # Transpose placement coordinates as well\r\n corrected_placement = dict()\r\n for place in placement:\r\n corrected_placement[place[::-1]] = placement[place]\r\n\r\n # Score all the vertical words first\r\n for place in corrected_placement:\r\n letter_played = corrected_placement[place]\r\n word_points = 0\r\n row = place[0] #inefficient, row needs to be taken only once\r\n column = place[1]\r\n # Check upwards for part of vertical word\r\n up_count = row-1\r\n while up_count >= 0 and board[up_count][column] is not None:\r\n word_points += self.points[board[up_count][column]]\r\n up_count -= 1\r\n # Check downwards for part of vertical word\r\n down_count = row+1\r\n while down_count < 15 and board[down_count][column] is not None:\r\n word_points += self.points[board[down_count][column]]\r\n down_count += 1\r\n # Score the vertical word\r\n if place in specials:\r\n special = specials[place]\r\n if special == \"DW\":\r\n word_points = (self.points[letter_played] + word_points)*2 if word_points != 0 else 0\r\n elif special == \"TW\":\r\n word_points = (self.points[letter_played] + word_points)*3 if word_points != 0 else 0\r\n elif special == \"DL\":\r\n word_points = (self.points[letter_played]*2 + word_points) if word_points != 0 else 0\r\n elif special == \"TL\":\r\n word_points = (self.points[letter_played]*3 + word_points) if word_points != 0 else 0\r\n else:\r\n word_points = word_points + self.points[letter_played] if word_points != 0 else 0\r\n points += word_points\r\n \r\n # Score the word across\r\n # Get the first letter of the word across\r\n first_letter_col = None\r\n for place in corrected_placement:\r\n if first_letter_col is None or place[1] < first_letter_col:\r\n first_letter_col = place[1]\r\n \r\n column_iter = first_letter_col\r\n word_points = 0\r\n multiplier = 1\r\n # Look at the word rightwards from the first letter placed \r\n while (column_iter < 15 and (board[row][column_iter] is not None or (row, column_iter) in corrected_placement) ):\r\n letter = board[row][column_iter]\r\n if letter is None:\r\n letter = corrected_placement[row, column_iter] \r\n letter_pos = (row, column_iter)\r\n\r\n if letter in corrected_placement.values() and letter_pos in corrected_placement and letter_pos in specials:\r\n if specials[letter_pos] == \"TW\":\r\n multiplier *= 3\r\n elif specials[letter_pos] == \"DW\":\r\n multiplier *= 2\r\n elif specials[letter_pos] == \"TL\":\r\n word_points += self.points[letter]*2\r\n elif specials[letter_pos] == \"DL\":\r\n word_points += self.points[letter]\r\n word_points += self.points[letter]\r\n column_iter += 1\r\n\r\n # Look at the word leftwards from the first letter placed\r\n column_iter = first_letter_col - 1\r\n while (column_iter >= 0 and (board[row][column_iter] is not None or (row, 
column_iter) in corrected_placement)):\r\n letter = board[row][column_iter]\r\n if letter is None:\r\n letter = corrected_placement[row, column_iter] \r\n letter_pos = (row, column_iter)\r\n if letter in corrected_placement.values() and letter_pos in corrected_placement and letter_pos in specials:\r\n if specials[letter_pos] == \"TW\":\r\n multiplier *= 3\r\n elif specials[letter_pos] == \"DW\":\r\n multiplier *= 2\r\n elif specials[letter_pos] == \"TL\":\r\n word_points += self.points[letter]*2\r\n elif specials[letter_pos] == \"DL\":\r\n word_points += self.points[letter]\r\n word_points += self.points[letter]\r\n column_iter -= 1\r\n # Account for multipliers\r\n word_points *= multiplier\r\n points += word_points\r\n # Check for bingo\r\n if len(placement) == 7:\r\n points += 50\r\n return points\r\n\r\n\r\n def solve(self):\r\n '''\r\n Updates the best and longest words using by backtracking through all possible moves across and down\r\n '''\r\n if '*' in self.rack:\r\n self.blank_solve()\r\n else:\r\n # Search all fifteen rows for best move(s)\r\n for i in range(15):\r\n row_anchors = self.get_anchors(i)\r\n for anchor in row_anchors:\r\n if anchor != 0 and self.board[i][anchor-1] is not None:\r\n prefix = \"\"\r\n column_iter = anchor-1\r\n while column_iter >= 0 and self.board[i][column_iter] is not None:\r\n prefix += self.board[i][column_iter]\r\n column_iter -= 1\r\n prefix = prefix[::-1]\r\n prefix_node = self.dictionary.path_node(prefix)\r\n self.right_extend(prefix, prefix_node, (i, anchor), 0, (i, anchor))\r\n else:\r\n limit = 0\r\n column_iter = anchor - 1\r\n while column_iter >= 0 and self.board[i][column_iter] is None and column_iter not in row_anchors:\r\n limit += 1\r\n column_iter -= 1\r\n self.left_part(\"\", self.dictionary.root_node, limit, (i, anchor), 0)\r\n # Search all fifteen columns for moves\r\n \r\n for i in range(15):\r\n row_anchors = self.get_anchors(i, 1)\r\n for anchor in row_anchors:\r\n if anchor != 0 and self.board_transp[i][anchor-1] is not None:\r\n prefix = \"\"\r\n column_iter = anchor-1\r\n while column_iter >= 0 and self.board_transp[i][column_iter] is not None:\r\n prefix += self.board_transp[i][column_iter]\r\n column_iter -= 1\r\n prefix = prefix[::-1]\r\n prefix_node = self.dictionary.path_node(prefix)\r\n self.right_extend(prefix, prefix_node, (i, anchor), 1, (i, anchor))\r\n else:\r\n limit = 0\r\n column_iter = anchor - 1\r\n while column_iter >= 0 and self.board_transp[i][column_iter] is None and column_iter not in row_anchors:\r\n limit += 1\r\n column_iter -= 1\r\n self.left_part(\"\", self.dictionary.root_node, limit, (i, anchor), 1)\r\n \r\n \r\n '''\r\n The following two functions implement Appel and Jacobson's backtrack search for moves\r\n '''\r\n def left_part(self, partial_word, node, limit, anchor_location, transp):\r\n self.right_extend(partial_word, node, anchor_location, transp, anchor_location)\r\n\r\n if limit == 0:\r\n return \r\n else:\r\n for edge in node.edges:\r\n if edge in self.rack:\r\n self.rack.remove(edge)\r\n new_node = node.edges[edge]\r\n new_partial_word = partial_word + edge\r\n self.left_part(new_partial_word, new_node, limit-1, anchor_location, transp)\r\n self.rack.append(edge)\r\n \r\n def right_extend(self, partial_word, node, square, transp, base):\r\n if transp:\r\n board = self.board_transp\r\n cross_checks = self.cross_checks_transp\r\n else:\r\n board = self.board\r\n cross_checks = self.cross_checks\r\n\r\n row = square[0]\r\n if square[1] > 14:\r\n if node.terminal and square != base:\r\n 
prev_square = (square[0], square[1]-1)\r\n move = self.generate_move(partial_word, prev_square, transp)\r\n score = self.get_points(move, (not transp))\r\n if score > self.best_score:\r\n self.best_move = move\r\n self.best_score = score\r\n self.best_word = partial_word\r\n if len(partial_word) > self.longest_length:\r\n self.longest_length = len(partial_word)\r\n self.longest_score = score\r\n self.longest_move = move\r\n self.longest_word = partial_word\r\n return\r\n if node is None:\r\n return\r\n if board[square[0]][square[1]] is None:\r\n if node.terminal and square != base:\r\n prev_square = (square[0], square[1]-1)\r\n move = self.generate_move(partial_word, prev_square, transp)\r\n score = self.get_points(move, (not transp))\r\n if score > self.best_score:\r\n self.best_move = move\r\n self.best_score = score\r\n self.best_word = partial_word\r\n if len(partial_word) > self.longest_length:\r\n self.longest_length = len(partial_word)\r\n self.longest_score = score\r\n self.longest_move = move\r\n self.longest_word = partial_word\r\n for edge in node.edges:\r\n if edge in self.rack and edge in cross_checks[square[0]][square[1]]:\r\n self.rack.remove(edge)\r\n new_node = node.edges[edge]\r\n new_partial_word = partial_word + edge\r\n next_square = (square[0], square[1]+1)\r\n self.right_extend(new_partial_word, new_node, next_square, transp, base)\r\n self.rack.append(edge)\r\n else:\r\n l = board[square[0]][square[1]]\r\n if l in node.edges:\r\n new_node = node.edges[l]\r\n next_square = (square[0], square[1]+1)\r\n new_partial_word = partial_word + l\r\n self.right_extend(new_partial_word, new_node, next_square, transp, base)\r\n \r\n\r\n def generate_move(self, word, last_square, transp):\r\n '''\r\n Generates a placement of tiles for a word ending at a given square\r\n Assumes the move is possible and legal\r\n Note than if transposed is true, the placement returned will have transposed coordinates\r\n NOT true coordinates\r\n '''\r\n placement = dict()\r\n if transp:\r\n board = self.board_transp\r\n else:\r\n board = self.board\r\n row = last_square[0]\r\n column_iter = last_square[1]\r\n for i in range(len(word)):\r\n if board[row][column_iter] is None:\r\n if not transp:\r\n placement[row,column_iter] = word[len(word)-1-i]\r\n else:\r\n placement[column_iter, row] = word[len(word)-1-i]\r\n column_iter -= 1\r\n return placement\r\n\r\n def get_best_move(self):\r\n return (self.best_word, self.best_score, self.best_move)\r\n \r\n def get_longest_move(self):\r\n return (self.longest_word, self.longest_score, self.longest_move)\r\n\r\n def blank_solve(self):\r\n raise NotImplementedError\r\n\r\n def get_opponent_rack_prob_dist(self):\r\n prob_dist = dict()\r\n distcpy = self.tile_dist\r\n for i in range(15):\r\n for j in range(15):\r\n if self.board[i][j] is not None and self.board[i][j] in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\":\r\n distcpy[self.board[i][j]] -= 1\r\n for i in range(len(self.rack)):\r\n distcpy[self.rack[i]] -= 1\r\n total_tiles_left = 0\r\n for c in distcpy:\r\n total_tiles_left += distcpy[c]\r\n for c in self.tile_dist:\r\n prob_dist[c] = 1 - (1 - distcpy[c]/total_tiles_left)**7\r\n return prob_dist\r\n\r\n\r\n ","sub_path":"Scrabble/scrabble.py","file_name":"scrabble.py","file_ext":"py","file_size_in_byte":20074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"236016944","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 
8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/xooof/xmldispatcher/tools/unittest/TestRunner.py\n# Compiled at: 2008-10-01 10:39:34\nimport sys, string, time, unittest, traceback, TestCaseBase\n\ndef _htmlEscape(s):\n return s.replace('&', '&amp;').replace('<', '&lt;')\n\n\nclass _WritelnDecorator:\n \"\"\"Used to decorate file-like objects with a handy 'writeln' method\"\"\"\n __module__ = __name__\n\n def __init__(self, stream):\n self.stream = stream\n\n def __getattr__(self, attr):\n return getattr(self.stream, attr)\n\n def writeln(self, *args):\n if args:\n apply(self.write, args)\n self.write('\\n')\n\n\nclass _TextAndHtmlTestResult(unittest.TestResult):\n \"\"\"A test result class that can print formatted to a text stream and an html stream.\n\n Used by TextAndHtmlTestRunner.\n \"\"\"\n __module__ = __name__\n separator1 = '=' * 70\n separator2 = '-' * 70\n\n def __init__(self, htmlStream, textStream, descriptions, verbosity):\n unittest.TestResult.__init__(self)\n self.htmlStream = htmlStream\n self.textStream = textStream\n self.showAll = verbosity > 1\n self.dots = verbosity == 1\n self.descriptions = descriptions\n self.myErrors = []\n self.myFailures = []\n\n def getDescription(self, test):\n if self.descriptions:\n return test.shortDescription() or str(test)\n else:\n return str(test)\n\n def getHtmlDescription(self, test):\n return '%s [%s]' % (_htmlEscape(self.getDescription(test)), _htmlEscape(str(test)))\n\n def startTest(self, test):\n unittest.TestResult.startTest(self, test)\n if self.showAll:\n self.textStream.write(self.getDescription(test))\n self.textStream.write(' ... ')\n\n def addSuccess(self, test):\n unittest.TestResult.addSuccess(self, test)\n if self.showAll:\n self.textStream.writeln('ok')\n elif self.dots:\n self.textStream.write('.')\n self.htmlStream.writeln('<tr><td>%s</td><td>OK</td></tr>' % (self.getHtmlDescription(test),))\n\n def addError(self, test, err):\n unittest.TestResult.addError(self, test, err)\n self.myErrors.append((test, err))\n if self.showAll:\n self.textStream.writeln('ERROR')\n elif self.dots:\n self.textStream.write('E')\n self.htmlStream.writeln(\"<tr><td>%s</td><td><a href='#ERROR%d'>ERROR</a></td></tr>\" % (self.getHtmlDescription(test), len(self.errors)))\n\n def addFailure(self, test, err):\n unittest.TestResult.addFailure(self, test, err)\n self.myFailures.append((test, err))\n if self.showAll:\n self.textStream.writeln('FAIL')\n elif self.dots:\n self.textStream.write('F')\n self.htmlStream.writeln(\"<tr><td>%s</td><td><a href='#FAIL%d'>FAIL</a></td></tr>\" % (self.getHtmlDescription(test), len(self.failures)))\n\n def printTextErrors(self):\n if self.dots or self.showAll:\n self.textStream.writeln()\n self.printTextErrorList('ERROR', self.myErrors)\n self.printTextErrorList('FAIL', self.myFailures)\n\n def printTextErrorList(self, flavour, errors):\n for (test, err) in errors:\n self.textStream.writeln(self.separator1)\n self.textStream.writeln('%s: %s' % (flavour, self.getDescription(test)))\n self.textStream.writeln(self.separator2)\n self.textStream.writeln('%s' % string.join(apply(traceback.format_exception, err), ''))\n\n def printHtmlErrors(self):\n self.printHtmlErrorList('ERROR', self.myErrors)\n self.printHtmlErrorList('FAIL', self.myFailures)\n\n def printHtmlErrorList(self, flavour, errors):\n i = 0\n for (test, err) in errors:\n i += 1\n self.htmlStream.writeln(\"<a name='%s%d'></a>\" % (flavour, i))
    \" % (flavour, i))\n self.htmlStream.writeln('

    %s

    ' % self.getHtmlDescription(test))\n self.htmlStream.write('
    ')\n            self.htmlStream.writeln(_htmlEscape('%s' % string.join(apply(traceback.format_exception, err), '')))\n            self.htmlStream.writeln('
    ')\n if isinstance(err[1], TestCaseBase.StructEqualFailureException):\n err[1].toHtml(self.htmlStream)\n\n\nclass TextAndHtmlTestRunner:\n \"\"\"A test runner class that displays results in textual form.\n\n It prints out the names of tests as they are run, errors as they\n occur, and a summary of the results at the end of the test run.\n \"\"\"\n __module__ = __name__\n\n def __init__(self, htmlStream, textStream=sys.stderr, descriptions=1, verbosity=1):\n self.htmlStream = _WritelnDecorator(htmlStream)\n self.textStream = _WritelnDecorator(textStream)\n self.descriptions = descriptions\n self.verbosity = verbosity\n\n def _makeResult(self):\n return _TextAndHtmlTestResult(self.htmlStream, self.textStream, self.descriptions, self.verbosity)\n\n def run(self, test):\n \"\"\"Run the given test case or test suite.\"\"\"\n result = self._makeResult()\n startTime = time.time()\n self.htmlStream.writeln('Test results')\n self.htmlStream.writeln(\"\")\n try:\n test(result)\n complete = 1\n except KeyboardInterrupt:\n complete = 0\n\n self.htmlStream.writeln('
    ')\n stopTime = time.time()\n timeTaken = float(stopTime - startTime)\n run = result.testsRun\n result.printTextErrors()\n self.textStream.writeln(result.separator2)\n if complete:\n self.textStream.writeln('Ran %d test%s in %.3fs' % (run, run == 1 and '' or 's', timeTaken))\n self.textStream.writeln()\n self.htmlStream.writeln('

    Ran %d test%s in %.3fs

    ' % (run, run == 1 and '' or 's', timeTaken))\n self.htmlStream.writeln()\n else:\n self.textStream.writeln('Interrupted by user')\n self.textStream.writeln()\n self.htmlStream.writeln('

    Interrupted by user

    ')\n self.htmlStream.writeln()\n if not result.wasSuccessful():\n self.textStream.write('FAILED (')\n self.htmlStream.write('FAILED (')\n (failed, errored) = map(len, (result.failures, result.errors))\n if failed:\n self.textStream.write('failures=%d' % failed)\n self.htmlStream.write('failures=%d' % failed)\n if errored:\n if failed:\n self.textStream.write(', ')\n self.textStream.write('errors=%d' % errored)\n self.htmlStream.write('errors=%d' % errored)\n self.textStream.writeln(')')\n self.htmlStream.writeln(')')\n self.textStream.writeln('')\n self.textStream.writeln('Detailed results are in result.html')\n else:\n self.textStream.writeln('OK')\n self.htmlStream.writeln('OK')\n result.printHtmlErrors()\n self.htmlStream.writeln('')\n return result\n\n\ndef main(module='__main__', defaultTest=None, argv=None, testRunner=None, testLoader=unittest.defaultTestLoader):\n if argv is None:\n argv = sys.argv\n if '-v' in argv:\n verbosity = 2\n elif '-q' in argv:\n verbosity = 0\n else:\n verbosity = 1\n if testRunner is None:\n testRunner = TextAndHtmlTestRunner(open('result.html', 'w'), verbosity=verbosity)\n unittest.TestProgram(module, defaultTest, argv, testRunner, testLoader)\n return\n\n\ndef _myimport(moduleName):\n module = __import__(moduleName)\n for part in moduleName.split('.')[1:]:\n module = getattr(module, part)\n\n return module\n\n\ndef loadTestsFromModuleNames(moduleNames, testLoader=unittest.defaultTestLoader):\n test = unittest.TestSuite()\n modules = map(_myimport, moduleNames)\n for module in modules:\n test.addTest(testLoader.loadTestsFromModule(module))\n\n return test","sub_path":"pycfiles/xooof.xmldispatcher-0.1-py2.4/TestRunner.py","file_name":"TestRunner.py","file_ext":"py","file_size_in_byte":8155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"309099304","text":"#!/usr/bin/env python3.6\nfrom sorting import choose_small\ne=1\nl=[]\nwhile e>0:\n Input=input('Please input your number into the list(End by input end):')\n if Input=='end':\n e=-1\n else:\n l.append(int(Input))\nprint('The list which you input is:')\nprint(l)\nchoose_small(l)\n\n\n","sub_path":"testfunction3.py","file_name":"testfunction3.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"270889398","text":"import sys\r\n\r\nfrom argparse import ArgumentParser\r\n\r\nfrom lib.file_processer import FileContentCountPatternFinder\r\n\r\nclass Options:\r\n def __init__(self):\r\n self.filepaths = []\r\n self.pattern = ''\r\n self.functions = {'count' : 0, 'sum' : 1, 'display': 2}\r\n self._init_func_dictionary()\r\n self._init_parser()\r\n \r\n def _init_func_dictionary(self):\r\n self.finders = { 0:FileContentCountPatternFinder}\r\n \r\n def _init_parser(self):\r\n #overrides usage that is by default something like:\r\n # usage: PROG [-h] [--foo] [FOO]] bar [bar ...]\r\n # usage = './bin/run_project'\r\n self.parser = ArgumentParser() #usage=usage\r\n # inits the argparser with an argument example with\r\n # a default value 'example-value'\r\n # nargs='+' takes 1 or more arguments, nargs='*' takes zero or more.\r\n self.parser.add_argument('-p',\r\n '--list',\r\n help='example.log',\r\n nargs='+',\r\n required=True\r\n )\r\n self.parser.add_argument('-re',\r\n '--regex',\r\n dest='regex',\r\n help='An example regex option'\r\n ) \r\n self.parser.add_argument('-f',\r\n '--function',\r\n default=0,\r\n dest='function',\r\n help='count = 0, sum = 1, 
display = 2',\r\n type = int,\r\n choices=[0, 1, 2]\r\n )\r\n \r\n def parse(self, args=None):\r\n\r\n #show parsed in args\r\n print('Arguments to be used:')\r\n for _, value in self.parser.parse_args()._get_kwargs():\r\n if value is not None:\r\n print(value)\r\n \r\n args = self.parser.parse_args()\r\n\r\n self.filepaths = list(args.list)\r\n self.pattern = args.regex\r\n function = args.function\r\n\r\n return self.finders[0]","sub_path":"filereader/lib/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"520610402","text":"import numpy as np\nimport numba as nb\nfrom PIL import Image as Image\n\nimport time\n\nc = 1.0\nG = 2e-3\n\nn_iter =300\ndt = 0.001\n\n@nb.jit()\ndef normalize(v):\n norm = np.linalg.norm(v)\n if norm == 0: \n return v\n return v / norm\n\n@nb.jit()\ndef camera_ray_trace(x,y,camera_origin,point):\n # point = camera_normal+x*camera_right+y*camera_up\n ray_origin = camera_origin.copy()\n ray_position = camera_origin.copy()\n ray_direction = point - camera_origin\n ray_velocity = c*ray_direction\n ray_total_time = 0\n color = (0,0,0)\n for t in range(n_iter):\n r = bh_position - ray_position\n a = 7.0e-3*(bh_mass/np.dot(r,r))*normalize(r)\n # print(a)\n ray_prev_pos = ray_position.copy()\n ray_velocity += a*(t*dt)\n ray_velocity = c*normalize(ray_velocity)\n ray_position += ray_velocity*(t*dt) + (a/2)*(t*dt)**2\n ray_total_time += (t*dt)\n\n ray_bh_dist = np.linalg.norm(ray_position - bh_position)\n if 0 <= max(ray_prev_pos[1], ray_position[1]) and 0 >= min(ray_prev_pos[1], ray_position[1]):\n a = ray_prev_pos\n b = ray_position\n l = b-a\n cross_point = np.array([a[0]-(a[1]/l[1])*l[0], 0, a[2]-(a[1]/l[1])*l[2]])\n r = np.linalg.norm(cross_point - disk_origin)\n if r <= disk_outer_r and r >= disk_inner_r:\n color = disk_color\n break\n elif ray_bh_dist <= bh_radius:\n break\n elif ray_bh_dist >= 15.0:\n break\n return color\n\ndef render():\n global image_pixels\n ratio = float(pic_width)/pic_height\n x0, x1 = -1.0, 1.0\n y0, y1 = -1.0/ratio, 1.0/ratio\n xstep, ystep = (x1-x0)/(pic_width-1), (y1-y0)/(pic_height-1)\n\n t0 = time.time()\n\n for j in range(pic_height):\n y = y0 + j*ystep\n\n if (j+1) % 10 == 0:\n print(\"line \" + str(j+1) + \"/\" + str(pic_height))\n print(time.time() - t0)\n t0 = time.time()\n\n for i in range(pic_width):\n x = x0 + i*xstep\n point = camera_normal+x*camera_right+y*camera_up\n image_pixels[j,i] = camera_ray_trace(x,y,camera_origin,point)\n\nbh_position = np.array([0, 0., 0.])\nbh_mass = 80\nbh_radius = 2*bh_mass*G/c**2\nprint(bh_radius)\n\npic_width = 100\npic_height = 100\n\nc_origin = np.array([0., 0., 0.])\nc_focus = np.array([0., 0., 0.])\n\ndisk_origin = c_focus\ndisk_inner_r = 4.5*bh_radius\ndisk_outer_r = 15*bh_radius\ndisk_color = (255,255,255)\n\nimage_pixels = np.zeros((pic_height,pic_width,3))\n\ncamera_origin = c_origin.copy()\ncamera_direction=normalize(c_focus-c_origin)\ncamera_focal_length=1.2\ncamera_normal=camera_origin+camera_focal_length*camera_direction\ncamera_right=np.array([1,0,0])\ncamera_up=normalize(np.cross(camera_normal,camera_right))\n\nr = 7.0\n\ndef render_theta(dr):\n global c_origin\n \n global image_pixels\n \n global camera_origin\n global camera_direction\n global camera_normal\n global camera_up\n\n c_origin = np.array([0., 0.2, -(r-dr)])\n image_pixels = np.zeros((pic_height,pic_width,3))\n \n camera_origin = c_origin.copy()\n 
camera_direction=normalize(c_focus-c_origin)\n camera_normal=camera_origin+camera_focal_length*camera_direction\n camera_up=normalize(np.cross(camera_normal,camera_right))\n \n render()\n \n Image.fromarray(image_pixels.astype(np.uint8)).save(\"images/T3/blackhole-\"+ str(dr) +\".png\")\n\n print(\"Done.\"+str(dr))\n\nfor i in np.arange(0,7,0.2):\n print(i)\n render_theta(i)\n\nprint(\"All Done.\")","sub_path":"Black_Hole/bh2.py","file_name":"bh2.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"226396355","text":"\"\"\"\nTake the following IPv4 address: 128.32.10.1\n\nThis address has 4 octets where each octet is a single byte (or 8 bits).\n\n1st octet 128 has the binary representation: 10000000\n2nd octet 32 has the binary representation: 00100000\n3rd octet 10 has the binary representation: 00001010\n4th octet 1 has the binary representation: 00000001\nSo 128.32.10.1 == 10000000.00100000.00001010.00000001\n\nBecause the above IP address has 32 bits, we can represent it as the unsigned 32 bit number: 2149583361\n\nComplete the function that takes an unsigned 32 bit number and returns a string representation of its IPv4 address.\n\nExamples\n2149583361 ==> \"128.32.10.1\"\n32 ==> \"0.0.0.32\"\n0 ==> \"0.0.0.0\"\n\"\"\"\n\ndef int32_to_ip(int32):\n binary = '{:032b}'.format(int32)\n parts = [binary[i:i+8] for i in range(0, len(binary), 8)]\n a = 0\n l = []\n for item in parts:\n item = item[::-1]\n for i in range(len(item)):\n a += int(item[i]) * 2**i\n l.append(str(a))\n a = 0\n return \".\".join(l)\n\n\n# Or\n\nfrom ipaddress import IPv4Address\n\ndef int32_to_ip(int32):\n return str(IPv4Address(int32))","sub_path":"Python/5-KYU/int32 to IPv4.py","file_name":"int32 to IPv4.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"125153501","text":"from django.db import models\nfrom django.contrib.contenttypes import generic\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom gitview.repositories.managers import RepositoryManager\n\n\nclass Repository(models.Model):\n owner_id = models.PositiveIntegerField()\n owner_ct = models.ForeignKey(ContentType)\n\n owner = generic.GenericForeignKey(fk_field=\"owner_id\",\n ct_field=\"owner_ct\")\n\n name = models.SlugField(max_length=50, unique=True)\n\n default_branch = models.SlugField(max_length=100, default=\"master\")\n\n location = models.FilePathField(path=\"/home/git/repositories/\",\n allow_files=False, allow_folders=True,\n recursive=True, match=\".*\\.git\")\n\n is_public = models.BooleanField(default=True)\n\n has_issues = models.BooleanField(default=True)\n\n objects = RepositoryManager()\n\n class Meta:\n verbose_name_plural = \"repositories\"\n\n def __unicode__(self):\n return \"%s/%s\" % (self.owner.username, self.name)\n\n @property\n def git_repository(self):\n from git import Repo\n\n return Repo(self.repository_location)\n\n @property\n def repository_location(self):\n return \"/home/git/repositories/%s\" % self.location\n\n def create_git_repository(self):\n from git import Repo\n\n Repo.init(self.repository_location, bare=True)\n\n def save(self, *args, **kwargs):\n if not self.location:\n folder_path = \"%s/%s.git\" % (self.owner.username, self.name)\n\n self.location = folder_path\n\n self.create_git_repository()\n\n return super(Repository, self).save(*args, 
**kwargs)\n","sub_path":"gitview/repositories/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"600794197","text":"#https://codility.com/media/train/2-CountingElements.pdf\n\n\"\"\"Problem: You are given an integer m (1 ≤ m ≤ 1 000 000) and two non-empty, zero-indexed\narrays A and B of n integers, a0, a1, . . . , an−1 and b0, b1, . . . , bn−1 respectively (0 ≤ ai, bi ≤ m).\nThe goal is to check whether there is a swap operation which can be performed on these\narrays in such a way that the sum of elements in array A equals the sum of elements in\narray B after the swap. By swap operation we mean picking one element from array A and\none element from array B and exchanging them.\"\"\"\n\n\ndef counting(A, m):\n    n = len(A)\n    count = [0] * (m + 1)\n    for k in xrange(n):\n        count[A[k]] += 1\n    return count\n\ndef slow_solution(A, B, m):\n    n = len(A)\n    sum_a = sum(A)\n    sum_b = sum(B)\n    for i in xrange(n):\n        for j in xrange(n):\n            change = B[j] - A[i]\n            sum_a += change\n            sum_b -= change\n            if sum_a == sum_b:\n                return True\n            sum_a -= change\n            sum_b += change\n    return False\n\n\n\"\"\"\nThe best approach is to count the elements of array A and calculate\nthe difference d between the sums of the elements of array A and B.\n\nFor every element of array B, we assume that we will swap it with some element from\narray A. The difference d tells us the value from array A that we are interested in swapping,\nbecause only one value will cause the two totals to be equal. The occurrence of this value can\nbe found in constant time from the array used for counting.\n\"\"\"\ndef fast_solution(A, B, m):\n    n = len(A)\n    sum_a = sum(A)\n    sum_b = sum(B)\n    d = sum_b - sum_a\n    if d % 2 == 1:\n        return False\n    d /= 2\n    count = counting(A, m)\n    for i in xrange(n):\n        if 0 <= B[i] - d and B[i] - d <= m and count[B[i] - d] > 0:\n            return True\n    return False\n    \n","sub_path":"Python/CodilityLessons/Counting.py","file_name":"Counting.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"588580776","text":"# =============================================================================\n#\n# PUBLIC DOMAIN NOTICE\n# National Center for Biotechnology Information\n#\n# This software/database is a \"United States Government Work\" under the\n# terms of the United States Copyright Act. It was written as part of\n# the author's official duties as a United States Government employee and\n# thus cannot be copyrighted. This software/database is freely available\n# to the public for use. The National Library of Medicine and the U.S.\n# Government have not placed any restriction on its use or reproduction.\n#\n# Although all reasonable efforts have been taken to ensure the accuracy\n# and reliability of the software and data, the NLM and the U.S.\n# Government do not and cannot warrant the performance or results that\n# may be obtained by using this software or data. 
The NLM and the U.S.\n# Government disclaim all warranties, express or implied, including\n# warranties of performance, merchantability or fitness for any particular\n# purpose.\n#\n# Please cite the author in any work or product based on this material.\n#\n# =============================================================================\n\n\"\"\"\n This module defines a transparent HTTP proxy for the NCBI's DRS webservice\n\"\"\"\n\nimport requests\nimport flask\nimport logging\nimport sys\nimport os\nfrom urllib.parse import urlsplit, urlunsplit, urljoin\n\ntry:\n from .rewrite import Rewriter\n from .cloud import ComputeEnvironmentToken\nexcept:\n sys.path.append(os.path.dirname(os.path.abspath(__file__)))\n from rewrite import Rewriter\n from cloud import ComputeEnvironmentToken\n\n_rewriter = Rewriter()\n\n\n_CHUNK_SIZE = 10 * 1024 * 1024 # 10MB\n\n\ndef _streamContent(resp: requests.Response):\n \"\"\" Generator function, iterates over the content of a requests.Response in _CHUNK_SIZE chunks\n\n Parameters\n ----------\n resp: request.Response to be iterated\n\n Returns\n -------\n yields the next chunk (bytes, <= _CHUNK_SIZE long) of the response's,\n or returns if there is no more or an exception has occurred\n\n \"\"\"\n\n try:\n for chunk in resp.iter_content(_CHUNK_SIZE):\n yield chunk\n except Exception as ex:\n logging.error(\"streamContent(): resp.iter_content() threw \" + str(ex))\n finally:\n return\n\n\ndef _redirect(shortID: str):\n \"\"\" For a given shortID retrieve the coresponding URL to the NCBI redirector service,\n get a temporary signed URL to the target file from the redirector,\n produce a Flask.Response object that can stream the target file\n\n Parameters\n ----------\n shortID: a key returned by an earlier request to \"http://$HOST:$PORT/ga4gh/drs/v1/objects/$ACCESSION\"\n\n Returns\n -------\n a Flask.Response object that can stream the target file\n\n \"\"\"\n\n # retrieve the redirectURL corresponding to the shortID\n redirectorURL = _rewriter.Retrieve(shortID)\n if not redirectorURL:\n return {\"status_code\": 404, \"msg\": \"Accession is not found\"}, 404, {}\n\n # POST to the redirector with ident=CE\n redir = requests.post(\n redirectorURL, data={\"ident\": ComputeEnvironmentToken()}, allow_redirects=False\n )\n\n # intercept a redirect, capture the temporary signed bucket URL\n if redir.status_code == 307:\n try:\n bucketUrl = redir.headers[\"Location\"]\n # expiration = redir.headers[\"Expires\"]\n\n # send request to bucket server, ready to stream the data\n resp = requests.get(bucketUrl, stream=True)\n\n # this will start streaming\n ret = flask.Response(flask.stream_with_context(_streamContent(resp)))\n ret.content_type = resp.headers[\"Content-Type\"]\n ret.content_length = resp.headers[\"Content-Length\"]\n\n return ret\n\n except Exception as ex:\n return {\"status_code\": 500, \"msg\": str(ex)}, 500, {}\n\n return {\n \"status_code\": 501,\n \"msg\": f\"unexpected response from redirector: {redir.status_code} {redir.reason}\",\n }, 501, {}\n\n\ndef do_proxy(shortID: str):\n \"\"\" For a given shortID retrieve the coresponding URL to the NCBI redirector service,\n get a temporary signed URL to the target file from the redirector,\n produce a Flask.Response object that can stream the target file\n\n Parameters\n ----------\n shortID: a key returned by an earlier request to \"http://$HOST:$PORT/ga4gh/drs/v1/objects/$ACCESSION\"\n\n Returns\n -------\n a Flask.Response object that can stream the target file\n\n \"\"\"\n return 
_redirect(shortID)\n\n\n# --------------------- Unit tests\n\ndef test_Proxy_BadAcc():\n (res, *dummy) = _redirect(\"blah\")\n assert 404 == res[\"status_code\"]\n\n# def test_Proxy_BadJwt(self):\n# # TODO: a working jwt\n# shortID = _rewriter.Rewrite(\n# \"https://locate.ncbi.nlm.nih.gov/sdlr/sdlr.fcgi?jwt=eyJ\"\n# )\n# res = _redirect(shortID)\n# print(res)\n# self.assertEqual(404, res['status_code'])\n","sub_path":"source/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":5088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"471043488","text":"\n# coding: utf-8\n\n# In[5]:\n\n\ndef start():\n my_number = int(input(\"What's the number?\"))\n your_guess = int(input(\"I am thinking of a number from 1-10. Can you find it? You have 5 tries. Guess what is it\"))\n tries = 1\n while your_guess != my_number:\n if your_guess != my_number:\n if your_guess < my_number:\n tries += 1\n return \"Nope. It's higher than that. Try again.\"\n elif your_guess > my_number:\n tries += 1\n return \"Nope. It's lower than that. Try again.\"\n else:\n return f\"Congratulations! You guessed it in {tries} tries.\"\n\ndef play_again():\n answer = input(\"Do you want to play again? Y or N\")\n if answer == \"Y\":\n start()\n else:\n return \"Thank you for playing!\"\n \nstart()\n\n\n# In[ ]:\n\n\ndef value(l):\n for items in l:\n print(l, end=' ')\n \nvalue([\"1\", \"2\", \"3\"])\n\n\n# In[ ]:\n\n\ndef value(nums):\n for items in nums:\n print(nums, end=\" \")\n \nvalue([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n\n\n# In[ ]:\n\n\nfor i in range(10):\n print(\"_\", end=\"\")\n #val, sep, end\n# end=\"\" used for \\n newline\n\n#print(list(map((lambda num: num%2!=0), range(11))))\n\n\n# In[14]:\n\n\ndef underscored(start, end):\n rangeof_nums = list(range(start, end + 1))\n underscored_nums = []\n for check in rangeof_nums:\n if rangeof_nums(check) % 2 != 0:\n underscored_nums += check\n else:\n underscored_nums += check\n\nunderscored(1, 10)\n\nlist(map(lambda num: num % 2 !=0))\n\n\n# In[23]:\n\n\nfor x in range(10): \n if x%2 != 0:\n return \"_\"\n else:\n return x\n\n\n# In[24]:\n\n\nprint(list(map(lambda i: \"_\"*(i%2!=0) or str(i), range(10))))\n\n\n# In[44]:\n\n\nprint(\"Welcome to\" , end = ' ') \nprint(\"GeeksforGeeks\", end = ' ')\n\n\n# In[48]:\n\n\ndef start():\n my_number = int(input(\"What's the number?\"))\n your_guess = int(input(\"I am thinking of a number from 1-10. Can you find it? You have 5 tries. Guess what is it\"))\n tries = 1\n while True:\n if your_guess != my_number:\n if your_guess < my_number:\n tries += 1\n return \"Nope. It's higher than that. Try again.\"\n elif your_guess > my_number:\n tries += 1\n return \"Nope. It's lower than that. Try again.\"\n else:\n return f\"Congratulations! You guessed it in {tries} tries.\"\n \nstart()\n\n\n# In[ ]:\n\n\nwhile expression: target = true_expression if test_expression else false_expression\n\n\n# In[ ]:\n\n\nwhile True: your_guess = my_number\n\n\n# In[ ]:\n\n\n# my_number = int(input(\"What's the number?\"))\n\ndef start():\n my_number = 5\n your_guess = int(input(\"I am thinking of a number from 1-10. Can you find it? You have 5 tries. Guess what is it\"))\n while True: my_number = your_guess if not \"Nope. It's higher than that. Try again.\" else \"Nope. It's lower than that. 
Try again.\"\n    \nstart()\n\n\n# In[ ]:\n\n\ndef value(nums):\n    for items in nums:\n        print(items, end=\"\")\n    \nvalue([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n\n\n# In[ ]:\n\n\norders = [\n    {\n        'id': 'order_001',\n        'item': 'Introduction to Python',\n        'quantity': 1,\n        'price_per_item': 32\n    },\n    {\n        'id': 'order_002',\n        'item': 'Advanced Python',\n        'quantity': 3,\n        'price_per_item': 40\n    },\n    {\n        'id': 'order_003',\n        'item': 'Python web frameworks',\n        'quantity': 2,\n        'price_per_item': 51\n    }\n]\n\nprint(orders[0])\n\n\n# In[ ]:\n\n\n# Create a dict comprehension for all x in 0...9 for the function f(x)=x*x\nn = 10\nf = {x: x*x for x in range(n)}\n\n# Display the result\nprint(f)\n\n","sub_path":"Week 1/Day 2/Practice Sheet 2.py","file_name":"Practice Sheet 2.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"431147718","text":"#!/Users/rlaney/.virtualenvs/NetEngineerONE/bin/python\n\nfrom __future__ import absolute_import, division, print_function\n\nimport netmiko\nimport paramiko\nimport json\nimport mytools\nimport os\nimport sys\nimport signal\n#from trigger.netdevices import NetDevices\n\nsignal.signal(signal.SIGPIPE, signal.SIG_DFL) # IOError: Broken pipe\nsignal.signal(signal.SIGINT, signal.SIG_DFL) # KeyboardInterrupt: Ctrl-C\n\n\n#if len(sys.argv) < 3:\n#    print('Usage: cmdrunner.py commands.txt devices.json')\n#    exit()\n\nnetmiko_exceptions = (netmiko.ssh_exception.NetMikoTimeoutException,\n                      netmiko.ssh_exception.NetMikoAuthenticationException,\n                      paramiko.ssh_exception.SSHException, ValueError,\n                      KeyError, IOError)\n\nwith open('nodes.json') as device_file:\n    all_devices = json.load(device_file)\n    mydevices = mytools.byteify(all_devices)\n\n''' Get all the IOS and IOS-XE devices with duplicates removed '''\nios_devices = []\n''' Get all the NXOS devices with duplicates removed '''\nnxos_devices = []\n''' Get all the Checkpoint devices with duplicates removed '''\ncp_devices = []\n''' Get all the Other devices with duplicates removed '''\not_devices = []\ner_devices = []\n\nfor dev in mydevices:\n    try:\n        ''' Define what we want to keep '''\n        #node = (dev['name'], dev['host'], dev['deviceType'], dev['os'])\n        if dev['os'].__contains__('IOS'):\n            ios_devices.append(dev)\n        elif dev['os'] == 'NXOS':\n            nxos_devices.append(dev)\n        elif dev['os'].__contains__('Linux') or dev['os'].__contains__('Checkpoint'):\n            cp_devices.append(dev)\n        else:\n            ot_devices.append(dev)\n    except KeyError:\n        #try:\n        #    busted_node = (dev['name'], dev['host'], dev['deviceType'], 'UNKNOWN')\n        #except:\n        #    busted_node = (dev['name'], dev['host'], 'UNKNOWN', 'UNKNOWN')\n        print('Device {} is missing os key'.format(dev['name']))\n        er_devices.append(dev)\n\nwith open('ios_devices.txt', 'w') as ios_dev_file:\n    json.dump(ios_devices, ios_dev_file, indent=4)\n\nwith open('nxos_devices.txt', 'w') as nxos_dev_file:\n    json.dump(nxos_devices, nxos_dev_file, indent=4)\n\nwith open('cp_devices.txt', 'w') as cp_dev_file:\n    json.dump(cp_devices, cp_dev_file, indent=4)\n\nwith open('ot_devices.txt', 'w') as ot_dev_file:\n    json.dump(ot_devices, ot_dev_file, indent=4)\n\nwith open('er_devices.txt', 'w') as er_dev_file:\n    json.dump(er_devices, er_dev_file, indent=4)\n\n\n'''\ndump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8', default=None, sort_keys=False, **kw)\n    Serialize ``obj`` as a JSON formatted stream to ``fp`` (a\n    ``.write()``-supporting file-like 
object).\n\ndumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, allow_nan=True, cls=None, indent=None, separators=None, encoding='utf-8', default=None, sort_keys=False, **kw)\n    Serialize ``obj`` to a JSON formatted ``str``.\n'''\n\n\nwith open('ios_cmds.txt') as ios_file:\n    ios_commands = ios_file.readlines()\n\nwith open('nxos_cmds.txt') as nxos_file:\n    nxos_commands = nxos_file.readlines()\n\n\nchange_number = 'CHG0201835'\nuser = 'rlaney'\n#username = 'etnoc'\n#password = 'circlebackaround'\nusername = 'opmantek'\npassword = 'N0Data4U!'\n#username, password = mytools.get_creds()\n\n\nlog_file = open('log_file.txt', 'w')\nelog_file = open('error_log_IOS.txt', 'w')\nnlog_file = open('error_log_NXOS.txt', 'w')\n\nlog_file.write('Total sanitized IOS devices: {} \\n'.format(len(ios_devices)))\nlog_file.write('~'*79 + '\\n\\n')\n\n\nfor device in ios_devices:\n    try:\n        print('Connecting to IOS {} with IP {} \\n'.format(device['name'],\n                                                          device['host']))\n        log_file.write('Connecting to IOS {} with IP {} \\n'.format(\n            device['name'], device['host']))\n        connection = netmiko.ConnectHandler(device_type='cisco_ios',\n                                            ip=device['host'],\n                                            username=username,\n                                            password=password,\n                                            global_delay_factor=2)\n        print('IOS Device: ' + connection.base_prompt + '\\n')\n        log_file.write('IOS Device: ' + connection.base_prompt + '\\n')\n        connection.send_config_set(ios_commands)\n        connection.send_command('write memory')\n        connection.send_command('send log 6 \"{} has finished change control: {}\"'.format(user, change_number))\n        connection.disconnect()\n        log_file.write('~'*79 + '\\n\\n')\n        print('~'*79 + '\\n\\n')\n    except netmiko_exceptions as e:\n        elog_file.write('Failed on {} with IP: {} \\n'.format(\n            device['name'], device['host']))\n        elog_file.write('Error: {} \\n'.format(e))\n        elog_file.write('~'*79 + '\\n\\n')\n\n        print('Failed on {} with IP: {} \\n'.format(device['name'], device['host']))\n        print('Error: {} \\n'.format(e))\n        print('~'*79 + '\\n')\n\nlog_file.write('Total sanitized NXOS devices: {} \\n'.format(len(nxos_devices)))\nlog_file.write('~'*79 + '\\n\\n')\nfor device in nxos_devices:\n    try:\n        print('Connecting to NXOS {} with IP {} \\n'.format(device['name'],\n                                                           device['host']))\n        log_file.write('Connecting to NXOS {} with IP {} \\n'.format(\n            device['name'], device['host']))\n        connection = netmiko.ConnectHandler(device_type='cisco_nxos',\n                                            ip=device['host'],\n                                            username=username,\n                                            password=password,\n                                            global_delay_factor=2)\n        print('NXOS Device: ' + connection.base_prompt + '\\n')\n        log_file.write('NXOS Device: ' + connection.base_prompt + '\\n')\n        connection.send_config_set(nxos_commands)\n        connection.send_command('copy run start')\n        connection.send_command('send log 6 \"{} has finished change control: {}\"'.format(user, change_number))\n        connection.disconnect()\n        log_file.write('~'*79 + '\\n\\n')\n        print('~'*79 + '\\n')\n    except netmiko_exceptions as n:\n        nlog_file.write('Failed on {} with IP: {} \\n'.format(\n            device['name'], device['host']))\n        nlog_file.write('Error: {} \\n'.format(n))\n        nlog_file.write('~'*79 + '\\n\\n')\n\n        print('Failed on {} with IP: {} \\n'.format(device['name'],\n                                                   device['host']))\n        print('Error: {} \\n'.format(n))\n        print('~'*79 + 
'\\n')\n\n\nlog_file.close()\nelog_file.close()\nnlog_file.close()\n","sub_path":"scripts/nxos-ios-runner.py","file_name":"nxos-ios-runner.py","file_ext":"py","file_size_in_byte":6852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"320988210","text":"import abc\nimport re\nimport socket\nimport jframework.extras.writeformat as wf\n\n\n#Basic module\nclass Module(metaclass=abc.ABCMeta):\n\n def __init__(self):\n self.host = \"127.0.0.1\"\n\n @abc.abstractmethod\n def run(self):\n pass\n\n def get_options(self):\n return [\"ip\"]\n\n def help(self):\n print(\"\")\n print(\"______ MODULE _______\")\n print(\"back -> Remove loaded module\")\n print(\"run -> Execute the module\")\n print(\"put
print >>fp, \"<a href='recipes.html'>Recipes</a>\"\nprint >>fp, \"\"\"\n<a href='inventory.html'>Inventory</a>\n\n<a href='liquor_types.html'>Liquor Types</a>\n\n<a href='Add_bottle_type.html'>Add Bottle Type</a>\n\n\"\"\"\nfp.close()\n\n###############################################################\n#Recipes\n#Reference: github.com/ctb/cse491-linkz\n###############################################################\nfp = open('html/recipes.html', 'w')\n\nprint >>fp, \"<html><head><title>Recipes</title></head><body>\"\nprint >>fp, \"\"\"<table border='1'>\n<tr><th>Recipe Name</th><th>Ingredients</th><th>Enough ingredients?</th></tr>\n\"\"\"\n#For every recipe in the database\nfor r in db._recipe_db:\n    possible = 'No'\n    if len(r.need_ingredients()) == 0:\n        possible = 'Yes'\n\n    #Display result\n    print >> fp, \"<tr><td>%s</td> <td>%s</td> <td>%s</td></tr>\" % (r.Name, r.Ingredient, possible)\n\nprint >>fp, \"\"\"\n</table>\n\nLink to the other three files:\n<a href='index.html'>Back to Index</a>\n\n<a href='inventory.html'>Inventory</a>\n\n<a href='liquor_types.html'>Liquor Types</a>\n\"\"\"\nfp.close()\n\n#############################################################\n#Inventory\n##############################################################\nfp = open('html/inventory.html', 'w')\n\nprint >>fp, \"<html><head><title>Inventory</title></head><body>\"\nprint >>fp, \"\"\"<table border='1'>\n<tr><th>Manufacturer</th><th>Liquor Type</th><th>Amount</th></tr>\n\"\"\"\nfor mfg, liquor in db.get_liquor_inventory():\n    #Get the amount in ml\n    amt = db.get_liquor_amount(mfg,liquor)\n    amount = str(amt) + ' ml'\n    print >> fp, \"<tr><td>%s</td> <td>%s</td> <td>%s</td></tr>\" % (mfg, liquor, amount)\n\nprint >>fp, \"\"\"\n</table>\n\nLink to the other three files:\n<a href='index.html'>Back to Index</a>\n\n<a href='recipes.html'>Recipes</a>\n\n<a href='liquor_types.html'>Liquor Types</a>\n\"\"\"\nfp.close()\n\n#############################################################\n#liquor_types\n#Reference: github.com/ctb/cse491-linkz\n#############################################################\nfp = open('html/liquor_types.html', 'w')\n\nprint >>fp, \"<html><head><title>Liquor Types</title></head><body>\"\nprint >>fp, \"\"\"<table border='1'>\n<tr><th>Manufacturer</th><th>Liquor</th><th>Type</th></tr>\n\"\"\"\nfor (mfg, liquor, type) in db._bottle_types_db:\n    print >> fp, \"<tr><td>%s</td> <td>%s</td> <td>%s</td></tr>\" % (mfg, liquor, type)\n\nprint >>fp, \"\"\"\n</table>\n\nLink to the other three files:\n<a href='index.html'>Back to Index</a>\n\n<a href='recipes.html'>Recipes</a>\n\n<a href='inventory.html'>Inventory</a>\n\"\"\"\nfp.close()\n\n\n#############################################################\n#Add_bottle_type\n#############################################################\nfp = open('html/Add_bottle_type.html', 'w')\n\nprint >>fp, \"<html><head><title>Add Bottle Type</title></head><body>\"\nprint >>fp, \"\"\"\n<form>\n Manufacturer? <input type='text'>\n Liquor? <input type='text'>\n Type? <input type='text'>\n
    \n\"\"\"\nfp.close()\n\n\n\n","sub_path":"make-html.py","file_name":"make-html.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"517585719","text":"import scrapy\n\n\nclass SkytechSpider(scrapy.Spider):\n name = \"skytech_crawler\"\n\n start_urls = [\n 'http://www.skytech.lt/kompiuteriu-komponentai-procesoriai-cpu-c-86_85_584.html?f=s(),g(),p(3530,3509,3489),k(35.19,2238.00)&frag=&fragd=0&pav=undefined&sort=5a&sand=0&grp=1&pagesize=100&page=1'\n ]\n\n base_url = 'http://www.skytech.lt/'\n\n def parse(self, response):\n links = response.xpath(\"//td[@class='name']/a/@href\").extract()\n for link in links:\n absolute_url = self.base_url + link\n yield response.follow(absolute_url, callback=self.parse_info)\n\n def parse_info(self, response):\n all_info = response.xpath(\".//tr/td[@class='param-name']/text()\").extract()\n all_spec = response.xpath(\".//tr/td[@class='param-value']/text()\").extract()\n \n for info, spec in zip(all_info, all_spec):\n yield {\n info.strip(): spec.strip()\n }\n ","sub_path":"ProjectScrape/ProjectScrape/spiders/skytech_crawler.py","file_name":"skytech_crawler.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"242825472","text":"#coding=utf-8\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom compute.models import *\nfrom network.models import *\nfrom storage.models import *\nfrom image.models import *\nfrom ..vm import get, get_list, create, status, op, edit, migrate\nfrom .testsettings import *\n\nimport subprocess, os, libvirt, time\n\nfrom .tools import create_user, create_superuser\nfrom .tools import create_center, create_group, create_host\nfrom .tools import create_vlantype, create_vlan, create_ip\nfrom .tools import create_ceph_host, create_ceph_image_pool\nfrom .tools import create_imagetype, create_image, create_xml\n\nclass VmTest(TestCase):\n vcpu = 2\n mem = 2048\n def setUp(self):\n self.u1 = create_user('apiuser1')\n self.u2 = create_user('apiuser2')\n self.u3 = create_superuser('superuser')\n self.u4 = create_user('apiuser4')\n\n self.c1 = create_center('1', '1', '1')\n\n self.g1 = create_group(self.c1, '1', '1', [self.u1, self.u4])\n \n self.vt1 = create_vlantype('vlantype1')\n \n self.v1 = create_vlan(str(TEST_VLAN), str(TEST_BR), self.vt1)\n \n self.ip1 = create_ip(self.v1, TEST_MAC, TEST_IP)\n \n self.h1 = create_host(self.g1, str(TEST_HOST), True, [self.v1])\n\n self.ch1 = create_ceph_host(self.c1, str(TEST_CEPH['host']), TEST_CEPH['port'], str(TEST_CEPH['uuid']))\n\n self.cp1 = create_ceph_image_pool(self.ch1, TEST_CEPH['pool'])\n \n self.it1 = create_imagetype('imagetype1')\n \n self.x1 = create_xml('linux', TEST_XML)\n \n self.i1 = create_image(self.cp1, self.x1, self.it1, 'image1', 'v0.1', TEST_IMAGE)\n\n \n# def tearDown(self):\n# # print 'tear down ========================================'\n# if self.vm_uuid:\n# if self._vm_exist(self.h1.ipv4, self.vm_uuid):\n# cmd = 'ssh %s virsh destroy %s' % (self.h1.ipv4, self.vm_uuid)\n# r, info = subprocess.getstatusoutput(cmd)\n# # if r != 0:\n# # os.system('ssh %s virsh destroy %s' % (self.h1.ipv4, self.vm_uuid))\n# # print info\n \n# cmd = 'ssh %s virsh undefine %s' % (self.h1.ipv4, self.vm_uuid)\n# r, info = subprocess.getstatusoutput(cmd)\n# if r != 0:\n# os.system('ssh %s virsh undefine %s' % (self.h1.ipv4, self.vm_uuid))\n# # print info\n \n# if not 
self.vm_disk:\n# self.vm_disk = 'test_'+self.vm_uuid\n \n# if self.vm_disk:\n# cmd = 'ssh %s rbd ls %s | grep x_%s' % (self.cp1.host.host, self.cp1.pool, self.vm_disk)\n# r, info = subprocess.getstatusoutput(cmd)\n# if r == 0:\n# cmd1 = 'ssh %s rbd rm %s/x_%s' % (self.cp1.host.host, self.cp1.pool, self.vm_disk)\n# r1, info1 = subprocess.getstatusoutput(cmd1)\n# if r1 != 0:\n# os.system('ssh %s rbd rm %s/x_%s' % (self.cp1.host.host, self.cp1.pool, self.vm_disk))\n# # print info1\n \n# cmd = 'ssh %s rbd ls %s | grep %s' % (self.cp1.host.host, self.cp1.pool, self.vm_disk)\n# r, info = subprocess.getstatusoutput(cmd)\n# if r == 0:\n# cmd1 = 'ssh %s rbd rm %s/%s' % (self.cp1.host.host, self.cp1.pool, self.vm_disk)\n# r1, info1 = subprocess.getstatusoutput(cmd1)\n# if r1 != 0:\n# os.system('ssh %s rbd rm %s/%s' % (self.cp1.host.host, self.cp1.pool, self.vm_disk))\n# # print info1\n \n# # print 'finished tear down ================================'\n \n def test_create_vm_err_args(self):\n self.assertTrue(self._host_alive(self.ip1) == False)\n req = {'req_user': self.u1, 'image_id': self.i1.id, 'vcpu': self.vcpu, 'mem': self.mem}\n exp = {'res': False}\n res = create(req)\n if res['res']:\n self.assertTrue(self._vm_exist(self.h1.ipv4, res['uuid']))\n self._del_vm(self.h1.ipv4, res['uuid'])\n self._del_ceph(self.cp1, res['uuid'])\n self.assertDictContainsSubset(exp, res)\n\n def test_create_vm_err_args1(self):\n self.assertTrue(self._host_alive(self.ip1) == False)\n req = {'req_user': self.u1, 'image_id': self.i1.id, 'vcpu': self.vcpu, 'mem': self.mem, 'net_type_id': self.vt1.code}\n exp = {'res': False}\n res = create(req)\n if res['res']:\n self.assertTrue(self._vm_exist(self.h1.ipv4, res['uuid']))\n self._del_vm(self.h1.ipv4, res['uuid'])\n self._del_ceph(self.cp1, res['uuid'])\n self.assertDictContainsSubset(exp, res)\n\n def test_create_vm_err_args2(self):\n self.assertTrue(self._host_alive(self.ip1) == False)\n req = {'req_user': self.u1, 'image_id': self.i1.id, 'vcpu': self.vcpu, 'mem': self.mem, 'group_id': self.g1.id}\n exp = {'res': False}\n res = create(req)\n if res['res']:\n self.assertTrue(self._vm_exist(self.h1.ipv4, res['uuid']))\n self._del_vm(self.h1.ipv4, res['uuid'])\n self._del_ceph(self.cp1, res['uuid'])\n self.assertDictContainsSubset(exp, res)\n\n def test_create_vm_nettype(self):\n self.assertTrue(self._host_alive(self.ip1) == False)\n req = {'req_user': self.u1, 'image_id': self.i1.id, 'vcpu': self.vcpu, 'mem': self.mem, \n 'net_type_id': self.vt1.code, 'group_id': self.g1.id}\n exp = {'res': True}\n res = create(req)\n if res['res']:\n self.assertTrue(self._vm_exist(self.h1.ipv4, res['uuid']))\n self._del_vm(self.h1.ipv4, res['uuid'])\n self._del_ceph(self.cp1, res['uuid'])\n self.assertDictContainsSubset(exp, res) \n\n def test_create_vm_nettype1(self):\n self.assertTrue(self._host_alive(self.ip1) == False)\n req = {'req_user': self.u1, 'image_id': self.i1.id, 'vcpu': self.vcpu, 'mem': self.mem, \n 'net_type_id': self.vt1.code, 'host_id': self.h1.id}\n exp = {'res': True}\n res = create(req)\n if res['res']:\n self.assertTrue(self._vm_exist(self.h1.ipv4, res['uuid']))\n self._del_vm(self.h1.ipv4, res['uuid'])\n self._del_ceph(self.cp1, res['uuid'])\n self.assertDictContainsSubset(exp, res) \n \n def test_create_vm_vlan(self):\n self.assertTrue(self._host_alive(self.ip1) == False)\n req = {'req_user': self.u1, 'image_id': self.i1.id, 'vcpu': self.vcpu, 'mem': self.mem, \n 'vlan_id': self.v1.id, 'group_id': self.g1.id}\n exp = {'res': True}\n res = create(req)\n if 
res['res']:\n self.assertTrue(self._vm_exist(self.h1.ipv4, res['uuid']))\n self._del_vm(self.h1.ipv4, res['uuid'])\n self._del_ceph(self.cp1, res['uuid'])\n self.assertDictContainsSubset(exp, res) \n\n def test_create_vm_vlan1(self):\n self.assertTrue(self._host_alive(self.ip1) == False)\n req = {'req_user': self.u1, 'image_id': self.i1.id, 'vcpu': self.vcpu, 'mem': self.mem, \n 'vlan_id': self.v1.id, 'host_id': self.h1.id}\n exp = {'res': True}\n res = create(req)\n if res['res']:\n self.assertTrue(self._vm_exist(self.h1.ipv4, res['uuid']))\n self._del_vm(self.h1.ipv4, res['uuid'])\n self._del_ceph(self.cp1, res['uuid'])\n self.assertDictContainsSubset(exp, res) \n\n def test_create_get_vm(self):\n self.assertTrue(self._host_alive(self.ip1) == False)\n req = {'req_user': self.u1, 'image_id': self.i1.id, 'vcpu': self.vcpu, 'mem': self.mem, \n 'net_type_id': self.vt1.code, 'group_id': self.g1.id, 'remarks': 'test11122333'}\n exp = {'res': True}\n res = create(req)\n if res['res']:\n #正常获取vm信息\n req1 = {'req_user': self.u1, 'uuid': res['uuid']}\n exp1 = {'res': True, 'info':{\n 'uuid': res['uuid'],\n 'name': res['uuid'], \n 'vcpu': self.vcpu ,\n 'mem': self.mem ,\n 'creator': self.u1.username, \n 'remarks': req['remarks'],\n 'image_id': self.i1.id,\n 'image_snap': self.i1.snap,\n 'image': self.i1.fullname,\n 'host_id': self.h1.id,\n 'host_ipv4': self.h1.ipv4,\n 'group_id': self.g1.id,\n 'group_name': self.g1.name,\n 'center_id': self.c1.id,\n 'center_name': self.c1.name, \n \n 'vlan_id': self.v1.id,\n 'vlan_name': self.v1.vlan,\n 'mac': self.ip1.mac,\n 'ipv4': self.ip1.ipv4,\n \n 'ceph_id': self.cp1.id,\n 'ceph_host': self.ch1.host,\n 'ceph_pool': self.cp1.pool\n }}\n res1 = get(req1)\n self.assertTrue(res1['res'])\n if res1['res']:\n self.assertDictContainsSubset(exp1['info'], res1['info'])\n\n #越权获取vm信息\n req3 = {'req_user': self.u2, 'uuid': res['uuid']}\n exp3 = {'res': False}\n res3 = get(req3)\n self.assertDictContainsSubset(exp3, res3)\n\n\n self.assertTrue(self._vm_exist(self.h1.ipv4, res['uuid']))\n self._del_vm(self.h1.ipv4, res['uuid'])\n self._del_ceph(self.cp1, res['uuid'])\n self.assertDictContainsSubset(exp, res) \n \n\n def test_create_get_vmlist(self):\n self.assertTrue(self._host_alive(self.ip1) == False)\n req = {'req_user': self.u1, 'image_id': self.i1.id, 'vcpu': self.vcpu, 'mem': self.mem, \n 'net_type_id': self.vt1.code, 'group_id': self.g1.id, 'remarks': 'test11122333'}\n exp = {'res': True}\n res = create(req)\n if res['res']:\n #正常获取vm列表\n req2 = {'req_user': self.u1, 'group_id': self.g1.id}\n exp2 = {'res': True, 'list':[\n {\n 'uuid': res['uuid'],\n 'name': res['uuid'], \n 'group_id': self.g1.id,\n 'group_name': self.g1.name,\n 'center_id': self.c1.id,\n 'center_name': self.c1.name, \n 'host_id': self.h1.id,\n 'host_ipv4': self.h1.ipv4,\n 'image_id': self.i1.id,\n 'image': self.i1.fullname,\n 'ipv4': self.ip1.ipv4,\n 'vcpu': self.vcpu ,\n 'mem': self.mem ,\n 'remarks': req['remarks'],\n }\n ]}\n res2 = get_list(req2)\n \n self.assertTrue(res2['res'])\n if res2['res']:\n self.assertEqual(len(res2['list']), len(exp2['list']))\n for i in range(len(exp2['list'])):\n self.assertDictContainsSubset(exp2['list'][i], res2['list'][i])\n\n #越权获取vm列表\n req4 = {'req_user': self.u2, 'group_id': self.g1.id}\n exp4 = {'res': False}\n res4 = get_list(req4)\n self.assertDictContainsSubset(exp4, res4)\n\n\n self.assertTrue(self._vm_exist(self.h1.ipv4, res['uuid']))\n self._del_vm(self.h1.ipv4, res['uuid'])\n self._del_ceph(self.cp1, res['uuid'])\n self.assertDictContainsSubset(exp, 
res) \n\n def test_create_delete_vm_db(self):\n host_pre = Host.objects.get(id = self.h1.id)\n ip_pre = MacIP.objects.get(id = self.ip1.id)\n\n self.assertTrue(self._host_alive(self.ip1) == False)\n req = {'req_user': self.u1, 'image_id': self.i1.id, 'vcpu': self.vcpu, 'mem': self.mem, \n 'net_type_id': self.vt1.code, 'group_id': self.g1.id}\n exp = {'res': True}\n res = create(req)\n if res['res']:\n host_aft = Host.objects.get(id = self.h1.id)\n ip_aft = MacIP.objects.get(id = self.ip1.id)\n self.assertEqual(host_pre.vcpu_allocated + self.vcpu, host_aft.vcpu_allocated)\n self.assertEqual(host_pre.mem_allocated + self.mem, host_aft.mem_allocated)\n self.assertEqual(host_pre.vm_created + 1, host_aft.vm_created)\n\n self.assertEqual(ip_aft.vmid, res['uuid'])\n\n req1 = {'req_user': self.u1, 'uuid': res['uuid'], 'op': 'delete'}\n exp1 = {'res': True}\n res1 = op(req1)\n self.assertDictContainsSubset(exp1, res1)\n if res1['res']:\n host_del = Host.objects.get(id = self.h1.id)\n ip_del = MacIP.objects.get(id = self.ip1.id)\n self.assertEqual(host_del.vcpu_allocated, host_aft.vcpu_allocated - self.vcpu)\n self.assertEqual(host_del.mem_allocated, host_aft.mem_allocated - self.mem)\n self.assertEqual(host_del.vm_created, host_aft.vm_created - 1)\n self.assertEqual(ip_del.vmid, '')\n\n # domain 是否删除\n self.assertFalse(self._vm_exist(self.h1.ipv4, res['uuid']))\n \n # 虚拟机记录是否删除\n vmobj = Vm.objects.filter(uuid = res['uuid'])\n self.assertFalse(vmobj.exists())\n \n # 归档记录是否添加\n vmarc = VmArchive.objects.filter(uuid = res['uuid'])\n self.assertTrue(vmarc.exists())\n self.assertTrue(vmarc.count() == 1)\n \n # 归档记录是否正确\n if vmarc.count() == 1:\n vmarc = vmarc[0]\n self.assertTrue(vmarc.center_id == self.c1.id)\n self.assertTrue(vmarc.center_name == self.c1.name)\n self.assertTrue(vmarc.group_id == self.g1.id)\n self.assertTrue(vmarc.group_name == self.g1.name)\n self.assertTrue(vmarc.host_id == self.h1.id)\n self.assertTrue(vmarc.host_ipv4 == self.h1.ipv4)\n self.assertTrue(vmarc.ceph_host == self.cp1.host.host)\n self.assertTrue(vmarc.ceph_pool == self.cp1.pool)\n self.assertTrue(vmarc.image_id == self.i1.id)\n self.assertTrue(vmarc.image_snap == self.i1.snap)\n self.assertTrue(vmarc.name == res['uuid'])\n self.assertTrue(vmarc.uuid == res['uuid'])\n self.assertTrue(vmarc.vcpu == self.vcpu)\n self.assertTrue(vmarc.mem == self.mem)\n self.assertTrue(vmarc.disk[2:-21] == res['uuid'])\n self.assertTrue(vmarc.mac == self.ip1.mac)\n self.assertTrue(vmarc.ipv4 == self.ip1.ipv4)\n self.assertTrue(vmarc.vlan == self.v1.vlan)\n self.assertTrue(vmarc.br == self.v1.br)\n\n self._teardownvm(res['uuid'])\n self.assertDictContainsSubset(exp, res) \n\n def test_vm_op_perm(self):\n self.assertTrue(self._host_alive(self.ip1) == False)\n req = {'req_user': self.u1, 'image_id': self.i1.id, 'vcpu': self.vcpu, 'mem': self.mem, \n 'net_type_id': self.vt1.code, 'group_id': self.g1.id}\n exp = {'res': True}\n res = create(req)\n if res['res']:\n #自己\n req1 = {'req_user': self.u1, 'uuid': res['uuid'], 'op': 'reboot'}\n exp1 = {'res': True}\n res1 = op(req1)\n self.assertDictContainsSubset(exp1, res1)\n\n #同集群其他管理员\n req1 = {'req_user': self.u4, 'uuid': res['uuid'], 'op': 'reboot'}\n exp1 = {'res': False}\n res1 = op(req1)\n self.assertDictContainsSubset(exp1, res1) \n\n #非该集群管理员\n req1 = {'req_user': self.u2, 'uuid': res['uuid'], 'op': 'reboot'}\n exp1 = {'res': False}\n res1 = op(req1)\n self.assertDictContainsSubset(exp1, res1)\n\n #超级管理员\n req1 = {'req_user': self.u3, 'uuid': res['uuid'], 'op': 'reboot'}\n exp1 = {'res': 
True}\n res1 = op(req1)\n self.assertDictContainsSubset(exp1, res1)\n\n self.assertTrue(self._vm_exist(self.h1.ipv4, res['uuid']))\n self._del_vm(self.h1.ipv4, res['uuid'])\n self._del_ceph(self.cp1, res['uuid'])\n self.assertDictContainsSubset(exp, res) \n\n\n def test_edit_shutdown_start(self):\n vm_uuid = self._setupvm()\n if not vm_uuid:\n return\n\n #修改备注测试\n vmobj = Vm.objects.get(uuid = vm_uuid)\n hostobj = Host.objects.get(id = self.h1.id)\n remarks = 'test123'\n req = {'req_user': self.u1, 'uuid': vmobj.uuid, 'remarks': remarks}\n exp = {'res': True}\n res = edit(req)\n self.assertDictEqual(exp, res)\n self._assert_host(hostobj)\n vmobj.remarks = remarks\n self._assert_vm(vmobj)\n \n #运行状态修改\n vmobj = Vm.objects.get(uuid = vm_uuid)\n hostobj = Host.objects.get(id = self.h1.id)\n vcpu = 3\n mem = 3000\n req1 = {'req_user': self.u1, 'uuid': vmobj.uuid, 'vcpu': vcpu, 'mem': mem}\n exp1 = {'res': False}\n res1 = edit(req1)\n self.assertDictContainsSubset(exp1, res1)\n self._assert_host(hostobj)\n self._assert_vm(vmobj)\n\n #poweroff操作测试\n req11 = {'req_user': self.u1, 'uuid': vmobj.uuid, 'op':'poweroff'}\n exp11 = {'res': True}\n res11 = op(req11)\n self.assertDictEqual(exp11, res11)\n\n if res11['res']:\n #状态\n req6 = {'req_user': self.u1, 'uuid': vm_uuid}\n conn = libvirt.open(\"qemu+ssh://%s/system\" % self.h1.ipv4)\n domain = conn.lookupByUUIDString(vm_uuid)\n info = domain.info()\n exp6 = {'res': True, 'status': info[0]}\n res6 = status(req6)\n self.assertDictEqual(exp6, res6)\n\n #修改cpu测试\n vmobj = Vm.objects.get(uuid = vm_uuid)\n hostobj = Host.objects.get(id = self.h1.id)\n vcpu = 4\n req2 = {'req_user': self.u1, 'uuid': vmobj.uuid, 'vcpu': vcpu}\n exp2 = {'res': True}\n res2 = edit(req2)\n self.assertDictEqual(exp2, res2)\n\n hostobj.vcpu_allocated = hostobj.vcpu_allocated - vmobj.vcpu + vcpu\n self._assert_host(hostobj)\n \n vmobj.vcpu = vcpu\n self._assert_vm(vmobj)\n \n #修改mem测试\n vmobj = Vm.objects.get(uuid = vm_uuid)\n hostobj = Host.objects.get(id = self.h1.id)\n mem = 4096\n req3 = {'req_user': self.u1, 'uuid': vmobj.uuid, 'mem': mem}\n exp3 = {'res': True}\n res3 = edit(req3)\n self.assertDictEqual(exp3, res3)\n \n hostobj.mem_allocated = hostobj.mem_allocated - vmobj.mem + mem\n self._assert_host(hostobj)\n\n vmobj.mem = mem\n self._assert_vm(vmobj)\n\n #修改cpu和mem测试\n vmobj = Vm.objects.get(uuid = vm_uuid)\n hostobj = Host.objects.get(id = self.h1.id)\n vcpu = 3\n mem = 3000\n req4 = {'req_user': self.u1, 'uuid': vmobj.uuid, 'vcpu': vcpu, 'mem': mem}\n exp4 = {'res': True}\n res4 = edit(req4)\n self.assertDictEqual(exp4, res4)\n \n hostobj.vcpu_allocated = hostobj.vcpu_allocated - vmobj.vcpu + vcpu\n hostobj.mem_allocated = hostobj.mem_allocated - vmobj.mem + mem\n self._assert_host(hostobj)\n\n vmobj.vcpu = vcpu\n vmobj.mem = mem\n self._assert_vm(vmobj)\n\n #start操作测试\n req5 = {'req_user': self.u1, 'uuid': vmobj.uuid, 'op': 'start'}\n exp5 = {'res': True}\n res5 = op(req5)\n self.assertDictEqual(exp5, res5)\n\n \n\n self._teardownvm(vm_uuid)\n\n \n def test_op_reset(self):\n vm_uuid = self._setupvm()\n if not vm_uuid:\n return\n \n def get_disk_count(host, pool, disk):\n cmd = 'ssh %s rbd ls %s | grep ^%s | wc -l' % (host, pool, disk)\n r, info = subprocess.getstatusoutput(cmd)\n if r == 0:\n return int(info)\n return -1\n \n #非关机状态判断\n disk_count = get_disk_count(self.cp1.host.host, self.cp1.pool, vm_uuid)\n x_disk_count = get_disk_count(self.cp1.host.host, self.cp1.pool, 'x_'+vm_uuid)\n res1 = op({'req_user': self.u1, 'uuid': vm_uuid, 'op': 'reset'})\n exp1 = 
{'res': False}\n self.assertDictContainsSubset(exp1, res1)\n self.assertEqual(disk_count, get_disk_count(self.cp1.host.host, self.cp1.pool, vm_uuid))\n self.assertEqual(x_disk_count, get_disk_count(self.cp1.host.host, self.cp1.pool, 'x_'+vm_uuid))\n \n conn = libvirt.open(\"qemu+ssh://%s/system\" % self.h1.ipv4)\n domain = conn.lookupByUUIDString(vm_uuid)\n domain.destroy()\n \n #常规测试\n disk_count = get_disk_count(self.cp1.host.host, self.cp1.pool, vm_uuid)\n x_disk_count = get_disk_count(self.cp1.host.host, self.cp1.pool, 'x_'+vm_uuid)\n res2 = op({'req_user': self.u1, 'uuid': vm_uuid, 'op': 'reset'})\n exp2 = {'res': True}\n self.assertDictEqual(exp2, res2)\n self.assertEqual(disk_count, get_disk_count(self.cp1.host.host, self.cp1.pool, vm_uuid))\n self.assertEqual(x_disk_count + 1, get_disk_count(self.cp1.host.host, self.cp1.pool, 'x_'+vm_uuid))\n \n \n\n def _setupvm(self):\n req = {'req_user': self.u1, 'image_id': self.i1.id, 'vcpu': self.vcpu, 'mem': self.mem, \n 'net_type_id': self.vt1.code, 'group_id': self.g1.id}\n res = create(req)\n if res['res']:\n return res['uuid']\n return False\n\n def _teardownvm(self, vm_uuid):\n self._del_vm(self.h1.ipv4, vm_uuid)\n self._del_ceph(self.cp1, vm_uuid)\n\n def _host_alive(self, ipv4):\n cmd = 'fping %s -c %d' % (ipv4, 1)\n# print cmd\n res, info = subprocess.getstatusoutput(cmd)\n# print info\n if res == 0:\n return True\n return False\n \n def _disk_exist(self, cephpool, disk_uuid):\n cmd = 'ssh %s rbd ls %s | grep %s' % (cephpool.host.host, cephpool.pool, disk_uuid)\n r, info = subprocess.getstatusoutput(cmd)\n if r == 0:\n return True\n return False\n\n def _vm_exist(self, host_ip, vm_uuid):\n cmd = 'ssh %s virsh list --all | grep %s' % (host_ip, vm_uuid) \n r, info = subprocess.getstatusoutput(cmd)\n if r == 0:\n return True\n return False\n\n def _del_ceph(self, cephpool, disk_uuid):\n if self._disk_exist(cephpool, disk_uuid):\n try:\n cmd = 'ssh %s rbd rm %s/%s' % (cephpool.host.host, cephpool.pool, disk_uuid)\n r, info = subprocess.getstatusoutput(cmd)\n if r != 0:\n os.system('ssh %s rbd rm %s/%s' % (cephpool.host.host, cephpool.pool, disk_uuid))\n except:pass\n\n def _del_vm(self, host_ip, vm_uuid):\n if self._vm_exist(host_ip, vm_uuid):\n try:\n cmd = 'ssh %s virsh destroy %s' % (host_ip, vm_uuid)\n r, info = subprocess.getstatusoutput(cmd)\n if r != 0:\n os.system('ssh %s virsh destroy %s' % (host_ip, vm_uuid))\n # print info\n \n cmd = 'ssh %s virsh undefine %s' % (host_ip, vm_uuid)\n r, info = subprocess.getstatusoutput(cmd)\n if r != 0:\n os.system('ssh %s virsh undefine %s' % (host_ip, vm_uuid))\n # print info\n except:pass\n \n def _assert_vm(self, vm_tmp):\n vmobj = Vm.objects.get(id = vm_tmp.id)\n self.assertTrue(vmobj.vcpu == vm_tmp.vcpu)\n self.assertTrue(vmobj.mem == vm_tmp.mem)\n \n def _assert_host(self, host_tmp):\n hostobj = Host.objects.get(pk = host_tmp.pk)\n self.assertTrue(hostobj.vcpu_total == host_tmp.vcpu_total)\n self.assertTrue(hostobj.vcpu_allocated == host_tmp.vcpu_allocated)\n self.assertTrue(hostobj.mem_total == host_tmp.mem_total)\n self.assertTrue(hostobj.mem_allocated == host_tmp.mem_allocated)\n self.assertTrue(hostobj.mem_reserved == host_tmp.mem_reserved)\n self.assertTrue(hostobj.vm_limit == host_tmp.vm_limit)\n self.assertTrue(hostobj.vm_created == host_tmp.vm_created)\n","sub_path":"api/tests/test_vm.py","file_name":"test_vm.py","file_ext":"py","file_size_in_byte":25066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"377593637","text":"#For supporting Future-Host effect, Australia and United Kingdom's Medals count as been compared.\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\ncount1 = []\r\ncount2 = []\r\ndf = pd.read_csv('summer.csv')\r\nlis = df.Year\r\nyear = list(set(lis))\r\nyear.sort()\r\nfor i in year:\r\n\tframe = df[(df.Year == i) & (df.Country == 'AUS')]\r\n\tcount1.append(frame.Medal.count())\r\n\r\nfor i in year:\r\n\tframe = df[(df.Year == i) & (df.Country == 'GBR')]\r\n\tcount2.append(frame.Medal.count())\r\ndf2 = pd.DataFrame(count1,year)\r\ndf3 = pd.DataFrame(count2,year)\r\n\r\nplt.subplot(2,1,1)\r\nplt.plot(df2,'ro',df2,'k')\r\nplt.title('AUSTRALIA')\r\n\r\nplt.ylabel('Medals')\r\nplt.subplot(2,1,2)\r\nplt.plot(df3,'bo',df3,'k')\r\nplt.xlabel('YEARS')\r\nplt.ylabel('Medals')\r\nplt.title('UK')\r\nplt.tight_layout()\r\nplt.show()","sub_path":"171046026_N.ShivaGanesh/ausVSgbr.py","file_name":"ausVSgbr.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"571067768","text":"\"\"\"\r\nmnist_svm\r\n~~~~~~~~~\r\n\r\nA classifier program for recognizing handwritten digits from the MNIST\r\ndata set, using an SVM classifier and grid search for hyperparameter tuning.\"\"\"\r\n\r\nimport mnist_loader\r\nimport numpy as np\r\nimport random\r\nimport time\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.svm import SVC\r\n\r\nnp.random.seed(123456)\r\nrandom.seed(256)\r\n\r\n\r\ndef trim_data(training_data, validation_data, test_data, percentage=100):\r\n\r\n m_train = training_data[0].shape[0]\r\n ind_train = list(range(m_train))\r\n random.shuffle(ind_train)\r\n ind_train = ind_train[0:(percentage * m_train) // 100]\r\n\r\n m_val = validation_data[0].shape[0]\r\n ind_val = list(range(m_val))\r\n random.shuffle(ind_val)\r\n ind_val = ind_val[0:(percentage * m_val) // 100]\r\n\r\n m_test = test_data[0].shape[0]\r\n ind_test = list(range(m_test))\r\n random.shuffle(ind_test)\r\n ind_test = ind_test[0:(percentage * m_test) // 100]\r\n\r\n training_data = (training_data[0][ind_train, :],\r\n training_data[1][ind_train])\r\n validation_data = (validation_data[0][ind_val, :],\r\n validation_data[1][ind_val])\r\n test_data = (test_data[0][ind_test, :],\r\n test_data[1][ind_test])\r\n\r\n return training_data, validation_data, test_data\r\n\r\n\r\n# Set the percentage of data to be used\r\npercentage = 10\r\n\r\n# Load and trim the data\r\ntraining_data, validation_data, test_data = trim_data(\r\n *mnist_loader.load_data(), percentage=percentage)\r\n\r\n\r\n# Combine trainging + validation, rename to X and y\r\n\r\nX_train = np.vstack((training_data[0], validation_data[0]))\r\ny_train = np.concatenate((training_data[1], validation_data[1]))\r\n\r\nX_test = test_data[0]\r\ny_test = test_data[1]\r\n\r\n\r\nstart_time = time.time()\r\n\r\n\r\n# Train with pre-specified hyperparameters:\r\n# ---------------------------------------------------------------------------\r\n\r\n# clf = SVC(kernel='rbf', C=10, gamma=.024)\r\n# clf.fit(X_train, y_train)\r\n\r\n\r\n# Grid Search\r\n# ---------------------------------------------------------------------------\r\n\r\n# Set the grid to search from:\r\ntuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.022, 0.23, 0.024],\r\n 'C': [3, 10]}]\r\n\r\nscore = 'accuracy'\r\nprint(\"# Tuning hyper-parameters for %s\" % score)\r\nprint()\r\n\r\nclf = 
GridSearchCV(SVC(), tuned_parameters, cv=3,\r\n scoring=score, n_jobs=-1)\r\nclf.fit(X_train, y_train)\r\n\r\nprint(\"Best parameters set found on development set:\")\r\nprint()\r\nprint(clf.best_params_)\r\nprint()\r\nprint(\"Grid scores on development set:\")\r\nprint()\r\nmeans = clf.cv_results_['mean_test_score']\r\nstds = clf.cv_results_['std_test_score']\r\nfor mean, std, params in zip(means, stds, clf.cv_results_['params']):\r\n print(\"%0.3f (+/-%0.03f) for %r\"\r\n % (mean, std * 2, params))\r\nprint()\r\n\r\nprint(\"Detailed classification report:\")\r\nprint()\r\nprint(\"The model is trained on the full development set.\")\r\nprint(\"The scores are computed on the full evaluation set.\")\r\nprint()\r\ny_true, y_pred = y_test, clf.predict(X_test)\r\nprint(classification_report(y_true, y_pred))\r\nprint()\r\n\r\n\r\nend_time = time.time()\r\n\r\nprint(f'Total time elapsed: {end_time - start_time} seconds')\r\n\r\n\r\n\"\"\"RESULTS:\r\n\r\npercentage = 1\r\n-----------------------------------------------------------------------------\r\n\r\ntuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.018, 0.020, 0.022, 0.024, 0.026],\r\n 'C': [3, 10, 30, 100]}]\r\n\r\nBest parameters set found on development set:\r\n{'C': 3, 'gamma': 0.02, 'kernel': 'rbf'}\r\n\r\nDetailed classification report:\r\n\r\n precision recall f1-score support\r\n\r\n 0 1.00 1.00 1.00 11\r\n 1 1.00 1.00 1.00 10\r\n 2 0.83 0.83 0.83 12\r\n 3 0.92 0.92 0.92 13\r\n 4 0.92 1.00 0.96 11\r\n 5 1.00 0.80 0.89 5\r\n 6 1.00 1.00 1.00 9\r\n 7 0.70 0.78 0.74 9\r\n 8 1.00 0.90 0.95 10\r\n 9 1.00 1.00 1.00 10\r\n\r\n micro avg 0.93 0.93 0.93 100\r\n macro avg 0.94 0.92 0.93 100\r\nweighted avg 0.93 0.93 0.93 100\r\n\r\nOverall test accuracy: 93.0%\r\nTotal time elapsed: 24.52613377571106 seconds\r\n\r\n\r\npercentage = 10\r\n-----------------------------------------------------------------------------\r\n\r\ntuned_parameters = [{'kernel': ['rbf'], 'gamma': [0.020, 0.022, 0.024],\r\n 'C': [1, 3, 10]}]\r\n\r\nBest parameters set found on development set:\r\n{'C': 10, 'gamma': 0.024, 'kernel': 'rbf'}\r\n\r\nDetailed classification report:\r\n\r\n precision recall f1-score support\r\n\r\n 0 1.00 1.00 1.00 107\r\n 1 0.98 1.00 0.99 116\r\n 2 0.95 0.94 0.95 108\r\n 3 0.95 0.95 0.95 111\r\n 4 0.97 0.97 0.97 96\r\n 5 0.96 0.95 0.96 85\r\n 6 0.99 1.00 0.99 79\r\n 7 0.93 0.96 0.95 110\r\n 8 0.96 0.98 0.97 82\r\n 9 0.97 0.91 0.94 106\r\n\r\n micro avg 0.97 0.97 0.97 1000\r\n macro avg 0.97 0.97 0.97 1000\r\nweighted avg 0.97 0.97 0.97 1000\r\n\r\nOverall test accuracy: 96.6%\r\nTotal time elapsed: 477.4482045173645 seconds\r\n\r\n\r\npercentage = 100\r\n-----------------------------------------------------------------------------\r\n\r\nTrained on the parameters chosen by grid search on small data:\r\n{'C': 10, 'gamma': 0.024, 'kernel': 'rbf'}\r\n\r\nDetailed classification report:\r\n\r\n precision recall f1-score support\r\n\r\n 0 0.98 0.99 0.99 980\r\n 1 0.99 0.99 0.99 1135\r\n 2 0.98 0.98 0.98 1032\r\n 3 0.98 0.99 0.98 1010\r\n 4 0.99 0.98 0.99 982\r\n 5 0.99 0.98 0.98 892\r\n 6 0.99 0.99 0.99 958\r\n 7 0.98 0.98 0.98 1028\r\n 8 0.98 0.98 0.98 974\r\n 9 0.98 0.98 0.98 1009\r\n\r\n micro avg 0.99 0.99 0.99 10000\r\n macro avg 0.99 0.99 0.99 10000\r\nweighted avg 0.99 0.99 0.99 10000\r\n\r\nOverall test accuracy: 98.56%\r\nTotal time elapsed: 543.1020934581757 
seconds\r\n\r\n\"\"\"\r\n","sub_path":"src/low-data-SVM.py","file_name":"low-data-SVM.py","file_ext":"py","file_size_in_byte":6785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"434180669","text":"from __future__ import print_function\nimport argparse\n\nfrom . import BarCodeReader, BarCode\n\np = argparse.ArgumentParser()\np.add_argument('-P','--classpath')\np.add_argument('-J','--java')\np.add_argument('--try-harder', action='store_true')\np.add_argument('image', nargs='+')\nargs = p.parse_args()\n\nbcr = BarCodeReader(args.classpath, args.java)\n\nfor fn in args.image:\n print(\"%s\\n%s\" % (fn, '='*len(fn)))\n bc = bcr.decode(fn, try_harder=args.try_harder)\n if bc is None:\n print(\" ERROR: Failed to decode barcode.\")\n else:\n print(\" Decoded %s barcode in %s format.\" % (bc.type, bc.format))\n print(\" Raw text: %r\" % bc.raw)\n print(\" Parsed text: %r\\n\" % bc.parsed)\n","sub_path":"zxing/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"313531186","text":"#\n# Time Complexity = O(log(n)) where n is the total number of characters\n# Space Complexity = O(1)\n# Works on Leetcode https://leetcode.com/problems/find-peak-element/\n#\nclass Solution:\n def findPeakElement(self, nums: List[int]) -> int:\n if len(nums) == 1:\n return 0\n else:\n if nums[1] < nums[0]:\n return 0\n if nums[len(nums)-1] > nums[len(nums)-2]:\n return len(nums)-1\n l = 1\n r = len(nums)-2\n while(l <= r):\n mid = l+(r-l)//2\n if nums[mid] > nums[mid-1] and nums[mid] > nums[mid+1]:\n return mid\n elif nums[mid] < nums[mid-1]:\n r = mid-1\n else:\n l = mid+1\n","sub_path":"Problem3.py","file_name":"Problem3.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"350770203","text":"#!/usr/bin/env python\n#\n# NOTE: when testing, use \"pip install ... 
--upgrade\"\n\n__author__ = \"Graham Klyne (GK@ACM.ORG)\"\n__copyright__ = \"Copyright 2011-2013, University of Oxford\"\n__license__ = \"MIT (http://opensource.org/licenses/MIT)\"\n\n# Setup.py based on https://github.com/paltman/python-setup-template/blob/master/setup.py,\n# following http://www.ibm.com/developerworks/opensource/library/os-pythonpackaging/index.html\n#\n# These could be useful:\n# https://wiki.python.org/moin/Distutils/Tutorial\n# https://pypi.python.org/pypi/check-manifest\n\n\nimport codecs\nimport os\nimport sys\n\nfrom distutils.util import convert_path\nfrom fnmatch import fnmatchcase\nfrom setuptools import setup, find_packages\nfrom pip.req import parse_requirements # See: http://stackoverflow.com/questions/14399534/\n\nif sys.version_info[:2] != (2,7):\n raise AssertionError(\"Annalist requires Python 2.7 (found Python %s.%s)\"%sys.version_info[:2])\n\ndir_here = os.path.dirname(__file__)\nsys.path.insert(0, os.path.join(dir_here, \"annalist_root\"))\n\n# Helper to load README.md, etc.\ndef read(fname):\n return codecs.open(os.path.join(dir_here, fname)).read()\n\nPACKAGE = \"annalist\"\nPACKAGE_MODULE = __import__(PACKAGE, globals(), locals(), ['__version__', '__author__'])\nVERSION = PACKAGE_MODULE.__version__\nAUTHOR = PACKAGE_MODULE.__author__\nAUTHOR_EMAIL = \"gk-pypi@ninebynine.org\"\nNAME = \"Annalist\"\nDESCRIPTION = \"Annalist linked data notebook\"\nURL = \"https://github.com/gklyne/annalist\"\n\nsetup(\n name = NAME,\n version = VERSION,\n description = DESCRIPTION,\n long_description = read(\"README.md\"),\n author = AUTHOR,\n author_email = AUTHOR_EMAIL,\n license = \"MIT\",\n url = URL,\n packages = \n [ 'annalist_root'\n , 'annalist_root.annalist'\n , 'annalist_root.annalist.models'\n , 'annalist_root.annalist.views'\n , 'annalist_root.annalist.views.fields'\n , 'annalist_root.annalist.views.form_utils'\n , 'annalist_root.annalist.tests'\n , 'annalist_root.annalist_site'\n , 'annalist_root.annalist_site.settings'\n , 'annalist_root.annalist_manager'\n , 'annalist_root.utils'\n , 'annalist_root.oauth2'\n , 'annalist_root.miscutils'\n ],\n package_dir = \n { 'annalist_root': 'annalist_root'\n # , 'annalist': 'annalist_root/annalist'\n # , 'annalist_site': 'annalist_root/annalist_site'\n # , 'utils': 'annalist_root/annalist'\n # , 'oauth2': 'annalist_root/utils'\n # , 'miscutils': 'annalist_root/miscutils'\n },\n # >>>> REMEMBER to also update MANIFEST.in ... 
<<<<\n package_data = \n { 'annalist_root':\n [ '*.sh', '*.txt'\n , 'sampledata/README.md'\n , 'sampledata/init/annalist_site/README.md'\n , 'sampledata/init/annalist_site/_annalist_site/*.jsonld'\n , 'sampledata/init/annalist_site/c/*/_annalist_collection/*.jsonld'\n , 'sampledata/init/annalist_site/c/*/_annalist_collection/lists/*/*.jsonld'\n , 'sampledata/init/annalist_site/c/*/_annalist_collection/types/*/*.jsonld'\n , 'sampledata/init/annalist_site/c/*/_annalist_collection/views/*/*.jsonld'\n , 'sampledata/init/annalist_site/c/*/d/*/*/*.jsonld'\n , 'sampledata/empty/annalist_site/README.md'\n , 'sampledata/empty/annalist_site/_annalist_site/*.jsonld'\n , 'sampledata/empty/annalist_site/c/README.md'\n ]\n , 'annalist_root.annalist':\n [ 'templates/*.html'\n , 'templates/field/*.html'\n , 'static/css/*.css'\n , 'static/js/*.js'\n , 'static/images/*.png'\n , 'static/images/icons/warning_32.png'\n , 'static/images/icons/search_32.png'\n , 'static/foundation/css/*.css'\n # , 'static/foundation/img/*.png'\n , 'static/foundation/js/foundation/*.js'\n , 'static/foundation/js/vendor/*.js'\n , 'sitedata/enums/*/*/*.jsonld'\n , 'sitedata/fields/*/*.jsonld'\n , 'sitedata/groups/*/*.jsonld'\n , 'sitedata/lists/*/*.jsonld'\n , 'sitedata/types/*/*.jsonld'\n , 'sitedata/views/*/*.jsonld'\n , 'sitedata/users/*/*.jsonld'\n ]\n , 'annalist_root.annalist.views':\n [ 'help/*.md'\n , 'help/*.html'\n ]\n , 'annalist_root.oauth2':\n [ 'templates/*.html'\n ]\n },\n exclude_package_data = {\n '': ['spike/*'] \n },\n data_files = \n [\n ],\n classifiers=\n [\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n ],\n zip_safe = False,\n install_requires =\n [ 'Django==1.7'\n , 'wsgiref==0.1.2'\n , 'oauth2client==1.2'\n , 'httplib2==0.9'\n , 'pyparsing==2.0.2' # Does RDFlib need 1.5.7?\n # For testing:\n , 'beautifulsoup4'\n # For development - used by miscutils/MockHttpResources:\n # , 'httpretty==0.7.1'\n # Probably used by RDFlib stuff..\n # , 'SPARQLWrapper==1.5.2'\n # , 'html5lib==1.0b3'\n # , 'isodate==0.4.9'\n # , 'six==1.4.1'\n ],\n entry_points =\n {\n 'console_scripts':\n [ 'annalist-manager = annalist_root.annalist_manager.am_main:runMain',\n ]\n }\n )\n\n# End.\n","sub_path":"src/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"427394300","text":"\"\"\"\ncompressible-specific boundary conditions. 
Here, in particular, we\nimplement an HSE BC in the vertical direction.\n\nNote: the pyro BC routines operate on a single variable at a time, so\nsome work will necessarily be repeated.\n\"\"\"\n\nimport eos\nfrom util import msg\n\ndef user(bcName, bcEdge, variable, my_data):\n\n dens = my_data.get_var(\"density\")\n xmom = my_data.get_var(\"x-momentum\")\n ymom = my_data.get_var(\"y-momentum\")\n ener = my_data.get_var(\"energy\")\n\n grav = my_data.rp.get_param(\"compressible.grav\")\n\n myg = my_data.grid\n\n if (bcName == \"hse\"):\n \n if (bcEdge == \"ylb\"):\n\n # lower y boundary\n \n # we will take the density to be constant, the velocity to\n # be outflow, and the pressure to be in HSE\n if variable == \"density\":\n j = myg.jlo-1\n while (j >= 0):\n dens[:,j] = dens[:,myg.jlo]\n j -= 1\n\n elif variable == \"x-momentum\":\n j = myg.jlo-1\n while (j >= 0):\n xmom[:,j] = xmom[:,myg.jlo] \n j -= 1\n\n elif variable == \"y-momentum\":\n j = myg.jlo-1\n while (j >= 0):\n ymom[:,j] = ymom[:,myg.jlo] \n j -= 1\n\n elif variable == \"energy\":\n dens_base = dens[:,myg.jlo]\n ke_base = 0.5*(xmom[:,myg.jlo]**2 + ymom[:,myg.jlo]**2) / \\\n dens[:,myg.jlo]\n\n eint_base = (ener[:,myg.jlo] - ke_base)/dens[:,myg.jlo]\n pres_base = eos.pres(dens_base, eint_base)\n \n # we are assuming that the density is constant in this\n # formulation of HSE, so the pressure comes simply from\n # differencing the HSE equation\n j = myg.jlo-1\n while (j >= 0):\n pres_below = pres_base - grav*dens_base*myg.dy\n rhoe = eos.rhoe(pres_below)\n\n ener[:,j] = rhoe + ke_base\n\n pres_base = pres_below.copy()\n\n j -= 1\n\n else:\n msg.fail(\"error: variable not defined\")\n\n\n elif (bcEdge == \"yrb\"):\n\n # upper y boundary\n \n # we will take the density to be constant, the velocity to\n # be outflow, and the pressure to be in HSE\n if variable == \"density\":\n j = myg.jhi+1\n while (j <= myg.jhi+myg.ng):\n dens[:,j] = dens[:,myg.jhi]\n j += 1\n\n elif variable == \"x-momentum\":\n j = myg.jhi+1\n while (j <= myg.jhi+myg.ng):\n xmom[:,j] = xmom[:,myg.jhi] \n j += 1\n\n elif variable == \"y-momentum\":\n j = myg.jhi+1\n while (j <= myg.jhi+myg.ng):\n ymom[:,j] = ymom[:,myg.jhi] \n j += 1\n\n elif variable == \"energy\":\n dens_base = dens[:,myg.jhi]\n ke_base = 0.5*(xmom[:,myg.jhi]**2 + ymom[:,myg.jhi]**2) / \\\n dens[:,myg.jhi]\n\n eint_base = (ener[:,myg.jhi] - ke_base)/dens[:,myg.jhi]\n pres_base = eos.pres(dens_base, eint_base)\n \n # we are assuming that the density is constant in this\n # formulation of HSE, so the pressure comes simply from\n # differencing the HSE equation\n j = myg.jhi+1\n while (j <= myg.jhi+myg.ng):\n pres_above = pres_base + grav*dens_base*myg.dy\n rhoe = eos.rhoe(pres_above)\n\n ener[:,j] = rhoe + ke_base\n\n pres_base = pres_above.copy()\n\n j += 1\n\n else:\n msg.fail(\"error: variable not defined\")\n\n\n else:\n msg.fail(\"error: hse BC not supported for xlb or xrb\")\n\n\n else:\n msg.fail(\"error: bc type %s not supported\" % (bcName) )\n","sub_path":"compressible/BC.py","file_name":"BC.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"210062357","text":"from __future__ import absolute_import, unicode_literals, print_function\n\nimport os\n\nfrom .tournament import *\nfrom .deterministic_cache import DeterministicCache\nfrom .plot import *\nfrom .ecosystem import *\nfrom .utils import *\n\n\nclass TournamentManager(object):\n \"\"\"A class to manage and create 
tournaments.\"\"\"\n\n plot_types = {'boxplot': \"Payoffs. \", 'payoff': \"Payoffs. \",\n 'winplot': \"Wins. \", 'sdvplot': \"Std Payoffs. \",\n 'pdplot': \"Payoff differences. \", 'lengthplot': \"Lengths. \"}\n\n ecoturns = {\n 'basic_strategies': 1000,\n 'cheating_strategies': 10,\n 'ordinary_strategies': 1000,\n 'strategies': 10,\n }\n\n def __init__(self, output_directory, with_ecological,\n pass_cache=True, load_cache=True, save_cache=False,\n cache_file='./cache.txt', image_format=\"svg\"):\n self._tournaments = []\n self._ecological_variants = []\n self._logger = logging.getLogger(__name__)\n self._output_directory = output_directory\n self._with_ecological = with_ecological\n self._pass_cache = pass_cache\n self._save_cache = save_cache\n self._cache_file = cache_file\n self._deterministic_cache = DeterministicCache()\n self._load_cache = False\n self._image_format = image_format\n\n if load_cache and not save_cache:\n self.load_cache = self._load_cache_from_file(cache_file)\n\n @staticmethod\n def one_player_per_strategy(strategies):\n return [strategy() for strategy in strategies]\n\n def add_tournament(self, name, players, game=None, turns=200,\n repetitions=10, processes=None, noise=0,\n with_morality=True):\n tournament = Tournament(\n name=name,\n players=players,\n turns=turns,\n repetitions=repetitions,\n processes=processes,\n noise=noise,\n with_morality=with_morality)\n self._tournaments.append(tournament)\n\n def run_tournaments(self):\n t0 = time.time()\n for tournament in self._tournaments:\n self._run_single_tournament(tournament)\n if self._save_cache and not tournament.noise:\n self._save_cache_to_file(self._deterministic_cache, self._cache_file)\n self._logger.info(timed_message('Finished all tournaments', t0))\n\n def _run_single_tournament(self, tournament):\n self._logger.info(\n 'Starting {} tournament: '.format(tournament.name) + self._tournament_label(tournament)\n )\n\n t0 = time.time()\n\n if not tournament.noise and self._pass_cache and self._valid_cache(tournament.turns):\n self._logger.debug('Passing cache with %d entries to %s tournament' %\n (len(self._deterministic_cache), tournament.name))\n tournament.deterministic_cache = self._deterministic_cache\n if self._load_cache:\n tournament.prebuilt_cache = True\n else:\n self._logger.debug('Cache is not valid for %s tournament' %\n tournament.name)\n tournament.play()\n\n self._logger.debug(timed_message('Finished %s tournament' % tournament.name, t0))\n\n if self._with_ecological:\n ecosystem = Ecosystem(tournament.result_set)\n self.run_ecological_variant(tournament, ecosystem)\n else:\n ecosystem = None\n\n self._generate_output_files(tournament, ecosystem)\n self._cache_valid_for_turns = tournament.turns\n\n self._logger.debug('Cache now has %d entries' %\n len(self._deterministic_cache))\n\n self._logger.info(\n timed_message('Finished all %s tasks' % tournament.name, t0))\n\n def _valid_cache(self, turns):\n return ((len(self._deterministic_cache) == 0) or\n (len(self._deterministic_cache) > 0) and\n turns == self._deterministic_cache.turns)\n\n def run_ecological_variant(self, tournament, ecosystem):\n self._logger.debug(\n 'Starting ecological variant of %s' % tournament.name)\n t0 = time.time()\n ecosystem.reproduce(self.ecoturns.get(tournament.name))\n self._logger.debug(\n timed_message('Finished ecological variant of %s' % tournament.name, t0))\n\n def _generate_output_files(self, tournament, ecosystem=None):\n self._save_csv(tournament)\n self._save_plots(tournament, ecosystem,\n 
image_format=self._image_format)\n\n def _save_csv(self, tournament):\n csv = tournament.result_set.csv()\n file_name = self._output_file_path(\n tournament.name, 'csv')\n with open(file_name, 'w') as f:\n f.write(csv)\n\n def _save_plots(self, tournament, ecosystem=None, image_format=\"svg\"):\n results = tournament.result_set\n plot = Plot(results)\n if not plot.matplotlib_installed:\n self._logger.error('The matplotlib library is not installed. '\n 'No plots will be produced')\n return\n label = self._tournament_label(tournament)\n for plot_type, name in self.plot_types.items():\n title = name + label\n figure = getattr(plot, plot_type)(title=title)\n file_name = self._output_file_path(\n tournament.name + '_' + plot_type, image_format)\n self._save_plot(figure, file_name)\n if ecosystem is not None:\n title = \"Eco. \" + label\n figure = plot.stackplot(ecosystem, title=title)\n file_name = self._output_file_path(\n tournament.name + '_reproduce', image_format)\n self._save_plot(figure, file_name)\n\n def _tournament_label(self, tournament):\n \"\"\"A label for the tournament for the corresponding title plots\"\"\"\n return \"Turns: {}, Repetitions: {}, Strategies: {}.\".format(tournament.turns,\n tournament.repetitions,\n len(tournament.players))\n\n def _output_file_path(self, file_name, file_extension):\n return os.path.join(\n self._output_directory,\n file_name + '.' + file_extension)\n\n @staticmethod\n def _save_plot(figure, file_name, dpi=400):\n figure.savefig(file_name, bbox_inches='tight', dpi=dpi)\n figure.clf()\n plt.close(figure)\n\n def _save_cache_to_file(self, cache, file_name):\n self._logger.debug(\n 'Saving cache with %d entries to %s' % (len(cache), file_name))\n cache.save(file_name)\n return True\n\n def _load_cache_from_file(self, file_name):\n try:\n self._deterministic_cache.load(file_name)\n self._logger.debug(\n 'Loaded cache with %d entries' % len(self._deterministic_cache))\n return True\n except IOError:\n self._logger.debug('Cache file not found. 
Starting with empty cache')\n            return False\n\n\nclass ProbEndTournamentManager(TournamentManager):\n    \"\"\"A class to manage and create probabilistic ending tournaments.\"\"\"\n\n    ecoturns = {\n        'basic_strategies_prob_end': 1000,\n        'cheating_strategies_prob_end': 10,\n        'ordinary_strategies_prob_end': 1000,\n        'strategies_prob_end': 10,\n    }\n\n    def add_tournament(self, name, players, game=None, prob_end=.01,\n                       repetitions=10, processes=None, noise=0,\n                       with_morality=True):\n        tournament = ProbEndTournament(\n            name=name,\n            players=players,\n            prob_end=prob_end,\n            repetitions=repetitions,\n            processes=processes,\n            noise=noise,\n            with_morality=with_morality)\n        self._tournaments.append(tournament)\n\n    def _tournament_label(self, tournament):\n        \"\"\"A label for the tournament for the corresponding title plots\"\"\"\n        return \"Prob end: {}, Repetitions: {}, Strategies: {}.\".format(tournament.prob_end,\n                                                                       tournament.repetitions,\n                                                                       len(tournament.players))\n","sub_path":"axelrod/tournament_manager.py","file_name":"tournament_manager.py","file_ext":"py","file_size_in_byte":8321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"370787516","text":"from pyramid.view import view_config\nfrom pyramid.response import Response\nfrom thirtysixweeks.models.child import Child\nimport os\n\n\n@view_config(route_name='profile', renderer='templates/profile.html')\ndef profile(request):\n    return {'baby_images': get_baby_images()}\n\ndef get_baby_images():\n\tbaby_images = []\n\tpath = os.getcwd() + '/static/images/' + str(Child.child_username)\n\n\tfor file_name in os.listdir(path):\n\t\timage = {\n\t\t\t'title': file_name.replace('.jpg', ''),\n\t\t\t'description': 'Cute Picture!',\n\t\t\t'src': '/static/images/' + str(Child.child_username) + '/' + file_name\n\t\t}\n\t\tbaby_images.append(image)\n\n\treturn baby_images","sub_path":"thirtysixweeks/thirtysixweeks/views/profile_view.py","file_name":"profile_view.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"625651697","text":"'''\nTensorflow implementation of AutoInt described in:\nAutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks.\nauthor: Chence Shi\nemail: chenceshi@pku.edu.cn\n'''\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom time import time\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.metrics import roc_auc_score, log_loss\nfrom tensorflow.contrib.layers.python.layers import batch_norm as batch_norm\n\n\n\n'''\nThe following two functions are adapted from kyubyong park's implementation of transformer\nWe slightly modify the code to make it suitable for our work.(add relu, delete key masking and causality mask)\nJune 2017 by kyubyong park. 
\nkbpark.linguist@gmail.com.\nhttps://www.github.com/kyubyong/transformer\n'''\n\n\ndef normalize(inputs, epsilon=1e-8):\n '''\n Applies layer normalization\n Args:\n inputs: A tensor with 2 or more dimensions\n epsilon: A floating number to prevent Zero Division\n Returns:\n A tensor with the same shape and data dtype\n '''\n inputs_shape = inputs.get_shape()\n params_shape = inputs_shape[-1:]\n\n mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)\n beta = tf.Variable(tf.zeros(params_shape))\n gamma = tf.Variable(tf.ones(params_shape))\n normalized = (inputs - mean) / ((variance + epsilon) ** (.5))\n outputs = gamma * normalized + beta\n\n return outputs\n\n \ndef multihead_attention(queries,\n keys,\n values,\n num_units=None,\n num_heads=1,\n dropout_keep_prob=1,\n is_training=True,\n has_residual=True):\n\t\n if num_units is None:\n num_units = queries.get_shape().as_list[-1]\n\n # Linear projections\n Q = tf.layers.dense(queries, num_units, activation=tf.nn.relu)\n K = tf.layers.dense(keys, num_units, activation=tf.nn.relu)\n V = tf.layers.dense(values, num_units, activation=tf.nn.relu)\n if has_residual:\n V_res = tf.layers.dense(values, num_units, activation=tf.nn.relu)\n\n # Split and concat\n Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0)\n K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0)\n V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0)\n\n # Multiplication\n weights = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))\n\n # Scale\n weights = weights / (K_.get_shape().as_list()[-1] ** 0.5)\n\n # Activation\n weights = tf.nn.softmax(weights)\n\n\n # Dropouts\n weights = tf.layers.dropout(weights, rate=1-dropout_keep_prob,\n training=tf.convert_to_tensor(is_training))\n\n # Weighted sum\n outputs = tf.matmul(weights, V_)\n\n # Restore shape\n outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2)\n\n # Residual connection\n if has_residual:\n outputs += V_res\n\n outputs = tf.nn.relu(outputs)\n # Normalize\n outputs = normalize(outputs)\n \n return outputs\n\n\nclass AutoInt():\n def __init__(self, args, feature_size, run_cnt):\n #print(args.block_shape)\n #print(type(args.block_shape))\n\n self.feature_size = feature_size # denote as n, dimension of concatenated features\n self.field_size = args.field_size # denote as M, number of total feature fields\n self.embedding_size = args.embedding_size # denote as d, size of the feature embedding\n self.blocks = args.blocks # number of the blocks\n self.heads = args.heads # number of the heads\n self.block_shape = args.block_shape\n self.output_size = args.block_shape[-1] \n self.has_residual = args.has_residual\n self.has_wide = args.has_wide # whether to add wide part\n self.deep_layers = args.deep_layers # whether to joint train with deep networks as described in paper\n\n\n self.batch_norm = args.batch_norm\n self.batch_norm_decay = args.batch_norm_decay\n self.drop_keep_prob = args.dropout_keep_prob\n self.l2_reg = args.l2_reg\n self.epoch = args.epoch\n self.batch_size = args.batch_size\n self.learning_rate = args.learning_rate\n self.learning_rate_wide = args.learning_rate_wide\n self.optimizer_type = args.optimizer_type\n\n self.save_path = args.save_path + str(run_cnt) + '/'\n self.is_save = args.is_save\n if (args.is_save == True and os.path.exists(self.save_path) == False):\n os.makedirs(self.save_path)\t\n\n self.verbose = args.verbose\n self.random_seed = args.random_seed\n self.loss_type = args.loss_type\n self.eval_metric = roc_auc_score\n self.best_loss = 1.0\n self.greater_is_better = 
args.greater_is_better\n self.train_result, self.valid_result = [], []\n self.train_loss, self.valid_loss = [], []\n \n self._init_graph()\n\n \n def _init_graph(self):\n self.graph = tf.Graph()\n with self.graph.as_default():\n\n tf.set_random_seed(self.random_seed)\n\n self.feat_index = tf.placeholder(tf.int32, shape=[None, None],\n name=\"feat_index\") # None * M\n self.feat_value = tf.placeholder(tf.float32, shape=[None, None],\n name=\"feat_value\") # None * M\n self.label = tf.placeholder(tf.float32, shape=[None, 1], name=\"label\") # None * 1\n # In our implementation, the shape of dropout_keep_prob is [3], used in 3 different parts.\n self.dropout_keep_prob = tf.placeholder(tf.float32, shape=[None], name=\"dropout_keep_prob\")\n self.train_phase = tf.placeholder(tf.bool, name=\"train_phase\")\n\n self.weights = self._initialize_weights()\n\n # model\n self.embeddings = tf.nn.embedding_lookup(self.weights[\"feature_embeddings\"],\n self.feat_index) # None * M * d\n feat_value = tf.reshape(self.feat_value, shape=[-1, self.field_size, 1])\n self.embeddings = tf.multiply(self.embeddings, feat_value) # None * M * d\n self.embeddings = tf.nn.dropout(self.embeddings, self.dropout_keep_prob[1]) # None * M * d\n if self.has_wide: \n self.y_first_order = tf.nn.embedding_lookup(self.weights[\"feature_bias\"], self.feat_index) # None * M * 1\n self.y_first_order = tf.reduce_sum(tf.multiply(self.y_first_order, feat_value), 1) # None * 1\n\n # joint training with feedforward nn\n if self.deep_layers != None:\n self.y_dense = tf.reshape(self.embeddings, shape=[-1, self.field_size * self.embedding_size])\n for i in range(0, len(self.deep_layers)):\n self.y_dense = tf.add(tf.matmul(self.y_dense, self.weights[\"layer_%d\" %i]), self.weights[\"bias_%d\"%i]) # None * layer[i]\n if self.batch_norm:\n self.y_dense = self.batch_norm_layer(self.y_dense, train_phase=self.train_phase, scope_bn=\"bn_%d\" %i)\n self.y_dense = tf.nn.relu(self.y_dense)\n self.y_dense = tf.nn.dropout(self.y_dense, self.dropout_keep_prob[2])\n self.y_dense = tf.add(tf.matmul(self.y_dense, self.weights[\"prediction_dense\"]),\n self.weights[\"prediction_bias_dense\"], name='logits_dense') # None * 1\n \n \n # ---------- main part of AutoInt-------------------\n self.y_deep = self.embeddings # None * M * d\n for i in range(self.blocks): \n self.y_deep = multihead_attention(queries=self.y_deep,\n keys=self.y_deep,\n values=self.y_deep,\n num_units=self.block_shape[i],\n num_heads=self.heads,\n dropout_keep_prob=self.dropout_keep_prob[0],\n is_training=self.train_phase,\n has_residual=self.has_residual)\n\n self.flat = tf.reshape(self.y_deep, \n shape=[-1, self.output_size * self.field_size]) \n #if self.has_wide:\n # self.flat = tf.concat([self.flat, self.y_first_order], axis=1)\n #if self.deep_layers != None:\n # self.flat = tf.concat([self.flat, self.y_dense], axis=1)\n self.out = tf.add(tf.matmul(self.flat, self.weights[\"prediction\"]), \n self.weights[\"prediction_bias\"], name='logits') # None * 1\n \n if self.has_wide:\n self.out += self.y_first_order\n\n if self.deep_layers != None:\n self.out += self.y_dense\n \n # ---------- Compute the loss ----------\n # loss\n if self.loss_type == \"logloss\":\n self.out = tf.nn.sigmoid(self.out, name='pred')\n self.loss = tf.losses.log_loss(self.label, self.out)\n elif self.loss_type == \"mse\":\n self.loss = tf.nn.l2_loss(tf.subtract(self.label, self.out))\n\n # l2 regularization on weights\n if self.l2_reg > 0:\n if self.deep_layers != None:\n for i in range(len(self.deep_layers)):\n 
self.loss += tf.contrib.layers.l2_regularizer(\n self.l2_reg)(self.weights[\"layer_%d\"%i])\n #self.loss += tf.contrib.layers.l2_regularizer(self.l2_reg)(self.embeddings)\n #all_vars = tf.trainable_variables()\n #lossL2 = tf.add_n([ tf.nn.l2_loss(v) for v in all_vars\n # if 'bias' not in v.name and 'embeddings' not in v.name]) * self.l2_reg\n #self.loss += lossL2 \n \n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n self.var1 = [v for v in tf.trainable_variables() if v.name != 'feature_bias:0']\n self.var2 = [tf.trainable_variables()[1]] # self.var2 = [feature_bias]\n # optimizer\n # here we should use two different optimizer for wide and deep model(if we add wide part).\n if self.optimizer_type == \"adam\":\n if self.has_wide:\n optimizer1 = tf.train.AdamOptimizer(learning_rate=self.learning_rate, \n beta1=0.9, beta2=0.999, epsilon=1e-8)\n optimizer2 = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate_wide)\n #minimize(self.loss, global_step=self.global_step)\n var_list1 = self.var1\n var_list2 = self.var2\n grads = tf.gradients(self.loss, var_list1 + var_list2)\n grads1 = grads[:len(var_list1)]\n grads2 = grads[len(var_list1):]\n train_op1 = optimizer1.apply_gradients(zip(grads1, var_list1), global_step=self.global_step)\n train_op2 = optimizer2.apply_gradients(zip(grads2, var_list2))\n self.optimizer = tf.group(train_op1, train_op2)\n else:\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, \n beta1=0.9, beta2=0.999, epsilon=1e-8).\\\n minimize(self.loss, global_step=self.global_step)\n elif self.optimizer_type == \"adagrad\":\n self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate,\n initial_accumulator_value=1e-8).\\\n minimize(self.loss)\n elif self.optimizer_type == \"gd\":\n self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).\\\n minimize(self.loss)\n elif self.optimizer_type == \"momentum\":\n self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=0.95).\\\n minimize(self.loss)\n\n # init\n self.saver = tf.train.Saver(max_to_keep=5)\n init = tf.global_variables_initializer()\n self.sess = self._init_session()\n self.sess.run(init)\n self.count_param()\n\n\n def count_param(self):\n k = (np.sum([np.prod(v.get_shape().as_list()) \n for v in tf.trainable_variables()]))\n\n #print(tf.trainable_variables())\n print(\"total parameters :%d\" % k) \n print(\"extra parameters : %d\" % (k - self.feature_size * self.embedding_size))\n \n\n def _init_session(self):\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n return tf.Session(config=config)\n\n\n def _initialize_weights(self):\n weights = dict()\n\n # embeddings\n weights[\"feature_embeddings\"] = tf.Variable(\n tf.random_normal([self.feature_size, self.embedding_size], 0.0, 0.01),\n name=\"feature_embeddings\") # feature_size(n) * d\n \n if self.has_wide:\n weights[\"feature_bias\"] = tf.Variable(\n tf.random_normal([self.feature_size, 1], 0.0, 0.001),\n name=\"feature_bias\") # feature_size(n) * 1\n input_size = self.output_size * self.field_size\n #if self.deep_layers != None:\n # input_size += self.deep_layers[-1]\n #if self.has_wide:\n # input_size += self.field_size\n\n # dense layers\n if self.deep_layers != None:\n num_layer = len(self.deep_layers)\n layer0_size = self.field_size * self.embedding_size\n glorot = np.sqrt(2.0 / (layer0_size + self.deep_layers[0]))\n weights[\"layer_0\"] = tf.Variable(\n np.random.normal(loc=0, 
scale=glorot, size=(layer0_size, self.deep_layers[0])), dtype=np.float32)\n weights[\"bias_0\"] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[0])),\n dtype=np.float32) # 1 * layers[0]\n for i in range(1, num_layer):\n glorot = np.sqrt(2.0 / (self.deep_layers[i-1] + self.deep_layers[i]))\n weights[\"layer_%d\" % i] = tf.Variable(\n np.random.normal(loc=0, scale=glorot, size=(self.deep_layers[i-1], self.deep_layers[i])),\n dtype=np.float32) # layers[i-1] * layers[i]\n weights[\"bias_%d\" % i] = tf.Variable(\n np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[i])),\n dtype=np.float32) # 1 * layer[i]\n glorot = np.sqrt(2.0 / (self.deep_layers[-1] + 1))\n weights[\"prediction_dense\"] = tf.Variable(\n np.random.normal(loc=0, scale=glorot, size=(self.deep_layers[-1], 1)),\n dtype=np.float32, name=\"prediction_dense\")\n weights[\"prediction_bias_dense\"] = tf.Variable(\n np.random.normal(), dtype=np.float32, name=\"prediction_bias_dense\")\n\n\n #---------- prediciton weight ------------------# \n glorot = np.sqrt(2.0 / (input_size + 1))\n weights[\"prediction\"] = tf.Variable(\n np.random.normal(loc=0, scale=glorot, size=(input_size, 1)),\n dtype=np.float32, name=\"prediction\")\n weights[\"prediction_bias\"] = tf.Variable(\n np.random.normal(), dtype=np.float32, name=\"prediction_bias\")\n\n return weights\n\n def batch_norm_layer(self, x, train_phase, scope_bn):\n bn_train = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,\n is_training=True, reuse=None, trainable=True, scope=scope_bn)\n bn_inference = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,\n is_training=False, reuse=True, trainable=True, scope=scope_bn)\n z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)\n return z\n\n \n def get_batch(self, Xi, Xv, y, batch_size, index):\n start = index * batch_size\n end = (index+1) * batch_size\n end = end if end < len(y) else len(y)\n return Xi[start:end], Xv[start:end], [[y_] for y_ in y[start:end]]\n\n\n # shuffle three lists simutaneously\n def shuffle_in_unison_scary(self, a, b, c):\n rng_state = np.random.get_state()\n np.random.shuffle(a)\n np.random.set_state(rng_state)\n np.random.shuffle(b)\n np.random.set_state(rng_state)\n np.random.shuffle(c)\n\n\n def fit_on_batch(self, Xi, Xv, y):\n feed_dict = {self.feat_index: Xi,\n self.feat_value: Xv,\n self.label: y,\n self.dropout_keep_prob: self.drop_keep_prob,\n self.train_phase: True}\n step, loss, opt = self.sess.run((self.global_step, self.loss, self.optimizer), feed_dict=feed_dict)\n return step, loss\n\n # Since the train data is very large, they can not be fit into the memory at the same time.\n # We separate the whole train data into several files and call \"fit_once\" for each file.\n def fit_once(self, Xi_train, Xv_train, y_train,\n epoch, file_count, Xi_valid=None, \n\t Xv_valid=None, y_valid=None,\n early_stopping=False):\n \n has_valid = Xv_valid is not None\n last_step = 0\n t1 = time()\n self.shuffle_in_unison_scary(Xi_train, Xv_train, y_train)\n total_batch = int(len(y_train) / self.batch_size)\n for i in range(total_batch):\n Xi_batch, Xv_batch, y_batch = self.get_batch(Xi_train, Xv_train, y_train, self.batch_size, i)\n step, loss = self.fit_on_batch(Xi_batch, Xv_batch, y_batch)\n last_step = step\n\n # evaluate training and validation datasets\n train_result, train_loss = self.evaluate(Xi_train, Xv_train, y_train)\n self.train_result.append(train_result)\n 
self.train_loss.append(train_loss)\n if has_valid:\n valid_result, valid_loss = self.evaluate(Xi_valid, Xv_valid, y_valid)\n self.valid_result.append(valid_result)\n self.valid_loss.append(valid_loss)\n if valid_loss < self.best_loss and self.is_save == True:\n old_loss = self.best_loss\n self.best_loss = valid_loss\n self.saver.save(self.sess, self.save_path + 'model.ckpt',global_step=last_step)\n print(\"[%d-%d] model saved!. Valid loss is improved from %.4f to %.4f\" \n % (epoch, file_count, old_loss, self.best_loss))\n\n if self.verbose > 0 and ((epoch-1)*9 + file_count) % self.verbose == 0:\n if has_valid:\n print(\"[%d-%d] train-result=%.4f, train-logloss=%.4f, valid-result=%.4f, valid-logloss=%.4f [%.1f s]\" % (epoch, file_count, train_result, train_loss, valid_result, valid_loss, time() - t1))\n else:\n print(\"[%d-%d] train-result=%.4f [%.1f s]\" \\\n % (epoch, file_count, train_result, time() - t1))\n if has_valid and early_stopping and self.training_termination(self.valid_loss):\n return False\n else:\n return True\n\n\n\n def training_termination(self, valid_result):\n if len(valid_result) > 5:\n if self.greater_is_better:\n if valid_result[-1] < valid_result[-2] and \\\n valid_result[-2] < valid_result[-3] and \\\n valid_result[-3] < valid_result[-4] and \\\n valid_result[-4] < valid_result[-5]:\n return True\n else:\n if valid_result[-1] > valid_result[-2] and \\\n valid_result[-2] > valid_result[-3] and \\\n valid_result[-3] > valid_result[-4] and \\\n valid_result[-4] > valid_result[-5]:\n return True\n return False\n\n\n def predict(self, Xi, Xv):\n \"\"\"\n :param Xi: list of list of feature indices of each sample in the dataset\n :param Xv: list of list of feature values of each sample in the dataset\n :return: predicted probability of each sample\n \"\"\"\n # dummy y\n dummy_y = [1] * len(Xi)\n batch_index = 0\n Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)\n y_pred = None\n #y_loss = None\n while len(Xi_batch) > 0:\n num_batch = len(y_batch)\n feed_dict = {self.feat_index: Xi_batch,\n self.feat_value: Xv_batch,\n self.label: y_batch,\n self.dropout_keep_prob: [1.0] * len(self.drop_keep_prob),\n self.train_phase: False}\n batch_out = self.sess.run(self.out, feed_dict=feed_dict)\n\n if batch_index == 0:\n y_pred = np.reshape(batch_out, (num_batch,))\n\t #y_loss = np.reshape(batch_loss, (num_batch,))\n else:\n y_pred = np.concatenate((y_pred, np.reshape(batch_out, (num_batch,))))\n #y_loss = np.concatenate((y_loss, np.reshape(batch_loss, (num_batch,))))\n\n batch_index += 1\n Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)\n\n return y_pred\n\n\n def evaluate(self, Xi, Xv, y):\n \"\"\"\n :param Xi: list of list of feature indices of each sample in the dataset\n :param Xv: list of list of feature values of each sample in the dataset\n :param y: label of each sample in the dataset\n :return: metric of the evaluation\n \"\"\"\n y_pred = self.predict(Xi, Xv)\n y_pred = np.clip(y_pred,1e-6,1-1e-6)\n return self.eval_metric(y, y_pred), log_loss(y, y_pred)\n\n def restore(self, save_path=None):\n if (save_path == None):\n save_path = self.save_path\n ckpt = tf.train.get_checkpoint_state(save_path) \n if ckpt and ckpt.model_checkpoint_path: \n self.saver.restore(self.sess, ckpt.model_checkpoint_path) \n if self.verbose > 0:\n print (\"restored from %s\" % 
(save_path))\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":23023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"134298011","text":"#api endpoints\nimport requests as request\nimport json \nimport pandas as pd\n\n#per day info of a clan-war-league\nclass WarDay(): \n def __init__(self,day,warTag,res): \n self.day = day\n self.warTag = warTag\n self.responce = res\n \n\nclass ClanInfo:\n def __init__(self,clantag): \n self.base = \"https://api.clashofclans.com/v1/\"\n self.token = \"Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzUxMiIsImtpZCI6IjI4YTMxOGY3LTAwMDAtYTFlYi03ZmExLTJjNzQzM2M2Y2NhNSJ9.eyJpc3MiOiJzdXBlcmNlbGwiLCJhdWQiOiJzdXBlcmNlbGw6Z2FtZWFwaSIsImp0aSI6ImM5MzRmMjllLTA0Y2UtNGQ2ZS1iZDM1LWVmZmNlNDI4MWEzYiIsImlhdCI6MTYxNDY4NjcxNywic3ViIjoiZGV2ZWxvcGVyLzhlOTIyMmY4LWNjZDUtN2MyZi1mMDE5LWRhNjI0MjlhMjdhNCIsInNjb3BlcyI6WyJjbGFzaCJdLCJsaW1pdHMiOlt7InRpZXIiOiJkZXZlbG9wZXIvc2lsdmVyIiwidHlwZSI6InRocm90dGxpbmcifSx7ImNpZHJzIjpbIjc3LjE2MS4xMjUuMTM1IiwiNS4xOTkuMTQ4LjIwMSJdLCJ0eXBlIjoiY2xpZW50In1dfQ.aU5Oe-l_Way2quK62V1f25FuVuo_efwUfdnQXmZbkV6o-Sf8Ph5mAdGNd6EJ3WizgBklDnRHx0p4JN_80Nqlug\"\n self.headers = {'Content-Type': 'application/json', 'Authorization': self.token}\n self.ownClanWars = []\n self.clanTag = \"%23\"+clantag[1:]\n\n\n # get /clanwarleagues/wars/{warTag} \n #https://api.clashofclans.com/v1/clanwarleagues/wars/%232RL9C2CUG\n def clanwarleagues_wars(self,warTags):\n urlSaveWarTag = \"%23\"+warTags[1:]\n endpoint = self.base + f\"clanwarleagues/wars/{urlSaveWarTag}\"\n res = request.get(endpoint,headers=self.headers)\n if res.status_code == 200: \n return res.json()\n else: \n return res.status_code\n\n #https://api.clashofclans.com/v1/clans/%2329G2CU2JY/currentwar/leaguegroup\n def currentwar_leaguegroup(self): \n endpoint = self.base+\"clans/\"+self.clanTag+\"/currentwar/leaguegroup\"\n r = request.get(endpoint, headers = self.headers)\n if r.status_code == 200: \n return r.json()\n\n #goes through currentwar_leaguegroup(\"war tag\")\n def get_tags(self,day): \n tags = []\n r = self.currentwar_leaguegroup()\n if r: \n for warTag in r['rounds'][day]['warTags']: \n tags.append(warTag)\n return tags\n\n def clan_war_league_event(self,days):\n \n tags = []\n for day in range(days):\n #print(day) \n tags = self.get_tags(day)\n for warTag in tags: \n res = self.clanwarleagues_wars(warTag)\n if res['clan']['tag'] == '#29G2CU2JY' or res['opponent']['tag'] == '#29G2CU2JY':\n # print( \"own clan\", res['clan']['tag'])\n _war = WarDay(day,warTag,res)\n # print(_war.warTag)\n self.ownClanWars.append( _war ) \n \n\n \n\n\n\n\n\n\nclaninfo = ClanInfo(\"#29G2CU2JY\")\nclaninfo.clan_war_league_event(2)\n\nprint(claninfo)\n\nclass Fight(): \n def __init__(self,tag):\n self.tag = tag \n self.townhallLevel = None\n self.stars = None\n self.mapPosition = None\n self.opponentTag = None\n self.opponentMapPosition = None\n self.opponentTownhallLevel = None\n\n\ndef attack_results():\n fights = {}\n for fight in claninfo.ownClanWars: \n for member in fight.responce[\"clan\"][\"members\"]:\n if 'attacks' in member: \n F = Fight(member['tag'])\n F.townhallLevel = member[\"townhallLevel\"]\n F.mapPosition = member['mapPosition']\n F.opponentTag = member['attacks'][0]['defenderTag']\n F.stars = member['attacks'][0]['stars']\n\n for opponent in fight.responce['opponent']['members']: \n if opponent['tag'] == F.opponentTag: \n F.opponentTownhallLevel = opponent['townhallLevel']\n F.opponentMapPosition = opponent[\"mapPosition\"]\n 
fights[ member['tag'] ]= F\n\n    return fights\nresults = attack_results()\n\nfor key in results: \n    fight = results[key]\n    print(\"stars: \", fight.stars)\n    print(\"tags: \", fight.tag, fight.opponentTag)\n    print(\"townhall levels:\" , fight.townhallLevel, fight.opponentTownhallLevel)\n    print(\"--------------------------------------------------------------------\")\n\nprint(\"test\")\n\n# print(\"number of clan war entries:\", len(claninfo.ownClanWars))\n# for war in claninfo.ownClanWars:\n#     if war.responce[\"clan\"][\"tag\"] == '#29G2CU2JY':\n#         for member in war.responce[\"clan\"][\"members\"]: \n\n#             # FOR DISPLAY PURPOSES\n#             if len(member['name']) <= 6: #character limit that will place tab on right spot\n#                 member['name'] += \"\\t\" #if name would be too short, add a tab to the name \n#             # FOR DISPLAY PURPOSES\n\n#             print(member['name'],\"\\t\",member['townhallLevel'],\"\\t\",member['mapPosition'])\n#     print(\"\\n\")\n    \n#     if war.responce[\"opponent\"][\"tag\"] == '#29G2CU2JY':\n#         for member in war.responce[\"opponent\"][\"members\"]: \n    \n#             # FOR DISPLAY PURPOSES\n#             if len(member['name']) <= 6: #character limit that will place tab on right spot\n#                 member['name'] += \"\\t\" #if name would be too short, add a tab to the name \n#             # FOR DISPLAY PURPOSES\n\n#             print(member['name'],\"\\t\",member['townhallLevel'],\"\\t\",member['mapPosition'])\n#     print(\"\\n\")\n\n\n#clan tag = #29G2CU2JY\n#war tag = #2RL9C2CUG\n#https://api.clashofclans.com/v1/clans/clanwarleagues/wars/%232RLL9UYPG\n#https://api.clashofclans.com/v1/clanwarleagues/wars/%232RL9C2CUG","sub_path":"coc.py","file_name":"coc.py","file_ext":"py","file_size_in_byte":5481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"638059027","text":"from configparser import ConfigParser\nfrom urllib.parse import quote_plus\nimport requests\nimport os\nfrom multiprocessing import Pool\nimport logging\n\ndef get_config():\n    conf = ConfigParser()\n    conf.read('prod.cfg')\n    print('conf.read success')\n    return conf\n\ndef get_lat_long(config, address):\n    print('def get_lat_long(config, address')\n    qs_dict = {'address': quote_plus(address), 'key': config.get('google', 'api_key'),}\n    logging.debug('Requesting weather data from google for %s', address)\n    resp = requests.get('https://maps.googleapis.com/maps/api/geocode/json', params=qs_dict)\n\n    try:\n        lat, lon = resp.json().get('results')[0].get('geometry').get('location').values()\n    except (KeyError, IndexError, TypeError):\n        raise Exception('Could not find address: %s', address)\n    return lat, lon\n\ndef upcoming_forecast(args):\n    print('def upcoming_forecast(args):')\n    config, address = args\n    lat, lon = get_lat_long(config, address)\n    logging.debug('Have lat and long for %s', address)\n    resp = requests.get('http://api.openweathermap.org/data/2.5/forecast', params={'lat':lat, 'lon':lon, 'appid':config.get('openweather', 'api_key'), 'units': 'metric'})\n    return (address, resp.json())\n\ndef get_city_forecasts(addresses):\n    config = get_config()\n    pool = Pool(processes=4)\n    return pool.map(upcoming_forecast, [(config, addy) for addy in addresses])\n\nmy_weather = get_city_forecasts(['Los Angeles, CA', 'New York, NY', 'Berlin, Germany'])\n\nprint(type(my_weather))\n\nfor item in my_weather:\n    print(item[0])\n","sub_path":"Chapter 5/myParallelProcessing.py","file_name":"myParallelProcessing.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"245994528","text":"def fatorial(numero):\n    
if numero == 0 or numero == 1:\n        return 1\n    return numero * fatorial(numero - 1)\n\nlista = []\n\nfor i in range(5):\n    lista.append(int(input()))\n\nsoma = 0\nfor i in lista:\n    if i % 3 == 0:\n        soma += fatorial(i)\n\nprint(soma)","sub_path":"Python/Somando fatoriais.py","file_name":"Somando fatoriais.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"137939146","text":"# https://leetcode.com/problems/count-complete-tree-nodes\n\n\nfrom TreeNode import TreeNode\n\nclass Solution:\n    # runtime; 140ms, 30.32%\n    # memory; 15.2MB, 66.50%\n    def countNodes(self, root):\n        if root is None:\n            return 0\n        cnt, q = 0, [root]\n        while q:\n            node = q.pop(0)\n            cnt += 1\n            if node.left:\n                q.append(node.left)\n            if node.right:\n                q.append(node.right)\n        return cnt\n\n\ns = Solution()\nroot = TreeNode(1)\nroot.left = TreeNode(2)\nroot.right = TreeNode(3)\nroot.left.left = TreeNode(4)\nroot.left.right = TreeNode(5)\nroot.right.left = TreeNode(6)\nprint(s.countNodes(root))\n","sub_path":"python/problem-tree/count_complete_tree_nodes.py","file_name":"count_complete_tree_nodes.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"78721877","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import ensemble\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.externals import joblib\n\n\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.preprocessing import StandardScaler\n\nfrom openpyxl import Workbook\n\nimport numpy\n\n# Make a data set\nfrom random import *\nwb = Workbook()\nws = wb.active\n\nin_columns= ['year_built','num_bedrooms','total_sqft','garage_sqft','sale_price']\n\ndf_in = pd.DataFrame([], columns= in_columns)\n\n\nroom_size_factor = [200,400,600]\ngarage_size_factor = [0,500,1000]\n\nrand = Random(0)\n\nprice_histogram = {}\nprices = []\n\n\nfor i in range(0,7500):\n    year_built = rand.randint(1940,2020)\n    num_bedrooms = rand.randint(1,6)\n    total_sqft = num_bedrooms * room_size_factor[rand.randint(0,len(room_size_factor)-1)]\n    garage_sqft = garage_size_factor[rand.randint(0,len(garage_size_factor)-1)]\n    sale_price = ((total_sqft * 200) + (garage_sqft *4)) * ((100 + randrange(-10,10))/100.0)\n\n\n\n\n    range_step = 25000\n\n    price_range = int((sale_price + (range_step-1)) /range_step)*range_step\n\n    if not price_range in price_histogram:\n        price_histogram[price_range] = 0\n\n    price_histogram[price_range] += 1\n    prices.append(price_range)\n\n    sale_price = float(price_range)\n\n\n\n    total_sqft = int((total_sqft + 249)/250)*250\n\n    garage_sqft = int((garage_sqft + 99) / 100) * 100\n\n    year_built = int(year_built / 10) * 10 + 5\n\n\n    temp = pd.DataFrame([[year_built,num_bedrooms,total_sqft,garage_sqft,sale_price]], columns=in_columns)\n\n    df_in = pd.concat([temp, df_in])\n\n\n\n#the output values will be an array of sale_prices\ny = df_in['sale_price'].values\ni = 0\nfor label in price_histogram:\n    ws.cell(row=i + 1, column=1, value=label)\n    ws.cell(row=i + 1, column=2, value=price_histogram[label])\n    i = i + 1\n\nwb.save('houseprices.xlsx')\n\n\n\n#Scale input values to make them more useful for the classifier\n#remove the sale_price from the input data\n\nsc = StandardScaler()\nfeatures_df = df_in.copy()\ndel features_df['sale_price']\n\nX = sc.fit_transform(features_df)\n\n#X = features_df\n\n# Split the data set in a training set (25%) and a test set 
(75%)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)\n\nmodel = MLPClassifier(max_iter = 2000)\n\nprint('doing training ...')\nmodel.fit(X_train, y_train)\nprint('done training ...')\n\n\n# Save the trained model to a file so we can use it in other programs\njoblib.dump(model, 'trained_house_bp_model.pkl')\njoblib.dump(sc, 'trained_house_bp_scaler.pkl')\n\n# Find the error rate on the training set\nmse = mean_absolute_error(y_train, model.predict(X_train))\nprint(\"Training Set Mean Absolute Error: %.4f\" % mse)\n\n# Find the error rate on the test set\nmse = mean_absolute_error(y_test, model.predict(X_test))\nprint(\"Test Set Mean Absolute Error: %.4f\" % mse)\n\npredicted_home_values = model.predict(X)\n\nfor val in predicted_home_values:\n print(\"This house has an estimated value of ${:,.2f}\".format(val))\n\n\n","sub_path":"2.houseprice classification model/2.houseprice classification model/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"439298304","text":"from django.shortcuts import render\nfrom .forms import DatabaseForm\n \nfrom django.http import JsonResponse\nfrom core.models import databases\n\n\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom .serializers import DataSeriallizer\n\nimport os\nimport zipfile\n\n\ndef index(request):\n form = DatabaseForm()\n\n if request.method == 'POST':\n print(request.POST)\n form= DatabaseForm(request.POST)\n if form.is_valid():\n form.save()\n \n context ={'form':form}\n return render(request,'home.html',context)\n\n\n\nclass TestView(APIView):\n\n permission_classes = (IsAuthenticated,)\n\n def get(self,request, *args, **kwargs):\n qs= databases.objects.all()\n serializer = DataSeriallizer(qs, many=True)\n return Response(serializer.data)\n \n def post(self, request, *args, **kwargs):\n serializer = DataSeriallizer(data= request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors) \n\n\n\n\n\ndef database_detail_view(request):\n all_database = databases.objects.all()\n for content in all_database: \n print(content)\n DB_HOST = \tcontent.IP \n DB_USER = content.UserName\n DB_USER_PASSWORD = content.Password\n\n DB_NAME = content.DBname\n BACKUP_PATH = 'F:\\\\backitup'\n ZIP_BACKUP= 'F:\\\\backitup\\zip_backup'\n\n\n dumpcmd = \"mysqldump -h \" + DB_HOST + \" -u \" + DB_USER + \" -p\" + DB_USER_PASSWORD + \" \" + DB_NAME + \" > \" + BACKUP_PATH + \"/\" + DB_NAME + \".sql\"\n os.system(dumpcmd)\n\n handle = zipfile.ZipFile('Sql-DB.zip','w')\n os.chdir('F:\\\\backitup')\n \n for x in os.listdir():\n if x.endswith('.sql'):\n handle.write(x, compress_type= zipfile.ZIP_DEFLATED)\n handle.close()\n \n \n \n return render (request,'detail.html',{'data':all_database})\n\n\ndef ListOfDatabase(request):\n listdatabase = databases.objects.all()\n return render(request,'display.html',{'list':listdatabase})\n\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"466683073","text":"from django.urls import path, re_path\r\nfrom . 
import views\r\n \r\napp_name = 'project'\r\nurlpatterns = [\r\n \r\n    path('', views.index, name='index'),\t\t\t\t\t# Home page\r\n    re_path(r'^register/$', views.register, name='register'),\t    # Register\r\n    re_path(r'^login/$', views.login, name='login'),\t\t# Log in\r\n    re_path(r'^logout/$', views.logout, name='logout'), # Log out\r\n    re_path(r'^user/(?P\d+)/profile/$', views.profile, name='profile'), # Show profile\r\n    re_path(r'^user/(?P\d+)/profile/update/$',\r\n            views.profile_update,\r\n            name='profile_update'), # Update profile\r\n    re_path(r'^user/(?P\d+)/pwdchange/$',\r\n            views.pwd_change,\r\n            name='pwd_change'), # Change password\r\n    # Courses: create / delete / read / update\r\n    path('course/', views.CourseList.as_view(), name='course_list'),\r\n    re_path(r'^user/(?P\d+)/course/$',\r\n            views.CourseListSelf.as_view(),\r\n            name='course_list_self'), # Users view their own courses\r\n    re_path(r'^course/create/$',\r\n            views.CourseCreate.as_view(),\r\n            name='course_create'), # Teacher creates a course\r\n    re_path(r'^course/(?P\d+)/$',\r\n            views.CourseDetail.as_view(),\r\n            name='course_detail'), # View course details\r\n    re_path(r'^course/(?P\d+)/update/$',\r\n            views.CourseUpdate.as_view(),\r\n            name='course_update'), # Teacher updates a course\r\n    re_path(r'^course/(?P\d+)/delete$',\r\n            views.CourseDelete.as_view(),\r\n            name='course_delete'), # Teacher deletes a course\r\n    re_path(r'^course/(?P\d+)/select$',\r\n            views.course_select,\r\n            name='course_select'), # Student joins a course\r\n    re_path(r'^course/(?P\d+)/cancel$',\r\n            views.course_cancel,\r\n            name='course_cancel'), # Student drops a course\r\n    # re_path(r'^course/(?P\d+)/list$',\r\n    #         views.HomeworkList,\r\n    #         name='homework_list'), # Student enters a course, remember to keep this commented out!!!\r\n    # re_path(r'^course/(?P\d+)/homework/publish/$', views.HomeworkListPublished, name='homework_list_published'), # Teacher enters a course, remember to keep this commented out!!!\r\n    # Teacher-published homework: create / delete / read / update\r\n    re_path(r'^course/(?P\d+)/homework/create/$',\r\n            views.HomeworkCreate.as_view(),\r\n            name='homework_create'), # Create homework\r\n    re_path(r'^course/(?P\d+)/homework/(?P\d+)/$',\r\n            views.HomeworkDetail.as_view(),\r\n            name='homework_detail'), # Homework details\r\n    re_path(r'^course/(?P\d+)/homework/(?P\d+)/update/$',\r\n            views.HomeworkUpdate.as_view(),\r\n            name='homework_update'), # Update homework\r\n    re_path(r'^course/(?P\d+)/homework/(?P\d+)/delete$',\r\n            views.HomeworkDelete.as_view(),\r\n            name='homework_delete'), # Delete homework\r\n    re_path(r'^course/(?P\d+)/homework/draft/$',\r\n            views.HomeworkListDraft.as_view(),\r\n            name='homework_list_publishing'), # List of homework awaiting publication\r\n    re_path(r'^course/(?P\d+)/homework/publish/$',\r\n            views.HomeworkListPublished.as_view(),\r\n            name='homework_list_published'), # List of published homework\r\n    re_path(r'^course/(?P\d+)/homework/(?P\d+)/publish/$',\r\n            views.homework_publish,\r\n            name='homework_publish'), # Publish a draft\r\n\r\n\r\n    # Student homework submissions: create / delete / read / update\r\n    re_path(r'^course/(?P\d+)/list$',\r\n            views.HomeworkList.as_view(),\r\n            name='homework_list'), # List of all homework\r\n    re_path(r'^course/(?P\d+)/handin/list$',\r\n            views.HandinListDone.as_view(),\r\n            name='handin_list_done'), # List of submitted homework\r\n    re_path(r'^course/(?P\d+)/homework/(?P\d+)/handin/create/$',\r\n            views.HandinCreate.as_view(),\r\n            name='handin_create'), # Create a homework submission\r\n    re_path(r'^course/(?P\d+)/homework/(?P\d+)/handin/(?P\d+)/$',\r\n            views.HandinDetail.as_view(),\r\n            name='handin_detail'), # Homework submission details\r\n    re_path(r'^course/(?P\d+)/homework/(?P\d+)/handin/(?P\d+)/update/$',\r\n            views.HandinUpdate.as_view(),\r\n            name='handin_update'), # Update a homework submission\r\n    re_path(r'^course/(?P\d+)/homework/(?P\d+)/handin/(?P\d+)/delete/$',\r\n            views.HandinDelete.as_view(),\r\n            name='handin_delete'), # Delete a homework submission\r\n\r\n    # Student homework statistics\r\n    re_path(r'^course/(?P\d+)/homework/(?P\d+)/count$',\r\n            
views.HomeworkHandin.as_view(),\r\n name='homework_handin_count'),\r\n # 学生统计\r\n re_path(r'^course/(?P\\d+)/student$',\r\n views.course_student,\r\n name='course_student'),\r\n # 评论功能\r\n re_path(r'^comment/(?P[0-9]+)/$',\r\n views.homework_comment,\r\n name='homework_comment'),\r\n # 打分\r\n re_path(r'^handin/(?P\\d+)/score$', views.score, name='score'),\r\n # 获取成绩\r\n path('get_score/', views.get_score, name='get_score'),\r\n\r\n]","sub_path":"project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"238396742","text":"import _plotly_utils.basevalidators\n\n\nclass IcicleValidator(_plotly_utils.basevalidators.CompoundValidator):\n def __init__(self, plotly_name=\"icicle\", parent_name=\"\", **kwargs):\n super(IcicleValidator, self).__init__(\n plotly_name=plotly_name,\n parent_name=parent_name,\n data_class_str=kwargs.pop(\"data_class_str\", \"Icicle\"),\n data_docs=kwargs.pop(\n \"data_docs\",\n \"\"\"\n branchvalues\n Determines how the items in `values` are\n summed. When set to \"total\", items in `values`\n are taken to be value of all its descendants.\n When set to \"remainder\", items in `values`\n corresponding to the root and the branches\n sectors are taken to be the extra part not part\n of the sum of the values at their leaves.\n count\n Determines default for `values` when it is not\n provided, by inferring a 1 for each of the\n \"leaves\" and/or \"branches\", otherwise 0.\n customdata\n Assigns extra data each datum. This may be\n useful when listening to hover, click and\n selection events. Note that, \"scatter\" traces\n also appends customdata items in the markers\n DOM elements\n customdatasrc\n Sets the source reference on Chart Studio Cloud\n for `customdata`.\n domain\n :class:`plotly.graph_objects.icicle.Domain`\n instance or dict with compatible properties\n hoverinfo\n Determines which trace information appear on\n hover. If `none` or `skip` are set, no\n information is displayed upon hovering. But, if\n `none` is set, click and hover events are still\n fired.\n hoverinfosrc\n Sets the source reference on Chart Studio Cloud\n for `hoverinfo`.\n hoverlabel\n :class:`plotly.graph_objects.icicle.Hoverlabel`\n instance or dict with compatible properties\n hovertemplate\n Template string used for rendering the\n information that appear on hover box. Note that\n this will override `hoverinfo`. Variables are\n inserted using %{variable}, for example \"y:\n %{y}\" as well as %{xother}, {%_xother},\n {%_xother_}, {%xother_}. When showing info for\n several points, \"xother\" will be added to those\n with different x positions from the first\n point. An underscore before or after\n \"(x|y)other\" will add a space on that side,\n only when this field is shown. Numbers are\n formatted using d3-format's syntax\n %{variable:d3-format}, for example \"Price:\n %{y:$.2f}\". https://github.com/d3/d3-\n format/tree/v1.4.5#d3-format for details on the\n formatting syntax. Dates are formatted using\n d3-time-format's syntax %{variable|d3-time-\n format}, for example \"Day: %{2019-01-01|%A}\".\n https://github.com/d3/d3-time-\n format/tree/v2.2.3#locale_format for details on\n the date formatting syntax. The variables\n available in `hovertemplate` are the ones\n emitted as event data described at this link\n https://plotly.com/javascript/plotlyjs-\n events/#event-data. 
Additionally, every\n attributes that can be specified per-point (the\n ones that are `arrayOk: true`) are available.\n Finally, the template string has access to\n variables `currentPath`, `root`, `entry`,\n `percentRoot`, `percentEntry` and\n `percentParent`. Anything contained in tag\n `` is displayed in the secondary box,\n for example \"{fullData.name}\".\n To hide the secondary box completely, use an\n empty tag ``.\n hovertemplatesrc\n Sets the source reference on Chart Studio Cloud\n for `hovertemplate`.\n hovertext\n Sets hover text elements associated with each\n sector. If a single string, the same string\n appears for all data points. If an array of\n string, the items are mapped in order of this\n trace's sectors. To be seen, trace `hoverinfo`\n must contain a \"text\" flag.\n hovertextsrc\n Sets the source reference on Chart Studio Cloud\n for `hovertext`.\n ids\n Assigns id labels to each datum. These ids for\n object constancy of data points during\n animation. Should be an array of strings, not\n numbers or any other type.\n idssrc\n Sets the source reference on Chart Studio Cloud\n for `ids`.\n insidetextfont\n Sets the font used for `textinfo` lying inside\n the sector.\n labels\n Sets the labels of each of the sectors.\n labelssrc\n Sets the source reference on Chart Studio Cloud\n for `labels`.\n leaf\n :class:`plotly.graph_objects.icicle.Leaf`\n instance or dict with compatible properties\n legend\n Sets the reference to a legend to show this\n trace in. References to these legends are\n \"legend\", \"legend2\", \"legend3\", etc. Settings\n for these legends are set in the layout, under\n `layout.legend`, `layout.legend2`, etc.\n legendgrouptitle\n :class:`plotly.graph_objects.icicle.Legendgroup\n title` instance or dict with compatible\n properties\n legendrank\n Sets the legend rank for this trace. Items and\n groups with smaller ranks are presented on\n top/left side while with \"reversed\"\n `legend.traceorder` they are on bottom/right\n side. The default legendrank is 1000, so that\n you can use ranks less than 1000 to place\n certain items before all unranked items, and\n ranks greater than 1000 to go after all\n unranked items. When having unranked or equal\n rank items shapes would be displayed after\n traces i.e. according to their order in data\n and layout.\n legendwidth\n Sets the width (in px or fraction) of the\n legend for this trace.\n level\n Sets the level from which this trace hierarchy\n is rendered. Set `level` to `''` to start from\n the root node in the hierarchy. Must be an \"id\"\n if `ids` is filled in, otherwise plotly\n attempts to find a matching item in `labels`.\n marker\n :class:`plotly.graph_objects.icicle.Marker`\n instance or dict with compatible properties\n maxdepth\n Sets the number of rendered sectors from any\n given `level`. Set `maxdepth` to \"-1\" to render\n all the levels in the hierarchy.\n meta\n Assigns extra meta information associated with\n this trace that can be used in various text\n attributes. Attributes such as trace `name`,\n graph, axis and colorbar `title.text`,\n annotation `text` `rangeselector`,\n `updatemenues` and `sliders` `label` text all\n support `meta`. To access the trace `meta`\n values in an attribute in the same trace,\n simply use `%{meta[i]}` where `i` is the index\n or key of the `meta` item in question. 
To\n access trace `meta` in layout attributes, use\n `%{data[n[.meta[i]}` where `i` is the index or\n key of the `meta` and `n` is the trace index.\n metasrc\n Sets the source reference on Chart Studio Cloud\n for `meta`.\n name\n Sets the trace name. The trace name appears as\n the legend item and on hover.\n opacity\n Sets the opacity of the trace.\n outsidetextfont\n Sets the font used for `textinfo` lying outside\n the sector. This option refers to the root of\n the hierarchy presented on top left corner of a\n treemap graph. Please note that if a hierarchy\n has multiple root nodes, this option won't have\n any effect and `insidetextfont` would be used.\n parents\n Sets the parent sectors for each of the\n sectors. Empty string items '' are understood\n to reference the root node in the hierarchy. If\n `ids` is filled, `parents` items are understood\n to be \"ids\" themselves. When `ids` is not set,\n plotly attempts to find matching items in\n `labels`, but beware they must be unique.\n parentssrc\n Sets the source reference on Chart Studio Cloud\n for `parents`.\n pathbar\n :class:`plotly.graph_objects.icicle.Pathbar`\n instance or dict with compatible properties\n root\n :class:`plotly.graph_objects.icicle.Root`\n instance or dict with compatible properties\n sort\n Determines whether or not the sectors are\n reordered from largest to smallest.\n stream\n :class:`plotly.graph_objects.icicle.Stream`\n instance or dict with compatible properties\n text\n Sets text elements associated with each sector.\n If trace `textinfo` contains a \"text\" flag,\n these elements will be seen on the chart. If\n trace `hoverinfo` contains a \"text\" flag and\n \"hovertext\" is not set, these elements will be\n seen in the hover labels.\n textfont\n Sets the font used for `textinfo`.\n textinfo\n Determines which trace information appear on\n the graph.\n textposition\n Sets the positions of the `text` elements.\n textsrc\n Sets the source reference on Chart Studio Cloud\n for `text`.\n texttemplate\n Template string used for rendering the\n information text that appear on points. Note\n that this will override `textinfo`. Variables\n are inserted using %{variable}, for example \"y:\n %{y}\". Numbers are formatted using d3-format's\n syntax %{variable:d3-format}, for example\n \"Price: %{y:$.2f}\". https://github.com/d3/d3-\n format/tree/v1.4.5#d3-format for details on the\n formatting syntax. Dates are formatted using\n d3-time-format's syntax %{variable|d3-time-\n format}, for example \"Day: %{2019-01-01|%A}\".\n https://github.com/d3/d3-time-\n format/tree/v2.2.3#locale_format for details on\n the date formatting syntax. Every attributes\n that can be specified per-point (the ones that\n are `arrayOk: true`) are available. Finally,\n the template string has access to variables\n `currentPath`, `root`, `entry`, `percentRoot`,\n `percentEntry`, `percentParent`, `label` and\n `value`.\n texttemplatesrc\n Sets the source reference on Chart Studio Cloud\n for `texttemplate`.\n tiling\n :class:`plotly.graph_objects.icicle.Tiling`\n instance or dict with compatible properties\n uid\n Assign an id to this trace, Use this to provide\n object constancy between traces during\n animations and transitions.\n uirevision\n Controls persistence of some user-driven\n changes to the trace: `constraintrange` in\n `parcoords` traces, as well as some `editable:\n true` modifications such as `name` and\n `colorbar.title`. Defaults to\n `layout.uirevision`. 
Note that other user-\n driven trace attribute changes are controlled\n by `layout` attributes: `trace.visible` is\n controlled by `layout.legend.uirevision`,\n `selectedpoints` is controlled by\n `layout.selectionrevision`, and\n `colorbar.(x|y)` (accessible with `config:\n {editable: true}`) is controlled by\n `layout.editrevision`. Trace changes are\n tracked by `uid`, which only falls back on\n trace index if no `uid` is provided. So if your\n app can add/remove traces before the end of the\n `data` array, such that the same trace has a\n different index, you can still preserve user-\n driven changes if you give each trace a `uid`\n that stays with it as it moves.\n values\n Sets the values associated with each of the\n sectors. Use with `branchvalues` to determine\n how the values are summed.\n valuessrc\n Sets the source reference on Chart Studio Cloud\n for `values`.\n visible\n Determines whether or not this trace is\n visible. If \"legendonly\", the trace is not\n drawn, but can appear as a legend item\n (provided that the legend itself is visible).\n\"\"\",\n ),\n **kwargs,\n )\n","sub_path":"packages/python/plotly/plotly/validators/_icicle.py","file_name":"_icicle.py","file_ext":"py","file_size_in_byte":14515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"486270758","text":"#\n# @lc app=leetcode.cn id=107 lang=python3\n#\n# [107] 二叉树的层序遍历 II\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:\n result = []\n\n if root == None:\n return result\n\n layer = [root]\n while layer:\n temp = []\n size = len(layer)\n for i in range(size):\n item = layer.pop(0)\n temp.append(item.val)\n\n if item.left:\n layer.append(item.left)\n if item.right:\n layer.append(item.right)\n result.append(temp)\n\n return result[::-1]\n# @lc code=end\n\n","sub_path":"107.二叉树的层序遍历-ii.py","file_name":"107.二叉树的层序遍历-ii.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"61638477","text":"import sys\nimport file_helper as fh\nimport lxml.etree as ET\n\nscript_name = sys.argv\n\nfiles_to_process = fh.filter_files_by_suffix(fh.files_to_process(\"../l2j_datapack/dist/game/data/stats/npcs/\"), '.xml')\n\nfor item in files_to_process:\n dom = ET.parse(item)\n npcs = dom.findall('.//npc')\n for npc in npcs:\n if npc.get('originalLevel'):\n npc.set('level', npc.get('originalLevel'))\n npc.attrib.pop(\"originalLevel\", None)\n\n with open(item, 'wb') as f:\n f.write(ET.tostring(dom))\n","sub_path":"tools/remove_original_level.py","file_name":"remove_original_level.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"381556374","text":"from sets import Set\r\n\r\n\r\ndef checkDigits(number, digits):\r\n if ( len(digits) == 0 ):\r\n return True\r\n\r\n while( number > 0 ):\r\n digit = number%10\r\n number = number/10\r\n if ( digit in digits ):\r\n #print digit\r\n digits.remove( digit )\r\n\r\n if ( len(digits) == 0 ):\r\n return True\r\n else:\r\n return False\r\n\r\ndef checkInsomnia(initnum):\r\n \"Check Insomnia\"\r\n digits = Set([0,1,2,3,4,5,6,7,8,9])\r\n find = False\r\n for N in range(1,1000000):\r\n number = N * initnum\r\n #print str(number) + 
'/ i * N: ' + str(N) + ' * ' + str(number)\r\n if checkDigits(number, digits):\r\n find = True\r\n break\r\n\r\n if find == True:\r\n return str(number)\r\n else:\r\n return \"INSOMNIA\"\r\n\r\nf = open ( \"./A-large.in\", 'r' )\r\nfoutput = open ( \"./output_large.txt\", 'w')\r\nT = int(f.readline())\r\nif T >= 1 and T <= 100:\r\n i = 1\r\n while True:\r\n input = f.readline()\r\n if not input:\r\n break\r\n\r\n number = int(input)\r\n answer = checkInsomnia(number)\r\n output = 'Case #'+ str(i) + ': ' + answer + '\\n'\r\n foutput.write(output)\r\n i = i+1\r\nelse:\r\n print('Limits Invalid')\r\n\r\nf.close()\r\nfoutput.close()\r\n","sub_path":"codes/CodeJamCrawler/16_0_1/gawoon/countingsheep.py","file_name":"countingsheep.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"49704277","text":"TOTAL = 5000\n\n\ndef sortTest(func, total=1000):\n import random\n import copy\n import operator\n import math\n import time\n arrList = [i for i in range(-math.floor(total / 2), math.ceil(total / 2))]\n arrListR = copy.deepcopy(arrList)\n while operator.eq(arrList, arrListR):\n random.shuffle(arrListR)\n # print(\"--- [Origin List]\", arrList, \"Use\",\n # func.__name__, \"with Total:\", len(arrList))\n # print(\"--> [Random List]\", arrListR, \"Use\",\n # func.__name__, \"with Total:\", len(arrList))\n start = time.clock()\n arrListR = func(arrListR)\n end = time.clock()\n runtime = end - start\n # print(\"--> [Sorted List]\", arrListR, \"Use\",\n # func.__name__, \"with Total:\", len(arrList))\n if operator.eq(arrList, arrListR):\n print(\"[Success]\", func.__name__, \"with Total:\",\n len(arrList), \"in %.5fs\" % runtime)\n return True\n else:\n print(\"[Fail]\", func.__name__, \"with Total:\",\n len(arrList), \"in %.5fs\" % runtime)\n return False\n\n\n# Bubble Sort\ndef BubbleSort(arr):\n '''\n 冒泡排序\n 时间复杂度O(n^2)(最好情况O(n))\n 空间复杂度O(1)\n 内部排序\n 稳定\n '''\n for i in range(1, len(arr)):\n for j in range(0, len(arr) - i):\n if arr[j] > arr[j+1]:\n arr[j], arr[j+1] = arr[j+1], arr[j]\n return arr\n\n\n# Selection Sort\ndef SelectionSort(arr):\n '''\n 选择排序\n 时间复杂度O(n^2)\n 空间复杂度O(1)\n 内部排序\n 不稳定\n '''\n for i in range(len(arr)-1):\n MinIndex = i\n for j in range(i, len(arr)):\n if arr[j] < arr[MinIndex]:\n MinIndex = j\n\n if MinIndex != i:\n arr[i], arr[MinIndex] = arr[MinIndex], arr[i]\n\n return arr\n\n\n# Insertion Sort\ndef InsertionSort(arr):\n '''\n 插入排序\n 时间复杂度O(n^2)(最好情况O(n))\n 空间复杂度O(1)\n 内部排序\n 稳定\n '''\n for i in range(len(arr)-1):\n SortedIndex = i\n current = arr[i+1]\n while SortedIndex >= 0 and arr[SortedIndex] > current:\n arr[SortedIndex+1] = arr[SortedIndex]\n SortedIndex -= 1\n arr[SortedIndex+1] = current\n\n return arr\n\n\n# Merge Sort\ndef MergeSort(arr):\n import math\n if len(arr) < 2:\n return arr\n middle = math.floor(len(arr)/2)\n left = arr[:middle]\n right = arr[middle:]\n\n return Merge(MergeSort(left), MergeSort(right))\n\n\ndef Merge(left, right):\n result = []\n while left and right:\n if left[0] < right[0]:\n result.append(left.pop(0))\n else:\n result.append(right.pop(0))\n\n while left:\n result.append(left.pop(0))\n\n while right:\n result.append(right.pop(0))\n\n return result\n\n\n# Shell Sort\ndef ShellSort(arr):\n GapList = [1, 4, 10, 23, 57, 132, 301, 701]\n for g in GapList:\n if len(arr) > g:\n GapIndex = GapList.index(g)\n\n gap = GapList[GapIndex]\n\n while GapIndex >= 0:\n for i in range(gap, len(arr)):\n current = arr[i]\n j = i - gap\n while j >= 0 and 
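The `sortTest` harness in the sorting record above times runs with `time.clock()`, which was deprecated in Python 3.3 and removed in 3.8, so the harness fails on current interpreters. A sketch of the same measurement with `time.perf_counter()`:

```python
# time.perf_counter() replaces the removed time.clock() for benchmarking
# the sort functions defined in the record above.
import time

def timed_sort(func, arr):
    start = time.perf_counter()
    result = func(arr)
    runtime = time.perf_counter() - start
    print("[%s] %d items in %.5fs" % (func.__name__, len(result), runtime))
    return result
```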
arr[j] > current:\n arr[j+gap] = arr[j]\n j -= gap\n arr[j+gap] = current\n GapIndex -= 1\n gap = GapList[GapIndex]\n return arr\n\n\n# Quick Sort\ndef QuickSort(arr, left=None, right=None):\n left = 0 if not isinstance(left, int) else left\n right = len(arr) - 1 if not isinstance(right, int) else right\n if left < right:\n PivotIndex = Partition(arr, left, right)\n PartitionLeft = QuickSort(arr, left, PivotIndex-1)\n PartitionRight = QuickSort(arr, PivotIndex+1, right)\n\n return arr\n\n\ndef Partition(arr, left, right):\n pivot = left\n index = pivot + 1\n i = index\n while i <= right:\n if arr[i] < arr[pivot]:\n arr[index], arr[i] = arr[i], arr[index]\n index += 1\n i += 1\n arr[pivot], arr[index-1] = arr[index-1], arr[pivot]\n return index-1\n\n\n# Heap Sort\ndef max_heapify(heap, root):\n left = 2*root + 1\n right = left + 1\n largest = root\n\n if left < arrLen and heap[left] > heap[largest]:\n largest = left\n if right < arrLen and heap[right] > heap[largest]:\n largest = right\n\n if largest != root:\n heap[largest], heap[root] = heap[root], heap[largest]\n max_heapify(heap, largest)\n\n\ndef build_max_heap(arr):\n import math\n for i in range(math.floor(len(arr)/2)-1, -1, -1):\n max_heapify(arr, i) # 从最后一个非叶子节点向前最大堆化。\n\n\ndef heap_sort(arr):\n global arrLen\n arrLen = len(arr)\n\n build_max_heap(arr)\n for i in range(len(arr)-1, 0, -1):\n arr[0], arr[i] = arr[i], arr[0]\n arrLen -= 1\n max_heapify(arr, 0)\n\n return arr\n\n\n# Count Sort\ndef count_sort(arr):\n max_val = max(arr)\n min_val = min(arr)\n StoreLen = max_val - min_val + 1\n StoreArr = [0] * StoreLen\n SortedIndex = 0\n\n for i in range(len(arr)):\n if not StoreArr[arr[i] - min_val]:\n StoreArr[arr[i] - min_val] = 0\n StoreArr[arr[i] - min_val] += 1\n\n for i in range(StoreLen):\n while StoreArr[i] > 0:\n arr[SortedIndex] = i + min_val\n SortedIndex += 1\n StoreArr[i] -= 1\n\n return arr\n\n\nsortTest(count_sort)\n","sub_path":"算法/Sorting.py","file_name":"Sorting.py","file_ext":"py","file_size_in_byte":5333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"213887818","text":"from __future__ import print_function, division\nfrom multiprocessing import Pool\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n#from multiprocessing import Process, Array\n#import multiprocessing as mp\nfrom sys import argv\nimport numpy as np\nimport readsubfHDF5\nimport snapHDF5\ntry:\n import cPickle as pickle\nexcept:\n import pickle\npi = 3.14159265358979\nHYDROGEN_MASSFRAC = 0.76\nimport sys\n\n#wraps to account for period boundary conditions. 
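Beyond the single `sortTest(count_sort)` call at the end of that record, a quick property check (sketch) compares several of the record's sorters against Python's built-in `sorted`; it assumes those functions are in scope:

```python
# Sanity check: every sorter should agree with sorted() on random data.
import random

def check(sorter, n=200):
    data = [random.randint(-50, 50) for _ in range(n)]
    assert sorter(list(data)) == sorted(data), sorter.__name__

for sorter in (BubbleSort, SelectionSort, InsertionSort, MergeSort, count_sort):
    check(sorter)
```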
This mutates the original entry\ndef dx_wrap(dx, box):\n idx = dx > +box/2.0\n dx[idx] -= box\n idx = dx < -box/2.0\n dx[idx] += box\n return dx\n#Calculates distance taking into account periodic boundary conditions\ndef dist2(dx, dy, dz, box):\n return dx_wrap(dx,box)**2 + dx_wrap(dy,box)**2 + dx_wrap(dz,box)**2\n\n# Units\nGRAVITY_cgs = 6.672e-8\nBOLTZMANN = 1.38065e-16\nPROTONMASS = 1.67262178e-24\nGAMMA = 5.0 / 3.0\nGAMMA_MINUS1 = GAMMA - 1.0\nMSUN = 1.989e33\nMPC = 3.085678e24\nKPC = 3.085678e21\nZSUN = 0.0127\nUnitLength_in_cm = 3.085678e21 # code length unit in cm/h\nUnitMass_in_g = 1.989e43 # code length unit in g/h\nUnitVelocity_in_cm_per_s = 1.0e5\nUnitTime_in_s = UnitLength_in_cm / UnitVelocity_in_cm_per_s\nUnitDensity_in_cgs = UnitMass_in_g/ np.power(UnitLength_in_cm,3)\nUnitPressure_in_cgs = UnitMass_in_g / UnitLength_in_cm / np.power(UnitTime_in_s,2)\nUnitEnergy_in_cgs = UnitMass_in_g * np.power(UnitLength_in_cm,2) / np.power(UnitTime_in_s,2)\nGCONST = GRAVITY_cgs / np.power(UnitLength_in_cm,3) * UnitMass_in_g * np.power(UnitTime_in_s,2)\ncritical_density = 3.0 * .1 * .1 / 8.0 / np.pi / GCONST #.1 is to convert 100/Mpc to 1/kpc, this is in units of h^2\nhubbleparam = .71 #hubble constant\nbaryonfraction = .044 / .27 #OmegaB/Omega0\ncolors = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\n\nres = '14Mpc'\nvel = '118kms'\nsnapnum = 32\nspecies = 'H'\nref = 'OFF'\n\ns_vel = vel.replace(\".\",\"\")\ns_res = res.replace(\".\",\"\")\n\n#fp_halo = open('/home/nakazatoyr/arepo-c/'+res+vel+species+'/SIGOs_info_'+res+'_'+vel+'_'+species+'.dat', 'w')\nif str(ref) == 'ON':\n fp_halo = open('/home/nakazatoyr/arepo-c/Analysis/output/'+res+vel+species+'/SIGOs_info_'+res+'_'+vel+'_'+species+'.dat', 'w')\n fp_parID = open('/home/nakazatoyr/arepo-c/Analysis/output/'+res+vel+species+'/SIGO_IDinfo_'+res+'_'+vel+'_'+species+'.dat', 'w')\n fp_posi = open('/home/nakazatoyr/arepo-c/Analysis/output/'+res+vel+species+'/SIGOs_ID_posi_'+res+'_'+vel+'_'+species+'.dat', 'w')\n fp_test = open('/home/nakazatoyr/arepo-c/Analysis/output/'+res+vel+species+'/SIGOs_ID_posi_test.dat', 'w')\nif str(ref) == 'OFF':\n fp_halo = open('/home/nakazatoyr/arepo-c/Analysis/output/'+res+vel+species+'_nonref/SIGOs_info_'+res+'_'+vel+'_'+species+'.dat', 'w')\n fp_parID = open('/home/nakazatoyr/arepo-c/Analysis/output/'+res+vel+species+'_nonref/SIGO_IDinfo_'+res+'_'+vel+'_'+species+'.dat', 'w')\n fp_posi = open('/home/nakazatoyr/arepo-c/Analysis/output/'+res+vel+species+'_nonref/SIGOs_ID_posi_'+res+'_'+vel+'_'+species+'.dat', 'w')\n fp_test = open('/home/nakazatoyr/arepo-c/Analysis/output/'+res+vel+species+'_nonref/SIGOs_ID_posi_test.dat', 'w')\n\nfor snapnum in [32]:\n if str(ref) == 'ON':\n #filename = \"/xc-work/nakazatoyr/arepo-c/\" + res + vel + species + \"_refine/\"\n filename = \"/xc-work/nakazatoyr/arepo-c/\" + res + vel + species + \"_refine/\"\n if str(ref) == 'OFF':\n filename = \"/xc-work/nakazatoyr/arepo-c/\" + res + vel + species + \"_nonref/\"\n #filename = \"/xc-work/chiakign/arepo-c/\" + res + vel + species + \"/\"\n filename2 = filename + \"GasOnly_FOF\" #Used for readsubfHDF5\n filename3 = filename2 + \"/snap-groupordered_\" + str(snapnum ).zfill(3) #Used for snapHDF5\n filename4 = filename + \"snap_\" + str(snapnum).zfill(3) #Used for hdf5lib, snapHDF5\n\n #read header information\n header = snapHDF5.snapshot_header(filename3)\n red = header.redshift\n atime = header.time\n boxSize = header.boxsize\n Omega0 = header.omega0\n OmegaLambda = header.omegaL\n massDMParticle = 
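The `dx_wrap` helper above folds displacements back into the primary periodic image; as its comment warns, it mutates its argument. A tiny numeric illustration of the same wrapping:

```python
# Displacements outside [-box/2, +box/2] are shifted by one box length,
# exactly as dx_wrap does above (note: in place).
import numpy as np

box = 10.0
dx = np.array([6.0, -7.0, 2.0])
dx[dx > +box / 2.0] -= box
dx[dx < -box / 2.0] += box
print(dx)  # -> [-4.  3.  2.]
```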
header.massarr[1] #all DM particles have same mass\n\n print('reading header information done')\n #redshift evolution of critical density\n critical_density *= Omega0 + atime**3 * OmegaLambda\n critical_density_gas = critical_density * baryonfraction\n\n print(filename3)\n #load particle indices and catalogs\n pGas= snapHDF5.read_block(filename3,\"POS \", parttype=0)\n iGas= snapHDF5.read_block(filename3,\"ID \", parttype=0)\n mGas= snapHDF5.read_block(filename3,\"MASS\", parttype=0)\n eGas= snapHDF5.read_block(filename3,\"U \", parttype=0)\n dGas= snapHDF5.read_block(filename3,\"RHO \", parttype=0)\n xHI = snapHDF5.read_block(filename3,\"HI \", parttype=0)\n if str(species)=='H2':\n xH2I= snapHDF5.read_block(filename3,\"H2I \", parttype=0)\n pDM = snapHDF5.read_block(filename3,\"POS \", parttype=1)\n cat = readsubfHDF5.subfind_catalog(filename2, snapnum)\n #Read in particles\n '''\n posgas = snapHDF5.read_block(filename4, \"POS \", parttype=0)\n posdm = snapHDF5.read_block(filename4, \"POS \", parttype=1)\n idxdm = snapHDF5.read_block(filename4, \"ID \", parttype=1)\n idxgas = snapHDF5.read_block(filename4, \"ID \", parttype=0)\n '''\n r200 = cat.Group_R_Crit200\n print('loding catalog done')\n\n halo100_indices = np.where(cat.GroupLenType[:,0] > 100)[0]\n startAllGas = []\n endAllGas = []\n for i in halo100_indices:\n startAllGas += [np.sum(cat.GroupLenType[:i,0])]\n endAllGas += [startAllGas[-1] + cat.GroupLenType[i,0]]\n print('defining stat and endAllGas done!')\n\n #load shrinker and match data\n if str(ref) == 'ON':\n with open(\"/home/nakazatoyr/arepo-c/Analysis/output/\"+res+vel+species+'/shrinker'+s_res+'_'+s_vel+'_'+str(snapnum)+'.dat','rb') as f:\n shrunken = pickle.load(f)\n with open('/home/nakazatoyr/arepo-c/Analysis/output/'+res+vel+species+'/match'+res+'_'+vel+'_'+str(snapnum)+'.dat','rb') as f:\n matched = pickle.load(f)\n if str(ref) == 'OFF':\n with open(\"/home/nakazatoyr/arepo-c/Analysis/output/\"+res+vel+species+'_nonref/shrinker'+s_res+'_'+s_vel+'_'+str(snapnum)+'.dat','rb') as f:\n shrunken = pickle.load(f)\n with open('/home/nakazatoyr/arepo-c/Analysis/output/'+res+vel+species+'_nonref/match'+res+'_'+vel+'_'+str(snapnum)+'.dat','rb') as f:\n matched = pickle.load(f)\n\n print('loading data done!')\n SIGO_call = 0\n for i in halo100_indices:\n print('now, halo' + str(i)+ 'is reading..')\n SIGO = 0\n Fbar = 0.0\n cm = shrunken['cm'][i]\n rotation = shrunken['rotation'][i]\n radii = shrunken['radii'][i]\n mDM = shrunken['mDM'][i]\n DMinEll = shrunken['DMindices'][i]\n Rclosest = matched['Rmin'][i]\n R200dm = matched['R200dm'][i]\n\n if radii[0] > 0.: #In case of shrinker errors\n if np.sum(cm == np.array([0., 0., 0.]))==3:\n totalGas = np.sum(mGas[startAllGas[i]: endAllGas[i]])\n cm = np.array([np.sum(pGas[startAllGas[i]: endAllGas[i], j]*mGas[startAllGas[i]: endAllGas[i]])/totalGas for j in range(3)])\n\n #Get positions of gas particles\n P = pGas[startAllGas[i]: endAllGas[i]]\n M = mGas[startAllGas[i]: endAllGas[i]]\n Pdm = pDM[DMinEll]\n #Shift cooedinate system to center on the center of the ellipsoid\n Precentered = dx_wrap(P - cm, boxSize)\n PrecenteredDM = dx_wrap(Pdm -cm, boxSize)\n #Rotate coordinated to the axes point along x,y,z directions:\n Precentered = np.array([np.dot(pp, rotation.T) for pp in Precentered])\n PrecenteredDM = np.array([np.dot(pp, rotation.T) for pp in PrecenteredDM])\n\n #Figure out which particles are inside the ellipsoid\n inEll = (Precentered[:,0]**2./radii[0]**2. 
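The fallback centre-of-mass above is computed with a per-axis list comprehension; `np.average` with `weights` expresses the same mass-weighted mean in one call. A sketch with made-up positions and masses:

```python
import numpy as np

P = np.array([[1.0, 0.0, 0.0],
              [3.0, 2.0, 0.0]])   # gas particle positions (illustrative)
M = np.array([1.0, 3.0])          # particle masses (illustrative)
cm = np.average(P, axis=0, weights=M)  # mass-weighted mean position
print(cm)  # -> [2.5 1.5 0. ]
```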
+ Precentered[:, 1]**2./radii[1]**2 + Precentered[:,2]**2./radii[2]**2)<=1.\n if np.shape(P[inEll])[0] > 32: #Only consider SIGOs with greater than 32 particles\n if(np.sum(M[inEll])/(np.sum(M[inEll])+ mDM) > .4) and (Rclosest/R200dm>1.): #'inEll' stands for inside the ellipsoid\n if SIGO_call > 10:\n break\n else:\n #fp_parID = '/home/nakazatoyr/arepo-c/Analysis/ArepoPostProcessing-H/SIGO'+ str(i) +'_IDinfo_'+res+'_'+vel+'_'+species+'.dat'\n #baryon fraction\n Fbar = np.sum(M[inEll])/(np.sum(M[inEll])+mDM)\n #HI and H2I fraction in the clump\n ID = iGas[startAllGas[i]: endAllGas[i]]\n M = mGas[startAllGas[i]: endAllGas[i]]\n Rho = dGas[startAllGas[i]: endAllGas[i]]\n E = eGas[startAllGas[i]: endAllGas[i]]\n XHI = xHI[startAllGas[i]: endAllGas[i]]\n posi_x = pGas[startAllGas[i]: endAllGas[i], 0]\n posi_y = pGas[startAllGas[i]: endAllGas[i], 1]\n posi_z = pGas[startAllGas[i]: endAllGas[i], 2]\n if str(species)=='H2':\n XH2I = xH2I[startAllGas[i]: endAllGas[i]]\n print('now writing the halo information..')\n print(\"%3d %2d %5d %13.5e %13.5e\\n\", snapnum, red, i, Rclosest, Fbar)\n fp_halo.write(\"%3d %2d %5d %13.5e %13.5e\\n\" % (\n \t\t\t snapnum\n \t\t\t, red\n \t\t\t, i\n \t\t\t, Rclosest\n \t\t\t, Fbar\n \t\t\t#, HYDROGEN_MASSFRAC * (rho * hubbleparam**2 / atime**3 * UnitDensity_in_cgs) / PROTONMASS\n \t\t\t#, 1.23*PROTONMASS / BOLTZMANN * GAMMA_MINUS1 * (e * UnitVelocity_in_cm_per_s**2)\n \t\t\t#, yHI\n \t\t\t#, yH2I\n \t\t\t))\n fp_halo.flush()\n\n '''\n partinfo = np.concatenate([\n , posi_x.reshape(len(ID),1)\n , posi_y.reshape(len(ID),1)\n , posi_z.reshape(len(ID),1)],1)\n np.savetxt(fp_parID, partinfo)\n '''\n\n fp_parID.write(\"%3d %2d %5d\\n\" %(\n i\n , startAllGas[i]\n , endAllGas[i]\n ))\n fp_parID.flush()\n '''\n fp_test.write(\"%3d %2d %5d\\n\" %(\n i\n , iGas[startAllGas[i]]\n , iGas[startAllGas[i] + 1]\n , iGas[startAllGas[i] + 2]\n ))\n fp_test.flush()\n '''\n '''\n fp_parID = [ i\n , startAllGas[i]\n , endAllGas[i]]\n np.savetxt('/home/nakazatoyr/arepo-c/Analysis/ArepoPostProcessing-H/SIGO_IDinfo_'+res+'_'+vel+'_'+species+'.dat', fp_parID)\n '''\n print('saved halo'+ str(i) + '_IDcoordinations!')\n print(len(posi_x))\n print('from now, plotting task' + str(i) + '...')\n\n for j in range(len(posi_x)):\n plt.scatter(posi_x[j], posi_y[j], s=1, marker = 'o', c = 'black')\n plt.title('SIGO_plot_' +str(i)+ str(species) + '_z = ' + str(int(red)))\n plt.xlabel('x [kPc]')\n plt.ylabel('y [kPc]')\n plt.savefig('SIGO_plot_' +str(i)+ str(species) + '_z' + str(int(red)) +'.png')\n plt.close('all')\n print('plotting task '+ str(i) + 'done!')\n\n SIGO_call += 1\n\n '''\n for j in ID:\n fp_posi.write(\"3d %13.7f %5d %13.5e %13.5e %13.5e\\n\" % (\n snapnum\n , red\n , i #halo ID\n , pGas[j,0] #x\n , pGas[j,1] #y\n , pGas[j,2] #z\n ))\n plt.scatter(pGas[j:0],pGas[j:1], pGas[j:2], s=1, maker = 'o', c = 'black')\n plt.savefig('SIGO_plot_' + str(species) + '_z = ' + redshift +'.png')\n plt.close('all')\n print('plotting task done!')\n '''\n\nfp_halo.close()\nfp_parID.close()\nfp_test.close()\nfp_posi.close()\n","sub_path":"SIGO_partposi.py","file_name":"SIGO_partposi.py","file_ext":"py","file_size_in_byte":12920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"163331093","text":"import json\nimport os\nfrom pprint import pprint\n\nfrom pathlib import Path\nfrom bottle import Bottle, run, PasteServer, response, request, static_file\nfrom service import default_out, aoConfig\nfrom bakerman import BakingMan, BakingJob\nfrom util import 
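The `inEll` mask above decides ellipsoid membership after recentring on `cm` and rotating into the principal frame. The same test as a standalone sketch (periodic wrapping omitted for brevity):

```python
import numpy as np

def in_ellipsoid(points, cm, rotation, radii):
    rel = points - cm            # recentre on the ellipsoid
    rel = rel @ rotation.T       # rotate onto the principal axes
    return np.sum((rel / radii) ** 2, axis=1) <= 1.0

pts = np.array([[0.5, 0.0, 0.0], [3.0, 0.0, 0.0]])
print(in_ellipsoid(pts, np.zeros(3), np.eye(3), np.array([1.0, 2.0, 3.0])))
# -> [ True False]
```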
colorprint, prepareOutFilename, default_out_dir\nfrom remote import cachedir\n\napp = Bottle()\n\nbakingMan = BakingMan()\nbakingMan.start()\n\n\ndef extractPostParams(requestParam):\n jobSource = request.POST\n try:\n # check if json entry is available\n jsonSource = request.json\n if jsonSource is not None:\n jobSource = jsonSource\n except Exception as e:\n print(\"bakeDirect: json couldn't be parsed\")\n print(e)\n # print(jobSource)\n return jobSource\n\n\ndef staticFileWithCors(filename, root, **params):\n httpResponse = static_file(filename, root, **params)\n\n httpResponse.headers['Access-Control-Allow-Origin'] = '*'\n httpResponse.headers[\n 'Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'\n httpResponse.headers[\n 'Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'\n\n return httpResponse\n\n\ndef PARAMETER():\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers[\n 'Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'\n response.headers[\n 'Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'\n\n\ndef routeWithOptions(**kwargs):\n def decorator(callback):\n kwargs['callback'] = callback\n app.route(**kwargs)\n\n kwargs['method'] = 'OPTIONS'\n kwargs['callback'] = PARAMETER\n\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers[\n 'Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'\n response.headers[\n 'Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'\n app.route(**kwargs)\n return callback\n\n return decorator\n\n\n@app.hook('after_request')\ndef enable_cors():\n \"\"\"\n You need to add some headers to each request.\n Don't use the wildcard '*' for Access-Control-Allow-Origin in production.\n \"\"\"\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers[\n 'Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'\n response.headers[\n 'Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'\n\n\n@routeWithOptions(path='/bakeFile/', method=\"GET\")\ndef bakeFile(fileParam: str):\n global bakingMan\n\n jobParams = {\"file\": fileParam, \"resolution\": aoConfig[\"resolution\"]}\n # print(jobParams)\n jobId = bakingMan.addJob(jobParams)\n response.content_type = \"application/json\"\n return {\"jobId\": jobId}\n\n\n@routeWithOptions(path='/getFile/', method=\"GET\")\ndef getFile(filename):\n colorprint(\"getFile \" + filename, 33)\n return staticFileWithCors(filename, './out/', download=filename)\n\n\n@routeWithOptions(path='/removeResults/', method=\"GET\")\ndef removeResults():\n print(\"remove result files\")\n folder = default_out_dir\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path) and str(the_file).startswith(\"AO_\"):\n print(\" remove\", the_file)\n os.unlink(file_path)\n except Exception as e:\n print(e)\n\n print(\"remove cache files\")\n folder = cachedir\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n print(\" remove\", the_file)\n os.unlink(file_path)\n except Exception as e:\n print(e)\n\n\n@routeWithOptions(path=\"/bakeUrl/\", method=\"POST\")\ndef bakeUrl():\n global bakingMan\n # print(request)\n # print(request.POST)\n # print(request.POST.__dict__)\n # print(request.headers.__dict__)\n # print(request.method)\n\n jobSource = 
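The baking routes above return a `jobId` that clients then poll. A hypothetical client-side sketch — it assumes the server listens on localhost:8080 and that `/bakeFile/` and `/pullState/` declare a trailing wildcard segment, which appears stripped in this copy of the source:

```python
import requests  # assumed available; not part of the server code above

resp = requests.get("http://localhost:8080/bakeFile/model.obj")
job_id = resp.json()["jobId"]
state = requests.get("http://localhost:8080/pullState/%s" % job_id).json()
print(state)  # e.g. {"state": "undefined"} until the job is registered
```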
extractPostParams(request)\n\n urlParam = jobSource[\"url\"]\n # print(urlParam)\n\n resolutionParam = jobSource[\"resolution\"]\n resolutionValue = aoConfig[\"resolution\"]\n if resolutionParam is not None:\n resolutionValue = int(resolutionParam)\n\n args = {\"url\": urlParam, \"resolution\": resolutionValue}\n # print(args)\n\n jobId = bakingMan.addJob(args)\n response.content_type = \"application/json\"\n return {\"jobId\": jobId}\n\n\n@routeWithOptions(path=\"/bakeDirect/\", method=\"POST\")\ndef bakeDirect():\n global bakingMan\n # print(request)\n # print(request.POST)\n # print(request.POST.__dict__)\n # print(request.headers.__dict__)\n # print(request.method)\n\n jobSource = extractPostParams(request)\n\n igxcString = jobSource[\"igxcContent\"]\n # print(igxcString)\n if not igxcString or igxcString == \"null\":\n colorprint(\"No igxcContent found in POST request in bakeDirect/\", 31)\n return {\"error\": \"No igxcContent found in POST request in bakeDirect/\"}\n\n try:\n if isinstance(igxcString, str):\n igxcContent = json.loads(igxcString)\n else:\n igxcContent = igxcString\n except Exception as e:\n colorprint(\"Exception in bakeDirect/\", 31)\n print(e)\n return {\"error\": \"igxcContent couldn't be parsed\"}\n # print(igxcContent)\n\n basePath = jobSource[\"basePath\"]\n # print(basepath)\n\n resolutionValue = aoConfig[\"resolution\"]\n resolutionParam = jobSource[\"resolution\"]\n if resolutionParam is not None:\n resolutionValue = int(resolutionParam)\n\n args = {\n \"basePath\": basePath,\n \"igxcContent\": igxcContent,\n \"resolution\": resolutionValue\n }\n # print(args)\n\n jobId = bakingMan.addJob(args)\n response.content_type = \"application/json\"\n return {\"jobId\": jobId}\n\n\n@routeWithOptions(path='/pullState/', method=\"GET\")\ndef pullState(jobId: str):\n global bakingMan\n colorprint(\"pullState id {}\".format(jobId), 33)\n\n result = {\"state\": \"undefined\"}\n if bakingMan.hasJob(jobId):\n result = bakingMan.getJob(jobId)\n\n # print(result)\n jsonResult = json.dumps(result,\n sort_keys=True,\n indent=4,\n separators=(',', ': '))\n response.content_type = \"application/json\"\n return jsonResult\n\n\n@routeWithOptions(path='/pullAll/', method=\"GET\")\ndef pullAll():\n global bakingMan\n colorprint(\"pullAll\", 33)\n result = bakingMan.getAllJobs()\n # print(result)\n jsonResult = json.dumps(result,\n sort_keys=True,\n indent=4,\n separators=(',', ': '))\n response.content_type = \"application/json\"\n return jsonResult\n\n\n@routeWithOptions(path='/getImage/', method=\"GET\")\ndef getImage(jobId: str):\n global bakingMan\n absPath = os.path.join(os.path.abspath(\".\"), default_out_dir)\n print(absPath)\n if bakingMan.isJobFinished(jobId):\n job = bakingMan.getJob(jobId)\n fileName = job[\"jobArgs\"][\"out\"] + \".png\"\n return staticFileWithCors(fileName, absPath)\n\n\nserverConfig = {\"port\": 8080, \"host\": \"0.0.0.0\"}\n\ntry:\n with open(\"config.json\", \"r\") as f:\n configContent = json.load(f)\n if \"port\" in configContent:\n serverConfig[\"port\"] = configContent[\"port\"]\n if \"host\" in configContent:\n serverConfig[\"host\"] = configContent[\"host\"]\n if \"resolution\" in configContent:\n aoConfig[\"resolution\"] = configContent[\"resolution\"]\n print(serverConfig)\n print(aoConfig)\nexcept FileNotFoundError:\n print(\"Config file not found, using standard port\", serverConfig[\"port\"])\n\ntry:\n app.run(host=serverConfig[\"host\"],\n port=serverConfig[\"port\"],\n debug=True,\n server=PasteServer)\nexcept KeyboardInterrupt:\n 
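Startup above overrides `port`, `host`, and `resolution` from an optional `config.json`. A sketch that writes an example file matching those keys; the `resolution` value here is illustrative, not a default from the source:

```python
# Produce a config.json the server's startup code above would read.
import json

with open("config.json", "w") as f:
    json.dump({"port": 8080, "host": "0.0.0.0", "resolution": 512}, f, indent=4)
```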
pass\nfinally:\n bakingMan.stop()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":8049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"536250548","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport requests\n\n\n# In[2]:\n\n\nfrom bs4 import BeautifulSoup\n\n\n# In[177]:\n\n\nurl=\"https://www.theguardian.com/football/results\"\n\n\n# In[178]:\n\n\nr=requests.get(url)\n\n\n# In[179]:\n\n\nsoup=BeautifulSoup(r.content,\"html5lib\")\n\n\n# In[199]:\n\n\nalltables=soup.findAll(\"div\",{\"football-matches__day\"})\n\n\n# In[213]:\n\n\nallleagues1=[]\np=[]\nfor i in range(len(alltables)):\n pleaguetable=alltables[i].findAll('div',{\"class\":\"football-table__container\"})\n for j in range(len(pleaguetable)):\n leaguename=pleaguetable[j].findAll(\"caption\",{\"class\":\"table__caption table__caption--top\"})\n p.append(leaguename[0].findAll('a')[0].text)\n body=pleaguetable[j].findAll('tbody')\n for k in range(len(body)):\n rows=body[k].findAll('tr',{\"class\":\"football-match football-match--result\"})\n for l in range(len(rows)):\n s={}\n ##print(rows[l].findAll('span',{\"class\":\"team-name__long\"})[0].text)\n ##print(rows[l].findAll('span',{\"class\":\"team-name__long\"})[1].text)\n ##print(rows[l].findAll('div',{\"class\":\"football-team__score\"})[0].text)\n ##print(rows[l].findAll('div',{\"class\":\"football-team__score\"})[1].text)\n s[\"match\"]=rows[l].findAll('span',{\"class\":\"team-name__long\"})[0].text+\" \"+rows[l].findAll('div',{\"class\":\"football-team__score\"})[0].text+\"-\"+rows[l].findAll('div',{\"class\":\"football-team__score\"})[1].text+\" \"+rows[l].findAll('span',{\"class\":\"team-name__long\"})[1].text\n s['link']=rows[l].findAll('a')[0]['href']\n p.append(s)\n s={}\n allleagues1.append(p)\n p=[]\n\n##if __name__ == '__main__':\n## print(allleagues1)\n","sub_path":"resulttoday.py","file_name":"resulttoday.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"130777738","text":"# import necessary modules\nimport random\nimport discord\nimport config\nfrom discord.ext import commands\n\n# creates a bot instance with \"$\" as the command prefix\nbot = commands.Bot(\"$\")\nclient = discord.Client()\n\nTOKEN = config.TOKEN\n\nheader = {\"User-Agent\": \"Magic Browser\"}\nheart = ''\nstaff = {'Admin': 553314578528993311,\n 'Mod': 553356112636936203}\nmuted = 553358001550131208\ntesting = 553314472341536776\n\n# This is how you define a discord.py event\n@bot.event\nasync def on_ready(): # the event `on_ready` is triggered when the bot is ready to function\n print(f\"{format(bot.user.name)} is online.\")\n bot.load_extension(\"cogs.Utility\")\n bot.load_extension(\"cogs.Fun\")\n await bot.change_presence(activity=discord.Activity(name=\"Despacito\", type=discord.ActivityType.listening))\n channel = bot.get_channel(testing)\n await channel.send(\"Bot is online.\")\n\n\n@bot.event\nasync def on_command_error(ctx, e):\n if hasattr(ctx.command, 'on_error'):\n print(type(e), e)\n return\n\n e = getattr(e, 'original', e)\n\n ignored = (commands.CommandNotFound, commands.UserInputError)\n\n if isinstance(e, ignored):\n return\n\n elif isinstance(e, commands.CommandOnCooldown):\n return await ctx.send(f'Stop spamming! 
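The results-scraping record above walks the Guardian page by CSS class. A condensed sketch of the same traversal; the class names come straight from that record, but the live markup changes over time, so treat them as assumptions:

```python
import requests
from bs4 import BeautifulSoup

r = requests.get("https://www.theguardian.com/football/results")
soup = BeautifulSoup(r.content, "html5lib")
for row in soup.findAll("tr", {"class": "football-match football-match--result"}):
    teams = [s.text for s in row.findAll("span", {"class": "team-name__long"})]
    score = [d.text for d in row.findAll("div", {"class": "football-team__score"})]
    print("%s %s-%s %s" % (teams[0], score[0], score[1], teams[1]))
```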
Try again in {round(e.retry_after)+1} second(s).')\n elif isinstance(e, commands.CheckFailure):\n return await ctx.send(f\"I only respond to my mom.\")\n elif isinstance(e, commands.MissingPermissions):\n return await ctx.send(f\"You are missing these ({list.missing_perms}) to run this command.\")\n elif isinstance(e, commands.BotMissingPermissions):\n return await ctx.send(f\"I need these permissions ({list.missing_perms}) to run this command.\")\n elif isinstance(e, commands.DisabledCommand):\n return await ctx.send(f\"Command is disabled.\")\n elif isinstance(e, discord.HTTPException):\n return await ctx.send(f\"**Error:** {e}\")\n elif isinstance(e, commands.CommandError):\n return await ctx.send(f\"**Error:** {e}\")\n else:\n print(type(e), e)\n return\n # await asyncio.sleep(5)\n # await message.delete()\n # try: await ctx.message.delete()\n # except discord.HTTPException: pass\n\n\n@bot.event\nasync def on_message(message):\n if message.author.bot:\n return\n if random.randrange(4) == 0:\n if 'uwu' in message.content:\n await message.channel.send('uwu')\n if 'OwO' in message.content:\n await message.channel.send('whats this?')\n await bot.process_commands(message)\n\n\n@bot.event\nasync def on_member_join(member):\n channel = bot.get_channel(555216652589858816)\n message = 'Hello {}! Please enjoy your stay.'.format(member.mention)\n await channel.send(message)\n\n\n@bot.event\nasync def on_member_remove(member):\n channel = bot.get_channel(555216652589858816)\n message = 'Goodbye {}! We will miss you!'.format(member.mention)\n await channel.send(message)\n\ninitial_extensions = [\"cogs.Utility,cogs.Fun\"]\ncogs_dir = \"cogs\"\n\nif __name__ == '__cogs__':\n for extension in initial_extensions:\n try:\n bot.load_extension(extension)\n except Exception as error:\n print('{} cannot be loaded. [{}]'.format(extension, error))\n\n\n@bot.command()\n@commands.is_owner()\nasync def unload(ctx, extension_name: str):\n try:\n extension_dir = f\"{cogs_dir}.{extension_name}\"\n bot.unload_extension(extension_dir)\n await ctx.send(\"{} unloaded.\".format(extension_name))\n except Exception as error:\n await ctx.send('{} cannot be unloaded. [{}]'.format(extension_name, error))\n print('{} cannot be unloaded. [{}]'.format(extension_name, error))\n\n\n@bot.command()\n@commands.is_owner()\nasync def load(ctx, extension_name: str):\n try:\n extension_dir = f\"{cogs_dir}.{extension_name}\"\n bot.load_extension(extension_dir)\n await ctx.send(\"{} loaded.\".format(extension_name))\n except Exception as error:\n await ctx.send('{} cannot be loaded. [{}]'.format(extension_name, error))\n print('{} cannot be loaded. [{}]'.format(extension_name, error))\n\n\n@bot.command()\n@commands.is_owner()\nasync def update(ctx, extension_name: str):\n try:\n extension_dir = f\"{cogs_dir}.{extension_name}\"\n bot.reload_extension(extension_dir)\n await ctx.send(\"{} updated.\".format(extension_name))\n except Exception as error:\n await ctx.send('{} cannot be updated. [{}]'.format(extension_name, error))\n print('{} cannot be updated. 
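Two spots in the bot record above look broken: the permission branches format `list.missing_perms` (an `AttributeError` at runtime — the raised exception `e` carries `missing_perms` in discord.py 1.x), and the cog bootstrap guards on `__name__ == '__cogs__'`, which never matches, so `initial_extensions` is never loaded there. A sketch of the presumably intended permission message:

```python
from discord.ext import commands

async def describe_missing_perms(ctx, e):
    # e is the commands.MissingPermissions error raised for ctx's command;
    # its missing_perms attribute (discord.py 1.x name) lists the gaps.
    missing = ", ".join(e.missing_perms)
    await ctx.send(f"You are missing these ({missing}) to run this command.")
```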
[{}]'.format(extension_name, error))\n\n\n# starts the bot with the corresponding token\nbot.run(TOKEN)\n","sub_path":"marlow_bot.py","file_name":"marlow_bot.py","file_ext":"py","file_size_in_byte":4658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"347354295","text":"import os\nfrom binascii import hexlify, unhexlify\n\nfrom pytest import mark, fixture, skip\n\nfrom cose import CoseMessage\nfrom cose.attributes.algorithms import CoseAlgorithms\nfrom cose.attributes.headers import CoseHeaderKeys\nfrom cose.messages.enc0message import Enc0Message\nfrom cose.keys.cosekey import KeyOps, CoseKey\nfrom cose.keys.symmetric import SymmetricKey\nfrom tests.conftest import generic_test_setup, extract_phdr, extract_uhdr, extract_alg, extract_nonce, create_cose_key\n\n\n@fixture\ndef setup_encrypt0_tests(encrypt0_test_input: dict) -> tuple:\n return generic_test_setup(encrypt0_test_input)\n\n\n@mark.encoding\ndef test_encrypt0_encoding(setup_encrypt0_tests: tuple) -> None:\n _, test_input, test_output, test_intermediate, fail = setup_encrypt0_tests\n alg = extract_alg(test_input['encrypted'])\n nonce = extract_nonce(test_input, 0)\n\n # initialize a COSE_Encrypt0 message\n enc0 = Enc0Message(\n phdr=extract_phdr(test_input, 'encrypted'),\n uhdr=extract_uhdr(test_input, 'encrypted'),\n payload=test_input['plaintext'].encode('utf-8'),\n external_aad=unhexlify(test_input['encrypted'].get(\"external\", b'')))\n\n # set up key data and verify CEK\n key = create_cose_key(SymmetricKey, test_input[\"encrypted\"][\"recipients\"][0][\"key\"])\n\n # verify internal _enc_structure\n assert enc0._enc_structure == unhexlify(test_intermediate['AAD_hex'])\n\n # verify encoding (with automatic encryption)\n if fail:\n assert enc0.encode(alg=alg, nonce=nonce, key=key) != unhexlify(test_output)\n else:\n assert enc0.encode(alg=alg, nonce=nonce, key=key) == unhexlify(test_output)\n\n\n@mark.decoding\ndef test_encrypt0_decoding(setup_encrypt0_tests: tuple) -> None:\n _, test_input, test_output, test_intermediate, fail = setup_encrypt0_tests\n alg = extract_alg(test_input['encrypted'])\n nonce = extract_nonce(test_input, 0)\n\n if fail:\n skip(\"invalid test input\")\n\n # parse initial message\n msg: Enc0Message = CoseMessage.decode(unhexlify(test_output))\n\n # verify parsed (un)protected header\n assert msg.phdr == extract_phdr(test_input, 'encrypted')\n assert msg.uhdr == extract_uhdr(test_input, 'encrypted')\n\n # prepare and verify pre-shared key\n key = create_cose_key(\n SymmetricKey,\n test_input[\"encrypted\"][\"recipients\"][0][\"key\"],\n alg=alg,\n usage=KeyOps.DECRYPT)\n\n msg.key = key\n assert msg.key.k == unhexlify(test_intermediate['CEK_hex'])\n\n # look for external data and verify internal enc_structure\n msg.external_aad = unhexlify(test_input['encrypted'].get('external', b''))\n assert msg._enc_structure == unhexlify(test_intermediate['AAD_hex'])\n\n # verify decryption\n assert msg.decrypt(nonce=nonce, key=key) == test_input['plaintext'].encode('utf-8')\n\n # re-encode and verify we are back where we started\n assert msg.encode(encrypt=False, key=key, nonce=nonce) == unhexlify(test_output)\n\n\n@mark.parametrize(\"phdr, uhdr, payload, key\",\n [\n ({CoseHeaderKeys.ALG: CoseAlgorithms.A128GCM},\n {CoseHeaderKeys.IV: unhexlify(b'89F52F65A1C580933B5261A72F')},\n b'',\n SymmetricKey(kid=b'you_know', k=os.urandom(16), alg=CoseAlgorithms.A128GCM)),\n ({CoseHeaderKeys.ALG: CoseAlgorithms.A192GCM},\n {CoseHeaderKeys.IV: 
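The tests above exercise `Enc0Message` against fixed vectors; a free-standing round trip using the same API surface is sketched below. Method and constructor names follow this particular pre-1.0 `cose` version (they changed in later releases), and the key material here is random rather than taken from the vectors:

```python
import os
from cose import CoseMessage
from cose.attributes.algorithms import CoseAlgorithms
from cose.attributes.headers import CoseHeaderKeys
from cose.messages.enc0message import Enc0Message
from cose.keys.symmetric import SymmetricKey

nonce = os.urandom(12)
key = SymmetricKey(kid=b"demo", k=os.urandom(16), alg=CoseAlgorithms.A128GCM)
msg = Enc0Message(phdr={CoseHeaderKeys.ALG: CoseAlgorithms.A128GCM}, payload=b"hi")
encoded = msg.encode(key=key, nonce=nonce)      # encrypts and CBOR-encodes
decoded = CoseMessage.decode(encoded)
print(decoded.decrypt(key=key, nonce=nonce))    # -> b'hi'
```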
unhexlify(b'89F52F65A1C580933B5261A72F')},\n os.urandom(50),\n SymmetricKey(kid=b'you_know', k=os.urandom(16), alg=CoseAlgorithms.A192GCM)),\n ({CoseHeaderKeys.ALG: CoseAlgorithms.A256GCM},\n {CoseHeaderKeys.IV: unhexlify(b'89F52F65A1C580933B5261A72F')},\n os.urandom(100),\n SymmetricKey(kid=b'you_know', k=os.urandom(16), alg=CoseAlgorithms.A256GCM))\n ], ids=['test_encode_decode_1', 'test_encode_decode_2', 'test_encode_decode_3'])\ndef test_encode_decode_encrypt0(phdr, uhdr, payload, key):\n # create and encode a message\n original: Enc0Message = Enc0Message(phdr, uhdr, payload)\n encoded = original.encode(key=key, nonce=original.uhdr[CoseHeaderKeys.IV])\n\n # decode the message\n decoded: Enc0Message = CoseMessage.decode(encoded)\n\n # verify the different parts\n assert type(decoded) == Enc0Message\n assert original.encrypt(key=key, nonce=original.uhdr[CoseHeaderKeys.IV]) == decoded.payload\n assert decoded.phdr == phdr\n assert decoded.uhdr == uhdr\n\n # set the key and decode the message\n key.key_ops = KeyOps.DECRYPT\n assert decoded.decrypt(key=key, nonce=original.uhdr[CoseHeaderKeys.IV]) == payload\n\n\n@mark.parametrize(\"phdr, uhdr, alg, key1, key2, nonce, expected\",\n [\n ({CoseHeaderKeys.ALG: CoseAlgorithms.AES_CCM_16_64_128},\n {CoseHeaderKeys.IV: unhexlify(b'89F52F65A1C580933B5261A72F')},\n None,\n SymmetricKey(\n kid=b'our-secret',\n alg=CoseAlgorithms.AES_CCM_16_64_128,\n key_ops=KeyOps.ENCRYPT,\n k=CoseKey.base64decode(\"hJtXIZ2uSN5kbQfbtTNWbg\")),\n None,\n unhexlify(\"89F52F65A1C580933B5261A72F\"),\n b'6899DA0A132BD2D2B9B10915743EE1F7B92A4680E7C51BDBC1B320EA',),\n ({CoseHeaderKeys.ALG: CoseAlgorithms.AES_CCM_16_64_128},\n {},\n CoseAlgorithms.AES_CCM_16_64_128,\n SymmetricKey(\n kid=b'our-secret',\n key_ops=KeyOps.ENCRYPT,\n k=CoseKey.base64decode(\"hJtXIZ2uSN5kbQfbtTNWbg\")),\n None,\n unhexlify(b'89F52F65A1C580933B5261A72F'),\n b'6899DA0A132BD2D2B9B10915743EE1F7B92A4680E7C51BDBC1B320EA',),\n ], ids=['standalone_encryption_1', 'standalone_encryption_2']\n )\ndef test_encrypt0_standalone_encryption(phdr, uhdr, alg, key1, key2, nonce, expected):\n m = Enc0Message(phdr, uhdr, b'This is the content.')\n m.payload = m.encrypt(nonce=nonce, key=key1, alg=alg)\n\n key = key1 if key1 is not None else key2\n\n assert m._enc_structure == unhexlify(b\"8368456E63727970743043A1010A40\")\n assert key.k == unhexlify(b\"849B57219DAE48DE646D07DBB533566E\")\n assert hexlify(m.payload).upper() == expected\n","sub_path":"tests/test_enc0message.py","file_name":"test_enc0message.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"383978547","text":"# -*- coding:utf-8 -*-\nimport logging\n\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom shrecsys.util.fileSystemUtil import FileSystemUtil\n\nlogging.getLogger().setLevel(logging.INFO)\nfstool = FileSystemUtil()\ndef load_sen2vec_embedding(SEN2VEC, view_videos_index):\n input = open(SEN2VEC, \"r\")\n line = input.readline().strip()\n videos_index = dict()\n videos_embedding = []\n index = 0\n index_ = 0\n while line:\n points = line.split(' ')\n rawid = points[0]\n vid, site = rawid.split('#')\n id = vid + site\n if id in view_videos_index.keys():\n videos_index[id] = index\n embedding = points[1:]\n videos_embedding.append(embedding)\n index += 1\n line = input.readline().strip()\n index_ += 1\n if index_ % 100000 == 0:\n logging.info(\"build sen2vec embedding, index: {}\".format(index_))\n 
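`load_sen2vec_embedding` above expects one video per line: a `<vid>#<site>` identifier followed by the vector components. A toy line parsed the same way (values invented):

```python
line = "12345#youku 0.12 -0.30 0.88"
points = line.split(" ")
vid, site = points[0].split("#")
key = vid + site                         # lookup key, exactly as above
embedding = [float(x) for x in points[1:]]
print(key, embedding)                    # -> 12345youku [0.12, -0.3, 0.88]
```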
logging.info(\"generate embedding of sen2vec success! embedding size: {}\".format(len(videos_index)))\n return videos_embedding, videos_index\n\ndef add(x,y):\n return x+y\ndef build_users_embedding_np(videos_embedding, videos_index, view_seqs, with_userid=True):\n users_embedding = []\n users_index = dict()\n index_embed = dict()\n view_seqs_ = []\n logindex = 0\n if with_userid:\n index = 0\n for i, view_seq in enumerate(view_seqs):\n view_seq_ = \"\"\n users_index[view_seq[0]] = i\n for video in view_seq[1:]:\n if video in videos_index.keys():\n view_seq_ = view_seq_ + ' ' + video\n if len(view_seq_) > 0:\n view_seqs_.append(view_seq_)\n index_embed[index] = view_seq[0]\n index += 1\n logindex += 1\n if logindex % 10000 == 0:\n logging.info(\"filter view sequence, index: {}\".format(logindex))\n else:\n index = 0\n for i, view_seq in enumerate(view_seqs):\n view_seq_ = \"\"\n for video in view_seq:\n if video in videos_index.keys():\n view_seq_ = view_seq_ + ' ' + video\n if len(view_seq_) > 0:\n view_seqs_.append(view_seq_)\n index_embed[index] = i\n index += 1\n logindex += 1\n if logindex % 10000 == 0:\n logging.info(\"filter view sequence, index: {}\".format(logindex))\n logging.info(\"filter view sequence success, users size:{} from:{}\".format(len(view_seqs_), len(view_seqs)))\n vectorizer = CountVectorizer(min_df=0, token_pattern='\\w+')\n cropus_x = vectorizer.fit_transform(view_seqs_)\n videos = vectorizer.get_feature_names()\n tfidf_transformer = TfidfTransformer()\n videos_embedding_new = []\n for video in videos:\n videos_embedding_new.append(videos_embedding[videos_index[video]])\n logging.info(\"rebuild videos embedding success!\")\n Tfidf = tfidf_transformer.fit_transform(cropus_x)\n for i in range(len(view_seqs_)):\n tfidf = np.asarray(Tfidf.getrow(i).todense())[0]\n videos_embedding_array = np.array(videos_embedding_new)\n indices = np.where(tfidf > 0)\n rating = np.take(tfidf, indices)[0]\n videos_embed = np.take(videos_embedding_array, indices, axis=0)[0]\n user_embedding = np.divide(np.matmul(rating, np.float64(videos_embed)), sum(rating))\n users_embedding.append(user_embedding)\n if i % 10000 == 0:\n logging.info(\"build users embedding, users index:{}\".format(i))\n return users_embedding, users_index, index_embed\n\nif __name__==\"__main__\":\n videos_embedding = [[-4, 3, 2, 5, 7],\n [-2, 3, 6, 1, 9],\n [-1, 2, 2, 1, 3],\n [0, 3, 2, 7, 6],\n [-8, 3, 4, 2, 2],\n [-3, 1, 2, 3, 6]]\n videos_index = {\"3124\":0, \"4987\":1, \"6312\":2, \"3456\":3, \"7320\":4, \"2931\":5}\n view_seqs = [[\"3456\", \"4987\", \"6312\", \"2345\", \"2134\"],\n [\"3413\", \"3441\", \"1234\", \"1423\"],\n [\"2134\", \"7320\", \"3412\", \"6312\", \"2931\"]]\n #build_users_embedding_np(videos_embedding, videos_index, view_seqs, with_userid=False)\n print(build_users_embedding_np(videos_embedding, videos_index, view_seqs, with_userid=False))","sub_path":"shrecsys/preprocessing/preKmeans.py","file_name":"preKmeans.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"368681166","text":"import json\nfrom flask import Response\nfrom pottery.models.products import Product\nJSON_MIME_TYPE='application/json'\n\n\ndef search_prod(prods,product_id):\n for p in prods:\n if p['id']==product_id:\n print(p)\n return p\n\n\ndef to_dict(obj):\n return 
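The user-embedding loop above densifies one TF-IDF row at a time and takes the TF-IDF-weighted mean of the matching video vectors. The core arithmetic in isolation (sketch, random embeddings):

```python
import numpy as np

tfidf = np.array([0.0, 0.5, 0.0, 0.25])   # one densified TF-IDF row
video_emb = np.random.rand(4, 8)           # vocabulary-by-dimension matrix
idx = np.where(tfidf > 0)[0]               # videos this user actually watched
user_vec = tfidf[idx] @ video_emb[idx] / tfidf[idx].sum()
print(user_vec.shape)                      # -> (8,)
```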
obj.__dict__\n","sub_path":"build/lib/pottery/resources/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"232400616","text":"import RPi.GPIO as GPIO\nimport time\n\ns2 = 25\ns3 = 26\nsignal = 27\nNUM_CYCLES = 10\n\n\ndef setup():\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(signal,GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.setup(s2,GPIO.OUT)\n GPIO.setup(s3,GPIO.OUT)\n print(\"GPIO setting over.\\n\")\n\n\ndef loop():\n temp = 1\n while(1):\n print(\"[do loop]\\n\")\n GPIO.output(s2,GPIO.LOW)\n GPIO.output(s3,GPIO.LOW)\n time.sleep(0.3)\n print(\"[edge detection(red) start]\\n\")\n start = time.time()\n for impulse_count in range(NUM_CYCLES):\n GPIO.wait_for_edge(signal, GPIO.FALLING)\n duration = time.time() - start \n red = NUM_CYCLES / duration\n print(\"[red detection over]\\n\")\n\n GPIO.output(s2,GPIO.LOW)\n GPIO.output(s3,GPIO.HIGH)\n time.sleep(0.3)\n print(\"[edge detection(blue) start]\\n\")\n start = time.time()\n for impulse_count in range(NUM_CYCLES):\n GPIO.wait_for_edge(signal, GPIO.FALLING)\n duration = time.time() - start\n blue = NUM_CYCLES / duration\n print(\"[blue detection over]\\n\")\n \n GPIO.output(s2,GPIO.HIGH)\n GPIO.output(s3,GPIO.HIGH)\n time.sleep(0.3)\n print(\"[edge detection(green) start]\\n\")\n start = time.time()\n for impulse_count in range(NUM_CYCLES):\n GPIO.wait_for_edge(signal, GPIO.FALLING)\n duration = time.time() - start\n green = NUM_CYCLES / duration\n print(\"[green detection over]\\n\")\n\n\n if green<7000 and blue<7000 and red>12000:\n print(\"- result = red\\n\")\n temp=1\n elif red<12000 and blue<12000 and green>12000:\n print(\"- result = green\\n\")\n temp=1\n elif green<7000 and red<7000 and blue>12000:\n print(\"- result = blue!\\n\")\n temp=1\n elif red>10000 and green>10000 and blue>10000 and temp==1:\n print(\"- place the object.....\\n\")\n temp=0\n\ndef endprogram():\n GPIO.cleanup()\n\n\nif __name__=='__main__':\n \n setup()\n\n try:\n loop()\n\n except KeyboardInterrupt:\n endprogram()\n","sub_path":"sensor/RPi/sensor_rpi.py","file_name":"sensor_rpi.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"357321131","text":"#! 
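The colour-sensor record above (the S2/S3 filter-select pinout suggests a TCS3200-style chip, though the source does not say) estimates output frequency as pulses over elapsed time and compares it against per-colour thresholds. That arithmetic on its own, with a made-up reading:

```python
NUM_CYCLES = 10          # falling edges counted per measurement
elapsed = 0.0008         # seconds taken for those edges (illustrative)
frequency = NUM_CYCLES / elapsed
print(frequency)         # -> 12500.0, above the 12000 "red" threshold above
```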
python3\r\n# Tester for German learners (from English)\r\n# Format of the info in the database:\r\n# {'german': ['english', 'class', 'theme', 'additional info']}\r\n\r\nimport shelve, re, random, time\r\n\r\n# ------------------------------------------------------------------------------\r\n# Auxiliar functions -----------------------------------------------------------\r\n# ------------------------------------------------------------------------------\r\n\r\ndef get_noun():\r\n# Function that controls the proper input of a noun\r\n while 1:\r\n print('German words are cap sensitive.')\r\n print('Noun requires its article and a starting capital character.')\r\n g_noun = input()\r\n validate = noun_regex.fullmatch(g_noun)\r\n if validate != None:\r\n break\r\n return g_noun\r\n\r\ndef get_verb():\r\n# Function that controls the proper input of a verb\r\n while 1:\r\n print('German words are cap sensitive (please do not use capital letters).')\r\n print('Please write the verb in infinitive.')\r\n g_verb = input()\r\n if g_verb.islower() and g_verb.isalpha() and g_verb[-1]=='n': # Verbs in german always finish in n\r\n break\r\n return g_verb\r\n\r\ndef get_rest():\r\n while 1:\r\n print('German words are cap sensitive (please do not use capital letters).')\r\n print('Please write the desired word.')\r\n g_rest = input()\r\n if g_rest.islower() and g_rest.isalpha():\r\n break\r\n return g_rest\r\n\r\ndef print_info(i_german, i_english, i_class, i_theme, extra):\r\n# Print the information of a word\r\n print(' German: ' + i_german)\r\n print(' English: ' + i_english)\r\n print(' Class: ' + i_class)\r\n print(' Theme: ' + i_theme)\r\n print('Extra info: ' + extra)\r\n\r\ndef print_list(f, l, c1, c2, c3):\r\n list_file = open(f, 'a')\r\n for key in l:\r\n # Print the line\r\n list_file.write(key.ljust(c1) + ' | ' + db[key][0].rjust(c2) +\r\n ' | '+db[key][2].rjust(c3))\r\n if db[key][3] != '':\r\n list_file.write(' (' + db[key][3] + ')')\r\n list_file.write('\\n')\r\n list_file.close()\r\n\r\ndef choose_class_test():\r\n print('''Chose the set of words you want to use:\r\n 1.- All the words.\r\n 2.- Nouns.\r\n 3.- Verbs.\r\n 4.- Adjectives.\r\n 5.- Others.''')\r\n test_list = []\r\n while 1:\r\n option = input()\r\n if option == '1':\r\n noun_flag = True\r\n test_list = list(db.keys())\r\n break\r\n elif option == '2':\r\n noun_flag = True\r\n for key in db:\r\n if db[key][1] == 'noun':\r\n test_list.append(key)\r\n break\r\n elif option == '3':\r\n noun_flag = False\r\n for key in db:\r\n if db[key][1] == 'verb':\r\n test_list.append(key)\r\n break\r\n elif option == '4':\r\n noun_flag = False\r\n for key in db:\r\n if db[key][1] == 'adjective':\r\n test_list.append(key)\r\n break\r\n elif option == '5':\r\n noun_flag = False\r\n for key in db:\r\n if db[key][1] == 'other':\r\n test_list.append(key)\r\n break\r\n else:\r\n print('Option not valid.')\r\n return (noun_flag, test_list)\r\n\r\ndef choose_theme(input_list):\r\n theme_list = []\r\n for key in input_list:\r\n if db[key][2] not in theme_list:\r\n theme_list.append(db[key][2])\r\n theme_list.sort()\r\n print('These are the available themes for this test:')\r\n for i in range(len(theme_list)):\r\n print(' ' + str(i+1) + '.- ' + theme_list[i])\r\n while 1:\r\n print('Choose one or press ENTER to go with all.')\r\n theme = input()\r\n if theme == '':\r\n return input_list\r\n elif theme.isdecimal():\r\n if (int(theme) - 1) in range(len(theme_list)):\r\n theme = theme_list[int(theme) - 1]\r\n break\r\n output_list = []\r\n for key in 
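`get_noun` above validates input against `noun_regex`, which is compiled near the bottom of the record. A simplified equivalent of that pattern for a quick check (the record's version uses `re.VERBOSE` with the same alternation and character classes):

```python
import re

# Simplified form of the record's pattern: article, space, capitalised noun.
noun_regex = re.compile(r"(der|die|das) [A-ZÄÜÖ][a-zäüöß]*")
print(noun_regex.fullmatch("der Hund") is not None)  # -> True
print(noun_regex.fullmatch("Hund") is not None)      # -> False: article missing
```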
input_list:\r\n if db[key][2] == theme:\r\n output_list.append(key)\r\n return output_list\r\n\r\ndef print_results(total, right, duration):\r\n hours = duration // 3600\r\n minutes = (duration // 60) - hours * 60\r\n seconds = duration - minutes * 60 - hours * 3600\r\n # Print the results of the test\r\n try:\r\n print('These are the results of the test:')\r\n print(' Questions: {0:d}'.format(total))\r\n print(' Correct answers: {0:d}'.format(right))\r\n print(' Percentage: {0:.2f}%'.format(100*right/total))\r\n print(' Total time: {0:.0f}h {1:.0f}m {2:.2f}s'.format(hours, minutes, seconds))\r\n print(' Time per question: {0:.2f}s'.format(duration/total))\r\n except ZeroDivisionError:\r\n print('No more results avaliable')\r\n\r\n# ------------------------------------------------------------------------------\r\n# Tests ------------------------------------------------------------------------\r\n# ------------------------------------------------------------------------------\r\n\r\ndef e2g_test():\r\n (noun_flag, test_list) = choose_class_test()\r\n # Ask for the requirement of the article\r\n article_flag = False\r\n if noun_flag:\r\n print('''Would you like to include the article too?\r\n(article + noun OR only noun) [Y/N]''')\r\n article_option = ''\r\n while 1:\r\n article_option = input()\r\n article_option = article_option.lower()\r\n if article_option == 'y':\r\n break\r\n elif article_option == 'n':\r\n article_flag = True\r\n break\r\n # Check if there are words before continuing\r\n if len(test_list) > 0:\r\n test_list = choose_theme(test_list)\r\n print('''German words are cap sensitive.\r\n(Remember the capital letter for the nouns)''')\r\n total = 0\r\n right = 0\r\n # Start of the test\r\n start = time.time()\r\n while 1:\r\n ans = '1'\r\n key = random.choice(test_list)\r\n # Check is there is any extra info and print it\r\n if db[key][3] == '':\r\n print(db[key][0])\r\n else:\r\n print(db[key][0] + ' (' + db[key][3] + ')') \r\n while all(x.isalpha() or x.isspace() for x in ans) == False:\r\n ans = input()\r\n # Check the requirement of the article\r\n sol = key\r\n if article_flag:\r\n if db[key][1] == 'noun':\r\n sol = key[4:]\r\n # Check solution\r\n if ans == 'e':\r\n break\r\n elif ans == sol:\r\n right += 1\r\n print('RIGHT answer')\r\n else:\r\n print('WRONG answer')\r\n print('The right answer is: ' + sol)\r\n total += 1\r\n end = time.time()\r\n duration = end - start\r\n print_results(total, right, duration)\r\n else:\r\n print('There are no words in the database that match the requirements.')\r\n\r\ndef g2e_test():\r\n (noun_flag, test_list) = choose_class_test()\r\n # Check if there are words before continuing\r\n if len(test_list) > 0:\r\n test_list = choose_theme(test_list)\r\n total = 0\r\n right = 0\r\n # Start of the test\r\n start = time.time()\r\n while 1:\r\n ans = '1'\r\n key = random.choice(test_list)\r\n print(key)\r\n while (ans.isalpha() == False):\r\n ans = input()\r\n ans = ans.lower()\r\n # Check solution\r\n if ans == 'e':\r\n break\r\n elif ans == db[key][0]:\r\n right += 1\r\n print('RIGHT answer')\r\n else:\r\n print('WRONG answer')\r\n print('The right answer is: ' + db[key][0])\r\n total += 1\r\n end = time.time()\r\n duration = end - start\r\n print_results(total, right, duration)\r\n else:\r\n print('There are no words in the database that match the requirements.')\r\n\r\ndef article_test():\r\n print('''Enter the following number for each article:\r\n 1.- Der 2.- Die 3.- Das\r\n Type \\'e\\' to finish the test.''')\r\n test_list = []\r\n 
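`print_results` above unpacks a duration into hours/minutes/seconds with floor division; `divmod` expresses the same breakdown more directly (sketch):

```python
duration = 3725.5                      # seconds
minutes, seconds = divmod(duration, 60)
hours, minutes = divmod(minutes, 60)
print("%.0fh %.0fm %.2fs" % (hours, minutes, seconds))  # -> 1h 2m 5.50s
```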
art_list = ['der', 'die', 'das']\r\n for key in db:\r\n if db[key][1] == 'noun':\r\n test_list.append(key)\r\n # Check if there are words before continuing\r\n if len(test_list) > 0:\r\n test_list = choose_theme(test_list)\r\n total = 0\r\n right = 0\r\n # Start of the test\r\n start = time.time()\r\n while 1:\r\n ans = ''\r\n key = random.choice(test_list)\r\n print(key[4:])\r\n while ans != '1' and ans != '2' and ans != '3' and ans != 'e':\r\n ans = input()\r\n if ans == 'e':\r\n break\r\n elif art_list[int(ans)-1] == key[0:3]:\r\n right += 1\r\n print('RIGHT answer')\r\n else:\r\n print('WRONG answer')\r\n print('The right answer is: ' + key[0:3])\r\n total += 1\r\n end = time.time()\r\n duration = end - start\r\n print_results(total, right, duration)\r\n else:\r\n print('There are no nouns in the database.')\r\n \r\n# ------------------------------------------------------------------------------\r\n# Main command functions -------------------------------------------------------\r\n# ------------------------------------------------------------------------------\r\n\r\ndef print_help():\r\n print('''These are the commands available:\r\n \\'add\\': allows to add a word to the database.\r\n \\'delete\\': allows to delete a word from the database.\r\n \\'clear\\': deletes all the words on the database.\r\n \\'list\\': creates a list with the words in the database\r\n and stores them on a .txt file.\r\n \\'info\\': shows the information of a word.\r\n \\'test\\': take a test to practice.\r\n \\'exit\\': exits the program.''')\r\n\r\ndef add():\r\n# Function that allows to add new words (also can overwrite the previous ones)\r\n # Choose class\r\n global classes\r\n while 1:\r\n print('What is the class of the word you want to add?')\r\n print(''' 1.- Noun.\r\n 2.- Verb.\r\n 3.- Adjective.\r\n 4.- Other.''')\r\n vclass = input()\r\n if vclass == '1':\r\n vclass = 'noun'\r\n break\r\n elif vclass == '2':\r\n vclass = 'verb'\r\n break\r\n elif vclass == '3':\r\n vclass = 'adjective'\r\n break\r\n elif vclass == '4':\r\n vclass = 'other'\r\n break\r\n # Enter word in german\r\n print('Please introduce the german word.')\r\n if vclass == 'noun':\r\n german = get_noun()\r\n elif vclass == 'verb':\r\n german = get_verb()\r\n else:\r\n german = get_rest()\r\n # Enter word in english\r\n while 1:\r\n print('Please enter the word in English.')\r\n if vclass == 'verb':\r\n print('The verb does no require beginning with \\'to \\'.')\r\n english = input()\r\n english = english.lower()\r\n if english.isalpha():\r\n break\r\n # Enter theme\r\n theme_list = []\r\n for key in db:\r\n if db[key][2] not in theme_list:\r\n theme_list.append(db[key][2])\r\n theme_list.sort()\r\n while 1:\r\n print('This is the current list of existing themes:')\r\n for i in range(len(theme_list)):\r\n print(' ' + str(i+1) + '.- ' + theme_list[i])\r\n print('''Choose a theme from the previous list or write a new one.\r\n(The default option is general)''')\r\n while 1:\r\n theme = input()\r\n if theme == '':\r\n theme = 'general'\r\n break\r\n elif theme.isdecimal():\r\n if (int(theme) - 1) in range(len(theme_list)):\r\n theme = theme_list[int(theme) - 1]\r\n break\r\n elif all(x.isalpha() or x.isspace() for x in theme):\r\n theme = theme.lower()\r\n break\r\n print('Option not valid')\r\n add_theme = ''\r\n print('Do you want the word \\'' + german + '\\' to be included in the theme \\'' + theme + '\\'? 
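`article_test` above relies on the storage convention that a noun key is `'<article> <Noun>'`, so `key[0:3]` always carries the article. The core comparison in isolation:

```python
art_list = ["der", "die", "das"]
key = "die Katze"                          # stored key: article + noun
ans = "2"                                  # user chose option 2 ("Die")
print(art_list[int(ans) - 1] == key[0:3])  # -> True
```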
[Y]:')\r\n add_theme = input()\r\n add_theme = add_theme.lower()\r\n if add_theme == 'y':\r\n break\r\n # Add extra information to help during the tests\r\n print('Would you like to add extra information?')\r\n print('''(This info will be used in the English to German tests\r\nto differenciate between different translations of the same word)''')\r\n print('If you do not want it, just press ENTER.')\r\n extra_info = input()\r\n # Confirm and save the new word\r\n add_var = ''\r\n while add_var != 'y' and add_var != 'n':\r\n print('Do you want to save the word with the following content? [Y/N]:')\r\n print_info(german, english, vclass, theme, extra_info)\r\n add_var = input()\r\n add_var = add_var.lower()\r\n if add_var == 'y':\r\n db[german] = [english, vclass, theme, extra_info]\r\n print('The new word was successfully added.')\r\n else:\r\n print('The new word was not added.')\r\n\r\ndef delete():\r\n# Function that allows to delete words\r\n while 1:\r\n print('Please enter the german word you want to delete from the list.')\r\n print('(This input is cap sensitive)')\r\n print('Type \\'EXIT\\' to exit this section')\r\n word = input()\r\n if word == 'EXIT':\r\n break\r\n elif word in db:\r\n # Confirm the word you want to delete\r\n print('Are you sure you want to delete \\'' + word + '\\'?')\r\n print('(Confirm by writing the word again)')\r\n word2 = input()\r\n if word == word2:\r\n del db[word]\r\n print('The word was successfully deleted.')\r\n break\r\n else:\r\n print('Confirmation word was wrong.')\r\n else:\r\n print('This word is not in the list.')\r\n\r\ndef clear():\r\n# Deletes the entire list of words\r\n confirmation = 'I am sure.'\r\n print('Are you sure you want to clear all the words on the database?')\r\n print('Confirm this by printing \\'' + confirmation +'\\'')\r\n print('(This input is cap sensitive)')\r\n conf_input = input()\r\n if conf_input == confirmation:\r\n for key in db:\r\n del db[key]\r\n print('Database succesfully cleared.')\r\n else:\r\n print('Database not cleared.')\r\n \r\ndef make_list():\r\n# Creates a list of the words on the database\r\n # Initialize the variables\r\n noun_list = []\r\n verb_list = []\r\n adj_list = []\r\n oth_list = []\r\n max_len_german = 0\r\n max_len_english = 0\r\n max_len_theme = 0\r\n file_name = 'german_words_list.txt'\r\n print('Creating list...')\r\n # Get the words\r\n wlist = list(db.keys())\r\n for key in wlist:\r\n # Get the longest word in each column\r\n if len(key) > max_len_german:\r\n max_len_german = len(key)\r\n if len(db[key][0]) > max_len_english:\r\n max_len_english = len(db[key][0])\r\n if len(db[key][2]) > max_len_theme:\r\n max_len_theme = len(db[key][2])\r\n # Agroup the words according to their class\r\n if db[key][1] == 'noun':\r\n noun_list.append(key)\r\n elif db[key][1] == 'verb':\r\n verb_list.append(key)\r\n elif db[key][1] == 'adjective':\r\n adj_list.append(key)\r\n else:\r\n oth_list.append(key)\r\n # Store the list on a .txt file\r\n # Store the nouns\r\n list_file = open(file_name, 'w')\r\n list_file.write(' NOUNS '.center(40, '-') + '\\n')\r\n list_file.close()\r\n noun_list.sort(key=lambda x: x[4])\r\n print_list(file_name, noun_list, max_len_german, max_len_english, max_len_theme)\r\n # Store the verbs\r\n list_file = open(file_name, 'a')\r\n list_file.write(' VERBS '.center(40, '-') + '\\n')\r\n list_file.close()\r\n print_list(file_name, verb_list, max_len_german, max_len_english, max_len_theme)\r\n # Store the adjectives\r\n list_file = open(file_name, 'a')\r\n list_file.write(' 
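`make_list`/`print_list` above align their output columns by padding each field to the longest entry. The same `ljust`/`rjust` trick on a toy table:

```python
rows = {"der Hund": ("dog", "animals"), "die Katze": ("cat", "animals")}
w1 = max(len(k) for k in rows)                  # widest German word
w2 = max(len(v[0]) for v in rows.values())      # widest translation
for german, (english, theme) in rows.items():
    print(german.ljust(w1) + " | " + english.rjust(w2) + " | " + theme)
```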
ADJECTIVES '.center(40, '-') + '\\n')\r\n list_file.close()\r\n print_list(file_name, adj_list, max_len_german, max_len_english, max_len_theme)\r\n # Store the rest\r\n list_file = open(file_name, 'a')\r\n list_file.write(' OTHERS '.center(40, '-') + '\\n')\r\n list_file.close()\r\n print_list(file_name, oth_list, max_len_german, max_len_english, max_len_theme)\r\n print('List was stored in \\'' + file_name + '\\'.')\r\n \r\ndef info():\r\n# Shows the information of a word\r\n while 1:\r\n print('Please enter the german word you want more information about.')\r\n print('(This input is cap sensitive)')\r\n print('Type \\'EXIT\\' to exit this section')\r\n word = input()\r\n if word == 'EXIT':\r\n break\r\n elif word in db:\r\n # Call the function to print the information\r\n print_info(word, db[word][0], db[word][1], db[word][2], db[word][3])\r\n break\r\n else:\r\n print('This word is not in the list.')\r\n \r\ndef test():\r\n# Tests\r\n print('''Choose the type of test you would like to take:\r\n 1.- English to German.\r\n 2.- German to English.\r\n 3.- Article test.''')\r\n while 1:\r\n test_type = input()\r\n if test_type == '1':\r\n # English to German test\r\n e2g_test()\r\n break\r\n elif test_type == '2':\r\n # German to English test\r\n g2e_test()\r\n break\r\n elif test_type == '3':\r\n # Article test\r\n article_test()\r\n break\r\n else:\r\n print('Option not valid.')\r\n\r\ndef invalid():\r\n print('The command introduced was not found.')\r\n\r\ndef exit_program():\r\n db.close()\r\n global exit_flag\r\n exit_flag = True\r\n \r\n# ------------------------------------------------------------------------------\r\n# Start the program ------------------------------------------------------------\r\n# ------------------------------------------------------------------------------\r\n\r\n# Opens and loads the database\r\ndb = shelve.open('german_database')\r\n# Create the variables and regexs\r\nclasses = ['noun', 'verb', 'adjective', 'other']\r\nexit_flag = False\r\nnoun_regex = re.compile('''(\r\n (der|die|das) # Article\r\n ([ ]) # Space\r\n ([A-ZÄÜÖ][a-zäüöß]*) # Noun\r\n )''', re.VERBOSE | re.UNICODE)\r\n\r\n\r\n# Introduction\r\nprint('Welcome to this tester.')\r\nprint('Please type the task you want to carry out.')\r\nprint('Type \\'help\\' if you need information about the commands.')\r\n\r\nwhile(exit_flag == False):\r\n # Read the command\r\n command = input()\r\n command = command.lower()\r\n \r\n input_map = {\r\n 'help': print_help,\r\n 'add': add,\r\n 'delete': delete,\r\n 'clear': clear,\r\n 'list': make_list,\r\n 'info': info,\r\n 'test': test,\r\n 'exit': exit_program\r\n }\r\n input_map.get(command, invalid)()\r\n \r\n","sub_path":"test_german_vocabulary.py","file_name":"test_german_vocabulary.py","file_ext":"py","file_size_in_byte":19204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"41300248","text":"from operator import imatmul\nfrom PIL.Image import new\nimport cv2\nfrom time import time\nimport numpy as np\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nfrom numpy.lib.function_base import gradient\nimport csv\n\n\n# def convolve2D(image, kernel, padding=2, strides=1):\n# # Cross Correlation, turn the kernel by 180 degrees\n# kernel = np.flipud(np.fliplr(kernel))\n\n# # Gather Shapes of Kernel + Image + Padding\n# xKernShape = kernel.shape[0]\n# yKernShape = kernel.shape[1]\n# xImgShape = image.shape[0]\n# yImgShape = image.shape[1]\n\n# # Shape of Output Convolution\n# xOutput = int(((xImgShape - 
xKernShape + 2 * padding) / strides) + 1)\n# yOutput = int(((yImgShape - yKernShape + 2 * padding) / strides) + 1)\n# output = np.zeros((xOutput, yOutput))\n\n# # Apply Equal Padding to All Sides\n# if padding != 0:\n# imagePadded = np.zeros((image.shape[0] + padding*2, image.shape[1] + padding*2))\n# imagePadded[int(padding):int(-1 * padding), int(padding):int(-1 * padding)] = image\n# # print(imagePadded)\n# else:\n# imagePadded = image\n\n# # Iterate through image\n# for y in range(yImgShape):\n# # Exit Convolution\n# if y > yImgShape - yKernShape:\n# break\n# # Only Convolve if y has gone down by the specified Strides\n# if y % strides == 0:\n# for x in range(xImgShape):\n# # Go to next row once kernel is out of bounds\n# if x > xImgShape - xKernShape:\n# break\n# try:\n# # Only Convolve if x has moved by the specified Strides\n# if x % strides == 0:\n# output[x, y] = (kernel * imagePadded[x: x + xKernShape, y: y + yKernShape]).sum()\n# except:\n# break\n# return output\n\n# def hysterisis_recusive(image, weak_index, weakedge_row, weakedge_col, strong_index, strongedge_row, strongedge_col):\n# result = np.copy(image)\n# for i in range(strong_index):\n# result = find_connected_weak_edge(result, strongedge_row[i], strongedge_col[i])\n \n# for i in range(weak_index):\n# if result[weakedge_row[i], weakedge_col[i]] != 255:\n# result[weakedge_row[i], weakedge_col[i]] = 0\n \n# return result\n\n# def find_connected_weak_edge(image, row, col):\n# M, N = image.shape\n# for i in range(-3, 3, 1):\n# for j in range(-3, 3, 1):\n# if (row+i > 0) and (col+j >0) and (row+i < M) and (col+j < N):\n# image[int(row+i), int(col+j)] = 255\n# image = find_connected_weak_edge(image, row+i, col+j)\n# return image\n\ndef convolution(image, kernel, average=False):\n print(\"Image Shape : {}\".format(image.shape))\n print(\"Kernel Shape : {}\".format(kernel.shape))\n\n image_row, image_col = image.shape\n kernel_row, kernel_col = kernel.shape\n\n output = np.zeros(image.shape)\n\n #padding\n pad_height = int((kernel_row - 1) / 2)\n pad_width = int((kernel_col - 1) / 2)\n padded_image = np.zeros((image_row + (2 * pad_height), image_col + (2 * pad_width)))\n padded_image[pad_height:padded_image.shape[0] - pad_height, pad_width:padded_image.shape[1] - pad_width] = image\n\n\n for row in range(image_row):\n for col in range(image_col):\n output[row, col] = np.sum(kernel * padded_image[row:row + kernel_row, col:col + kernel_col])\n if average:\n output[row, col] /= kernel.shape[0] * kernel.shape[1]\n\n print(\"Output Image size : {}\".format(output.shape))\n return output\n\ndef generate_gaussian(size, sigma=1):\n size = int(size) // 2 # floor divition operator '//' true divitrion operator '/'\n x, y = np.mgrid[-size:size+1, -size:size+1]\n normal = 1 / (2.0 * np.pi * sigma**2)\n result = np.exp(-((x**2 + y**2) / (2.0*sigma**2))) * normal\n return result\n\ndef generate_sobel(direction):\n if direction.lower() == 'x':\n return np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], np.float32)\n elif direction.lower() == 'y':\n return np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], np.float32)\n\ndef RGB2GRAY(image):\n # RGB weight: L = R * 299/1000 + G * 587/1000 + B * 114/1000\n rgb_weights = [0.2989, 0.587, 0.114]\n result = np.dot(image[...,:3], rgb_weights) # normalize the grayimage to 0.0 to 1.0 to display\n result = result.astype(np.uint8)\n return result\n\ndef gaussian_blur(image):\n kernel = generate_gaussian(size=5)\n result = convolution(image, kernel)\n return result\n\ndef sobel_dege_detector(image):\n x_kernel = 
generate_sobel('x')\n    y_kernel = generate_sobel('y')\n    Ix = convolution(image, x_kernel)\n    Iy = convolution(image, y_kernel)\n    gradient_magnitude = np.sqrt(np.square(Ix) + np.square(Iy))\n    gradient_direction = np.arctan2(Iy, Ix)\n    return gradient_direction, gradient_magnitude, Ix, Iy\n\ndef nms_interpolation(magnitude, dx, dy, sub_pixel=False):\n    # use sub-pixel interpolation to determine the edge pixel\n    mag = np.copy(magnitude)\n    M, N = np.shape(mag)\n    result = np.zeros((M,N))\n    sub_pixel_location = []\n\n    for i in range(1, M-1):\n        for j in range(1, N-1):\n            if mag[i, j] == 0:\n                result[i, j] = 0\n            else:\n                gradX = dx[i,j]\n                gradY = dy[i,j]\n                gradX_abs = np.abs(gradX)\n                gradY_abs = np.abs(gradY)\n                grad = mag[i,j]\n\n                # Y gradient greater than X gradient\n                if gradY_abs > gradX_abs:\n                    if gradY == 0:\n                        weight = 0\n                    else:\n                        weight = gradX_abs / gradY_abs\n                    # |g1|g2| |      | |g2|g1|\n                    # | | g| |  or  | | g| |\n                    # | |g4|g3|      |g3|g4| |\n                    g2 = mag[i-1, j]\n                    g4 = mag[i+1, j]\n                    if gradX * gradY > 0:\n                        g1 = mag[i-1, j-1]\n                        g3 = mag[i+1, j+1]\n                    else:\n                        g1 = mag[i-1, j+1]\n                        g3 = mag[i+1, j-1]\n                # X gradient greater than Y gradient\n                else:\n                    if gradX == 0:\n                        weight = 0\n                    else:\n                        weight = gradY_abs / gradX_abs\n                    # | | |g3|      |g1| | |\n                    # |g2| g|g4|  or  |g2| g|g4|\n                    # |g1| | |      | | |g3|\n                    g2 = mag[i, j-1]\n                    g4 = mag[i, j+1]\n                    if gradX * gradY > 0:\n                        g1 = mag[i+1, j-1]\n                        g3 = mag[i-1, j+1]\n                    else:\n                        g1 = mag[i-1, j-1]\n                        g3 = mag[i+1, j+1]\n                \n                gradTemp1 = weight * g1 + (1-weight) * g2\n                gradTemp2 = weight * g3 + (1-weight) * g4\n                if grad >= gradTemp1 and grad >= gradTemp2:\n                    result[i,j] = grad\n                else:\n                    result[i,j] = 0\n\n    return result\n\ndef double_threshold(image, th_low_ratio = 0.1, th_high_ratio = 0.3):\n    # the high:low threshold ratio is recommended to be between 2:1 and 3:1\n    highThreshold = np.max(image) * th_high_ratio\n    lowThreshold = np.max(image) * th_low_ratio\n    \n    M, N = image.shape\n    result = np.zeros((M,N), dtype=np.int32)\n\n    strongedge_row = np.zeros(M*N)\n    strongedge_col = np.zeros(M*N)\n    weakedge_row = np.zeros(M*N)\n    weakedge_col = np.zeros(M*N)\n    strong_index = 0\n    weak_index = 0\n    \n    weak_value = np.int32(50)\n    strong_value = np.int32(255)\n    \n    # strong_i, strong_j = np.where(image >= highThreshold)\n    # weak_i, weak_j = np.where((image <= highThreshold) & (image >= lowThreshold))\n    # zeros_i, zeros_j = np.where(image < lowThreshold)\n    # result[strong_i, strong_j] = strong_value\n    # result[weak_i, weak_j] = weak_value\n\n    for i in range(M):\n        for j in range(N):\n            if image[i, j] > highThreshold:\n                result[i, j] = strong_value\n                strongedge_row[strong_index] = i\n                strongedge_col[strong_index] = j\n                strong_index += 1\n            elif image[i, j] < lowThreshold:\n                result[i, j] = 0\n            else:\n                result[i, j] = weak_value\n                weakedge_row[weak_index] = i\n                weakedge_col[weak_index] = j\n                weak_index += 1\n\n    return result, weak_index, weakedge_row, weakedge_col, strong_index, strongedge_row, strongedge_col\n\ndef hysterisis(image, weak_value=50, strong_value=255):\n    M, N = image.shape\n    # result = np.zeros((M,N))\n    top2btm = np.copy(image)\n    btm2top = np.copy(image)\n    right2left = np.copy(image)\n    left2right = np.copy(image)\n\n    # probably needs to go from other directions?\n    for i in range(1, M-1):\n        for j in range(1, N-1):\n            # check the 8 surroundings of the weak edge\n            if(top2btm[i,j]==weak_value):\n                if((top2btm[i-1, j-1:j+2] == strong_value).any()\n                    or (top2btm[i, [j-1,j+1]] == strong_value).any()\n                    or (top2btm[i+1, j-1:j+2] == strong_value).any()):\n                    top2btm[i,j] = strong_value\n                else:\n                    top2btm[i,j] = 0\n\n    for i in range(M-2, 0, 
-1):\n        for j in range(N-2, 0, -1):\n            # check the 8 surroundings of the weak edge\n            if(btm2top[i,j]==weak_value):\n                if((btm2top[i-1, j-1:j+2] == strong_value).any()\n                    or (btm2top[i, [j-1,j+1]] == strong_value).any()\n                    or (btm2top[i+1, j-1:j+2] == strong_value).any()):\n                    btm2top[i,j] = strong_value\n                else:\n                    btm2top[i,j] = 0\n\n    for i in range(1, M-1):\n        for j in range(N-2, 0, -1):\n            if(right2left[i,j]==weak_value):\n                if((right2left[i-1, j-1:j+2] == strong_value).any()\n                    or (right2left[i, [j-1,j+1]] == strong_value).any()\n                    or (right2left[i+1, j-1:j+2] == strong_value).any()):\n                    right2left[i,j] = strong_value\n                else:\n                    right2left[i,j] = 0\n\n    for i in range(M-2, 0, -1):\n        for j in range(1, N-1):\n            if(left2right[i,j]==weak_value):\n                if((left2right[i-1, j-1:j+2] == strong_value).any()\n                    or (left2right[i, [j-1,j+1]] == strong_value).any()\n                    or (left2right[i+1, j-1:j+2] == strong_value).any()):\n                    left2right[i,j] = strong_value\n                else:\n                    left2right[i,j] = 0\n\n    result = top2btm + btm2top + right2left + left2right\n    result[result > 255] = 255\n    return result\n\ndef fit_parabola(x1, y1, x2, y2, x3, y3):\n    # y = ax^2+bx+c\n    denom = (x1-x2) * (x1-x3) * (x2-x3)\n    a = (x3 * (y2-y1) + x2 * (y1-y3) + x1 * (y3-y2)) / denom\n    b = (x3*x3 * (y1-y2) + x2*x2 * (y3-y1) + x1*x1 * (y2-y3)) / denom\n    c = (x2 * x3 * (x2-x3) * y1+x3 * x1 * (x3-x1) * y2+x1 * x2 * (x1-x2) * y3) / denom\n    x0 = (-1*b)/(2*a)\n    return x0, y2\n\ndef canny(image, verbose=True):\n    # 5 steps\n    # 1.Noise reduction (gaussian blur)\n    img_gaussian = gaussian_blur(image)\n    if verbose:\n        cv2.imshow('gaussian_blur',img_gaussian.astype(np.uint8))\n    \n    # 2.edge enhancement (gradient calculation)\n    gradient_direction, gradient_magnitude, Ix, Iy = sobel_dege_detector(img_gaussian)\n    if verbose:\n        cv2.imshow('edge enhancement',gradient_magnitude.astype(np.uint8))\n\n    # 3.Non-maximum suppression (pixel accuracy)\n    img_nms = nms_interpolation(gradient_magnitude,dx=Ix, dy=Iy)\n    if verbose:\n        cv2.imshow('NMS_result', img_nms.astype(np.uint8))\n\n    # 4.Double thresholding\n    img_dt, weak_index, weakedge_row, weakedge_col, strong_index, strongedge_row, strongedge_col = double_threshold(img_nms)\n    if verbose:\n        cv2.imshow('double thresholding', img_dt.astype(np.uint8))\n\n    # 5.Edge Tracking by Hysteresis\n    final_result = hysterisis(img_dt)\n    # final_result = hysterisis_recusive(img_dt, weak_index, weakedge_row, weakedge_col, strong_index, strongedge_row, strongedge_col)\n    cv2.imshow('hysterisis', final_result.astype(np.uint8))\n\n    return final_result\n\ndef compute_edge_points(partial_gradients, min_magnitude=0):\n    gx, gy = partial_gradients\n    rows, cols = gx.shape\n    edges = []\n\n    def mag(y, x):\n        # sqrt(x^2+y^2) magnitude\n        return np.hypot(gx[y, x], gy[y, x])\n\n    for y in range(1, rows - 1):\n        for x in range(1, cols - 1):\n\n            center_mag = mag(y, x)\n            if center_mag < min_magnitude:\n                continue\n\n            left_mag = mag(y, x - 1)\n            right_mag = mag(y, x + 1)\n            top_mag = mag(y - 1, x)\n            bottom_mag = mag(y + 1, x)\n\n            theta_x, theta_y = 0, 0\n            if (left_mag < center_mag >= right_mag) and abs(gx[y, x]) >= abs(gy[y, x]):\n                theta_x = 1\n            elif (top_mag < center_mag >= bottom_mag) and abs(gx[y, x]) <= abs(gy[y, x]):\n                theta_y = 1\n\n            if theta_x != 0 or theta_y != 0:\n                a = mag(y - theta_y, x - theta_x)\n                b = mag(y, x)\n                c = mag(y + theta_y, x + theta_x)\n                lamda = (a - c) / (2 * (a - 2 * b + c))\n                ex = x + lamda * theta_x\n                ey = y + lamda * theta_y\n                edges.append([ex, ey])\n                print('(%f, %f)' % (ex, ey))\n    return edges\n\ndef canny_subpixel(image):\n    # 1.resize\n    M, N = image.shape\n    
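# A minimal, self-contained sketch of the three-point parabola refinement that
# compute_edge_points() above relies on (the helper name and the sample
# magnitudes below are invented for illustration; they are not part of the file):
def parabola_peak_offset(a, b, c):
    # Vertex of the parabola through (-1, a), (0, b), (1, c); the offset is
    # (a - c) / (2 * (a - 2b + c)) and stays within (-1, 1) when b is a
    # strict local maximum of the three samples.
    return (a - c) / (2.0 * (a - 2.0 * b + c))

print(parabola_peak_offset(10.0, 20.0, 12.0))  # ~0.056: peak sits slightly towards c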
img_resize = cv2.resize(image, (int(N/4), int(M/4)), interpolation=cv2.INTER_AREA)\n    cv2.imshow('resized_image',img_resize.astype(np.uint8))\n\n    # 2.Noise reduction (gaussian blur)\n    img_gaussian = gaussian_blur(img_resize)\n    cv2.imshow('gaussian_blur_resized',img_gaussian.astype(np.uint8))\n    \n    # 3.edge enhancement (gradient calculation)\n    gradient_direction, gradient_magnitude, Ix, Iy = sobel_dege_detector(img_gaussian)\n    cv2.imshow('edge enhancement_resized',gradient_magnitude.astype(np.uint8))\n\n    # 4.sub pixel canny\n    edgels = compute_edge_points((Ix, Iy),50)\n    \n    # 5.get the edge location\n\n    # 6.cast again, resize it back to original image\n    subpixel_result = np.zeros((M,N))\n    for i in edgels:\n        y = int(i[0]*4)\n        x = int(i[1]*4)\n        subpixel_result[x,y] = 255\n    cv2.imshow('sub_pixel', subpixel_result)\n\n    # 7. calculate accuracy\n    canny_result = canny(image, verbose=True)\n    accuracy = calculate_accuracy(canny_result, subpixel_result)\n    print('the accuracy is: %f' % accuracy)\n    return subpixel_result\n\ndef calculate_accuracy(canny, subpixel):\n    M, N = subpixel.shape\n    correct = 0\n    incorrect = 0\n    for i in range(M):\n        for j in range(N):\n            if subpixel[i,j] == 255:\n                if (canny[max(i-3, 0):i+4, max(j-3, 0):j+4] == 255).any():\n                    correct += 1\n                else:\n                    incorrect += 1\n    \n    accuracy = correct/(correct+incorrect)\n    return accuracy\n\ndef read_image(image_name, image_ext, sub_pixel=False):\n    base_dir = os.path.dirname(os.path.abspath(__file__))\n    image_path = os.path.join(base_dir, image_name + '.' + image_ext)\n    image = cv2.imread(image_path)\n    cv2.imshow('original', image)\n\n    if len(image.shape) == 3:\n        gray_image = RGB2GRAY(image)\n    elif len(image.shape) == 2:\n        gray_image = image\n    cv2.imshow('gray', gray_image)\n\n    # # normalize the grayimage to 0.0 to 1.0 for display\n    # gray_image /= 255. \n    # new_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    # print(new_gray)\n    # # opencv image is brighter, probably due to the low precision for fast computation speed\n    # print(gray_image) \n\n    # CANNY\n    if sub_pixel:\n        result = canny_subpixel(gray_image)\n    else:\n        result = canny(gray_image)\n    # canny_cv = cv2.Canny(np.uint8(image),200, 300)\n    # cv2.imshow('canny', canny_cv)\n\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n\ndef realtime():\n    camera = cv2.VideoCapture(0)\n    while True:\n        # Read the frame\n        ret, frame = camera.read()\n        # mirroring the image\n        frame = cv2.flip(frame, 1)\n        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n        img = canny(gray)\n        # canny_cv = cv2.Canny(np.uint8(frame),200, 300)\n        # Display\n        cv2.imshow('canny', img)\n        cv2.imshow('frame', frame)\n        #cv2.imshow('canny_opencv', canny_cv)\n        # Stop if escape key is pressed\n        k = cv2.waitKey(20) & 0xff\n        if k==27:\n            break\n\nif __name__ == '__main__':\n    # sys.setrecursionlimit(10000)\n    # read_image('chessboard_hp', 'jpg', sub_pixel=False)\n    read_image('chessboard_hp', 'jpg', sub_pixel=True)\n    # read_image('lena', 'png', sub_pixel=True)\n    # print(generate_gaussian(5))\n    # gaussian separable. 
use 1D filter to reduce calculation time\n # print(cv2.getGaussianKernel(ksize=5,sigma=1) * cv2.getGaussianKernel(ksize=5,sigma=1).T)''","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"651507132","text":"import matplotlib\nimport matplotlib.pyplot as plt\nfrom winlist import get_win_pct_list_for_hand\n\nsampleSize = 100000\n\nwin_list_5_6 = get_win_pct_list_for_hand(3, 7, sampleSize)\nprint(win_list_5_6[sampleSize - 1])\nwin_list_10_9 = get_win_pct_list_for_hand(10, 9, sampleSize)\nprint(win_list_10_9[sampleSize - 1])\nwin_list_8_8 = get_win_pct_list_for_hand(8, 8, sampleSize)\nprint(win_list_8_8[sampleSize - 1])\n\nxs = list(range(1, sampleSize+1))\n\n#plt.hlines(y=100, xmin=0, xmax=sampleSize, colors='k', linestyles='solid', data=None)\nplt.ylabel('Win Procent')\nplt.xlabel('Sample Size')\np2 = plt.plot(xs,win_list_10_9)\nplt.title('Sandsynlighed for at vinde en bestemt hånd i poker')\np1 = plt.plot(xs,win_list_5_6)\np3 = plt.plot(xs,win_list_8_8)\nplt.legend((p1[0], p2[0], p3[0]), ('Cards 5 and 6', 'Cards 10 and 9', 'Pair 8'))\nplt.ylim(0, 130)\nplt.show()\n\n#print('Hånd 1 har vundet','procent')\n#print('Hånd 2 har vundet','procent')\n#print('Draw har vundet', 'procent')\n","sub_path":"poker_plot.py","file_name":"poker_plot.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"180842711","text":"import urllib.request as ulib\nimport os\nimport sys\nimport openpyxl as pyxl\nfrom openpyxl import Workbook\nimport win32api\nimport datetime\n\ndef returnargs():\n arguments = []\n try:\n for argument in sys.argv[1:]:\n print(argument)\n arguments.append(argument)\n except:\n pass\n \n return arguments\n\ndef lineparse(line):\n\n firstSweep = line.split(\"<\")\n secondSweep = firstSweep[1].split(\">\")\n\n num = secondSweep[1]\n\n return num\n\n\ndef main():\n\n print(\"Bienvenido al importador de loterias de porodo!\")\n\n arguments = returnargs()\n if arguments:\n choosing = True\n else:\n choosing = False\n\n url = \"https://resultadodelaloteria.com/colombia\"\n\n loterias = {\n \"Cu/marca\": \"loteria-de-cundinamarca\", \n \"Cruz Roja\":\"loteria-de-la-cruz-roja\",\n \"Manizales\":\"loteria-de-manizales\",\n \"Bogota\":\"loteria-de-bogota\",\n \"Boyaca\":\"loteria-de-boyaca\",\n \"S/tander\":\"loteria-de-santander\"}\n \n if not choosing:\n\n wb = Workbook()\n ws = wb.active\n ws.page_margins.left = 0\n ws.page_margins.right = 0\n ws.page_margins.top = 0\n ws.page_margins.bottom = 0\n\n now = datetime.datetime.now()\n month = now.strftime(\"%B\")\n\n fecha = f\"{now.year}/{month}//{now.day}\"\n ws[\"A1\"].value = fecha\n ws[\"A3\"].value = \"NUMEROS\"\n\n index = 4\n for loteria in loterias:\n\n loturl = f\"{url}/{loterias[loteria]}\"\n print(loturl)\n html = ulib.urlopen(loturl).readlines()\n\n for line in html:\n if \"ctl00_ContentPlaceHolder1_lbl4Cifras\" in str(line):\n break\n\n print(\"Extrayendo numero . . 
.\")\n\n num = lineparse(str(line))\n print(f\"El numero es: {num}!\")\n\n ws[f\"A{index}\"].value = f\"LOT {loteria}: [{num}]\"\n rpath = f\"{os.path.dirname(__file__)}\\\\temp.xlsx\"\n \n index += 1\n\n wb.save(rpath)\n\n printer = \"POS-58\"\n\n # win32api.ShellExecute (\n # 0,\n # \"printto\",\n # f\"{rpath}\",\n # f\"{printer}\",\n # \".\",\n # 0\n # )\n \n os.remove(rpath)\n\nmain()","sub_path":"PY/DTerm/LotFetcher.py","file_name":"LotFetcher.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"464138195","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Title: TeleInviter\n# Author: @neoctobers\n#\n# Invite members, from (many) `source_groups` to (one) `destination_group`\n# And avoid repetition from `destination_group`\n#\n# [Github]:\n# https://github.com/neoctobers/TeleInviter\n#\n# [Telegram Group]:\n# https://t.me/TeleInviter\n#\nimport sys\nimport time\nimport random\nimport colorama\nimport telethon\nimport conf\nimport console\nimport db\nfrom telethon import sync\nfrom telethon import errors\nfrom telethon.tl.types import PeerUser\nfrom telethon.tl.types import UserStatusOffline\nfrom telethon.tl.functions.channels import JoinChannelRequest\nfrom telethon.tl.functions.channels import InviteToChannelRequest\nfrom pprint import pprint\n\n\ndef get_user_display_name(u):\n \"\"\"Get `display_name` for a user\n\n Args:\n u: user\n\n Returns:\n A string\n example:\n 'Donald J. Trump'\n \"\"\"\n name = []\n if u.first_name:\n name.append(u.first_name)\n if u.last_name:\n name.append(u.last_name)\n return '|'.join(name)\n\n\ndef invite_user(u):\n \"\"\"Invite user to destination_group\n\n Args:\n i: index of participants[client_session]\n \"\"\"\n\n # Get a random session\n client_session = random.choice(client_sessions)\n\n # Get the user\n user_to_be_invited = clients[client_session].get_entity(PeerUser(u.id))\n\n # SN, display_name\n sys.stdout.write('%6d > [%s] ... ' % (i, get_user_display_name(user_to_be_invited)))\n\n # Find in db\n row = db.Invite.select().where(db.Invite.user_id == user_to_be_invited.id).first()\n\n # No record in db\n if row is None:\n\n # Echo\n sys.stdout.write(colorama.Fore.LIGHTYELLOW_EX + 'INVITE by \"%s\" ... ' % client_session)\n\n\n try:\n # Invite\n clients[client_session](InviteToChannelRequest(\n destination_groups[client_session],\n [user_to_be_invited],\n ))\n\n # Save to db\n db.save_invite(user_to_be_invited)\n\n # shows done\n sys.stdout.write(colorama.Fore.GREEN + 'DONE')\n\n # CPU sleep\n sleeping_secs = random.randint(conf.rd_sleep_min, conf.rd_sleep_max)\n print(colorama.Fore.LIGHTMAGENTA_EX + ' waiting %d secs...' % sleeping_secs)\n time.sleep(sleeping_secs)\n except errors.rpcerrorlist.UserPrivacyRestrictedError as e:\n print(colorama.Fore.LIGHTRED_EX + 'error#0. UserPrivacyRestrictedError...')\n except errors.rpcerrorlist.ChatAdminRequiredError as e:\n print(colorama.Fore.LIGHTRED_EX + 'error#1. ChatAdminRequiredError...')\n except errors.rpcerrorlist.ChatIdInvalidError as e:\n print(colorama.Fore.LIGHTRED_EX + 'error#2. ChatIdInvalidError...')\n except errors.rpcerrorlist.InputUserDeactivatedError as e:\n print(colorama.Fore.LIGHTRED_EX + 'error#3. InputUserDeactivatedError...')\n except errors.rpcerrorlist.PeerIdInvalidError as e:\n print(colorama.Fore.LIGHTRED_EX + 'error#4. PeerIdInvalidError...')\n except errors.rpcerrorlist.UserAlreadyParticipantError as e:\n print(colorama.Fore.LIGHTRED_EX + 'error#5. 
UserAlreadyParticipantError...')\n except errors.rpcerrorlist.UserIdInvalidError as e:\n print(colorama.Fore.LIGHTRED_EX + 'error#6. UserIdInvalidError...')\n except errors.rpcerrorlist.UserNotMutualContactError as e:\n print(colorama.Fore.LIGHTRED_EX + 'error#7. UserNotMutualContactError...')\n except errors.rpcerrorlist.UsersTooMuchError as e:\n print(colorama.Fore.LIGHTRED_EX + 'error#8. UsersTooMuchError...')\n except errors.rpcerrorlist.PeerFloodError as e:\n sys.stdout.write(colorama.Fore.LIGHTRED_EX + '\\n error#9. PeerFloodError > ')\n sys.stdout.write(colorama.Fore.LIGHTMAGENTA_EX + '%s ' % e.message)\n # Prepare to exit: record the position of job failed\n db.get_create_or_update_pause_position(i)\n # When PeerFloodError occurs, it should better not to use this client any more\n sys.exit(0)\n else:\n print(colorama.Fore.GREEN + 'skipped')\n\n\ndef is_user_status_offline_passed(t):\n \"\"\"Check UserStatusOffline `was_online` limit\n\n Args:\n t: datetime\n\n Returns:\n boolean\n \"\"\"\n if (\n (conf.filter_user_status_offline_was_online_min is None or t >= conf.filter_user_status_offline_was_online_min)\n and\n (conf.filter_user_status_offline_was_online_max is None or t <= conf.filter_user_status_offline_was_online_max)\n ):\n # t >= `filter_user_status_offline_was_online_min` if it was set\n # AND\n # t <= `filter_user_status_offline_was_online_max` if it was set\n return True\n\n # Or return False\n return False\n\n\n# Initialize colorama with auto-reset on\ncolorama.init(autoreset=True)\n\n\n# OUTPUT: starting\nprint('starting...')\n\n\n# Initialize `clients` dict\nclients = {}\nclient_sessions = []\nprint(colorama.Fore.LIGHTCYAN_EX + '\\n\\nLaunching Clients:')\nfor client_session in conf.client_sessions:\n # Launch\n sys.stdout.write(' \"%s\" ... ' % client_session)\n\n # Create a Telegram Client\n c = telethon.TelegramClient(\n client_session,\n conf.tg_api_id,\n conf.tg_api_hash,\n proxy=conf.proxy,\n )\n c.connect()\n\n # Confirm authorized or start the client (login)\n if c.is_user_authorized():\n # Authorized\n clients[client_session] = c\n client_sessions.append(client_session)\n print(colorama.Fore.GREEN + 'DONE')\n else:\n # Need to login\n print(colorama.Fore.LIGHTYELLOW_EX + 'Need Login\\n')\n print(colorama.Fore.LIGHTMAGENTA_EX + 'Session login for \"%s\"' % client_session)\n\n # Login\n c.start()\n\n # Verify Login\n if c.is_user_authorized():\n clients[client_session] = c\n client_sessions.append(client_session)\n print(colorama.Fore.GREEN + 'Session login for \"%s\" is SUCCESSFUL' % client_session)\n else:\n print(colorama.Fore.LIGHTRED_EX + 'Session login for \"%s\" is FAILED' % client_session)\n\n\n# Exit if there is no available client\nif 0 == len(clients):\n print(colorama.Fore.LIGHTRED_EX + 'No client available...')\n sys.exit()\n\n\n# Initialize `destination_groups` dict, same keys as clients\ndestination_groups = {}\nsys.stdout.write(colorama.Fore.LIGHTCYAN_EX + '\\n\\nDestination Group: ')\nprint('\"%s\"' % conf.destination_group)\nfor client_session, client in clients.items():\n # each session\n sys.stdout.write(' \"%s\" ... 
' % client_session)\n\n # error when session user is banned\n try:\n g = client.get_entity(conf.destination_group)\n\n # Join if not IN.\n if g.left:\n client(JoinChannelRequest(g))\n print(colorama.Fore.LIGHTYELLOW_EX + 'JOINED')\n else:\n print(colorama.Fore.GREEN + 'IN')\n\n # Democracy\n if g.democracy:\n # All members can add members\n destination_groups[client_session] = g\n else:\n # Only admins can add members\n if (g.admin_rights is not None and g.admin_rights.invite_users):\n destination_groups[client_session] = g\n else:\n sys.stdout.write(colorama.Fore.LIGHTRED_EX + ' Have NO admin right to add a member,')\n print(colorama.Fore.LIGHTYELLOW_EX + ' session is REMOVED.')\n del clients[client_session]\n\n except ValueError as e:\n print(colorama.Fore.LIGHTRED_EX + 'ERROR')\n print(colorama.Fore.LIGHTRED_EX + ' %s' % e)\n print(colorama.Fore.LIGHTYELLOW_EX + ' Please make sure \"%s\" is NOT banned' % client_session)\n print(' session \"%s\" is removed from clients' % client_session)\n del clients[client_session]\n pass\n\n\n# save participants of destination_group\nsys.stdout.write(' -\\n save participants ... ')\nps = clients[client_sessions[0]].get_participants(conf.destination_group, aggressive=True)\nsys.stdout.write(colorama.Fore.LIGHTYELLOW_EX + '%d members ... ' % len(ps))\nfor u in ps:\n db.save_invite(u)\nprint(colorama.Fore.GREEN + 'DONE')\n\n\n# Exit if there is no available client\nif 0 == len(clients):\n print(colorama.Fore.LIGHTRED_EX + 'No client available...')\n sys.exit()\n\n\n# OUTPUT: clients\nprint(colorama.Fore.LIGHTCYAN_EX + '\\n\\nThese clients have been launched:')\ni = 1\nfor key, client in clients.items():\n print('%4d: \"%s\"' % (i, key))\n i += 1\ndel i\n\n\n# Verify `source_groups`\nsource_groups = []\nprint(colorama.Fore.LIGHTCYAN_EX + '\\n\\nVerify Source Groups:')\nfor group_key in conf.source_groups:\n print(' \"%s\" ... ' % group_key)\n try:\n g = clients[client_sessions[0]].get_entity(group_key)\n source_groups.append(group_key)\n print(colorama.Fore.GREEN + ' %s' % g.title)\n except errors.rpcerrorlist.InviteHashInvalidError as e:\n sys.stdout.write(colorama.Fore.LIGHTRED_EX + ' [InviteHashInvalidError] ')\n print(colorama.Fore.LIGHTYELLOW_EX + '%s' % e)\n except ValueError as e:\n sys.stdout.write(colorama.Fore.LIGHTRED_EX + ' [ValueError] ')\n print(colorama.Fore.LIGHTYELLOW_EX + '%s' % e)\n\n\n# OUTPUT: source_groups\nprint(colorama.Fore.LIGHTCYAN_EX + '\\n\\nThese Source Groups have been verified:')\ni = 1\nfor group_key in source_groups:\n print('%4d: \"%s\"' % (i, group_key))\n i += 1\ndel i\n\n\n# Continue\nif (input('\\n\\n\\nLOAD participants (y/n)? ') not in ['y', 'yes']):\n sys.exit('\\n\\n')\n\n\n# Initialize `participants` dict\nparticipants = {}\nprint(colorama.Fore.LIGHTCYAN_EX + '\\n\\nLoading participants:')\nfor client_session, client in clients.items():\n print('\\n- %s:' % client_session)\n participants[client_session] = []\n for group_key in source_groups:\n sys.stdout.write(' \"%s\" ... ' % group_key)\n ps = client.get_participants(group_key, aggressive=True)\n participants[client_session].extend(ps)\n print(colorama.Fore.GREEN + '%d members' % len(ps))\n del ps\n\n # members amount\n print(colorama.Fore.LIGHTYELLOW_EX + ' %d members' % len(participants[client_session]))\n\n\n# Ready to GO ?\nif (input('\\n\\n\\nReady to GO (y/n)? 
') not in ['y', 'yes']):\n sys.exit('\\n\\n')\n\n\n# Start inviting\nprint(colorama.Fore.LIGHTCYAN_EX + '\\n\\nStarting inviting:')\n# leave back or use the position you want to start from\ni = db.get_create_or_update_pause_position()\nfor u in participants[client_sessions[0]][i:]:\n if u.bot is False:\n # skip bots\n if len(get_user_display_name(u)) > conf.filter_user_display_name_too_much_words_limit:\n # avoid spam, who has a very long name\n pass\n elif type(u.status) in conf.filter_user_status_types:\n # Not UserStatusOffline\n invite_user(u)\n elif (isinstance(u.status, UserStatusOffline) and is_user_status_offline_passed(u.status.was_online)):\n # UserStatusOffline\n invite_user(u)\n\n # Next\n i += 1\ndel i\n\n\n# embed & console\nconsole.embed(banner='\\nconsole')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"46543253","text":"# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\n\nfrom sqlalchemy import Column, Text, Integer\n\nfrom aria.storage import (\n ModelStorage,\n model,\n exceptions,\n sql_mapi,\n structure,\n type as aria_type,\n)\nfrom aria import application_model_storage\nfrom ..storage import get_sqlite_api_kwargs, release_sqlite_storage\nfrom ..mock import (\n context as mock_context,\n models,\n operations\n)\n\n\nclass MockModel(model.DeclarativeBase, structure.ModelMixin): #pylint: disable=abstract-method\n __tablename__ = 'mock_models'\n model_dict = Column(aria_type.Dict)\n model_list = Column(aria_type.List)\n value = Column(Integer)\n name = Column(Text)\n\n\n@pytest.fixture\ndef storage():\n base_storage = ModelStorage(sql_mapi.SQLAlchemyModelAPI, api_kwargs=get_sqlite_api_kwargs())\n base_storage.register(MockModel)\n yield base_storage\n release_sqlite_storage(base_storage)\n\n\n@pytest.fixture\ndef context():\n return mock_context.simple(get_sqlite_api_kwargs())\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef module_cleanup():\n model.DeclarativeBase.metadata.remove(MockModel.__table__) #pylint: disable=no-member\n\n\ndef test_storage_base(storage):\n with pytest.raises(AttributeError):\n storage.non_existent_attribute()\n\n\ndef test_model_storage(storage):\n mock_model = MockModel(value=0, name='model_name')\n storage.mock_model.put(mock_model)\n\n assert storage.mock_model.get_by_name('model_name') == mock_model\n\n assert [mm_from_storage for mm_from_storage in storage.mock_model.iter()] == [mock_model]\n assert [mm_from_storage for mm_from_storage in storage.mock_model] == [mock_model]\n\n storage.mock_model.delete(mock_model)\n with pytest.raises(exceptions.StorageError):\n storage.mock_model.get(mock_model.id)\n\n\ndef test_inner_dict_update(storage):\n 
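# The inner-dict/inner-list update tests around this point exercise in-place
# mutation of nested JSON-style columns. As a plain-Python reminder (independent
# of ARIA or SQLAlchemy, values invented) of why nested state needs explicit
# re-persisting, note that a shallow copy still aliases inner containers:
outer = {'inner_dict': {'inner_value': 1}, 'value': 0}
snapshot = dict(outer)                        # shallow copy
outer['inner_dict']['inner_value'] = 2
print(snapshot['inner_dict']['inner_value'])  # 2 -- the inner dict is shared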
inner_dict = {'inner_value': 1}\n\n mock_model = MockModel(model_dict={'inner_dict': inner_dict, 'value': 0})\n storage.mock_model.put(mock_model)\n\n storage_mm = storage.mock_model.get(mock_model.id)\n assert storage_mm == mock_model\n\n storage_mm.model_dict['inner_dict']['inner_value'] = 2\n storage_mm.model_dict['value'] = -1\n storage.mock_model.update(storage_mm)\n storage_mm = storage.mock_model.get(storage_mm.id)\n\n assert storage_mm.model_dict['inner_dict']['inner_value'] == 2\n assert storage_mm.model_dict['value'] == -1\n\n\ndef test_inner_list_update(storage):\n mock_model = MockModel(model_list=[0, [1]])\n storage.mock_model.put(mock_model)\n\n storage_mm = storage.mock_model.get(mock_model.id)\n assert storage_mm == mock_model\n\n storage_mm.model_list[1][0] = 'new_inner_value'\n storage_mm.model_list[0] = 'new_value'\n storage.mock_model.update(storage_mm)\n storage_mm = storage.mock_model.get(storage_mm.id)\n\n assert storage_mm.model_list[1][0] == 'new_inner_value'\n assert storage_mm.model_list[0] == 'new_value'\n\n\ndef test_model_to_dict(context):\n deployment = context.deployment\n deployment_dict = deployment.to_dict()\n\n expected_keys = [\n 'created_at',\n 'description',\n 'inputs',\n 'groups',\n 'permalink',\n 'policy_triggers',\n 'policy_types',\n 'outputs',\n 'scaling_groups',\n 'updated_at',\n 'workflows',\n 'blueprint_name',\n ]\n\n for expected_key in expected_keys:\n assert expected_key in deployment_dict\n\n assert 'blueprint_fk' not in deployment_dict\n\n\ndef test_application_storage_factory():\n storage = application_model_storage(sql_mapi.SQLAlchemyModelAPI,\n api_kwargs=get_sqlite_api_kwargs())\n assert storage.node\n assert storage.node_instance\n assert storage.plugin\n assert storage.blueprint\n assert storage.deployment\n assert storage.deployment_update\n assert storage.deployment_update_step\n assert storage.deployment_modification\n assert storage.execution\n\n release_sqlite_storage(storage)\n\n\ndef test_relationship_model_ordering(context):\n deployment = context.model.deployment.get_by_name(models.DEPLOYMENT_NAME)\n source_node = context.model.node.get_by_name(models.DEPENDENT_NODE_NAME)\n source_node_instance = context.model.node_instance.get_by_name(\n models.DEPENDENT_NODE_INSTANCE_NAME)\n target_node = context.model.node.get_by_name(models.DEPENDENCY_NODE_NAME)\n target_node_instance = context.model.node_instance.get_by_name(\n models.DEPENDENCY_NODE_INSTANCE_NAME)\n new_node = model.Node(\n name='new_node',\n type='test_node_type',\n type_hierarchy=[],\n number_of_instances=1,\n planned_number_of_instances=1,\n deploy_number_of_instances=1,\n properties={},\n operations=dict((key, {}) for key in operations.NODE_OPERATIONS),\n min_number_of_instances=1,\n max_number_of_instances=1,\n deployment=deployment\n )\n source_to_new_relationship = model.Relationship(\n source_node=source_node,\n target_node=new_node,\n source_interfaces={},\n source_operations=dict((key, {}) for key in operations.RELATIONSHIP_OPERATIONS),\n target_interfaces={},\n target_operations=dict((key, {}) for key in operations.RELATIONSHIP_OPERATIONS),\n type='rel_type',\n type_hierarchy=[],\n properties={},\n )\n new_node_instance = model.NodeInstance(\n name='new_node_instance',\n runtime_properties={},\n version=None,\n node=new_node,\n state='',\n scaling_groups=[]\n )\n source_to_new_relationship_instance = model.RelationshipInstance(\n relationship=source_to_new_relationship,\n source_node_instance=source_node_instance,\n target_node_instance=new_node_instance,\n 
)\n\n new_to_target_relationship = model.Relationship(\n source_node=new_node,\n target_node=target_node,\n source_interfaces={},\n source_operations=dict((key, {}) for key in operations.RELATIONSHIP_OPERATIONS),\n target_interfaces={},\n target_operations=dict((key, {}) for key in operations.RELATIONSHIP_OPERATIONS),\n type='rel_type',\n type_hierarchy=[],\n properties={},\n )\n new_to_target_relationship_instance = model.RelationshipInstance(\n relationship=new_to_target_relationship,\n source_node_instance=new_node_instance,\n target_node_instance=target_node_instance,\n )\n\n\n context.model.node.put(new_node)\n context.model.node_instance.put(new_node_instance)\n context.model.relationship.put(source_to_new_relationship)\n context.model.relationship.put(new_to_target_relationship)\n context.model.relationship_instance.put(source_to_new_relationship_instance)\n context.model.relationship_instance.put(new_to_target_relationship_instance)\n\n def flip_and_assert(node_instance, direction):\n \"\"\"\n Reversed the order of relationships and assert effects took place.\n :param node_instance: the node instance to operatate on\n :param direction: the type of relationships to flip (inbound/outbount)\n :return:\n \"\"\"\n assert direction in ('inbound', 'outbound')\n\n relationships = getattr(node_instance.node, direction + '_relationships')\n relationship_instances = getattr(node_instance, direction + '_relationship_instances')\n assert len(relationships) == 2\n assert len(relationship_instances) == 2\n\n first_rel, second_rel = relationships\n first_rel_instance, second_rel_instance = relationship_instances\n assert getattr(first_rel, relationships.ordering_attr) == 0\n assert getattr(second_rel, relationships.ordering_attr) == 1\n assert getattr(first_rel_instance, relationship_instances.ordering_attr) == 0\n assert getattr(second_rel_instance, relationship_instances.ordering_attr) == 1\n\n reversed_relationships = list(reversed(relationships))\n reversed_relationship_instances = list(reversed(relationship_instances))\n\n assert relationships != reversed_relationships\n assert relationship_instances != reversed_relationship_instances\n\n relationships[:] = reversed_relationships\n relationship_instances[:] = reversed_relationship_instances\n context.model.node_instance.update(node_instance)\n\n assert relationships == reversed_relationships\n assert relationship_instances == reversed_relationship_instances\n\n assert getattr(first_rel, relationships.ordering_attr) == 1\n assert getattr(second_rel, relationships.ordering_attr) == 0\n assert getattr(first_rel_instance, relationship_instances.ordering_attr) == 1\n assert getattr(second_rel_instance, relationship_instances.ordering_attr) == 0\n\n flip_and_assert(source_node_instance, 'outbound')\n flip_and_assert(target_node_instance, 'inbound')\n","sub_path":"tests/storage/test_model_storage.py","file_name":"test_model_storage.py","file_ext":"py","file_size_in_byte":9615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"201547931","text":"import os\nfrom math import ceil\nimport pandas as pd\nimport cv2\nimport tensorflow as tf\n\nfirst_time = False\nos.environ['CUDA_VISIBLE_DEVICES'] = '7'\nimage_size = 256\ndata_dir = '/data/FGVC/cub200-2011/CUB_200_2011'\n\n\ndef gen(df, parts, name, batch_size):\n batch = 0\n writer = []\n for i in range(parts):\n writer.append(tf.python_io.TFRecordWriter(name + '_' + str(i) + '.tfrecord'))\n iterator = fetch_data(df, batch_size)\n img, label = 
iterator.get_next()\n sess = tf.Session()\n while True:\n try:\n im, ex = sess.run([img, label])\n except tf.errors.OutOfRangeError:\n break\n sel = (batch * batch_size) // ceil(len(df) / parts)\n if batch % 500 == 0:\n print('batch ', batch, 'len ', len(ex), 'ex ', ex, 'sel ', sel)\n w = writer[sel]\n for i in range(len(ex)):\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n \"img\": tf.train.Feature(bytes_list=tf.train.BytesList(value=[im[i]])),\n \"ex\": tf.train.Feature(int64_list=tf.train.Int64List(value=[ex[i]]))\n }))\n w.write(example.SerializeToString())\n batch += 1\n for i in range(parts):\n writer[i].close()\n\n\ndef fetch_data(df, batch_size, num_epochs=1):\n def _features_parse_function(filename, bbox):\n image = tf.read_file(filename)\n image = tf.image.decode_jpeg(image, channels=3)\n image = tf.image.convert_image_dtype(image, tf.float32)\n # image = tf.slice(image, [bbox[1], bbox[0], 0], [bbox[3], bbox[2], 3])\n image = tf.image.resize_images(image, (image_size, image_size), preserve_aspect_ratio=True)\n image = tf.image.resize_image_with_crop_or_pad(image, image_size, image_size)\n image = tf.image.convert_image_dtype(image, tf.uint8)\n image = tf.image.encode_png(image)\n return image\n\n def _labels_parse_function(expression):\n return expression\n\n file_path = tf.convert_to_tensor(df.data_path.values, tf.string)\n dirs = tf.convert_to_tensor(os.path.join(data_dir, 'images/'), tf.string)\n file_path = tf.string_join([dirs, file_path])\n bbox = tf.convert_to_tensor(df[['x0', 'y0', 'w', 'l']].values, tf.int32)\n cls = tf.convert_to_tensor(df.cls.values, tf.int32)\n images = tf.data.Dataset.from_tensor_slices((file_path, bbox))\n labels = tf.data.Dataset.from_tensor_slices(cls)\n features = images.map(_features_parse_function, num_parallel_calls=6)\n labels = labels.map(_labels_parse_function, num_parallel_calls=6)\n dataset = tf.data.Dataset.zip((features, labels))\n dataset = dataset.batch(batch_size).repeat(num_epochs)\n # dataset = dataset.shuffle(1000)\n dataset = dataset.prefetch(buffer_size=1)\n iterator = dataset.make_one_shot_iterator()\n return iterator\n\n\ndef aug(df, times):\n return df.sample(frac=times, replace=True)\n\n\nif __name__ == '__main__':\n data_df = pd.read_table(\n os.path.join(data_dir, 'images.txt'), sep=' ', header=None, names=['data_path'], index_col=0)\n bbox_df = pd.read_table(\n os.path.join(data_dir, 'bounding_boxes.txt'), sep=' ', header=None, names=['x0', 'y0', 'w', 'l'],\n index_col=0).astype(int)\n bbox_df[['w', 'l']] = bbox_df[['w', 'l']] - 1\n tt_df = pd.read_table(\n os.path.join(data_dir, 'train_test_split.txt'), sep=' ', header=None, names=['tt'], index_col=0)\n cls_df = pd.read_table(\n os.path.join(data_dir, 'image_class_labels.txt'), sep=' ', header=None, names=['cls'], index_col=0) - 1\n df = pd.concat([data_df, bbox_df, tt_df, cls_df], axis=1)\n # df = df.sample(frac=1).reset_index(drop=True)\n # num = 387\n # emit items that exceed the image\n if first_time:\n for num in range(len(df)):\n img = cv2.imread(os.path.join(data_dir, 'images', data_df['data_path'].iloc[num]))\n img = img[bbox_df['y0'].iloc[num]:bbox_df['l'].iloc[num] +\n bbox_df['y0'].iloc[num], bbox_df['x0'].iloc[num]:bbox_df['x0'].iloc[num] + bbox_df['w'].iloc[num]]\n cv2.imwrite(os.path.join(data_dir, 'images', data_df['data_path'].iloc[num]), img)\n for n, g in df.groupby('tt'):\n # test\n if n == 0:\n print('test len', len(g))\n gen(g, 1, 'test', 1)\n # train\n elif n == 1:\n g = g.groupby('cls').apply(lambda x: aug(x, 5))\n 
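# A quick, self-contained check (not part of the original file) of the
# shard-selection arithmetic used in gen() above, i.e.
# sel = (batch * batch_size) // ceil(len(df) / parts). Numbers are invented:
from math import ceil
parts, batch_size, n_examples = 8, 1, 100
shard_size = ceil(n_examples / parts)          # 13 examples per shard
for batch in (0, 12, 13, 99):
    print(batch, '->', (batch * batch_size) // shard_size)
# prints 0 -> 0, 12 -> 0, 13 -> 1, 99 -> 7, so the shards fill in order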
print('train len', len(g))\n gen(g, 8, 'train', 1)\n","sub_path":"gen_cub200_c/gen_tfrecord.py","file_name":"gen_tfrecord.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"113168751","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/pyams_utils/adapter.py\n# Compiled at: 2020-02-18 19:11:13\n# Size of source mod 2**32: 7493 bytes\n__doc__ = 'Adapters management package\\n\\nThis package provides a small set of standard base adapters for *context*, *context* and *request*,\\nand *context* and *request* and *view*.\\n\\nSee :ref:`zca` to see how PyAMS can help components management.\\n'\nimport logging\nfrom inspect import isclass\nimport venusian\nfrom zope.annotation.interfaces import IAnnotations\nfrom zope.interface import alsoProvides, classImplements, implementedBy\nfrom zope.lifecycleevent import ObjectCreatedEvent\nfrom zope.location import locate as zope_locate\nfrom pyams_utils.factory import get_object_factory, is_interface\nfrom pyams_utils.registry import get_current_registry\n__docformat__ = 'restructuredtext'\nLOGGER = logging.getLogger('PyAMS (utils)')\n\nclass ContextAdapter:\n \"\"\"ContextAdapter\"\"\"\n\n def __init__(self, context):\n self.context = context\n\n\nclass ContextRequestAdapter:\n \"\"\"ContextRequestAdapter\"\"\"\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n\n\nclass ContextRequestViewAdapter:\n \"\"\"ContextRequestViewAdapter\"\"\"\n\n def __init__(self, context, request, view):\n self.context = context\n self.request = request\n self.view = view\n\n\nclass NullAdapter:\n \"\"\"NullAdapter\"\"\"\n\n def __new__(cls, *args, **kwargs):\n pass\n\n\nclass adapter_config:\n \"\"\"adapter_config\"\"\"\n venusian = venusian\n\n def __init__(self, **settings):\n if 'for_' in settings and settings.get('required') is None:\n settings['required'] = settings.pop('for_')\n self.__dict__.update(settings)\n\n def __call__(self, wrapped):\n settings = self.__dict__.copy()\n depth = settings.pop('_depth', 0)\n\n def callback(context, name, obj):\n required = settings.get('required') or settings.get('adapts') or settings.get('context')\n if required is None:\n required = getattr(obj, '__component_adapts__', None)\n if required is None:\n raise TypeError(\"No for argument was provided for %r and can't determine what the factory adapts.\" % obj)\n if not isinstance(required, tuple):\n required = (\n required,)\n provided = settings.get('provided') or settings.get('provides')\n if provided is None:\n intfs = list(implementedBy(obj))\n if len(intfs) == 1:\n provided = intfs[0]\n if provided is None:\n raise TypeError(\"Missing 'provided' argument\")\n if isclass(obj) and not provided.implementedBy(obj):\n classImplements(obj, provided)\n LOGGER.debug('Registering adapter %s for %s providing %s', str(obj), str(required), str(provided))\n registry = settings.get('registry')\n if registry is None:\n config = context.config.with_package(info.module)\n registry = config.registry\n registry.registerAdapter(obj, required, provided, settings.get('name', ''))\n\n info = self.venusian.attach(wrapped, callback, category='pyams_utils', depth=depth + 1)\n if info.scope == 'class' and settings.get('attr') is None:\n settings['attr'] = wrapped.__name__\n settings['_info'] = 
info.codeinfo\n return wrapped\n\n\ndef get_annotation_adapter(context, key, factory=None, markers=None, notify=True, locate=True, parent=None, name=None, callback=None, **kwargs):\n \"\"\"Get an adapter via object's annotations, creating it if not existent\n\n :param object context: context object which should be adapted\n :param str key: annotations key to look for\n :param factory: if annotations key is not found, this is the factory which will be used to\n create a new object; factory can be a class or callable object, or an interface for which\n a factory has been registered; if factory is None and is requested object can't be found,\n None is returned\n :param markers: if not None, list of marker interfaces which created adapter should provide\n :param bool=True notify: if 'False', no notification event will be sent on object creation\n :param bool=True locate: if 'False', the new object is not attached to any parent\n :param object=None parent: parent to which new object is attached; if None, object is\n attached to context\n :param str=None name: if locate is not False, this is the name with which the new object is\n attached to it's parent\n :param callback: if not None, callback function which will be called after object creation\n \"\"\"\n annotations = IAnnotations(context, None)\n if annotations is None:\n return\n adapter = annotations.get(key)\n if adapter is None:\n if 'default' in kwargs:\n return kwargs['default']\n if factory is None:\n return\n if is_interface(factory):\n factory = get_object_factory(factory, registry=kwargs.get('registry'))\n assert factory is not None, 'Missing object factory'\n adapter = annotations[key] = factory()\n if markers:\n if not isinstance(markers, (list, tuple, set)):\n markers = {\n markers}\n for marker in markers:\n alsoProvides(adapter, marker)\n\n if notify:\n get_current_registry().notify(ObjectCreatedEvent(adapter))\n if locate:\n zope_locate(adapter, context if parent is None else parent, name)\n if callback:\n callback(adapter)\n return adapter\n\n\ndef get_adapter_weight(item):\n \"\"\"Get adapters weight sort key\"\"\"\n name, adapter = item\n try:\n return (int(adapter.weight), name)\n except (TypeError, AttributeError):\n return (\n 0, name)","sub_path":"pycfiles/pyamtrack-0.1.4-py3-none-manylinux1_x86_64/adapter.cpython-35.py","file_name":"adapter.cpython-35.py","file_ext":"py","file_size_in_byte":6122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"85052562","text":"import asyncio\nimport traceback\n\nfrom utils.logger import Logger\n\nfrom database.wows_db import Wows_database\n\n\nclass Database_manager:\n\t\"\"\"\n\tDatabase manager. 
Responsible for running database.\n\tChecks for new data and stores them if any are updated.\n\t\"\"\"\n\t__slots__ = ('logger', 'wowsdb')\n\n\tdef __init__(self, db_path):\n\t\tself.logger = Logger(self.__class__.__name__)\n\t\tself.wowsdb = Wows_database(db_path)\n\n\n\tasync def start(self):\n\t\t\"\"\"\n\t\tStart updating database.\n\t\t\"\"\"\n\t\twhile 1:\n\t\t\t# update wows\n\n\t\t\ttry:\n\t\t\t\tself.logger.info('Starting wows database update.')\n\t\t\t\tawait self.wowsdb.update()\n\t\t\t\tself.logger.info('Finished wows database update.')\n\t\t\texcept Exception as e:\n\t\t\t\tself.logger.critical(traceback.format_exc())\n\t\t\t\t\n\t\t\tawait asyncio.sleep(60*3)\n","sub_path":"database/db_manager.py","file_name":"db_manager.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"521892508","text":"\"\"\"\nGroup Project: Internet Chat Relay Application\nBy Chinmay Tawde and Akansha Jain\n\"\"\"\n\nfrom socket import *\nfrom cryptography.fernet import Fernet\nimport threading, atexit, time\n\nclass Client():\n \n def __init__(self, name, conn, addr, room):\n self._clientName = name\n self._clientSocket = conn\n self._clientAddress = addr\n self._currentRoom = room\n self._currentRoom.add_client(self)\n self._currentRoom.send_room_status(self)\n self._exists = True\n \n receiving_thread = threading.Thread(target = self._receive_from_client)\n receiving_thread.start()\n \n def set_room(self, room):\n if not self._currentRoom == None:\n self._currentRoom.remove_client(self)\n \n if type(room) == int:\n room = eval(\"eval('self._currentRoom.get_server().ROOM{}')\".format(room))\n self._currentRoom = room\n self._currentRoom.add_client(self)\n \n def get_name(self):\n return self._clientName\n\n def send_to_client(self, data: str):\n try:\n #Encode message first\n msg = data.encode(\"UTF-8\")\n #then encrypt before sending\n msg = f.encrypt(msg)\n print(msg)\n self._clientSocket.send(msg)\n except ConnectionResetError:\n # If error occurs while sending message to client disconnect the client\n self._exists = False\n self._currentRoom.remove_client(self)\n self._currentRoom.get_server().remove_client(self)\n\n def _receive_from_client(self):\n while self._exists:\n try:\n packet = f.decrypt(self._clientSocket.recv(1024)).decode(\"UTF-8\")\n self._parse_packet(packet)\n except ConnectionResetError:\n # If error occurs while receiving message from client disconnect the client\n self._exists = False\n self._currentRoom.remove_client(self)\n self._currentRoom.get_server().remove_client(self)\n\n def _parse_packet(self, p: str):\n # Packet Types\n # Message -> MessageHeader;MessageContent\n # Private Message -> MessageHeader;PrivateMessageToken;TargetName;MessageContent\n # Room Details -> RoomDetailsHeader;RoomNum Total number of rooms visible on screen for all clients\n # Room Change -> RoomHeader;RoomNum\n # Create Room -> CreateHeader;RoomNum\n # List Room Members -> ListMembersHeader;RoomNum\n # Change Nickname -> nameHeader;NickName\n # Disconnect -> DisconnectHeader;\n # Error -> ErrorHeader;ErrorMessage\n # Update -> UpdateHeader;UpdateMessage\n parsed = p.split(';')\n command = parsed[0]\n if command == '_message':\n if parsed.__len__() > 2:\n if len(parsed) == 4:\n if (parsed[1] == \"pvt_msg\"):\n targetClientName = parsed[2]\n room = self._currentRoom\n found_target_in_room = False\n for room_member in room._occupants:\n if room_member.get_name() == targetClientName:\n found_target_in_room 
= True\n msg = \"_message;[\" + parsed[1] + \"]\" + self._clientName + ': ' + parsed[3]\n room_member.send_to_client(msg)\n\n if not found_target_in_room:\n err_msg = \"error;Specified Client Not Found in Room\"\n self.send_to_client(err_msg)\n\n else:\n err_msg = \"error;Invalid message format, for pvt message Type \"\"pvt_msg;ClientName;SecretMessage\"\"\"\n self.send_to_client(err_msg)\n else :\n self._currentRoom.send_message(self._clientName, ';'.join(parsed[1:]).rstrip())\n elif command == 'create_room':\n self.notify_room_creation(self, p)\n elif command == 'room':\n self.set_room(int(parsed[1]))\n elif command == 'list_members':\n members_list = \"list_members;\" + self._currentRoom._get_occupants()\n self.send_to_client(members_list)\n elif command == 'disconnect':\n if self._exists:\n self._exists = False\n self._clientSocket.close()\n self._currentRoom.remove_client(self)\n self._currentRoom.get_server().remove_client(self)\n elif command == 'name':\n old_name = self._clientName\n self._clientName = ';'.join(parsed[1:]).rstrip()\n if self._currentRoom.get_server().nickNames.count(self._clientName) > 0:\n err_msg = \"error;The Nickname is already taken, Enter a unique nickname\"\n self.send_to_client(err_msg)\n else:\n self._currentRoom.get_server().nickNames.remove(old_name)\n self._currentRoom.get_server().nickNames.append(self._clientName)\n self._currentRoom.send_update(\"update;--{} has changed their name to {}--\".format(old_name, self._clientName))\n elif command == \"update\":\n self._currentRoom.send_update(';'.join(parsed[1:]).rstrip())\n elif command == '':\n if self._exists:\n self._exists = False\n self._clientSocket.close()\n self._currentRoom.remove_client(self)\n self._currentRoom.get_server().remove_client(self)\n\n def _send_confirmation(self, c):\n self.send_to_client(c)\n\n def notify_room_creation(self, client, message):\n current_room = client._currentRoom\n client_server = current_room._server\n client = self._clientName\n client_server._roomsOnScreen += 1\n for rooms in (client_server.rooms):\n for room_member in rooms._occupants:\n if not room_member.get_name() == client:\n room_member.send_to_client(message)\n\n\nclass Room:\n \n def __init__(self, name, server):\n self._roomName = name\n self._server = server\n self._occupants = []\n\n def get_server(self):\n return self._server \n\n def get_name(self):\n return self._roomName \n\n def add_client(self, new_client):\n self._occupants.append(new_client)\n msg = \"update;\" + new_client._clientName + \" has joined the room\"\n time.sleep(1)\n self.send_update(msg)\n\n def send_room_status(self, new_client):\n msg = \"room_details;\" + str(new_client._currentRoom._server._roomsOnScreen)\n time.sleep(1)\n new_client.send_to_client(msg)\n\n def remove_client(self, client):\n if client in self._occupants:\n self._occupants.remove(client)\n msg = \"update;\" + client._clientName + \" has disconnected from the room\"\n self.send_update(msg)\n\n def send_message(self, sender, _message):\n packet = \"_message;\" + sender + ': ' + _message\n for room_member in self._occupants:\n if not room_member.get_name() == sender:\n room_member.send_to_client(packet)\n\n def _get_occupants(self):\n if not self._occupants:\n print(\"Empty\")\n return\n member_list_string = ''\n for room_member in self._occupants:\n member_list_string += room_member.get_name() + ', '\n return member_list_string[0:-2]\n \n def send_update(self, u):\n for room_member in self._occupants:\n room_member.send_to_client(u)\n\n\nclass MultiChatServer:\n 
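# The wire format handled by Client._parse_packet() above is a flat,
# semicolon-separated string: '<command>;<arg>;...'. A tiny standalone
# illustration of how such a packet splits (names and payload invented):
packet = '_message;pvt_msg;Client2;meet at noon'
parsed = packet.split(';')
print(parsed[0])             # command -> '_message'
print(parsed[1])             # token   -> 'pvt_msg'
print(parsed[2])             # target  -> 'Client2'
print(';'.join(parsed[3:]))  # payload -> 'meet at noon' (may itself contain ';')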
\n    def __init__(self, maxClients, serverPort):\n        self._maxClients = maxClients\n        self._clients = []\n        self._roomsOnScreen = 0\n        self.rooms = []\n        self.nickNames = []\n\n        self.ROOM0 = Room('0', self)\n        self.ROOM1 = Room('1', self)\n        self.ROOM2 = Room('2', self)\n        self.ROOM3 = Room('3', self)\n        self.ROOM4 = Room('4', self)\n\n        self._serverSocket = socket(AF_INET, SOCK_STREAM)\n        self._serverPort = serverPort\n\n    def add_all_rooms_to_array(self):\n        self.rooms.append(self.ROOM0)\n        self.rooms.append(self.ROOM1)\n        self.rooms.append(self.ROOM2)\n        self.rooms.append(self.ROOM3)\n        self.rooms.append(self.ROOM4)\n\n    def print_room_clients(self):\n        for each_room in [self.ROOM0, self.ROOM1, self.ROOM2, self.ROOM3, self.ROOM4]:\n            print(each_room.get_name(), each_room._occupants)\n    \n    def start(self):\n        self._serverSocket.bind(('',self._serverPort))\n        self._serverSocket.listen(16)\n        print(\"Server is listening on port\", self._serverPort)\n        listening_thread = threading.Thread(target = self._accept_connections)\n        listening_thread.start()\n\n    def end(self):\n        try:\n            self._serverSocket.close()\n        except:\n            pass\n\n    def remove_client(self, client):\n        if client in self._clients:\n            if self.nickNames.count(client.get_name()) > 0: self.nickNames.remove(client.get_name())\n            self._clients.remove(client)\n            del client\n\n    def _is_server_full(self):\n        return self._maxClients == len(self._clients)\n\n    def _accept_connections(self):\n        while True:\n            if not self._is_server_full():\n                connection_socket, addr = self._serverSocket.accept()\n                new_client = Client(\"Client{}\".format(len(self._clients)+1), connection_socket, addr, self.ROOM0)\n                self._clients.append(new_client)\n                self.nickNames.append(new_client.get_name())\n                time.sleep(2)\n                if len(self._clients) > 0: self._clients[-1].send_to_client('1;{}'.format(new_client.get_name()))\n            else:\n                connection_socket, addr = self._serverSocket.accept()\n                err_msg = \"error;Server is full\".encode(\"UTF-8\")\n                err_msg = f.encrypt(err_msg)\n                connection_socket.send(err_msg)\n                connection_socket.close()\n\n    def load_key(self):\n        return open(\"secret.key\", \"rb\").read()\n\n\nif __name__ == \"__main__\":\n    maxClients = 10\n    serverPort = 5000\n    server = MultiChatServer(maxClients, serverPort)\n    server.add_all_rooms_to_array()\n    key = server.load_key()\n    global f\n    f = Fernet(key)\n    server.start()\n    atexit.register(server.end)\n","sub_path":"Chat_Server.py","file_name":"Chat_Server.py","file_ext":"py","file_size_in_byte":10422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"572138846","text":"print('begin')\nimport numpy as np\nimport matplotlib.pyplot as plt \n\nfrom sklearn.ensemble.forest import RandomForestRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nroad1 = \"E:/tianchi_koubei/mid_dataset/create_feature_withpre.txt\"\nroad2 = \"E:/tianchi_koubei/mid_dataset/count_shop_pay.txt\"\nway1 = 'r'\n#road3 = \"d:/tianchi_koubei/result/union_rf_mean_median_result.csv\"\n#way2 = 'w'\ndef create_xday(xday):\n    for i in range(1,504):\n        xday.append(i)\n    return xday\nxday_list = []\nxday = create_xday(xday_list)\ndef create_weekend(xweekend):\n    j = 3\n    for i in range(1,504):\n        if j == 6 or j ==7:\n            xweekend.append(1)\n            if j == 7:\n                j = 1\n            else:\n                j += 1\n        else:\n            xweekend.append(0)\n            j += 1\n    return xweekend\nxweekend_list = []\nxweekend = create_weekend(xweekend_list)\n\nfr1 = open(road1,way1)\nfr2 = open(road2,way1)\n#fw = open(road3,way2)\n\ni = 0\n#读取特征\ndef readfile_oneshop_X(fr,xday,xweekend):\n    X = []\n    
X.append(xday)\n    X.append(xweekend)\n    for r in range(0,13):\n        line = fr.readline()\n        re = line.strip('\\n').split(',')\n        data_str = map(float,re[1:])\n        data_float = []\n        for s in data_str:\n            data_float.append(s)\n        X.append((data_float))\n    return np.array(X).T\n#读取目标值\ndef readfile_oneshop_Y(fr):\n    line = fr.readline()\n    re = line.strip('\\n').split(',')\n    data_str = map(float,re[1:])\n    data_float = []\n    for s in data_str:\n        data_float.append(s)\n    return data_float\ndef Evaluation(pred,test):\n    shop_err = 0\n    for i in range(0,len(pred)):\n        for p,t in zip(pred[i],test[i]):\n            if (p+t) == 0.0:\n                shop_err += 0.0\n            else:\n                shop_err += abs((p-t)/(p+t))\n    total_err = shop_err/(len(pred)*len(pred[0]))\n    return total_err\n#shuchu\ndef output(fw,shopid,y_pre):\n    y_pre_str = []\n    y_pre_int = map(int,y_pre)\n    y_pre_tostr = map(str,y_pre_int)\n    for i in y_pre_tostr:\n        y_pre_str.append(i)\n    fw.write(str(shopid)+','+','.join(y_pre_str)+'\\n')\n################## \n\ndef mean_cp_predict(x_test,mean,cp,n):\n    y_pre_cp = []\n    for i in range(0,len(x_test)):\n        ypre = mean+n*cp\n        mean = ypre\n        y_pre_cp.append(ypre)\n    return y_pre_cp\ndef mean_predict(x_test,mean,n):\n    y_pre_m = []\n    for i in range(0,len(x_test)):\n        y_pre_m.append(n*mean)\n    return y_pre_m\n\nerr_shop = []\nY_test = []\nrf_pre = []\nm2_pre = []\nme2_pre = []\nunion_pre = []\nwhile i<50:\n    # readfile\n    X = []\n    X = readfile_oneshop_X(fr1,xday,xweekend)[-350:]\n    Y = readfile_oneshop_Y(fr2)[-350:]\n    x_train = X[:-14]\n    y_train = Y[:-14]\n    x_test = X[-14:]\n    y_test = Y[-14:]\n\n\n    ###\n    params_rf = {'n_estimators':800, 'min_samples_split': 2,'warm_start':True,'n_jobs':4}\n    rf = RandomForestRegressor(**params_rf)\n    rf.fit(x_train,y_train)\n    y_pre_rf = rf.predict(x_test)\n    ###\n    mean1 = np.mean(Y[-7:])\n    mean2 = np.mean(Y[-14:])\n    mean3 = np.mean(Y[-21:])\n    mean4 = np.mean(Y[-28:])\n    median1 = np.median(Y[-7:])\n    median2 = np.median(Y[-14:])\n    median3 = np.median(Y[-21:])\n    median4 = np.median(Y[-28:])\n    y_pre_m1 = mean_predict(x_test,mean1,1)\n    y_pre_m2 = mean_predict(x_test,mean2,1)\n    y_pre_m3 = mean_predict(x_test,mean3,1)\n    y_pre_m4 = mean_predict(x_test,mean4,1)\n    y_pre_me1 = mean_predict(x_test,median1,1)\n    y_pre_me2 = mean_predict(x_test,median2,1)\n    y_pre_me3 = mean_predict(x_test,median3,1)\n    y_pre_me4 = mean_predict(x_test,median4,1)\n    ###\n    union = 0.6*np.array(y_pre_rf)+0.08*np.array(y_pre_m2)+0.04*np.array(y_pre_m1)+0.04*np.array(y_pre_m3)+0.04*np.array(y_pre_m4)+0.08*np.array(y_pre_me2)+0.04*np.array(y_pre_me1)+0.04*np.array(y_pre_me3)+0.04*np.array(y_pre_me4)\n    Y_test.append(y_test)\n    rf_pre.append(y_pre_rf)\n    m2_pre.append(y_pre_m2)\n    me2_pre.append(y_pre_me2)\n    union_pre.append(union)\n    #output(fw,i+1,union)\n    print(i)\n    i += 1\n\nfr1.close()\nfr2.close()\n#fw.close()\nprint(Evaluation(rf_pre,Y_test))\nprint(Evaluation(m2_pre,Y_test))\nprint(Evaluation(me2_pre,Y_test))\nprint(Evaluation(union_pre,Y_test))\n","sub_path":"train_pre/train_rf_mean.py","file_name":"train_rf_mean.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"101141524","text":"from collections import defaultdict, deque\nfrom typing import List\n\n\nclass Solution:\n    \"\"\"\n    BFS\n    Use dict to store adjacent nodes\n    \"\"\"\n\n    def ladderLength(self, begin: str, end: str, words: List[str]) -> int:\n        if not begin or not end or not words or end not in words:\n            return 0\n\n        # word length\n        l = len(begin)\n        # a map of all combinations of words with missing letters\n        # mapped to 
all words in the list that match that pattern.\n        # E.g. hot -> {'*ot': ['hot'], 'h*t': ['hot'], 'ho*': ['hot']}\n        dic = defaultdict(list)\n        for word in words:\n            for i in range(l):\n                dic[word[:i] + '*' + word[i + 1:]].append(word)\n\n        # BFS Template\n        queue = deque([(begin, 1)])\n        seen = {begin}\n\n        while queue:\n            word, depth = queue.popleft()\n            for i in range(l):\n                tmp = word[:i] + '*' + word[i + 1:]\n                for neighbor in dic[tmp]:\n                    if neighbor == end:\n                        return depth + 1\n                    if neighbor not in seen:\n                        seen.add(neighbor)\n                        queue.append((neighbor, depth + 1))\n        \n        return 0\n","sub_path":"0127_Word_Ladder.py","file_name":"0127_Word_Ladder.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"493563433","text":"'''\n    An example module for distutil test\n'''\n\n\n__all__ = ['say_hello', 'add', 'fibo_generator']\n__version__ = '1.0'\n__author__ = 'zhaoyafei'\n\ndef say_hello():\n    print(\"Hello!\")\n\ndef add(x, y):\n    return x+y\n\ndef fibo_generator(n):\n    a = 0\n    b = 1\n\n    for i in range(n):\n        t = b\n        b = b + a\n        yield a\n\n        a = t\n\nif __name__=='__main__':\n    print('Call say_hello():')\n    say_hello()\n    a = 2\n    b = 3\n    print('add({},{})={}'.format(a, b, add(a,b)))\n\n    n = 10\n    fibo_gen = fibo_generator(n)\n    print('generating a fibonacci list with length of {}:'.format(n))\n    for i in fibo_gen:\n        print('\\t{}'.format(i))","sub_path":"dist_test/simple_example/foo.py","file_name":"foo.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"549777025","text":"import TccLib\n\nimport numpy as np\n\n# Define boundary conditions and parameters\ntotal_time = 0.4\nTccLib.Particle.frame_skips = 1\nforce = \"added_mass\"\nparticle_density = 3e4\nparticle_diameter = 1e-3\nvel_const = 2.0\nvel_x_0 = 0.\nvel_y_0 = 0.\ndu = 1.\n\n# Set liquid parameters or declare liquid\n# density = 1e3\n# viscosity = 0.89e-3\nliquid = \"water\"\n\n# Import gmsh created mesh, and set velocity field\nmesh = TccLib.Mesh(\"Forces\", liquid=liquid)\nvel_x = np.zeros(mesh.size) + vel_const\nvel_y = np.zeros(mesh.size)\nacc = np.zeros(mesh.size) + du\n\n# Show mesh geometry\n# mesh.show_geometry(names=True)\n\n# Define Particles\nx_0 = 0.5 * mesh.length_x\ny_0 = 0.8 * mesh.length_y\nparticle_a = TccLib.Particle(\"A\", (x_0, y_0), density=particle_density, diameter=particle_diameter,\n                             velocity=(vel_x_0, vel_y_0))\nmesh.add_particle(list_of_particles=[particle_a])\n\n# Define analytic comparison expression\nm = particle_a.mass\nc = 1/2. 
* mesh.density * particle_a.volume\nif c/m > 1:\n    print(\"Particle conditions might cause an unexpected behavior!\")\nanalytic_expression = lambda t: (du*c*t**2)/(2*(c + m)) + vel_x_0*t + x_0\n\n# Define dt based on convergence limit\ndt = min(particle_a.max_dt(mesh.viscosity), 1e-4)/2**6.\n\n# Define x vector of positions\nx_vector = np.arange(0, total_time, dt)\n\n# Move Particles\nfor time in x_vector:\n    print(\"\\rMoving particles {0:.2f}%\".format(100 * time / total_time), end=\"\")\n    TccLib.move_particles(mesh, velocity_x=vel_x, velocity_y=vel_y, dt=dt, single_force=force, acceleration_x=acc)\n\nprint(\"\\rFinished moving particles\")\n\n# Find values of property in the mesh at a determined set position for every value in the vector of x\nx_position = mesh.length_x*0.5\ny_vector = np.array(particle_a.position_history)[:-1, 0]\n\n# Show comparison graph\nTccLib.util.show_comparison(x_vector, analytic_expression, y_vector, numeric_label=\"Solução Numérica\",\n                            analytic_label=\"Solução Analítica\", title=\"Força de Massa Virtual\",\n                            x_label=\"Tempo(s)\", y_label=\"Posição no Eixo X(m)\",\n                            save_file_as=\"{0}_{1}_validation\".format(mesh.name, force))\n","sub_path":"test_setups/Run_validate_added_mass.py","file_name":"Run_validate_added_mass.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"593188618","text":"import numpy as np\nimport pygame as pg\nfrom numba import njit\n\ndef main():\n    running, pause, fps_lock, score, maxscore, fullscreen = 1, 1, 59, 0, 0, 0\n    timer, autores, checker, count, enhealth = 0, 1, 2, 0, 0\n    renders = [' R: Standard. ', ' R: Doubled pixels. ', ' R: Checkerboard. ', ' R: Radial window. ', ' R: Squared window. ']\n    endmsg = ' Numba compiling, please wait... '\n    rr, gg, bb = np.linspace(0,0.8, 25*14), np.linspace(0.5,.1, 25*14), np.linspace(1,0.1, 25*14)\n    drawing(rr, gg, bb, 14, 25, 1, endmsg, 0, 10, 10, np.zeros([3,3]), score, fullscreen, False)\n    pg.time.wait(200)\n    \n    clock = pg.time.Clock()\n    pg.mouse.set_visible(False)\n    pg.mixer.init()\n    ambient, runfx, shotfx, killfx, respawnfx, successfx, failfx, fr, fg, fb = sfx()\n    ambient.set_volume(0.5)\n    ambient.play(-1)\n    endmsg = \" Numba may need more compiling...\"\n    \n    (mr, mg, mb, maph, mapr, exitx, exity, mapt, maps, posx, posy, posz, size, rot, rot_v, minimap,\n     width, height, mod, rr, gg, bb, count, enx, eny, seenx, seeny, lock, run, shoot, sx, sy, sz, sstart,\n     et, health, sdir, sdirz, sdir2, sdirz2, shoot2, sx2, sy2, sz2, sstart2, won, respawn, move) = new_game(fb, fg, fr, endmsg, score) \n\n    while running:\n        ticks = pg.time.get_ticks()/100000\n        if np.random.uniform() > 0.99:\n            pg.display.set_caption(endmsg + '  Options: P or C - Pause, F - Fulscreen, Q/W - FPS lock/Res, R - Render type, T - AutoRes ')\n        for event in pg.event.get():\n            if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):\n                if not pause:\n                    pause = 1\n                    respawnfx.play()\n                    endmsg = \" Game paused. Current score: \" + str(score) + ' '\n                else:\n                    endmsg = \" Thanks for playing! Max score: \" + str(maxscore) + ' '\n                    killfx.play()\n                    running = False\n            if sstart == None and(event.type == pg.MOUSEBUTTONDOWN or event.type == pg.MOUSEBUTTONUP):\n                shoot = 1\n            if event.type == pg.KEYDOWN:\n                if event.key == ord('p') or event.key == ord('c'): # pause\n                    if not pause:\n                        pause = 1\n                        respawnfx.play()\n                        endmsg = \" Game paused. 
Current score: \" + str(score)\n                    elif (int(posx) != exitx or int(posy) != exity):\n                        if health == 0:\n                            health = 5\n                            animate(width, height, mod, move, posx, posy, .01, rot, rot_v, mr, mg, mb, lx, ly, lz,\n                                    mplayer, exitx, exity, mapr, mapt, maps, rr, gg, bb, enx, eny, sx, sy, sx2, sy2,\n                                    size, checker, count,fb, fg, fr, pause, endmsg, won, health, minimap, score, .5/61, fps)\n                        pause = 0\n                        respawnfx.play()\n                if pause and event.key == ord('n'): # new game\n                    pause = 0\n                    (mr, mg, mb, maph, mapr, exitx, exity, mapt, maps, posx, posy, posz, size, rot, rot_v, minimap,\n                     width, height, mod, rr, gg, bb, count, enx, eny, seenx, seeny, lock, run, shoot, sx, sy, sz, sstart,\n                     et, health, sdir, sdirz, sdir2, sdirz2, shoot2, sx2, sy2, sz2, sstart2, won, respawn, move) = new_game(fb, fg, fr, endmsg, score)\n                    \n                    respawnfx.play()\n                    \n                if event.key == ord('t'): # toggle auto resolution\n                    autores = not(autores)\n                if event.key == ord('r'): # toggle rendering method\n                    checker += 1\n                    if checker > 4:\n                        checker = 0\n                if event.key == ord('f'): # toggle fullscreen\n                    pg.display.toggle_fullscreen()\n                    fullscreen = not(fullscreen)\n                if event.key == ord('q'): # change resolution or fps\n                    if autores:\n                        fps_lock = max(19, fps_lock - 10)\n                    else:\n                        if width > 100 :\n                            width, height, mod, rr, gg, bb, count = adjust_resol(int(width*0.8))\n                if event.key == ord('e'): # change resolution or fps\n                    if autores:\n                        fps_lock = min(120, fps_lock + 10)\n                    else:\n                        width, height, mod, rr, gg, bb, count = adjust_resol(int(width*1.1))\n        \n        if pause:\n            clock.tick(30)\n            drawing(rr*.7, gg*.7, bb*.7, height, width, pause, endmsg, won, health, enhealth, minimap, score, fullscreen)\n\n        else:\n            mplayer = np.zeros([size, size])\n            (enx, eny, mplayer, et, shoot, sx, sy, sz, sdir, sdirz, shoot2,\n             sx2, sy2, sz2, sdir2, sdirz2, seenx, seeny, lock, enhealth, health) = agents(enx, eny, maph, posx, posy, rot, rot_v, et, shoot, sx, sy, sz, sdir,\n                                                                                         sdirz, shoot2, sx2, sy2, sz2, sdir2, sdirz2, mplayer,\n                                                                                         seenx, seeny, lock, size, enhealth, health, score)\n\n            lx, ly, lz = size/2 + 1500*np.cos(ticks), size/2 + 1000*np.sin(ticks), 1000\n            rwr = checker\n            if shoot2:\n                rwr = 3\n            rr, gg, bb = super_fast(width, height, mod, move, posx, posy, posz, rot, rot_v, mr, mg, mb, lx, ly, lz,\n                                    mplayer, exitx, exity, mapr, mapt, maps, rr, gg, bb, enx, eny, sx, sy, sx2, sy2,\n                                    size, rwr, count, fb, fg, fr, sz, sz2)\n            count += 1\n            if enhealth != 0 and lock:\n                endmsg = 'Pytracing Maze - Watch out! Score:'+str(score)+' Res: '+ str(width) +'x'+str(height)+' FPS: '+str(int(clock.get_fps()))+renders[checker]\n            else:\n                endmsg = 'Pytracing Maze - Find the exit! Score:'+str(score)+' Res: '+ str(width) +'x'+str(height)+' FPS: '+str(int(clock.get_fps()))+renders[checker]\n\n            minimap[int(posy)][int(posx)] = (50, 50, 255)\n            drawing(rr, gg, bb, height, width, pause, endmsg, won, health, enhealth, minimap, score, fullscreen)\n            minimap[int(posy)][int(posx)] = (100, 100, 0)\n            \n            fps = int(1000/(pg.time.get_ticks() - ticks*100000 +1e-16))\n            if autores and count > 10: #auto adjust render resolution\n                if fps < fps_lock - 10 and width > 100:\n                    width, height, mod, rr, gg, bb, count = adjust_resol(int(width*0.8))\n                elif fps > fps_lock + 15:\n                    width, height, mod, rr, gg, bb, count = adjust_resol(int(width*1.1)) \n\n            if (int(posx) == exitx and int(posy) == exity):\n                endmsg, won = \" You escaped safely! 
\", 1\n                successfx.play()\n                animate(width, height, mod, move, posx, posy, .5, rot, rot_v, mr, mg, mb, lx, ly, lz,\n                        mplayer, exitx, exity, mapr, mapt, maps, rr, gg, bb, enx, eny, sx, sy, sx2, sy2,\n                        size, checker, count,fb, fg, fr, pause, endmsg, won, health, minimap, score, .5/61, clock.get_fps())\n                pause = 1\n                score += 1\n                maxscore = max(score, maxscore) \n        \n        et = min(clock.tick()/500, 0.1)*(0.8+move)\n\n        if shoot or sstart != None:\n            if sstart == None:\n                shotfx.play()\n                if fps < fps_lock and autores:\n                    width, height, mod, rr, gg, bb, count = adjust_resol(int(width*0.8))\n                sstart = pg.time.get_ticks()\n            elif pg.time.get_ticks() - sstart > 500:\n                shoot, sx, sy, sstart = 0, -1, -1, None\n\n        if enhealth == 0:\n            if not respawn:\n                if shoot:\n                    health = min(health+5, 10)\n                shoot2, sx2, sy2, run, respawn, sstart2 = 0, -1, -1, 1, 1, None\n                killfx.play()\n                \n        else:\n            if respawn:\n                respawn = 0\n                respawnfx.play()\n            if shoot2 or sy2 == 0 or sstart2 != None:\n                if run:\n                    run = 0\n                    runfx.play()\n                if sstart2 == None:\n                    shotfx.play()\n                    sstart2 = pg.time.get_ticks()\n                elif pg.time.get_ticks() - sstart2 > 500:\n                    shoot2, sx2, sy2, sstart2 = 0, -1, -1, None\n\n        if health <= 0:\n            won, pause, health = -1, 1, 0\n            if score > 0:\n                score -= 1\n            endmsg = \" You died! Current score: \" + str(score) + ' '\n            failfx.play()\n            animate(width, height, mod, move, posx, posy, .5, rot, rot_v, mr, mg, mb, lx, ly, lz,\n                    mplayer, exitx, exity, mapr, mapt, maps, rr, gg, bb, enx, eny, sx, sy, sx2, sy2,\n                    size, checker, count,fb, fg, fr, pause, endmsg, won, health, minimap, score, -.5/61, clock.get_fps())\n            enx, eny, seenx, seeny, lock, won, enhealth = 0, 0, 0, 0, 0, 0, 0\n\n\n        posx, posy, rot, rot_v, shoot, move = movement(pg.key.get_pressed(),posx, posy, rot, rot_v, maph, et, shoot, sstart, move)\n        pg.mouse.set_pos([640, 360])\n        \n        \n        \n        pg.display.update()\n\n    pg.mixer.fadeout(1000)\n    print(endmsg)\n    posz, ani = 0.5, .5/61\n    if health <= 0:\n        posz, ani = 0.01, .99/61\n    elif int(posx) == exitx and int(posy) == exity:\n        posz, ani = 0.99, -.99/61\n    \n    animate(width, height, mod, move, posx, posy, posz, rot, rot_v, mr, mg, mb, size/2 + 1500, size/2 + 1000, 1000,\n            maph, exitx, exity, mapr, mapt, maps, rr, gg, bb, enx, eny, sx, sy, sx2, sy2,\n            size, checker, count,fb, fg, fr, pause, endmsg, won, health, minimap, score, ani, fps)\n    \n    pg.quit()\n\ndef new_map(score):\n    size = np.random.randint(20+score*5,30+score*10) # size of the map\n    posx, posy, posz = np.random.randint(1, size -2)+0.5, np.random.randint(1, size -2)+0.5, 0.5\n    x, y = int(posx), int(posy)\n    rot, rot_v = (np.pi/4, 0)\n    \n    mr, mg, mb = np.random.uniform(0,1, (size,size)), np.random.uniform(0,1, (size,size)), np.random.uniform(0,1, (size,size)) \n    mapr = np.random.choice([0, 0, 0, 0, 1], (size,size))\n    maps = np.random.choice([0, 0, 0, 0, 1], (size,size))\n    mapt = np.random.choice([0, 0, 0, 1, 2, 3], (size,size))\n    maptemp = np.random.choice([0,0, 1], (size,size))\n    maph = np.random.uniform(0.25, 0.99, (size,size))\n    maph[np.where(maptemp == 0)] = 0\n    maph[0,:], maph[size-1,:], maph[:,0], maph[:,size-1] = (1,1,1,1) # outer walls\n    maps[0,:], maps[size-1,:], maps[:,0], maps[:,size-1] = (0,0,0,0) # no spheres\n\n    maph[x][y], mapr[x][y] = (0, 0)\n    count = 0 \n    while 1:\n        testx, testy = (x, y)\n        if np.random.uniform() > 0.5:\n            testx = testx + np.random.choice([-1, 1])\n        else:\n            testy = testy + np.random.choice([-1, 1])\n        if testx > 0 and testx < size -1 and testy > 0 and testy < size -1:\n            if maph[testx][testy] == 0 or count > 5:\n                count = 0\n                x, y = (testx, testy)\n                
maph[x][y], mapr[x][y] = (0, 0)\n                dtx = np.sqrt((x-posx)**2 + (y-posy)**2)\n                if (dtx > size*.6 and np.random.uniform() > .99) or np.random.uniform() > .99999:\n                    exitx, exity = (x, y)\n                    break\n            else:\n                count = count+1\n    \n    return mr, mg, mb, maph, mapr, exitx, exity, mapt, maps, posx, posy, posz, size, rot, rot_v\n\ndef new_game(fb, fg, fr, endmsg, score):\n    width, height, mod, rr, gg, bb, count = adjust_resol(200)\n    mr, mg, mb, maph, mapr, exitx, exity, mapt, maps, posx, posy, posz, size, rot, rot_v = new_map(score)\n    minimap = np.zeros((size, size, 3))\n    animate(width, height, mod, 0, posx, posy, .99, rot, rot_v, mr, mg, mb, size/2 + 1500, size/2 + 1000, 1000, maph, exitx, exity, mapr, mapt, maps,\n            rr, gg, bb, 0, 0, -1, -1, -1, -1, size, 2, 0, fb, fg, fr, 0, endmsg, 0, 10, minimap, score, -.5/61)\n    \n    return (mr, mg, mb, maph, mapr, exitx, exity, mapt, maps, posx, posy, posz, size, rot, rot_v, minimap, width, height, mod, rr, gg, bb, count,\n            -1, -1, 0, 0, 0, 1, 0, -1, -1, -1, None, 0.1, 10, 0, 0, 0, 0, 0, -1, -1, -1, None, 0, 1, 0)\n#enx, eny, seenx, seeny, lock, run, shoot, sx, sy, sz, sstart, et, health, sdir, sdirz, sdir2, sdirz2, shoot2, sx2, sy2, sz2, sstart2, won, respawn, move\n\ndef movement(pressed_keys,posx, posy, rot, rot_v, maph, et, shoot, sstart, move):\n    x, y = (posx, posy)\n    p_mouse = pg.mouse.get_pos()\n    rot, rot_v = rot - np.clip((p_mouse[0]-640)/200, -0.2, .2), rot_v -(p_mouse[1]-360)/400\n    rot_v = np.clip(rot_v, -1, 1)\n    diag = 0\n\n    if pressed_keys[pg.K_UP] or pressed_keys[ord('w')]:\n        diag = 0.5\n        x, y, move, diag = x + et*np.cos(rot), y + et*np.sin(rot), move + et/4, 1\n\n    elif pressed_keys[pg.K_DOWN] or pressed_keys[ord('s')]:\n        x, y, move, diag = x - et*np.cos(rot), y - et*np.sin(rot), move - et/2, 1\n    \n    if pressed_keys[pg.K_LEFT] or pressed_keys[ord('a')]:\n        et = et/(diag+1)\n        x, y = x - et*np.sin(rot), y + et*np.cos(rot)\n    \n    elif pressed_keys[pg.K_RIGHT] or pressed_keys[ord('d')]:\n        et = et/(diag+1)\n        x, y = x + et*np.sin(rot), y - et*np.cos(rot)\n\n    if x == posx and y == posy:\n        move = move - et/2\n\n    if maph[int(x-0.05)][int(y)] == 0 and maph[int(x+0.05)][int(y)] == 0 and maph[int(x)][int(y+0.05)] == 0:\n        posx, posy = x, y \n    elif maph[int(posx-0.05)][int(y)] == 0 and maph[int(posx+0.05)][int(y)] == 0 and maph[int(posx)][int(y+0.05)] == 0:\n        posy = y\n    elif maph[int(x-0.05)][int(posy)] == 0 and maph[int(x+0.05)][int(posy)] == 0 and maph[int(x)][int(posy+0.05)] == 0:\n        posx = x\n    else:\n        move = move - et/2\n    \n    if not shoot and sstart == None and pressed_keys[pg.K_SPACE]:\n        shoot = 1\n    move = np.clip(move, 0, 0.4)\n    return posx, posy, rot, rot_v, shoot, move\n\n@njit(cache=True)\ndef lodev(x, y, z, cos, sin, sinz, maph, size):\n    norm = np.sqrt(cos**2 + sin**2 + sinz**2)\n    rayDirX, rayDirY, rayDirZ = cos/norm + 1e-16, sin/norm + 1e-16, sinz/norm + 1e-16\n    \n    mapX, mapY = int(x), int(y)\n\n    deltaDistX, deltaDistY, deltaDistZ= abs(1/rayDirX), abs(1/rayDirY), abs(1/rayDirZ)\n\n    if (rayDirX < 0):\n        stepX, sideDistX = -1, (x - mapX) * deltaDistX\n    else:\n        stepX, sideDistX = 1, (mapX + 1.0 - x) * deltaDistX\n    \n    if (rayDirY < 0):\n        stepY, sideDistY = -1, (y - mapY) * deltaDistY\n    else:\n        stepY, sideDistY = 1, (mapY + 1 - y) * deltaDistY\n\n    if (rayDirZ < 0):\n        sideDistZ = z*deltaDistZ;\n    else:\n        sideDistZ = (1-z)*deltaDistZ\n\n    while (1):\n        if (sideDistX < sideDistY):\n            sideDistX += deltaDistX; mapX += stepX\n            dist = sideDistX; side = 0\n            if mapX < 2 or mapX > size-2:\n                break\n        else:\n            sideDistY += deltaDistY; mapY += stepY\n            dist = sideDistY; side = 1\n            if mapY < 
2 or mapY > size-2:\n                break\n        if (maph[mapX][mapY] != 0):\n            break\n    \n    if (side):\n        dist = dist - deltaDistY\n    else:\n        dist = dist - deltaDistX\n    \n    if (dist > sideDistZ):\n        dist = sideDistZ\n\n    x = x + rayDirX*dist - cos/2\n    y = y + rayDirY*dist - sin/2\n    z = z + rayDirZ*dist - sinz/2\n    return x, y, z\n\n@njit(cache=True)\ndef ray_caster(posx, posy, posz, sin, cos, sinz, lx, ly, lz, maph, mapr, maps, enx, eny, sx, sy, sx2, sy2, size, sz, sz2):\n    x, y, z = posx, posy, posz\n    modr, cx, cy, shot, mapv = 1, 1, 1, 0, 0\n    dtp = np.random.uniform(0.002,0.01)\n    for k in range(2000):\n        \n        if (mapv == 0) or (sinz > 0 and (z > mapv or (mapv > 1 and mapv < 6 and z > 0.58))): ## LoDev DDA for optimization\n            x, y, z = lodev(x, y, z, cos, sin, sinz, maph, size)\n        \n        x += cos; y += sin; z += sinz\n        if (z > 1 or z < 0): # check ceiling and floor\n            break\n        mapv = maph[int(x)][int(y)]\n        if (mapv == 2 or mapv == 8 or mapv == 14) and modr > 0.7:\n            mapv = 0\n        if mapv > 1 and z < 0.58: # check agents\n            if mapv == 2 or mapv == 8 or mapv == 3 or mapv == 15:\n                refx, refy, sh = posx, posy, .8\n                if mapv%2 != 0:\n                    refx, refy, sh = enx, eny, .2 \n                if z> 0.45 and (x-refx)**2 + (y-refy)**2 + (z-0.5)**2 < 0.003 +abs(z-0.47)/30 :\n                    break # head\n                if z < 0.45 and z > 0.28 and (x-refx)**2 + (y-refy)**2 < (z/10 - 0.02):\n                    break # chest\n                if z < 0.28 and (x-refx)**2 + (y-refy)**2 + (z-0.15)**2 < 0.023 :\n                    break #roller\n            if mapv > 5:# and z < 0.4 and z > 0.2:\n                refx, refy, refz = sx, sy, sz\n                if mapv < 12:\n                    refx, refy, refz = sx2, sy2, sz2\n                if ((x-refx)**2 + (y-refy)**2 + (z-refz)**2 < dtp):\n                    shot = 1\n                    break\n\n        if mapv > z and mapv < 2: # check walls\n            if maps[int(x)][int(y)]: # check spheres\n                if ((x%1-0.5)**2 + (y%1-0.5)**2 + (z%1-0.5)**2 < 0.24):\n                    x, y, z = refine(x, y, z, sin, cos, sinz)\n                    if (mapr[int(x)][int(y)]): # spherical mirror\n                        if (modr == 1):\n                            cx, cy = int(x), int(y)\n                        modr = modr*0.7\n                        if (modr < 0.2):\n                            break\n                        if (mapv - z <= abs(sinz)): ## horizontal surface\n                            sinz = -sinz\n                        else:\n                            nx = (x%1-0.5)/0.5; ny = (y%1-0.5)/0.5; nz =(z%1-0.5)/0.5\n                            dot = 2*(cos*nx + sin*ny + sinz*nz)\n                            cos = (cos - nx*dot); sin = (sin - ny*dot); sinz = (sinz - nz*dot) \n                        x += cos; y += sin; z += sinz\n                    else:\n                        break\n            \n            elif mapr[int(x)][int(y)]: # check reflections\n                if modr == 1:\n                    cx, cy = int(x), int(y)\n                modr = modr*0.7\n                if modr < 0.2:\n                    break\n                if abs(z-maph[int(x)][int(y)]) < abs(sinz):\n                    sinz = -sinz\n                elif maph[int(x+cos)][int(y-sin)] == maph[int(x)][int(y)]:\n                    cos = -cos\n                else:\n                    sin = -sin\n            else:\n                break\n    return x, y, z, modr, shot, mapv, refx, refy, refz, cx, cy, sin, cos, sinz, sh, shot\n\n@njit(cache=True)\ndef refine(x, y, z, sin, cos, sinz):\n    x -= .9*cos; y -= .9*sin; z -= .9*sinz\n    while ((x%1-0.5)**2 + (y%1-0.5)**2 + (z%1-0.5)**2 - 0.24 > 0):\n        x += 0.1*cos; y += 0.1*sin; z += 0.1*sinz\n    return x, y, z\n\n@njit(cache=True)\ndef shadow_ray(x, y, z, cos, sin, sinz, modr, shot, maps, enx, eny, posx, posy, posz, size, maph, limodr, refz):\n    x += cos; y += sin; z += sinz # advance one step\n    mapv = maph[int(x)][int(y)]\n    if z < mapv and mapv < 1:# if already hit something apply dark shade\n        if maps[int(x)][int(y)]:\n            if ((x-int(x)-0.5)**2 + (y-int(y)-0.5)**2 + (z-int(z)-0.5)**2 < 0.24):\n                modr = modr*0.39\n        else:\n            modr = modr*0.39\n\n    for k in range(1000):\n        if (mapv == 0) or not shot and ((z > mapv) or (z > 0.57 and mapv > 1)): ## LoDev DDA for optimization\n            x, y, z = lodev(x, y, z, cos, sin, sinz, maph, size)\n        x += cos; y += sin; z += sinz\n        mapv = maph[int(x)][int(y)]\n        if shot:\n            if mapv > 5 or 
(sinz > 0 and z > refz) or (sinz < 0 and z < refz) or modr < limodr:\n                break\n        elif z >1 or modr < limodr:\n            break\n        if z < 0.58 and mapv > 1 and (mapv == 3 or mapv == 2 or mapv == 15 or mapv == 8):\n            refx, refy, sh = enx, eny, .2\n            if mapv%2 == 0:\n                refx, refy, sh = posx, posy, .8\n            if z> 0.45 and (x-refx)**2 + (y-refy)**2 + (z-0.5)**2 < 0.003 +abs(z-0.47)/30:\n                modr = modr*0.67 # head\n            if z < 0.45 and z > 0.28 and (x-refx)**2 + (y-refy)**2 < (z/10 - 0.02):\n                modr = modr*0.67 # chest\n            if z < 0.28 and (x-refx)**2 + (y-refy)**2 + (z-0.15)**2 < 0.023 :\n                modr = modr*0.67 #roller\n        \n        if mapv > 0 and z <= mapv and mapv < 2: \n            if maps[int(x)][int(y)]: # check spheres\n                if ((x-int(x)-0.5)**2 + (y-int(y)-0.5)**2 + (z-int(z)-0.5)**2 < 0.25):\n                    modr = modr*0.9\n            else: \n                modr = modr*0.9\n    return modr\n\n@njit(cache=True)\ndef get_color(x, y, z, modr, shot, mapv, refx, refy, refz, cx, cy, sin, cos, sinz, sh, mapt, maps, exitx, exity,\n              mr, mg, mb, fr, fg, fb, lx, ly, lz, size):\n    if z > 1: # ceiling\n        norm = np.sqrt(cos**2 + sin**2 + sinz**2)\n        rayDirX, rayDirY, rayDirZ = cos/norm + 1e-16, sin/norm + 1e-16, sinz/norm + 1e-16 \n        deltaDistZ = (lz-z)/rayDirZ\n        x += deltaDistZ*rayDirX; y += deltaDistZ*rayDirY; z = lz\n        dtol = np.sqrt((x-lx)**2+(y-ly)**2)\n\n        if dtol < 50: #light source\n            c1, c2, c3, shot = 1, 1, 0.5, 1\n        else:\n            angle = np.rad2deg(np.arctan((y-ly)/(x-lx)))/np.random.uniform(12,15)\n            sh = min((0.8+ abs(angle - int(angle))/5)/(dtol/1000), 1)\n            if int(angle)%2 == 1:\n                c1, c2, c3 = 0.82*(1-sh), 0.86*(1-sh/4), (1-sh/10)\n            else:\n                c1, c2, c3 = 0.8*(1-sh), 0.9*(1-sh/4), (1-sh/10)*0.9\n    \n    elif z < 0: # floor\n        xx, sh = int(3*x%1*100) + int(3*y%1*100)*100, 0.3 + (x+y)/(3*size)\n        c1, c2, c3, z = .85*(1-sh/2)*fg[xx], sh*fg[xx], 0.85*sh*fb[xx], 0\n        if int(x) == exitx and int(y) == exity: #exit\n            c3 = np.random.uniform(0.5, 1)\n    \n    elif mapv < 2: # walls\n        c1, c2, c3 = mr[int(x)][int(y)], mg[int(x)][int(y)], mb[int(x)][int(y)]\n        mapvt = mapt[int(x)][int(y)]\n        if mapv - z <= abs(sinz): # round coordinates and pre shade\n            z = mapv\n        elif not maps[int(x)][int(y)]:\n            if int(x-cos) != int(x):\n                x = max(int(x-cos), int(x))\n                modr = modr*0.80\n            else:\n                y = max(int(y-sin), int(y))\n                modr = modr*0.9\n        \n        if mapvt > 1: # textured walls\n            if z == mapv:\n                xx = int(3*x%1*100) + 100*int(3*y%1*100)\n            elif x%1 == 0:\n                xx = int(3*z%1*100) + 100*int(3*y%1*100)\n            else:\n                xx = int(3*z%1*100) + 100*int(3*x%1*100)\n            xx = fr[xx]\n            c1, c2, c3 = c1*xx, c2*xx, c3*xx\n        if mapvt%2 == 1: # gradient walls\n            c1, c2, c3 = c1*(2+z)/3, c2*(3-z)/3, c3*(2+z**2)/3\n    \n    else: # agents\n        if shot: # fireball\n            sh = ((x-refx)**2 + (y-refy)**2 + (z-refz)**2)/0.012\n            c1, c2, c3 = 1, 0.6*sh+0.2 , 0.2*sh+0.1 \n        elif z> 0.45: # Head\n            c1, c2, c3 = (1-z)*(1-sh), (1-z)*sh, z*sh \n        elif z > 0.28: # Chest\n            c1, c2, c3 = (z-0.28), (z-0.28)*(1-sh), (z-0.28)*sh \n        else: # Roller\n            c1, c2, c3 = refx%1*z*(1-sh), refy%1*0.2*sh, refy%1*z*sh\n\n    return c1, c2, c3, modr, x, y, z, shot\n\n    \n@njit(cache=True)\ndef super_fast(width, height, mod, move, posx, posy, posz, rot, rot_v, mr, mg, mb, lx, ly, lz,\n               maph, exitx, exity, mapr, mapt, maps, pr, pg, pb, enx, eny, sx, sy, sx2, sy2,\n               size, checker, count, fb, fg, fr, sz=0, sz2=0):\n    \n    inv, inv2, garbage, idx = (count%2), -(int(count/2)%2), not(count), 0\n    if checker == 0:\n        garbage = 0\n    for j in range(height): #vertical loop \n        rot_j = rot_v + (1+move**1.5)*np.deg2rad(24 - j/mod)\n        sinzo = (0.04/mod)*np.sin(rot_j) \n        coszo = (0.04/mod)*np.sqrt(abs(np.cos(rot_j))) \n        for i in range(width): #horizontal 
vision loop\n            if (checker == 1 or garbage) and idx%2 == 1:\n                pr[idx], pg[idx], pb[idx] = pr[idx-1], pg[idx-1], pb[idx-1]\n            else:\n                if checker == 3: # radial\n                    rad = np.sqrt((i-width/2)**2 + (j-height/2)**2)\n                elif checker == 4: # square\n                    rad = max(abs(i-width/2)*1, abs(j-height/2))\n                \n                if (checker < 2 or garbage or # standard and doubled pixels, first frame after res adjust\n                    (checker == 2 and ((inv and i%2 == j%2) or (not(inv) and i%2 != j%2))) or # checkerboard\n                    (checker > 2 and ((rad < height/2.9 and ((inv and i%2 == j%2) or (not(inv) and i%2 != j%2))) or # radial and square\n                    (rad > height/2.9 and rad < height/1.9 and (((i+2*inv)%3 == 0 and (j+2*inv2)%3 == 0))) or\n                    (rad > height/1.9 and (((i+3*inv)%5 == 0 and (j+3*inv2)%5 == 0)))))):\n                    \n                    rot_i = rot + (1+move**1.5)*np.deg2rad(i/mod - 30)\n                    si = 1\n                    if checker > 2:\n                        si = 2\n                        if rad > height/1.9:\n                            si = 3\n\n                    sin, cos, sinz = coszo*np.sin(rot_i), coszo*np.cos(rot_i), sinzo\n                    modr, cx, cy, c1r, c2r, c3r, shot, mapv = 1, 1, 1, 1, 1, 1, 0, 0\n                    \n                    x, y, z, modr, shot, mapv, refx, refy, refz, cx, cy, sin, cos, sinz, sh, shot = ray_caster(posx, posy, posz, sin, cos, sinz, lx, ly, lz, maph, mapr, maps,\n                                                                                                              enx, eny, sx, sy, sx2, sy2, size, sz, sz2)\n\n                    c1, c2, c3, modr, x, y, z, shot = get_color(x, y, z, modr, shot, mapv, refx, refy, refz, cx, cy, sin, cos, sinz, sh, mapt, maps, exitx, exity,\n                                                                mr, mg, mb, fr, fg, fb, lx, ly, lz, size)\n\n                    if modr <= 0.7 and not shot: # tinted mirrors\n                        c1r, c2r, c3r = mr[cx][cy], mg[cx][cy], mg[cx][cy]\n\n                    if not shot and z < 1: # shadows\n                        limodr, refx, refy, refz = 0.4, lx, ly, lz\n                        dtp = np.sqrt((x-posx)**2+(y-posy)**2+(z-posz)**2)\n                        if dtp > 7:\n                            modr = modr/np.log((dtp-6)/4+np.e)\n                        if sx != -1 or sx2 != -1: # fireball\n                            refx, refy, refz, shot, c3 = sx2, sy2, sz2, 1, c3*0.9\n                            if sx != -1:\n                                refx, refy, refz = sx, sy, sz\n                        dtol = np.sqrt((x-refx)**2+(y-refy)**2+(z-refz)**2)\n                        cos, sin, sinz = .01*(refx-x)/dtol, .01*(refy-y)/dtol, .01*(refz-z)/dtol\n                        modr = shadow_ray(x, y, z, cos, sin, sinz, modr, shot, maps, enx, eny, posx, posy, posz, size, maph, limodr, refz)\n                    \n                    c1, c2, c3 = modr*np.sqrt(c1*c1r), modr*np.sqrt(c2*c2r), modr*np.sqrt(c3*c3r) \n\n\n                    \n                    if checker == 0 or garbage:\n                        pr[idx], pg[idx], pb[idx] = c1, c2, c3\n                    \n                    elif checker > 2 and rad > height/2.9 -1 :\n                        imin, imax, jmin, jmax = max(i-si, 0), min(i+si, width-1), max(j-si, 0), min(j+si, height-1)\n                        for jj in range(jmin, jmax):\n                            for ii in range(imin, imax):\n                                idx2 = ii + jj*width\n                                pr[idx2], pg[idx2], pb[idx2] = (3*c1 + pr[idx2])/4, (3*c2 + pg[idx2])/4, (3*c3 + pb[idx2])/4\n                    \n                    else:\n                        pr[idx], pg[idx], pb[idx] = (3*c1 + pr[idx])/4, (3*c2 + pg[idx])/4, (3*c3 + pb[idx])/4 \n            idx += 1\n\n    if checker != 0: # fill gaps and smoothing\n        idx = 0\n        for j in range(height): #vertical loop \n            for i in range(width): #horizontal vision loop\n                if (i > 0 and i < width -1 and j > 0 and j < height -1 and ((inv and i%2 != j%2) or (not(inv) and i%2 == j%2))):\n                    if abs(pr[idx-1] - pr[idx+1]) < 0.05 and abs(pg[idx-1] - pg[idx+1]) < 0.05 and abs(pb[idx-1] - pb[idx+1]) < 0.05 :\n                        pr[idx], pg[idx], pb[idx] = (pr[idx-1] + pr[idx+1])/2, (pg[idx-1] + pg[idx+1])/2, (pb[idx-1] + pb[idx+1])/2\n                    elif abs(pr[idx-width] - pr[idx+width]) < 0.05 and abs(pg[idx-width] - pg[idx+width]) < 0.05 and abs(pb[idx-width] - pb[idx+width]) < 0.05 :\n                        pr[idx], pg[idx], pb[idx] = (pr[idx-width] + pr[idx+width])/2, (pg[idx-width] + pg[idx+width])/2, (pb[idx-width] + pb[idx+width])/2\n                    else:\n                        pr[idx] = (pr[idx] + pr[idx-1] + pr[idx-width] + pr[idx+width] + pr[idx+1])/5\n                        pg[idx] = 
(pg[idx] + pg[idx-1] + pg[idx-width] + pg[idx+width] + pg[idx+1])/5\n                        pb[idx] = (pb[idx] + pb[idx-1] + pb[idx-width] + pb[idx+width] + pb[idx+1])/5\n                idx += 1\n    \n    return pr, pg, pb\n\n@njit(cache=True)\ndef agents(enx, eny, maph, posx, posy, rot, rot_v, et, shoot, sx, sy, sz, sdir,\n           sdirz, shoot2, sx2, sy2, sz2, sdir2, sdirz2, mplayer,\n           seenx, seeny, lock, size, enhealth, health, score):\n\n    mplayer[int(posx)][int(posy)] = 2 # player = 2, npc = 3, npc fireball >=6, player fireball >=12\n    if (maph[int(posx+.1)][int(posy+.1)] == 0 and maph[int(posx-.1)][int(posy-.1)] == 0 and\n        maph[int(posx-.1)][int(posy+.1)] == 0 and maph[int(posx+.1)][int(posy-.1)] == 0):\n        mplayer[int(posx+0.1)][int(posy+0.1)], mplayer[int(posx+0.1)][int(posy-0.1)] = 2, 2\n        mplayer[int(posx-0.1)][int(posy+0.1)], mplayer[int(posx-0.1)][int(posy-0.1)] = 2, 2\n    \n    # teleport or respawn npc\n    if (enhealth == 0 and np.random.uniform(0,1) > 0.995) or (enhealth > 0 and (enx-posx)**2 + (eny-posy)**2 > 300) :\n        x, y = np.random.normal(posx, 5), np.random.normal(posy, 5)\n        dtp = (x-posx)**2 + (y-posy)**2\n        if x > 0 and x < size-1 and y > 0 and y < size-1:\n            if maph[int(x)][int(y)] == 0 and dtp > 49 :\n                if enhealth == 0:\n                    enx, eny, seenx, seeny, lock, enhealth = x, y, x, y, 0, 10\n                else:\n                    enx, eny, seenx, seeny, lock = x, y, x, y, 0\n    if enhealth > 0: # look for player\n        if not lock or np.random.uniform(0,1) > 0.99:\n            dtp = np.sqrt((enx-posx)**2 + (eny-posy)**2)\n            cos, sin = (posx-enx)/dtp, (posy-eny)/dtp\n            x, y = enx, eny\n            for i in range(300):\n                x += 0.04*cos; y += 0.04*sin\n                if (maph[int(x+.05)][int(y+.05)] != 0 or maph[int(x-.05)][int(y-.05)] != 0 or\n                    maph[int(x-.05)][int(y+.05)] != 0 or maph[int(x+.05)][int(y-.05)] != 0):\n                    lock = 0\n                    break\n                if(int(x) == int(posx) and int(y) == int(posy)): # lock on player position\n                    seenx, seeny, lock = posx, posy, 1\n                    break\n\n        if int(enx) == int(seenx) and int(eny) == int(seeny): # reached target\n            if not lock:\n                if shoot or np.random.uniform(0,1) > 0.7: #if the player is shooting, go towards him\n                    seenx, seeny = np.random.uniform(enx, posx), np.random.uniform(eny, posy)\n                else: # just keep mooving\n                    seenx, seeny = np.random.normal(enx, 2), np.random.normal(eny, 2) \n            else: # go near the player if locked\n                seenx, seeny = np.random.normal(posx, 2), np.random.normal(posy, 2)\n        \n        dtp = np.sqrt((enx-seenx)**2 + (eny-seeny)**2) \n        cos, sin = (seenx-enx)/dtp, (seeny-eny)/dtp \n        x, y = enx + et*cos, eny + et*sin # set new npc position\n        if maph[int(x)][int(y)] == 0: # check if position is valid\n            enx, eny = x, y\n        else: # if not, try to move sideways\n            if np.random.uniform(0,1) > 0.5:\n                x, y = enx - et*sin, eny + et*cos\n            else:\n                x, y = enx + et*sin, eny - et*cos\n            if maph[int(x)][int(y)] == 0: # try again\n                enx, eny = x, y\n            elif np.random.uniform(0,1) > 0.5: # update target\n                if lock and np.random.uniform(0,1) > 0.5:\n                    seenx, seeny = np.random.normal(posx, 2), np.random.normal(posy, 2)\n                    if np.random.uniform(0,1) > 0.99: # release lock if stuck\n                        lock = 0\n                else:\n                    seenx, seeny = enx+np.random.normal(0,3), eny+np.random.normal(0,3)\n        \n        mplayer[int(enx)][int(eny)] = 3 # mark npc position and adjacent positions \n        if (maph[int(enx+.1)][int(eny+.1)] == 0 and maph[int(enx-.1)][int(eny-.1)] == 0 and\n            maph[int(enx-.1)][int(eny+.1)] == 0 and maph[int(enx+.1)][int(eny-.1)] == 0):\n            mplayer[int(enx+0.1)][int(eny+0.1)], mplayer[int(enx+0.1)][int(eny-0.1)] = 3, 3\n            mplayer[int(enx-0.1)][int(eny+0.1)], mplayer[int(enx-0.1)][int(eny-0.1)] = 3, 3\n        \n        if lock and not shoot2: # npc fireball 
initiate\n            shoot2, sdirz2 = 1, np.sin(np.random.uniform(0,.2))\n            sdir2 = np.arctan((posy-eny)/(posx-enx)) + np.random.uniform(-.1,.1)\n            if abs(enx+np.cos(sdir2)-posx) > abs(enx-posx):\n                sdir2 = sdir2 - np.pi\n    \n    if shoot2 and sy2 != 0: # npc fireball\n        if sx2 == -1:\n            sx2, sy2, sz2 = enx + .5*np.cos(sdir2), eny + .5*np.sin(sdir2), 0.35 + .5*sdirz2\n        sx2, sy2, sz2 = sx2 + 5*et*np.cos(sdir2), sy2 + 5*et*np.sin(sdir2), sz2 + 5*et*sdirz2\n        sdirz2 = sdirz2 - et/5\n        if sx2 > 0 and sx2 < size-1 and sy2 > 0 and sy2 < size-1 and sz2 > 0 and sz2 < 1:\n            if (maph[int(sx2+.05)][int(sy2+.05)] != 0 or maph[int(sx2-.05)][int(sy2-.05)] != 0 or\n                maph[int(sx2-.05)][int(sy2+.05)] != 0 or maph[int(sx2+.05)][int(sy2-.05)] != 0):\n                shoot2, sx2, sy2 = 0, -1, -1\n            else:\n                if (sx2 - posx)**2 + (sy2 - posy)**2 < 0.01:\n                    shoot2, sx2, sy2 = 0, -1, 0\n                    health -= min(1 + score/4, 5)\n                else:\n                    mplayer[int(sx2)][int(sy2)] += 6\n        else:\n            shoot2, sx2, sy2 = 0, -1, 0\n    \n    if shoot: # player fireball\n        if sx == -1:\n            sdir, sdirz = rot+np.random.uniform(-.05,.05), np.sin(min(rot_v , 0)+np.random.uniform(.1,.2))\n            sx, sy, sz = posx + .5*np.cos(sdir), posy + .5*np.sin(sdir), 0.35 + .5*sdirz\n        sx, sy, sz = sx + 5*et*np.cos(sdir), sy + 5*et*np.sin(sdir), sz + 5*et*sdirz\n        sdirz = sdirz - et/5\n        if (sx > 0 and sy < size-1 and sy > 0 and sy < size-1 and sz > 0 and sz < 1 and\n            maph[int(sx+.05)][int(sy+.05)] == 0 and maph[int(sx-.05)][int(sy-.05)] == 0 and\n            maph[int(sx-.05)][int(sy+.05)] == 0 and maph[int(sx+.05)][int(sy-.05)] == 0):\n            if enhealth > 0 and (sx - enx)**2 + (sy - eny)**2 < 0.02:\n                shoot, sx, sy = 0, -1, -1\n                enhealth -= max(5 -score/4, 1)\n                if enhealth <= 0:\n                    enx, eny, seenx, seeny, enhealth = 0, 0, 0, 0, 0\n            else:\n                mplayer[int(sx)][int(sy)] += 12\n        else:\n            shoot, sx, sy = 0, -1, -1\n    \n    mplayer = maph + mplayer\n    return(enx, eny, mplayer, et, shoot, sx, sy, sz, sdir, sdirz, shoot2,\n           sx2, sy2, sz2, sdir2, sdirz2, seenx, seeny, lock, enhealth, health)\n\ndef adjust_resol(width):\n    height = int(0.6*width)\n    mod = width/64\n    rr, gg, bb = np.ones(width * height)/2, np.ones(width * height)/2, np.ones(width * height)/2\n    return width, height, mod, rr, gg, bb, 0\n\ndef drawing(rr, gg, bb, height, width, pause, endmsg, won, health, enhealth, minimap, score, fullscreen, nosplash=True):\n    global font, font2, screen, surfbg\n\n    surfbg.fill(pg.Color(\"darkgrey\"))\n    pg.draw.rect(surfbg, (200-int(health*20), 50+int(health*20), 50+int(health*10)),(1205,int(700-62*health),30,int(63*health)))\n    pg.draw.rect(surfbg, (enhealth*25, 200-enhealth*10, 50),(1245,700-62*enhealth,30,63*enhealth))\n    \n    pixels = np.dstack((rr,gg,bb))\n    pixels = np.reshape(pixels, (height,width,3))\n    surf = pg.surfarray.make_surface((np.rot90(pixels*255)).astype('uint8'))\n    surf = pg.transform.scale(surf, (1200, 720))\n    if not nosplash or pause:\n        px, py = 1100, 360\n        if nosplash:\n            px, py = pg.mouse.get_pos()\n        for i in range(3):\n            pg.draw.circle(surf, (50, 70+i*20, 160+i*40), [px+i*10,py-i*10], 50-i*15)\n            pg.draw.circle(surf, (60+i*10, 100+i*20, 100+i*10), [px+i*10,py+280-i*1], 90-i*15)\n            pg.draw.polygon(surf, (150+i*30, 34+i*10, 60+i*10), [[px-100+i*20,py+40+i*15],[px+100-i*20,py+40+i*15],[px+50-i*15,py+205-i*15],[px-50+i*15,py+205-i*15]])\n    screen.blit(surfbg, (0, 0))\n    screen.blit(surf, (0, 0))\n    \n    if pause: \n        screen.blit(font2.render(\" PyTracing Maze by FinFET \", 0, pg.Color(\"red\")),(45,45))\n        screen.blit(font2.render(\" PyTracing Maze by FinFET \", 0, pg.Color(\"blue\")),(55,55))\n        screen.blit(font2.render(\" PyTracing Maze by FinFET \", 0, 
pg.Color(\"white\")),(50,50))\n        screen.blit(font2.render(endmsg, 0, pg.Color(\"salmon\"), (100, 34, 60)),(50,420))\n        if nosplash:\n            screen.blit(font2.render(\" Press N for a new game \", 0, pg.Color(\"grey\"), (45, 34, 100)),(50,560))\n            screen.blit(font2.render(\" Press ESC to leave \", 0, pg.Color(\"grey\"), (13, 34, 139)),(50,630))\n            if won == 1:\n                screen.blit(font2.render(\" Your current score is \"+str(score) + ' ', 0, pg.Color(\"grey\"), (80, 34, 80)),(50,490))\n            if won == 0:\n                screen.blit(font2.render(\" Press P or C to continue \", 0, pg.Color(\"grey\"), (80, 34, 80)),(50,490))\n    else:\n        size = len(minimap)\n        surfmap = pg.surfarray.make_surface(np.flip(minimap).astype('uint8'))\n        surfmap = pg.transform.scale(surfmap, (size*4, size*4))\n        screen.blit(surfmap,(1280-size*4 - 85, 5), special_flags=pg.BLEND_ADD)\n        if fullscreen:\n            fps = font.render(endmsg, 0, pg.Color(\"coral\"))\n            screen.blit(fps,(100,1))\n    screen.blit(font2.render(str(score), 0, pg.Color(\"white\")),(1210, 10))\n    \n    pg.display.update()\n\ndef animate(width, height, mod, move, posx, posy, posz, rot, rot_v, mr, mg, mb, lx, ly, lz, #simple up and down animation\n            maph, exitx, exity, mapr, mapt, maps, pr, pg, pb, enx, eny, sx, sy, sx2, sy2,\n            size, checker, count, fb, fg, fr, pause, endmsg, won, health, minimap, score, ani, fps=60):\n    ani = ani*60/fps\n    for i in range(int(fps)):\n        rr, gg, bb = super_fast(width, height, mod, move, posx, posy, posz+ani*i, rot, rot_v, mr, mg, mb, lx, ly, lz,\n                                maph, exitx, exity, mapr, mapt, maps, pr, pg, pb, enx, eny, sx, sy, sx2, sy2,\n                                size, checker, count, fb, fg, fr)\n        count += 1\n        \n        drawing(rr, gg, bb, height, width, pause, endmsg, won, health, 0, minimap, score, 0)\n    \ndef sfx(): #load sounds and textures\n    try:\n        ambient = pg.mixer.Sound('soundfx/HauntSilentPartner.mp3') \n        runfx = pg.mixer.Sound('soundfx/run.mp3')\n        shotfx = pg.mixer.Sound('soundfx/slap.mp3')\n        killfx = pg.mixer.Sound('soundfx/shutdown.mp3')\n        respawnfx = pg.mixer.Sound('soundfx/respawn.mp3')\n        successfx = pg.mixer.Sound('soundfx/success.mp3')\n        failfx = pg.mixer.Sound('soundfx/fail.mp3')\n    except:\n        print(\"Sounds missing! Generating replacements...\")\n        ambient = generate_sounds(75, 10000, 300)\n        runfx = generate_sounds(200, 800, 150)\n        shotfx = generate_sounds(200, 200, 250)\n        killfx = generate_sounds(1620, 241, 230)\n        respawnfx = generate_sounds(230, 350, 80)\n        successfx = generate_sounds(300, 900, 100)\n        failfx = generate_sounds(700, 200, 350)\n    try: \n        floor = pg.surfarray.array3d(pg.image.load('soundfx/textures.jpg'))\n        fr, fg, fb = np.dsplit(floor,floor.shape[-1])\n        fr, fg, fb = fr.flatten()/255, fg.flatten()/255, fb.flatten()/255\n    except:\n        print(\"Textures missing! 
Generating replacements...\")\n        fr, fg, fb = generate_textures()\n    \n\n    return ambient, runfx, shotfx, killfx, respawnfx, successfx, failfx, fr, fg, fb\n\ndef generate_sounds(freq = 60, var = 500, n = 10):\n    sound1, sound2, sound3, freq0, dir2 = [], [], [], freq, 1\n    for i in range(n):\n        freq = freq+20*dir2\n        if freq > freq0*3 or freq < freq0/3+10:\n            dir2 = -1*dir2\n            freq = freq+20*dir2\n        var2 = np.random.randint(int(var/2),var*2)\n        samples1 = synth(np.random.randint(int(var),var*3), np.random.randint(freq,2*freq))\n        samples2 = synth(np.random.randint(int(var/2),var*2), np.random.randint(int(freq/2),freq))\n        samples3 = synth(np.random.randint(int(var/4),int(var/2)), np.random.randint(int(freq/3),3*freq))\n        sound1 = sound1 + list(samples1/3)\n        sound2 = sound2 + list(samples2/3)\n        sound3 = sound3 + list(samples3/3)\n\n    lens = min(len(sound1), len(sound2), len(sound3))\n    sound = np.asarray(sound1[:lens]) + np.asarray(sound2[:lens]) + np.asarray(sound3[:lens])\n    sound = np.asarray([sound,sound]).T.astype(np.int16)\n    sound = pg.sndarray.make_sound(sound.copy())\n    return sound\n\ndef synth(frames, freq):\n    def frame(i):\n        return 0.2 * 32767 * np.sin(2.0 * np.pi * freq * i / 44100)\n    arr = np.array([frame(x) for x in range(0, frames)]).astype(np.int16)\n    return arr\n\ndef generate_textures():\n    fr, fg, fb = [], [], []\n    for i in range(100):\n        for j in range(100):\n            ref1, ref2 = .2, 0.2\n            if i < 50 and j < 50 or i > 50 and j > 50:\n                ref1 = 1\n            if (i-50)**2 + (j-50)**2 < 2000:\n                ref2 = 1\n            fr.append(3.5-abs(np.sin(i/5)+np.cos(j/5))*ref2*np.random.uniform(i/100,j/100)+0.3+ref1)\n            fg.append(ref1+np.random.uniform(i/100,ref2*ref1)/3+0.1)\n            fb.append(ref1+np.random.uniform(j/100,ref2*ref1)/3+0.1)\n    fr, fg, fb = np.asarray(fr)/max(fr), np.asarray(fg)/3, np.asarray(fb)/3\n    return fr, fg, fb\n\nif __name__ == '__main__':\n    pg.init()\n    pg.display.set_caption(\"Welcome to Pytracing maze! 
Stutters may occur on first run while Numba is compiling\")\n    font = pg.font.SysFont(\"Arial\", 18)\n    font2 = pg.font.SysFont(\"Impact\", 48)\n    screen = pg.display.set_mode((1280, 720))\n    surfbg = pg.Surface((1280,720))\n    main()\n","sub_path":"pytracing render choices.py","file_name":"pytracing render choices.py","file_ext":"py","file_size_in_byte":43519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"93587220","text":"#\n# 프로그램 이름: convolution_basics.py\n# 작성자: Bong Ju Kang\n# 설명: 합성곱 신경망 모델 이해하기\n#\n\n# 필요한 패키지\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport requests, zipfile, io\nfrom keras.models import load_model\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\nfrom sklearn.model_selection import GridSearchCV, KFold\n\nfrom sklearn.datasets import load_digits\nfrom sklearn.preprocessing import StandardScaler\nimport tensorflow as tf\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, InputLayer, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization\nfrom keras.optimizers import Adam, Optimizer\nfrom keras import backend as K\nfrom keras.utils import to_categorical\nfrom keras.regularizers import l2\nfrom keras.wrappers.scikit_learn import KerasClassifier\n\nfrom scipy.signal import correlate, convolve2d\nfrom numpy.random import RandomState\n\n# 3차원 그래프\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# 초기 설정\npng_path = \"./data/png\"\nos.makedirs(png_path, exist_ok=True)\n\n# 한글출력\nplt.rcParams['font.family'] = 'Malgun Gothic'\nplt.rcParams['axes.unicode_minus'] = False\n\n# GPU 메모리를 독점적으로 사용하지 말고 공유하여 사용하도록 설정\nconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)\nconfig.gpu_options.allow_growth = True\nK.set_session(tf.Session(config=config))\n\n#\n# 합성곱 신경망 이해를 위한 기본 연산 (교재 수록)\n#\n\n#\n# 교차 상관 연산과 합성곱 연산\n#\n# 데이터 구성\nimage = RandomState(0).choice(np.arange(0, 4), size=25).reshape(5, 5)\n# array([[0, 3, 1, 0, 3],\n#        [3, 3, 3, 1, 3],\n#        [1, 2, 0, 3, 2],\n#        [0, 0, 0, 2, 1],\n#        [2, 3, 3, 2, 0]])\n\nfilter = RandomState(0).choice(np.arange(3), size=9).reshape(-1, 3)\n# array([[0, 1, 0],\n#        [1, 1, 2],\n#        [0, 2, 0]])\n\n# 교차 상관\nccor = correlate(image, filter)\n# array([[ 0,  0,  6,  2,  0,  6,  0],\n#        [ 0, 12, 11, 10,  9,  9,  3],\n#        [ 6, 11, 19,  9, 16, 11,  3],\n#        [ 2,  8,  6, 11, 12, 10,  2],\n#        [ 0,  5,  8, 10, 11,  5,  1],\n#        [ 4,  8, 11, 10,  7,  3,  0],\n#        [ 0,  2,  3,  3,  2,  0,  0]])\n\nccor_valid = correlate(image, filter, 'valid')\n# array([[19,  9, 16],\n#        [ 6, 11, 12],\n#        [ 8, 10, 11]])\n\nstride = 1\nfilter_size = 3\n\n# 1번째 값 계산\nnp.sum(image[:3, :3] * filter)\n# 19\n\n# 2번째 값 계산\nnp.sum(image[:filter_size, stride:filter_size + stride] * filter)\n# 9\n\n# correlate 계산 방식은 원래 이미지 데이터에 0 값을 행과 열에 2개씩 padding 한 후 계산한 것임\n# 아래는 실제로 padding 후 계산한 결과임\npadding = filter_size - 1\nadj_shape = np.array(image.shape) + padding * 2\nimage_with_padding = np.zeros(shape=adj_shape)\nimage_with_padding[padding:image.shape[0] + padding, padding:image.shape[1] + padding] = image\nedge = np.zeros(shape=(image_with_padding.shape[0] - padding, image_with_padding.shape[1] - padding))\nfor i in np.arange(image_with_padding.shape[0] - padding):\n    for j in np.arange(image_with_padding.shape[1] - padding):\n        edge[i, j] = np.sum(image_with_padding[i:i + padding + 1, j:j + padding + 1] * filter)\n# array([[ 0.,  0.,  6.,  2.,  0.,  6.,  0.],\n#        [ 0., 12., 11., 10.,  9.,  9.,  3.],\n#        [ 6., 11., 19.,  9., 16., 11.,  3.],\n#        [ 2.,  8.,  6., 11., 
12., 10., 2.],\n#        [ 0.,  5.,  8., 10., 11.,  5.,  1.],\n#        [ 4.,  8., 11., 10.,  7.,  3.,  0.],\n#        [ 0.,  2.,  3.,  3.,  2.,  0.,  0.]])\n\n# 합성곱 연산\nconvolve2d(image, filter)\n# array([[ 0,  0,  3,  1,  0,  3,  0],\n#        [ 0,  6,  7, 10,  6,  6,  6],\n#        [ 3,  7, 20, 12, 13, 13,  6],\n#        [ 1,  9, 10, 13,  9, 15,  4],\n#        [ 0,  4,  7,  5, 11,  9,  2],\n#        [ 2,  5, 10, 11, 12,  6,  0],\n#        [ 0,  4,  6,  6,  4,  0,  0]])\n\n# 행으로 한번, 열로 한번: 0축으로 한번, 1축으로 한번\nfilter_180 = np.rot90(filter, k=2)\n# filter180 = np.rot90(np.rot90(filter))\nfilter_row_90 = np.rot90(filter)\n\nnp.flip(np.flip(filter, axis=0), axis=1)\n\nconvolve2d(image, filter_180)\nconvolve2d(filter_180, image)\n# array([[ 0,  0,  6,  2,  0,  6,  0],\n#        [ 0, 12, 11, 10,  9,  9,  3],\n#        [ 6, 11, 19,  9, 16, 11,  3],\n#        [ 2,  8,  6, 11, 12, 10,  2],\n#        [ 0,  5,  8, 10, 11,  5,  1],\n#        [ 4,  8, 11, 10,  7,  3,  0],\n#        [ 0,  2,  3,  3,  2,  0,  0]])\n\nfpass_z = convolve2d(image, filter_180, 'same')\nbias = 0.1\nfpass_z = fpass_z + bias\nact_z = np.where(fpass_z >= 0, fpass_z, 0)\n# array([[12, 11, 10,  9,  9],\n#        [11, 19,  9, 16, 11],\n#        [ 8,  6, 11, 12, 10],\n#        [ 5,  8, 10, 11,  5],\n#        [ 8, 11, 10,  7,  3]])\n\ni = padding + 1\nj = padding + 1\nnp.sum(image_with_padding[i:i + padding + 1, j:j + padding + 1] * filter_180.T)\n\ntest_image = image[:3, :3]\nnp.sum(test_image * filter_180[::-1, ::-1])\nnp.rot90(filter_180, 2)\n\n#\n# 모수의 추정: 전진패스와 후진패스의 예\n#\nimage_matrix = RandomState(2).choice(np.arange(0, 3), size=9).reshape(3, 3)\n# array([[0, 3, 1, 0, 3],\n#        [3, 3, 3, 1, 3],\n#        [1, 2, 0, 3, 2],\n#        [0, 0, 0, 2, 1],\n#        [2, 3, 3, 2, 0]])\n\n\nfilter = RandomState(0).choice(np.arange(3), size=4).reshape(-1, 2)\n# array([[0, 1, 0],\n#        [1, 1, 2],\n#        [0, 2, 0]])\n\n# 전진 패스\nfilter_flip = np.rot90(filter, k=2)\nfpass_z = convolve2d(image_matrix, filter_flip, 'valid') + 0.1\nact_z = np.where(fpass_z >= 0, fpass_z, 0)\n\n# 후진 패스\ndelta_matrix = RandomState(0).randn(2, 2)\ndelta_matrix_flip = np.rot90(delta_matrix, k=2)\nweight_grad = convolve2d(image_matrix, delta_matrix_flip, 'valid')\n\n#\n# 예제: [DIGITS] 데이터 적용 (다층 신경망)\n#\n\n# 데이터 구성\nbunch = load_digits()\ndir(bunch)\n# ['DESCR', 'data', 'images', 'target', 'target_names']\n\n# Data Set Characteristics:\n#     :Number of Instances: 5620\n#     :Number of Attributes: 64\n#     :Attribute Information: 8x8 image of integer pixels in the range 0..16.\n#     :Missing Attribute Values: None\n#     :Creator: E. 
Alpaydin (alpaydin '@' boun.edu.tr)\n#     :Date: July; 1998\n#\n# This is a copy of the test set of the UCI ML hand-written digits datasets\n# http://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits\n\n# 이미지 파일로 데이터 구성 형식 이해하기\nplt.figure(figsize=(5, 5))\nplt.imshow(bunch['images'][0], cmap='binary')\nplt.grid()\nplt.savefig(png_path + '/convnet_data_digits_image.png')\nplt.show()\n\n# 입력 특징 구성\nX = bunch['data']\nX.shape\n# (1797, 64)\n\n# 목표 변수\ny = bunch['target']\ny.shape\n# (1797,)\n\n# 데이터 전 처리 및 분할\nmax_value = np.max(X)\nX_scaled = X / max_value\ny_onehot = to_categorical(y)\ny_onehot.shape\n# (1797, 10)\n\nX_train, X_test, y_train, y_test = train_test_split(X_scaled, y_onehot, test_size=0.3, random_state=123)\n\n#\n# 모델 구성\n#\n# 입력 차원 정의\ninput_dims = X_train.shape[1]\n\n# 아키텍처 정의\nmodel = Sequential()\nmodel.add(InputLayer(input_shape=(input_dims,)))\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dense(10, activation='softmax'))\nmodel.summary()\n\n# 모델 정의를 위한 추가 변수 정의\nmodel.compile(optimizer=Adam(lr=0.01), loss='categorical_crossentropy', metrics=['accuracy'])\n\n# 모델 적합\nhist = model.fit(X_train, y_train, validation_split=0.1, batch_size=10, epochs=100)\n\n# 모델 평가\nscores = model.evaluate(X_test, y_test, batch_size=10)\nprint('손실함수값=', scores[0], '\\n정확도=', scores[1])\n# 손실함수값= 0.12081231161361058\n# 정확도= 0.9833333315672698\n\n#\n# 초 모수 결정 (은닉 층 개수와 학습률 조정)\n#\n\n# 병렬 처리를 위하여 모델은 외부에 저장\n# 같은 프로젝트 디렉토리에 해당 파일을 저장한 경우에\n# 1) sys.path를 실행하여 해당 파일의 위치가 경로에 있는 지 확인\n# 2) 없으면, 가령, 프로젝트가 여러개인 경우에는\n# 파이참의 File > Settings > Project > Project Dependenceis... 에 가서 각 프로젝트의 체크 박스를 활성화\n\n# 해당 파일 호출\ntry:\n    import convolution_basics_defs as defs\nexcept:\n    import os, sys\n\n    curr_path = os.path.abspath(\"./ch20-합성곱 신경망\")\n    if curr_path not in sys.path:\n        sys.path.append(curr_path)\n    import convolution_basics_defs as defs\n\n# 모델 등록\nmodel = KerasClassifier(build_fn=defs.grid_base_model, verbose=0, epochs=200)\n\nparam_grid = dict(learning_rate=[0.01, 0.1],\n                  batch_size=[10, 50])\n\n# 초 모수 값의 조합에 의한 모델 적합\ngrid_search = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1)\ngrid_search.fit(X_train, y_train)\n\n# 선택된 초 모수 값\nprint(grid_search.best_params_)\n# {'batch_size': 10, 'learning_rate': 0.01}\n\n#\n# 예제: [DIGITS] 데이터 적용 (합성곱 신경망)\n#\n\n# 데이터 구성\nbunch = load_digits()\n\n# 입력 특징\nX = bunch['data']\nX.shape\n# (1797, 64)\n\n# 목표 변수\ny = bunch['target']\ny.shape\n# (1797,)\n\n# 데이터 전 처리 및 분할\nmax_value = np.max(X)\nX_scaled = X / max_value\ny_onehot = to_categorical(y)\n\n# 입력 차원 정의: 이미지 형식으로 적용 (높이, 폭, 채널(깊이))\nX_conv = X_scaled.reshape(-1, 8, 8, 1)\ny_conv = y_onehot\n\n# 데이터 분할\nX_conv_train, X_conv_test, y_conv_train, y_conv_test = train_test_split(X_conv, y_conv,\n                                                                        test_size=0.3, random_state=1234)\n\n#\n# 모델 구성\n#\n# 입력 차원 지정\ninput_shape = X_conv_train.shape[1:]\n\n# 아키텍처 정의\nmodel = Sequential()\nmodel.add(InputLayer(input_shape=input_shape))\nmodel.add(Conv2D(64, kernel_size=3, padding='same', activation='relu'))\nmodel.add(MaxPooling2D())\nmodel.add(Flatten())\n# model.add(BatchNormalization())\n# model.add(Dropout(0.1))\nmodel.add(Dense(10, activation='softmax'))\nmodel.summary()\n\n# 모델 정의를 위한 추가 변수 정의\nmodel.compile(optimizer=Adam(lr=0.01), loss='categorical_crossentropy', metrics=['accuracy'])\n\n# 모델 적합\nhist = model.fit(X_conv_train, y_conv_train, validation_split=0.1, batch_size=10, epochs=100, verbose=2)\n\n# model.save(data_path+'/convmodel.h5')\n# saved_model = 
load_model(data_path+'/convmodel.h5')\n\nhist.history.keys()\nplt.plot(hist.history['val_loss'])\n\nscores = model.evaluate(X_conv_test, y_conv_test, batch_size=10)\n# scores = saved_model.evaluate(X_conv_test, y_conv_test, batch_size=10)\nprint('손실함수값=', scores[0], '\\n정확도=', scores[1])\n# 손실함수값= 0.04426065459343465\n# 정확도= 0.9870370339464258\n","sub_path":"reference/understanding-ml-code/ch20-합성곱 신경망/convolution_basics.py","file_name":"convolution_basics.py","file_ext":"py","file_size_in_byte":10602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"520700025","text":"\"\"\"A module for breaking down a glossary into individual entries.\"\"\"\nimport json\nimport os\nimport subprocess\nimport sys\nimport warnings\n\n\n# By default, we treat most glossary data as strings, but sometimes we want the\n# REST API to return a different type (for instance, counts should be integers).\n# The below sequences refer to fields in two ways: a field name just by itself\n# indicates that the field should be indexed as a string; alternatively, if the\n# name is accompanied by a type [e.g. (\"icount\", int)], that means that its\n# values should be converted to the given type.\nbase_fields = [\"project\", \"lang\"]  # fields to copy into each entry\ndirect_fields = [\"gw\", \"headword\", \"cf\", (\"icount\", int), \"id\"]\nindirect_fields = {\n    \"senses\": [\"mng\"],\n    \"forms\": [\"n\"],\n    \"norms\": [\"n\"],\n    \"periods\": [\"p\"]\n}\n\n\ndef name_and_type(field_spec):\n    \"\"\"Break down a field spec into field name and type (string, by default).\"\"\"\n    # NB We cannot just try unwrapping the spec (and assume failure means there\n    # is no type), since strings can also be unwrapped, so the spec \"gw\" would\n    # be extracted as a name (\"g\") and a type (\"w\"). Hence, we check for strings\n    # explicitly.\n    if isinstance(field_spec, str):  # if the spec contains only the field name\n        return field_spec, str\n    else:  # if the spec also has a type\n        return field_spec[0], field_spec[1]\n\n\ndef process_entry(entry):\n    \"\"\"Flatten the nested fields of an entry.\"\"\"\n    new_entry = {}\n    for field in direct_fields:\n        field_name, to_type = name_and_type(field)\n        new_entry[field_name] = to_type(entry[field_name])\n    for top_field in indirect_fields:\n        for inner_field in indirect_fields[top_field]:\n            inner_field_name, to_type = name_and_type(inner_field)\n            new_field = \"{}_{}\".format(top_field, inner_field_name)\n            new_entry[new_field] = [\n                to_type(inner_entry[inner_field_name])\n                for inner_entry\n                in entry.get(top_field, [])  # in case field is missing\n            ]\n    # TODO Consider making this a generator (if too slow for bigger files)?\n    return new_entry\n\n\ndef process_glossary_data(data):\n    \"\"\"\n    Process a glossary and link the entries to their instances.\n\n    Glossaries contain entries in a nested format. This step extracts the\n    relevant information at various nesting levels, and produces a list of\n    entries with \"flattened\" fields. It also incorporates the information from\n    the instances part of the glossary into the relevant entries.\n    Any entries referring to non-existent instances will be ignored. 
A warning\n will be raised in those cases.\n\n :param data: a dictionary representing a glossary, including the instances.\n :return: a list of entries, flattened and linked to instances when possible.\n\n \"\"\"\n instances = data[\"instances\"]\n base_data = {key: data[key] for key in base_fields}\n new_entries = []\n for entry in data[\"entries\"]:\n # Create a flat entry from the nested norms, forms, senses etc.\n new_entry = process_entry(entry)\n # Find the instance that is referred to by the entry. For now, just link\n # the top-level reference rather than that of individual senses, norms\n # etc. Every entry should have a corresponding instance in the glossary,\n # so if something is missing this will throw a KeyError, which will let\n # us know that there is something wrong with the glossary.\n try:\n new_entry[\"instances\"] = instances[entry[\"xis\"]]\n except KeyError:\n warnings.warn(\n \"Could not find the instance {} for entry {}!\".format(\n entry[\"xis\"], entry[\"headword\"])\n )\n continue\n # Add the attributes shared by all entries in the glossary\n new_entry.update(base_data)\n new_entries.append(new_entry)\n return new_entries\n\n\ndef preprocess_glossary(glossary_filename):\n \"\"\"Remove unused fields from a glossary and return it as a dictionary.\"\"\"\n filter_file = os.path.join(\"ingest\", \"remove_unused.jq\")\n try:\n s = subprocess.run(\n [\"jq\", \"-f\", filter_file, glossary_filename],\n stdout=subprocess.PIPE\n )\n except FileNotFoundError as e:\n # If the jq is executable is not found, a FileNotFoundError is raised\n raise RuntimeError('Could not run jq command.') from e\n # We need to decode the output to a string if not working in binary mode\n return json.loads(s.stdout.decode(\"utf8\"))\n\n\ndef process_file(input_name, write_file=True):\n \"\"\"\n Process all entries in a glossary file, extracting the common information to\n create entries that can be individually indexed. Optionally create a new\n file with the entries that can be uploaded manually.\n\n :param input_name: the name of the glossary JSON file\n :param write_file: whether to write the entries in a new file, to be used later\n :return: a list of the new individual entries, as dictionaries\n \"\"\"\n # The glossaries contain a lot of information that we do not use.\n # Sometimes this can make them too large to load in memory. Therefore,\n # we first preprocess each file to remove the fields we do not need.\n try:\n data = preprocess_glossary(input_name)\n except RuntimeError:\n # If the preprocessing fails (most likely reason is that the jq tool\n # is not present), try to read the file normally as a last resort.\n warnings.warn(\n \"Could not preprocess file {}. 
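preprocess_glossary above shells out to jq so json.loads never sees the unused fields. Where jq is unavailable and the glossary still fits in memory, a rough pure-Python stand-in is possible; the retained keys below are an assumption pieced together from base_fields and the fields this module reads, not the contents of the real remove_unused.jq filter:

import json

def preprocess_glossary_py(glossary_filename,
                           keep=("project", "lang", "instances", "entries")):
    # Loads the whole file at once, so this loses jq's streaming advantage.
    with open(glossary_filename) as f:
        data = json.load(f)
    return {key: value for key, value in data.items() if key in keep}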
Is jq installed?\\n\"\n \"Will attempt to ingest without preprocessing.\"\n \"This may fail for large glossaries.\".format(input_name),\n RuntimeWarning\n )\n with open(input_name, 'r') as input_file:\n data = json.load(input_file)\n\n new_entries = process_glossary_data(data)\n if write_file:\n output_name = input_name.rsplit('.', 1)[0] + \"-entries.json\"\n with open(output_name, 'w') as outfile:\n for new_entry in new_entries:\n header = '{ \"index\" : { \"_id\" : \"' + new_entry[\"id\"] + '\" } }'\n print(header, file=outfile)\n print(json.dumps(new_entry), file=outfile)\n print(\"Finished processing {}\".format(input_name))\n return new_entries\n\n\nif __name__ == \"__main__\":\n process_file(sys.argv[1])\n","sub_path":"ingest/break_down.py","file_name":"break_down.py","file_ext":"py","file_size_in_byte":6429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"431580412","text":"from django.core.mail import send_mail\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom django.db.models import Count\nfrom django.shortcuts import render, get_object_or_404\n# from django.views.generic import ListView\n\nfrom taggit.models import Tag\nfrom haystack.query import SearchQuerySet\n\nfrom .models import Post\nfrom .forms import EmailPostForm, CommentForm, SearchForm\n\n\n# class PostListView(ListView):\n# # do pagination.html musimy przekazać page_obj zamiast posts\n# # {% include \"blog/pagination.html\" with page=page_obj %}\n# # ListView tak przesyła wybraną stronę\n# queryset = Post.published.all()\n# context_object_name = 'posts'\n# paginate_by = 3\n# template_name = 'blog/post/list.html'\n\n\n# wracamy do funkcji widoku\ndef post_list(request, tag_slug=None):\n object_list = Post.published.all()\n\n tag = None\n\n # jeśli tag został wpisany jako parametr do URL,\n # to wyszukujemy wszystkie posty, które go zawierają\n # i aktualizujemy object_list\n if tag_slug:\n tag = get_object_or_404(Tag, slug=tag_slug)\n object_list = object_list.filter(tags__in=[tag])\n\n # 3 posty na stronę\n paginator = Paginator(object_list, 3)\n\n # numer bieżącej strony\n # potrzebny tylko dla paginatora\n # nie trzeba go przekazywać do szablonu\n page = request.GET.get('page')\n\n # w posts będziemy mieli obiekt Page,\n # który będziemy przekazywali do szablonu paginatora\n # w osobnej zmiennej,\n # możemy wtedy gdzie indziej użyć szablonu paginatora,\n # będzie on korzystał z dowolnego przekazanego\n # obiektu Page, bez względu na model\n\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n # pierwsza strona, jeśli wartość nie jest integerem\n posts = paginator.page(1)\n except EmptyPage:\n # ostatnia strona, jeśli mamy indeks poza zasięgiem\n posts = paginator.page(paginator.num_pages)\n\n return render(request,\n 'blog/post/list.html',\n {\n # 'page': page,\n 'posts': posts,\n 'tag': tag\n })\n\n\ndef post_detail(request, year, month, day, post):\n post = get_object_or_404(Post,\n slug=post,\n status='published',\n publish__year=year,\n publish__month=month,\n publish__day=day)\n\n # lista aktywnych komentarzy dla posta\n comments = post.comments.filter(active=True)\n\n # monitorujemy istnienie nowego komentarza\n added = False\n\n # w przypadku wyświetlenia widoku\n # z komentarzami i formularzem używamy GET\n\n # w przypadku umieszczenia komentarza mamy POST\n # wyświetli się komunikat o dodaniu komentarza\n\n if request.method == 'POST':\n # w request.POST mamy tylko nowy komentarz\n # pobieramy tylko jego pola 
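The header + entry pairs written by process_file follow the newline-delimited bulk-index layout used by search engines such as Elasticsearch (an action line, then a document line). A quick sanity check that a generated file pairs back up cleanly; the filename here is hypothetical:

import json

with open("glossary-entries.json") as f:
    lines = [json.loads(line) for line in f]
actions, entries = lines[::2], lines[1::2]
assert len(actions) == len(entries)
assert all("index" in action for action in actions)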
name, email i body\n comment_form = CommentForm(data=request.POST)\n\n if comment_form.is_valid():\n # tworzymy obiekt komentarza, ale nie zapisujemy do bazy\n new_comment = comment_form.save(commit=False)\n\n # dopisujemy do bazy, którego posta dotyczy komentarz\n # zmiennej comment_form będzie używał szablon\n new_comment.post = post\n new_comment.save()\n added = True\n else:\n comment_form = CommentForm()\n\n # lista podobnych postów\n post_tags_ids = post.tags.values_list('id', flat=True)\n similar_posts = Post.published \\\n .filter(tags__in=post_tags_ids) \\\n .exclude(id=post.id)\n # liczymy tagi dla każdego posta\n similar_posts = similar_posts \\\n .annotate(same_tags=Count('tags')) \\\n .order_by('-same_tags', '-publish')[:4]\n\n # mamy też funkcje agregujące Avg, Max, Min, Sum\n # w django-taggit mamy też manager similar_objects() oraz inne\n\n # bez względu na to, czy korzystamy z GET/POST\n # używamy tego samego URL i tego samego szablonu\n # w przypadku klikania przez przeglądarkę\n # najpierw będzie użyte GET, a potem ten sam widok\n # będzie obsługiwał POST i przekazywał wszystkie dane\n return render(request,\n 'blog/post/detail.html',\n {\n 'post': post,\n 'comments': comments,\n 'comment_form': comment_form,\n 'added': added,\n 'similar_posts': similar_posts,\n })\n\n\ndef post_share(request, post_id):\n post = get_object_or_404(Post, id=post_id, status='published')\n sent = False\n cd = {}\n\n if request.method == 'POST':\n # formularz został przesłany\n form = EmailPostForm(request.POST)\n\n if form.is_valid():\n # słownik z polami i ich wartościami\n # w razie braku powodzenia walidacji\n # cleaned_data będzie zawierał tylko poprawne pola\n # błędy walidacji dostępne są w forms.errors\n cd = form.cleaned_data\n post_url = request.build_absolute_uri(post.get_absolute_url())\n subject = '{} ({}) recommends you reading \"{}\"' \\\n .format(cd['name'], cd['email'], post.title)\n message = 'Read \"{}\" at {}\\n\\n{}\\'s comments: {}' \\\n .format(post.title, post_url, cd['name'], cd['comments'])\n send_mail(subject,\n message,\n cd['email'],\n (cd['to'],))\n sent = True\n\n # tutaj nastąpi wysłanie maila\n else:\n # użycie GET i wyświetlenie pustego formularza\n form = EmailPostForm()\n\n # wysyłamy form zamiast cd, będziemy mieli dostęp do form.as_p\n return render(request,\n 'blog/post/share.html',\n {\n 'post': post,\n 'form': form,\n 'cd': cd,\n 'sent': sent\n })\n\n\ndef post_search(request):\n form = SearchForm()\n cd = None\n results = None\n total_results = None\n\n if 'query' in request.GET:\n form = SearchForm(request.GET)\n\n if form.is_valid():\n cd = form.cleaned_data\n results = SearchQuerySet()\\\n .models(Post)\\\n .filter(content=cd['query'])\\\n .load_all()\n\n # dzięki load_all() ładujemy za jednym razem wszystkie rezultaty z bazy\n # inaczej musielibyśmy pytać bazę o każdy obiekt z osobna\n # każdy rezultat to dokument zwrócony przez Solr i opakowany przez Haystack\n\n total_results = results.count()\n\n return render(request,\n 'blog/post/search.html',\n {\n 'form': form,\n 'cd': cd,\n 'results': results,\n 'total_results': total_results\n })\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"250142612","text":"from numpy import * #import de numpy\nfrom pylab import * #import de matplotlib.pylab\nfrom scipy.stats import norm #import du module norm de scipy.stats\n\nt=arange(0,5,0.1)\nc=[1,2,2] #Définition 
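The similar-posts block in the Django views above assembles the recommendation by hand with values_list plus annotate(Count('tags')); as the Polish comment notes, django-taggit also ships a similar_objects() helper. A one-line equivalent for comparison (ordering differs slightly: similar_objects() ranks by shared tag count only, without the '-publish' tie-break):

similar_posts = post.tags.similar_objects()[:4]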
du polynome (t**2+2*t+2)\ny=polyval(c,t) #Evaluation du polynome\nyn=y+norm.rvs(size=len(t)) #Ajout d'un bruit gaussian\n\nc_est = np.polyfit(t, yn, 2) #regression polynomiale (degré 2)\ny_est=polyval(c_est,t)\nplot(t,yn,'o',label=\"mesure\") #Affichage des points\nplot(t,y_est,label=\"regression\")#Affichage des points\nxlabel('temps')\nylabel('signal')\nlegend()\nshow() #affichage des courbes","sub_path":"chapitre4/scipy3.py","file_name":"scipy3.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"511756927","text":"\"\"\"\nCopyright (c) [2019] [sixlab.cn]\n[https://github.com/PatrickRoot/six-site] is licensed under the Mulan PSL v1.\nYou can use this software according to the terms and conditions of the Mulan PSL v1.\nYou may obtain a copy of Mulan PSL v1 at:\n http://license.coscl.org.cn/MulanPSL\nTHIS SOFTWARE IS PROVIDED ON AN \"AS IS\" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR\nPURPOSE.\nSee the Mulan PSL v1 for more details.\n\"\"\"\nfrom flask import Flask, url_for, request, jsonify, redirect\nfrom flask_apscheduler import APScheduler\n\nfrom apps.api import app_api\nfrom apps.comments import app_comments\nfrom apps.notes import app_notes\nfrom apps.notify import app_notify\nfrom apps.posts import app_posts\nfrom apps.tags import app_tags\nfrom apps.thought import app_thought\nfrom apps.thoughts import app_thoughts\nfrom apps.users import app_users\nfrom config.filter import register_filter\nfrom config.init import init_table\nfrom config.scheduler import add_jobs\nfrom config.utils import login_user\nfrom models.posts import posts_by_num, count_num, render_list\n\ninit_table()\n\napp = Flask(__name__)\n\napp.config[\"JSON_AS_ASCII\"] = False\n\nscheduler = APScheduler()\nscheduler.init_app(app)\nscheduler.start()\n\nadd_jobs(scheduler)\nregister_filter(app)\n\napp.register_blueprint(app_tags, url_prefix='/tags')\napp.register_blueprint(app_posts, url_prefix='/posts')\napp.register_blueprint(app_thought, url_prefix='/thought')\napp.register_blueprint(app_thoughts, url_prefix='/thoughts')\napp.register_blueprint(app_comments, url_prefix='/comments')\napp.register_blueprint(app_notify, url_prefix='/notify')\napp.register_blueprint(app_notes, url_prefix='/notes')\napp.register_blueprint(app_users, url_prefix='/user')\napp.register_blueprint(app_api, url_prefix='/api')\n\n\n@app.before_request\ndef before_request():\n username = login_user()\n path = request.path\n if username is None:\n print(path + \" - None\")\n if \"/auth/\" in path:\n if request.method == \"GET\":\n return redirect(url_for('app_users.login_get', message=\"未登录\"))\n else:\n return jsonify({\n \"success\": False,\n \"message\": \"未登录\"\n })\n else:\n print(path + \" - \" + username)\n\n\n@app.route('/')\ndef index():\n posts_list = posts_by_num(1)\n total_number = count_num()\n return render_list(posts_list=posts_list, url_prefix=url_for(\"app_thoughts.index\")+\"p/\", page_num=1, total_number=total_number)\n\n\n@app.route('/favicon.ico')\ndef favicon():\n return redirect(url_for('static', filename='favicon.ico'))\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8888)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"18407085","text":"from argparse import 
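The pylab regression demo above recovers the quadratic coefficients from the noisy signal with np.polyfit; passing cov=True additionally returns the coefficient covariance matrix, which puts rough error bars on the fit:

c_est, cov = np.polyfit(t, yn, 2, cov=True)
c_err = np.sqrt(np.diag(cov))   # 1-sigma uncertainty per coefficient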
ArgumentParser\n\nparser = ArgumentParser()\n\n# Agregando las diferentes opciones de mi linea de comandos\nparser.add_argument(\"-d\", \"--debug\", help=\"Activa logs en modo desarrollo\", action=\"store_true\")\n\nparser.add_argument(\"-o\", \"--operacion\", help=\"Indica la operacion matematica\", default=\"+\")\n\nparser.add_argument(\"-n\", \"--numeros\", help=\"Lista de numeros a operar\")\n\n# Leyendo los argumentos y parsearlos respecta ^^\nargs = parser.parse_args()\n\ndef print_debug(mensaje):\n if args.debug:\n print(mensaje)\n\ndef suma(*numeros):\n print_debug(\"Ejecutando la suma\")\n resultado = 0\n for numero in numeros:\n resultado = resultado + numero\n return resultado\n\ndef resta(*numeros):\n print_debug(\"Ejecutando la resta\")\n resultado = numeros[0]\n for numero in numeros[1:]:\n resultado = resultado - numero\n return resultado\n\ndef multiplicacion(*numeros):\n print_debug(\"Ejecutando la multiplicacion\")\n resultado = 1\n for numero in numeros:\n resultado = resultado * numero\n return resultado\n\ndef division(*numeros):\n print_debug(\"Ejecutando la division\")\n resultado = numeros[0]\n try:\n for numero in numeros[1:]:\n resultado = resultado / numero\n return resultado\n except ZeroDivisionError:\n print(\"Error: División por 0\")\n return 0\n\nargumentos = args.numeros\n\noperacion = args.operacion\n\n# Divido los numeros por una coma: \"1,2,3\" -> [\"1\",\"2\",\"3\"]\nargumentos_split = argumentos.split(\",\")\n\n# Convierte cada texto en un numero\nnumeritos = []\nfor argumento in argumentos_split:\n numeritos.append(int(argumento))\n\nif operacion == \"+\":\n print(suma(*numeritos))\nelif operacion == \"-\":\n print(resta(*numeritos))\nelif operacion == \"*\":\n print(multiplicacion(*numeritos))\nelif operacion == \"/\":\n print(division(*numeritos))\nelse:\n print(\"Operación invalida\")","sub_path":"sesion06/opearciones_cli_argparse.py","file_name":"opearciones_cli_argparse.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"298679399","text":"import os\nimport sys\nimport fractions\nimport argparse\nfrom PIL import Image\n\n\ndef append_subparser_for_scaling(subparsers):\n parser_scale = subparsers.add_parser(\n 'scale',\n help='Command to scale image by a single parameter.'\n )\n parser_scale.add_argument(\n 'scale',\n type=float,\n help='Scale coefficient.'\n )\n\n\ndef append_subparser_for_resizing(subparsers):\n parser_resize = subparsers.add_parser(\n 'resize',\n help='Command to resize image by desired height & width.'\n )\n parser_resize.add_argument(\n '--width',\n type=int,\n help='Width of target image.'\n )\n parser_resize.add_argument(\n '--height',\n type=int,\n help='Height of target image.'\n )\n\n\ndef create_parser():\n parser = argparse.ArgumentParser()\n\n subparsers = parser.add_subparsers(\n help='Help for scaling & resizing commands.',\n dest='command'\n )\n\n append_subparser_for_scaling(subparsers)\n append_subparser_for_resizing(subparsers)\n\n parser.add_argument(\n 'input',\n help='File path to original image.'\n )\n parser.add_argument(\n '--output',\n help='File path to target image.'\n )\n\n return parser\n\n\ndef load_image(path_to_image):\n try:\n return Image.open(path_to_image)\n except FileNotFoundError:\n return None\n\n\ndef get_target_img_size(args, original_img_size):\n if args.command == 'scale':\n return get_target_img_size_by_scale(original_img_size, args.scale)\n elif args.command == 'resize':\n return 
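The Spanish-commented argparse calculator above wires up -d/-o/-n flags; sample invocations and their results, grounded in the operations it defines (script name as recorded in the metadata):

#   python opearciones_cli_argparse.py -n 1,2,3          -> 6    (default operation "+")
#   python opearciones_cli_argparse.py -o "*" -n 2,3,4   -> 24
#   python opearciones_cli_argparse.py -d -o / -n 10,0   -> "Error: División por 0" then 0, with debug logs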
get_target_img_size_by_dimensions(original_img_size, args.width, args.height)\n else:\n return None\n\n\ndef get_target_img_size_by_scale(original_img_size, scale):\n return tuple(\n int(dimension_value * scale)\n for dimension_value in original_img_size\n )\n\n\ndef get_target_img_size_by_dimensions(original_img_size, width, height):\n if width is not None and height is not None:\n result_img_size = (\n width,\n height,\n )\n elif height is None:\n result_img_size = (\n width,\n calculate_target_height(\n original_img_size=original_img_size,\n target_width=width\n )\n )\n else:\n result_img_size = (\n calculate_target_width(\n original_img_size,\n height,\n ),\n height\n )\n return result_img_size\n\n\ndef calculate_target_height(original_img_size, target_width):\n division_of_width = (original_img_size[0] / target_width)\n return int(original_img_size[1] / division_of_width)\n\n\ndef calculate_target_width(original_img_size, target_height):\n division_of_height = (original_img_size[1] / target_height)\n return int(original_img_size[0] / division_of_height)\n\n\ndef verify_aspect_ratio(original_img_size, target_img_size):\n original = fractions.Fraction(original_img_size[0], original_img_size[1])\n target = fractions.Fraction(target_img_size[0], target_img_size[1])\n return original == target\n\n\ndef resize_image(original_img, target_img_size):\n img = original_img.resize(target_img_size, Image.ANTIALIAS)\n return img\n\n\ndef get_target_img_name(original_img_name, target_img_size):\n filepath, extension = os.path.splitext(original_img_name)\n return '{}__{}x{}{}'.format(\n filepath,\n target_img_size[0],\n target_img_size[1],\n extension\n )\n\n\nif __name__ == '__main__':\n\n arguments_parser = create_parser()\n args = arguments_parser.parse_args()\n\n original_img = load_image(args.input)\n if original_img is None:\n sys.exit('Cannot open image: {}'.format(args.input))\n\n target_img_size = get_target_img_size(args, original_img.size)\n\n if target_img_size is None:\n sys.exit('Cannot parse operation type.')\n\n if not verify_aspect_ratio(original_img.size, target_img_size):\n print('Aspect ratio is collapsed.')\n\n target_img = resize_image(original_img, target_img_size)\n\n if args.output is None:\n target_img_name = get_target_img_name(args.input, target_img_size)\n else:\n target_img_name = args.output\n\n target_img.save(target_img_name)\n print('Image has been saved at: {}'.format(target_img_name))\n","sub_path":"image_resize.py","file_name":"image_resize.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"169317663","text":"\"\"\"\ndisplayer.py\n\nProvides an interface over the loaded bytecode. 
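verify_aspect_ratio above compares fractions.Fraction values, so the width:height check is exact integer arithmetic rather than a float comparison; both sides reduce to lowest terms before the equality test:

from fractions import Fraction
Fraction(1920, 1080) == Fraction(1280, 720)   # True: both reduce to 16/9
Fraction(1920, 1080) == Fraction(1280, 721)   # False: ratio drifted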
\n\nNote 1: Items of explorer are added by loader.py \nNote 2: Read/Write bytecode color code:\n green : Code read/write not modified\n yellow: Code read/write unsaved changes\n red : Code read/write invalid\n black : Code read-only\n\"\"\"\n\nfrom PyQt4.QtGui import QTreeWidget, QTreeWidgetItem, QMenu,\\\nQFileDialog, QMessageBox\nfrom PyQt4.QtCore import Qt, QDir\n\nfrom os.path import basename\nfrom editbytecode import WriteBytecodeProxy\nfrom sys import version_info\nimport marshal, loader\n\n# Each item have 3 used data slot in their first index\n# 32- The code object, 33-The source if available, \n# 34 - WriteBytecodeProxy if set as editable\nclass Explorer(QTreeWidget):\n \n def __init__(self, parent):\n super(Explorer, self).__init__()\n \n self.item = None\n self.loaded_files = []\n \n self.menu = QMenu()\n \n self.remove_action = self.menu.addAction(\"Remove\")\n self.remove_action.triggered.connect(self.removeCode)\n self.menu.addSeparator()\n self.parse_action = self.menu.addAction(\"Disassemble\")\n self.parse_action.triggered.connect(self.parseCode)\n self.decompile_action = self.menu.addAction(\"Decompile\")\n self.decompile_action.triggered.connect(self.decompileCode)\n self.menu.addSeparator()\n self.save_pyc_action = self.menu.addAction(\"Save as .pyc\")\n self.save_pyc_action.triggered.connect(self.saveAsPyc)\n self.extract_action = self.menu.addAction(\"Extract code\")\n self.extract_action.triggered.connect(self.extractCode)\n self.menu.addSeparator()\n self.editable_action = self.menu.addAction(\"Permisson Read/Write\")\n self.editable_action.triggered.connect(self.setCodeEditable)\n self.not_editable_action = self.menu.addAction(\"Permisson Read-Only\")\n self.not_editable_action.triggered.connect(self.setCodeNotEditable)\n self.recompile_action = self.menu.addAction(\"Rebuild code\")\n self.recompile_action.triggered.connect(self.recompileCode)\n\n self.empty_menu = QMenu()\n \n self.itemDoubleClicked.connect(self.loadCodeObj)\n self.customContextMenuRequested.connect(self.showOptions)\n \n self.parent = parent\n self.setHeaderLabels([\"Name\", \"Type\", \"Size (bytes)\"])\n self.setContextMenuPolicy(Qt.CustomContextMenu)\n \n def addItem(self, name, t, size, parent=None):\n \"\"\"Add an item to the tree\"\"\"\n i = QTreeWidgetItem([name, t, str(size)])\n if not parent:\n self.addTopLevelItem(i)\n \n else:\n if type(parent) == QTreeWidgetItem:\n parent.addChild(i)\n \n return i\n \n def showOptions(self, point):\n \"\"\"Spawn an option menu to interact with the loaded pyc\"\"\"\n self.item = self.itemAt(point)\n point = self.mapToGlobal(point)\n point.setY(point.y()+30)\n \n if self.item:\n self.menu.exec_( point )\n else:\n self.empty_menu.exec_( point )\n \n def loadCodeObj(self, item, silent=False):\n \"\"\"\n Parse and load a code object into the other widget.\n Called when double clicking on an item.\n \n Arguments:\n item: Item of the explorer\n silent: Add messages to the logs\n \"\"\"\n \n disp = self.parent.display\n info = self.parent.info\n writable = item.textColor(0) in WriteBytecodeProxy.COLORS\n \n if writable:\n code = item.data(0, 34)\n else:\n code = item.data(0, 32)\n \n #Python2 compatibility fix\n if version_info.major == 2:\n code = code.toPyObject()\n \n if writable:\n code = code.code\n \n if item.parent():\n pcode = item.parent().data(0, 32)\n #Python2 compatibility fix\n if version_info.major == 2:\n pcode = pcode.toPyObject()\n else:\n pcode = None\n \n try:\n disp.loadCode(code, pcode, item)\n info.loadCode(code, item)\n except 
Exception:\n if not silent:\n self.parent.console.add_log.emit(\"Cannot decompile code. This\"+\\\n \" is most likely to happens because the code is broken.\",\n \"#FF0000\", False, False)\n return False\n \n \n if not silent:\n name = str(self.currentItem().text(0))\n self.parent.console.add_log.emit(\"{} disassembled.\".format(name), \n \"#00A000\", False, False)\n \n def pathLoaded(self, path):\n \"Check if path is loaded in the explorer\"\n \n if path in self.loaded_files:\n return False\n \n return True\n \n def parseCode(self,):\n \"\"\"Call self.loadCodeObj. Used by the option menu.\"\"\"\n self.loadCodeObj(self.item)\n \n def decompileCode(self):\n \"\"\"\n Decompile the code held in the selected code object.\n The chidren code will also be decompiled.\n \"\"\"\n self.parent.console.addLog(\"Decompiling \"+ self.item.text(0)+\"...\")\n \n code = self.item.data(0, 32)\n source = \"TODO: code decompiler\"\n \n self.parent.txt_edit.addTextEditor(source, self.item.text(0), True) \n if not self.parent.txt_dock.isVisible():\n self.parent.txt_dock.setVisible(True)\n \n self.parent.console.addLog(self.item.text(0)+\" decompiled.\")\n \n def extractCode(self):\n \"\"\"Extract the code from the code object and save it to a file.\"\"\"\n if self.item == None:return\n \n path = QFileDialog.getSaveFileName(self)\n if path:\n self.reloadCodeConstants(self.item)\n f = open(path, \"wb\")\n #Python 2 compatibility fix\n if version_info[0] == 2:\n marshal.dump(self.item.data(0, 32).toPyObject(), f)\n else:\n marshal.dump(self.item.data(0, 32), f)\n f.close()\n \n self.parent.console.addLog(self.item.text(0)+\" extracted to {0}.\".format(path),\n \"#000000\", False, False)\n \n def saveAsPyc(self):\n \"\"\" Save the code object as a .pyc and reload all his bytecode constants. \"\"\"\n item = self.currentItem()\n self.reloadCodeConstants(item)\n \n ok = self.loadCodeObj(item, True)\n if ok == False:\n self.parent.console.addLog(\"Code broken. 
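extractCode above persists a raw code object with marshal.dump; the same round trip outside the GUI looks like this (marshal blobs are only guaranteed readable by the same Python version that wrote them):

import marshal

code = compile("print('hi')", "<demo>", "exec")
blob = marshal.dumps(code)
assert marshal.loads(blob).co_code == code.co_code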
Cannot save as .pyc.\"\n ,\"#FF0000\", False, False)\n return\n \n if item.text(1) != \"module\":\n QMessageBox.critical(self, \"Error\", \"The code object is not a module.\\n\"+\\\n \"It cannot be saved as pyc.\") \n return\n \n code = item.data(0,32)\n if version_info[0] == 2:\n code = code.toPyObject()\n \n name = QFileDialog.getSaveFileName(self, \"Save .pyc\", filter=\"Compiled bytecode (*.pyc)\")\n magic = getMagic()\n if name:\n loader.saveAsPyc(code, name, magic)\n self.parent.console.addLog(item.text(0)+\" saved to {0}.\".format(name),\n \"#000000\", False, False)\n \n def setCodeEditable(self, silent=False):\n \"\"\"\n Mark codeobject as editable and return the created proxy.\n Used by the explorer or when a code object needs to reload its\n codeobject constants (self.reloadCodeConstants).\n \"\"\"\n item = self.item\n color = item.textColor(0)\n \n if color in WriteBytecodeProxy.COLORS:\n QMessageBox.critical(self, \"Error\", \"Code already writable.\")\n return\n \n writeProxy = WriteBytecodeProxy(item)\n item.setData(0, 34, writeProxy)\n \n if self.parent.display.current_item_loaded == item:\n writeProxy.markCurrentlyEdited()\n \n if not silent:\n self.parent.console.addLog(self.item.text(0)+\" marked as writable.\",\n \"#00A000\", False, False)\n \n return writeProxy\n \n def setCodeNotEditable(self, silent=False):\n \"\"\" Set edited code as read-only \"\"\"\n item = self.item\n color = item.textColor(0)\n \n if not color in WriteBytecodeProxy.COLORS:\n QMessageBox.critical(self, \"Error\", \"Code already read-only.\")\n return\n \n #Python 2 compatibility fix\n if version_info[0] == 2:\n item.data(0, 34).toPyObject().closeProxy()\n else:\n item.data(0, 34).closeProxy()\n \n item.setData(0, 34, None)\n \n if not silent:\n self.parent.console.addLog(self.item.text(0)+\" marked as read-only.\",\n \"#00A000\", False, False)\n \n def reloadCodeConstants(self, item):\n \"\"\"\n Append the edited codeobject constants to the object.\n This method recursive and will also reload all child item.\n This is called when a code object is saved as .pyc (self.saveAsPyc)\n or when a code object is extracted (self.extractcode)\n \n Arguments:\n item(QTreeWidgetItem): Item that hold the constants.\n \"\"\"\n from types import CodeType\n \n #Check if the item have children\n for i in range(item.childCount()):\n self.reloadCodeConstants(item.child(i))\n \n self.item = item\n \n if self.item.textColor(0) in WriteBytecodeProxy.COLORS:\n proxy = self.item.data(0, 34).toPyObject() if version_info[0]==2 else self.item.data(0, 34)\n else:\n proxy = self.setCodeEditable(True)\n constants = proxy.consts\n child_index = 0\n \n for i in range(len(constants)):\n old_const = constants[i]\n if type(old_const) == CodeType:\n new_const = item.child(child_index).data(0, 32)\n if version_info[0] == 2:\n new_const = new_const.toPyObject()\n proxy.editValue(\"constants\", i, new_const)\n child_index += 1\n\n self.recompileCode(True) \n self.setCodeNotEditable(True) \n \n def recompileCode(self, silent=False):\n \"\"\" Save the changes on the editable bytecode \"\"\"\n item = self.item\n proxy = item.data(0, 34)\n \n ok = self.loadCodeObj(self.item, True)\n if ok == False:\n self.parent.console.addLog(\"Code broken. Cannot recompile. 
You can select \\\"Permisson Read-Only\\\" to reset the code.\"\n ,\"#FF0000\", False, False)\n return\n\n if version_info[0] == 2:\n proxy = proxy.toPyObject()\n \n if proxy:\n item.setData(0, 32, proxy.code)\n if not silent:\n self.parent.console.addLog(\"codeobject \\\"{0}\\\" recompiled\".format(item.text(0))\n ,\"#00A000\", False, False)\n else:\n QMessageBox.critical(self, \"Error\", \"Code not writable.\")\n \n def removeCode(self):\n \"\"\"Remove a loaded item from the explorer.\"\"\"\n \n if self.item.parent() != None:\n QMessageBox.critical(self, \"Not supported\", \"Removing a constant from a code object is not currently supported.\")\n else:\n index = self.indexOfTopLevelItem(self.item)\n name = self.item.text(0)\n self.takeTopLevelItem(index)\n \n for i in self.loaded_files:\n if basename(i) == name:\n name = i\n \n self.loaded_files.remove(name)\n self.parent.console.addLog(\"Removed \"+name+\" from the editor.\")\n \n \ndef getMagic():\n \"\"\"Get the magic number for the version of python running.\"\"\"\n from compileall import compile_file\n \n #Python 2 compatibility fix\n if version_info[0] == 2:\n d = QDir()\n else:\n d = QDir(\"__pycache__\")\n \n f = open(\"tmp.py\", \"w\")\n f.write(\"print(\"\")\")\n f.close()\n \n compile_file(\"tmp.py\", quiet=True, )\n for entry in d.entryList():\n if \"tmp\" in entry:\n f = open(d.absoluteFilePath(entry), \"rb\")\n magic = f.read(4)\n f.close()\n d.remove(entry)\n \n QDir().remove(\"tmp.py\")\n if version_info[0] == 2:\n QDir().remove(\"tmp.pyc\")\n \n return magic","sub_path":"ui/explorer.py","file_name":"explorer.py","file_ext":"py","file_size_in_byte":12849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"452555660","text":"'#Agenda Telefonica'\nimport funcoes\n\nfuncoes.bemvindo()\n\n#Opcoes do Usuario\n\nopcao = int(input(\"Selecione uma opção\"))\nprint(\"Selecionou a \", opcao)\n\n#Estrutura de controle\nif opcao == 1:\n\tfuncoes.adicionar()\n\t\nelif opcao == 2:\n\tfuncoes.listar()\nelse:\n\tfuncoes.falha()\n\t \n\t\n\n\n","sub_path":"agenda.py","file_name":"agenda.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"327895923","text":"import sqlite3 as lite\r\n\r\nclass DatabaseManage(object):\r\n def __init__(self):\r\n global conn\r\n try:\r\n conn = lite.connect('fileDB.db')\r\n with conn:\r\n curr = conn.cursor()\r\n curr.execute(\"CREATE TABLE IF NOT EXISTS test(Id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, description TEXT, price TEXT, is_private BOOLEAN NOT NULL DEFAULT 1 )\")\r\n except Exception as e:\r\n print(\"Unable to create a DB !\")\r\n \r\n def insert_data(self, data):\r\n try:\r\n with conn:\r\n curr = conn.cursor()\r\n curr.execute(\r\n \"INSERT INTO test (name, description, price, is_private) VALUES (?,?,?,?)\",\r\n data\r\n )\r\n return True\r\n except Exception as identifier:\r\n return False\r\n def fetch_data(self):\r\n try:\r\n with conn:\r\n curr = conn.cursor()\r\n curr.execute(\"SELECT * FROM test\")\r\n return curr.fetchall()\r\n except Exception as identifier:\r\n return False\r\n def delete_data(self, id):\r\n try:\r\n with conn:\r\n curr = conn.cursor()\r\n sql = \"DELETE FROM test WHERE id = ?\"\r\n curr.execute(sql, [id])\r\n return True\r\n except Exception as identifier:\r\n return False\r\n\r\ndef main():\r\n print(\"*\"*40)\r\n print(\"\\n:: Course Management :: \\n\")\r\n print(\"*\"*40)\r\n print(\"\\n\")\r\n\r\n db = 
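getMagic above recovers the interpreter's bytecode magic by compiling a scratch file and reading back the first four bytes of the resulting .pyc. On Python 3.4+ the same value is exposed directly, which would avoid the temp-file dance (kept here since the module also supports Python 2):

import importlib.util
magic = importlib.util.MAGIC_NUMBER   # the 4-byte header this interpreter writes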
DatabaseManage()\r\n\r\n print(\"#\"*40)\r\n print(\"\\n :: User Manual :: \\n\")\r\n print(\"#\"*40)\r\n\r\n print('\\n1. Insert a new Course\\n')\r\n print('2. Show all courses\\n')\r\n print('3. Delete a course (NEED ID OF COURSE)\\n')\r\n print(\"#\"*40)\r\n \r\n\r\n choice = input(\"Enter a choice: \")\r\n \r\n while(True):\r\n\r\n if choice == \"1\":\r\n name = input(\"Enter course name: \")\r\n desc = input(\"Enter course description: \")\r\n price = input(\"Enter course price: \")\r\n private = input(\"Is this course private(0/1): \")\r\n\r\n if db.insert_data([name, desc, price, private]):\r\n print(\"Course was inserted successfully\")\r\n else:\r\n print(\"OOPS SOMETHING IS WRONG\")\r\n\r\n elif choice == \"2\":\r\n print(\":: Course List ::\")\r\n for index, course in enumerate(db.fetch_data()):\r\n print(\"Serial no. : \", str(index+1))\r\n print(\"Course ID : \", str(course[0]))\r\n print(\"Course Name : \", str(course[1]))\r\n print(\"Course Description : \", str(course[2]))\r\n print(\"Course Price : \", str(course[3]))\r\n private = 'Yes' if course[4] else 'No'\r\n print(\"Is Private : \", private)\r\n print(\"\\n\")\r\n \r\n elif choice == \"3\":\r\n record_id = input(\"Enter the course ID: \")\r\n\r\n if db.delete_data(record_id):\r\n print(\"Course was successfully deleted\")\r\n else:\r\n print(\"OOPS SOMETHING WENT WRONG\")\r\n\r\n else:\r\n print(\"BAD CHOICE\")\r\n\r\n \r\n choice = input(\"Enter Choice or enter 4 to exit : \")\r\n if choice == \"4\":\r\n break\r\n else:\r\n continue\r\n\r\nif __name__ == \"__main__\":\r\n main() \r\n ","sub_path":"pythonApp.py","file_name":"pythonApp.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"615811019","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('project', '0009_auto_20160310_1214'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ImportWorkItem',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=255, verbose_name='title')),\n ('description', models.TextField(null=True, verbose_name='description', blank=True)),\n ('csv_file', models.FileField(upload_to=b'')),\n ('project_task', models.ForeignKey(to='project.ProjectTask')),\n ],\n options={\n 'verbose_name': 'ImportWorkItem',\n 'verbose_name_plural': 'ImportWorkItems',\n },\n bases=(models.Model,),\n ),\n migrations.AlterField(\n model_name='workitemassignment',\n name='agents',\n field=models.ManyToManyField(related_name=b'+', to=b'employee.Employee'),\n ),\n ]\n","sub_path":"invensis_pmc/project/migrations/0010_auto_20160312_1213.py","file_name":"0010_auto_20160312_1213.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"494113140","text":"# -*- coding:utf-8 -*-\n__author__ = '张全亮'\n# 可以封装成函数,方便 Python 的程序调用\nimport socket\n\n\ndef get_host_ip():\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n ip = s.getsockname()[0]\n finally:\n s.close()\n\n return ip\n\n\nif __name__ == '__main__':\n get_host_ip()\n","sub_path":"pinduoduo/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
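get_host_ip in tools.py above connects a UDP socket to 8.8.8.8; for SOCK_DGRAM no packet is actually sent, the kernel merely selects the outbound interface, so getsockname() yields the LAN-facing address without any real traffic. Quick check (the address depends on your network):

print(get_host_ip())   # e.g. '192.168.1.23'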
+{"seq_id":"592903171","text":"import os\nimport sys\nsys.path.append(os.path.abspath(os.path.join(os.getcwd(), '..', 'lib')))\nimport pop_a_shot\n\ndef round_1_on_score(game_round): \n if game_round.get_time() > (game_round.timer_start_value / 2):\n game_round.add_points(2)\n else:\n game_round.add_points(3) \n\ndef all_3_points(game_round): \n game_round.add_points(3)\n\nclass classic(pop_a_shot.Game):\n \n def game_type_name(self):\n return \"Pop-a-Shot Classic\"\n\n def game_rounds(self):\n return [pop_a_shot.GameRound(round_number=1, \n timer_start_value=30, \n score_start_value=2, \n on_score=round_1_on_score), \n pop_a_shot.GameRound(round_number=2, \n timer_start_value=10, \n on_score=all_3_points),\n pop_a_shot.GameRound(round_number=3, \n timer_start_value=10, \n on_score=all_3_points)]\n\nif __name__ == '__main__':\n debug = sys.argv[1] if len(sys.argv) > 1 else None\n classic(debug)\n\n","sub_path":"python_game/game_modes/classic.py","file_name":"classic.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"163886533","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 17 10:57:18 2021\n\n@author: gerry\n\"\"\"\n\nordliste = dict()\nlinjeteller = 0\n\nwith open(\"oving_1_rein_tekst.txt\", \"r\", encoding=\"UTF8\") as fila:\n for linje in fila:\n linjeteller += 1\n ordene = linje.split()\n for ordet in ordene:\n ordet = ordet.lower()\n if ordet in ordliste:\n teller = ordliste[ordet]\n teller += 1\n ordliste[ordet] = teller\n print(f\"Ordet \\\"{ordet}\\\" forekommer for {teller} gang på linje {linjeteller}\")\n else:\n ordliste[ordet] = 1\n print(f\"Ordet \\\"{ordet}\\\" forekommer først på linje {linjeteller}\")\n \nfila.close()\n","sub_path":"Øving_8/Øving 8a.2 teller ord første og flere ganger.py","file_name":"Øving 8a.2 teller ord første og flere ganger.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"516538602","text":"\"\"\" drivers for coordinate scans\n\"\"\"\n\nimport numpy\nimport automol\n\n\n# FUNCTIONS TO SET UP TORSION NAME LISTS\ndef names_from_geo(geo, ndim_tors, saddle=False):\n \"\"\" Build the tors name list from a geom\n \"\"\"\n if not saddle:\n tors_names = [\n [name]\n for name in automol.geom.zmatrix_torsion_coordinate_names(geo)\n ]\n if ndim_tors in ('mdhr', 'mdhrv'):\n tors_names = [[tors\n for rotor in tors_names\n for tors in rotor]]\n else:\n tors_names = []\n\n return tors_names\n\n\ndef names_from_dct(spc_dct_i, ndim_tors):\n \"\"\" Build the tors name list from a dictionary\n \"\"\"\n\n # Read names from dct\n inp_tors_names, amech_ts_tors_names = [], []\n if 'tors_names' in spc_dct_i:\n inp_tors_names = spc_dct_i['tors_names']\n if 'amech_ts_tors_names' in spc_dct_i:\n amech_ts_tors_names = spc_dct_i['amech_ts_tors_names']\n if ndim_tors == '1dhr':\n amech_ts_tors_names = [[name] for name in amech_ts_tors_names]\n else:\n amech_ts_tors_names = [[name for name in amech_ts_tors_names]]\n\n # Set the run tors names\n if inp_tors_names:\n tors_names = inp_tors_names\n print('Using tors names defined by user...')\n elif amech_ts_tors_names:\n tors_names = amech_ts_tors_names\n print('Using tors names generated by AutoMech...a')\n else:\n tors_names = []\n\n return tors_names, amech_ts_tors_names\n\n\n# FUNCTIONS USED TO BUILD LSTS OF TORSIONS OF ANY DIMENSIONALITY\ndef hr_prep(zma, tors_name_grps, scan_increment=30.0, ndim_tors='1dhr',\n 
frm_bnd_key=(), brk_bnd_key=()):\n \"\"\" set-up the hr for different rotor combinations\n tors_names = [ ['D1'], ['D2', 'D3'], ['D4'] ]\n \"\"\"\n\n # Get the tors names if thery have not already been supplied\n val_dct = automol.zmatrix.values(zma)\n\n # Deal with the dimensionality of the rotors\n if ndim_tors in ('mdhr', 'mdhrv'):\n tors_name_grps = mdhr_prep(zma, tors_name_grps)\n\n # Build the grids corresponding to the torsions\n tors_grids = [] # tors_syms = []\n for tors_names in tors_name_grps:\n tors_linspaces = automol.zmatrix.torsional_scan_linspaces(\n zma, tors_names, scan_increment, frm_bnd_key=frm_bnd_key,\n brk_bnd_key=brk_bnd_key)\n tors_grids.append(\n [numpy.linspace(*linspace) + val_dct[name]\n for name, linspace in zip(tors_names, tors_linspaces)]\n )\n # tors_sym_nums.append(list(automol.zmatrix.torsional_symmetry_numbers(\n # zma, tors_names, frm_bnd_key=frm_bnd_key, brk_bnd_key=brk_bnd_key))\n\n return tors_name_grps, tors_grids # tors_syms\n\n\ndef mdhr_prep(zma, run_tors_names):\n \"\"\" Handle cases where the MDHR\n \"\"\"\n\n # Figure out set of torsions are to be used: defined or AMech generated\n rotor_lst = run_tors_names\n\n # Check the dimensionality of each rotor to see if they are greater than 4\n # Call a function to reduce large rotors\n final_rotor_lst = []\n for rotor in rotor_lst:\n if len(rotor) > 4:\n for reduced_rotor in reduce_rotor_dimensionality(zma, rotor):\n final_rotor_lst.append(reduced_rotor)\n else:\n final_rotor_lst.append(rotor)\n\n return final_rotor_lst\n\n\ndef reduce_rotor_dimensionality(zma, rotor):\n \"\"\" For rotors with a dimensionality greater than 4, try and take them out\n \"\"\"\n\n # Find the methyl rotors for that are a part of the MDHR\n reduced_rotor_lst = []\n methyl_rotors = []\n for tors in rotor:\n # If a methyl rotor add to methyl rotor list, or add to reduced lst\n if is_methyl_rotor(zma, rotor): # Add arguments when ID methyls\n methyl_rotors.append(zma, tors)\n else:\n reduced_rotor_lst.append(tors)\n\n # Add each of methyl rotors, if any exist\n if methyl_rotors:\n for methyl_rotor in methyl_rotors:\n reduced_rotor_lst.append(methyl_rotor)\n\n # Check new dimensionality of list; if still high, flatten to lst of 1DHRs\n if len(reduced_rotor_lst) > 4:\n reduced_rotor_lst = [tors\n for rotor in reduced_rotor_lst\n for tors in rotor]\n\n return reduced_rotor_lst\n\n\ndef is_methyl_rotor(zma, rotor):\n \"\"\" Check if methyl rotor\n \"\"\"\n raise NotImplementedError(zma, rotor)\n\n\n# Building constraints\ndef build_constraint_dct(zma, tors_names):\n \"\"\" Build a dictionary of constraints\n \"\"\"\n constraint_names = [name\n for name_lst in tors_names\n for name in name_lst]\n constraint_names.sort(key=lambda x: int(x.split('D')[1]))\n zma_vals = automol.zmatrix.values(zma)\n constraint_dct = dict(zip(\n constraint_names,\n (round(zma_vals[name], 2) for name in constraint_names)\n ))\n\n return constraint_dct\n\n\n# Functions to handle setting up torsional defintion and potentials properly\ndef set_groups_ini(zma, tors_name, ts_bnd, saddle):\n \"\"\" Set the initial set of groups\n \"\"\"\n gra = automol.zmatrix.graph(zma, remove_stereo=True)\n coo_dct = automol.zmatrix.coordinates(zma, multi=False)\n axis = coo_dct[tors_name][1:3]\n atm_key = axis[1]\n if ts_bnd:\n for atm in axis:\n if atm in ts_bnd:\n atm_key = atm\n break\n group = list(\n automol.graph.branch_atom_keys(\n gra, atm_key, axis, saddle=saddle, ts_bnd=ts_bnd) - set(axis))\n if not group:\n for atm in axis:\n if atm != atm_key:\n atm_key = 
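Each torsion grid assembled in hr_prep is an evenly spaced dihedral scan shifted so it starts at the equilibrium value read from the z-matrix; schematically, for a 30-degree increment over a full rotation (the exact span automol computes may differ with symmetry numbers and reacting bonds, and the equilibrium value below is hypothetical):

import numpy
eq_val = 1.2   # hypothetical equilibrium dihedral from val_dct, in radians
grid = numpy.linspace(0.0, 2 * numpy.pi, 12, endpoint=False) + eq_val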
atm\n group = list(\n automol.graph.branch_atom_keys(\n gra, atm_key, axis, saddle=saddle, ts_bnd=ts_bnd) - set(axis))\n\n return group, axis, atm_key\n\n\ndef check_saddle_groups(zma, rxn_class, group, axis, pot, ts_bnd, sym_num):\n \"\"\" Assess that hindered rotor groups and axes\n \"\"\"\n n_atm = automol.zmatrix.count(zma)\n if 'addition' in rxn_class or 'abstraction' in rxn_class:\n group2 = []\n ts_bnd1 = min(ts_bnd)\n ts_bnd2 = max(ts_bnd)\n for idx in range(ts_bnd2, n_atm):\n group2.append(idx)\n if ts_bnd1 in group:\n for atm in group2:\n if atm not in group:\n group.append(atm)\n\n # Check to see if symmetry of XH3 rotor was missed\n if sym_num == 1:\n group2 = []\n for idx in range(n_atm):\n if idx not in group and idx not in axis:\n group2.append(idx)\n all_hyd = True\n symbols = automol.zmatrix.symbols(zma)\n hyd_count = 0\n for idx in group2:\n if symbols[idx] != 'H' and symbols[idx] != 'X':\n all_hyd = False\n break\n if symbols[idx] == 'H':\n hyd_count += 1\n if all_hyd and hyd_count == 3:\n sym_num = 3\n lpot = int(len(pot)/3)\n potp = []\n potp[0:lpot] = pot[0:lpot]\n pot = potp\n\n return group, axis, pot\n\n\ndef check_dummy_trans(zma):\n \"\"\" check trans\n \"\"\"\n atom_symbols = automol.zmatrix.symbols(zma)\n dummy_idx = []\n for atm_idx, atm in enumerate(atom_symbols):\n if atm == 'X':\n dummy_idx.append(atm_idx)\n remdummy = numpy.zeros(len(zma[0]))\n for dummy in dummy_idx:\n for idx, _ in enumerate(remdummy):\n if dummy < idx:\n remdummy[idx] += 1\n\n return remdummy\n","sub_path":"lib/structure/tors.py","file_name":"tors.py","file_ext":"py","file_size_in_byte":7501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"341512882","text":"# -*- coding: utf-8 -*-\n\nfrom collections import deque\n\n\ndef merge_two_list_keep_order(l1, l2):\n \"\"\"\n Merge l1 and l2, keep the order\n \"\"\"\n result = []\n while l1 and l2:\n if l1[0] < l2[0]:\n result.append(l1.pop(0)) # lst.pop(0) time complexity is O(n)\n else:\n result.append(l2.pop(0))\n result.extend(l1 + l2)\n return result\n\n\ndef merge_two_list_keep_order_with_deque(lst1, lst2):\n \"\"\"Use deque for performance; deque popleft() is O(1) complexity;\n Better than list pop(0) O(n) complexity.\n \"\"\"\n result, q1, q2 = deque(), deque(lst1), deque(lst2)\n while q1 and q2:\n smaller = q1.popleft() if q1[0] <= q2[0] else q2.popleft()\n result.append(smaller)\n result.extend(q1 + q2)\n return list(result)\n\n\nif __name__ == '__main__':\n l1 = [1, 3, 2, 4, 5, 7]\n l2 = [2, 4, 1, 3, 0, 7]\n r = merge_two_list_keep_order(l1, l2)\n print(r)\n assert r == [1, 2, 3, 2, 4, 1, 3, 0, 4, 5, 7, 7]\n\n l3 = [4, 15, 16, 50]\n l4 = [8, 23, 42, 108]\n expect = [4, 8, 15, 16, 23, 42, 50, 108]\n assert merge_two_list_keep_order(l3, l4) == expect\n","sub_path":"src/problems/p13_merge_array_keep_order.py","file_name":"p13_merge_array_keep_order.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"133791650","text":"import re\nimport math\n\nclass NestedParser(object):\n \"\"\"\n Takes in a string and separates everything out into nested lists,\n broken by white space\n Modified from http://stackoverflow.com/a/14715850\n\n use - NestedParser.parse(string)\n\n Sample in - '(4 + 6) / sqrt((2 - 1) / (1 + 2))'\n out- [['4', '+', '6'], '/', 'sqrt', [['2', '-', '1'], '/', ['1', '+', '2']]]\n \"\"\"\n class ParserNode(list):\n def __init__(self, parent=None):\n self.parent = parent\n\n def 
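For inputs that are already sorted, the standard library provides the same stable two-list merge lazily, which makes a handy cross-check for merge_two_list_keep_order above:

import heapq
list(heapq.merge([4, 15, 16, 50], [8, 23, 42, 108]))
# -> [4, 8, 15, 16, 23, 42, 50, 108], matching the module's own `expect` list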
__init__(self, left='\\(', right='\\)'):\n self.scanner = re.Scanner([(left, self.left),\n (right, self.right),\n (r\"\\s+\", None),\n (\".+?(?=(%s|%s|$|\\s))\" % (right, left), self.other), ])\n self.result = self.ParserNode()\n self.current = self.result\n\n def parse(self, content):\n self.scanner.scan(content)\n return self.remove_nodes(self.result)\n\n def left(self, scanner, token):\n new = self.ParserNode(self.current)\n self.current.append(new)\n self.current = new\n\n def right(self, scanner, token):\n self.current = self.current.parent\n\n def other(self, scanner, token):\n self.current.append(token.strip())\n\n # Otherwise the output has ParserNode objects nested in\n def remove_nodes(self, nest):\n out = []\n for x in nest:\n if isinstance(x, self.ParserNode):\n out.append(self.remove_nodes(x))\n else:\n out.append(x)\n return out\n\ndef eq_eval(equation):\n \"\"\"\n Recursive function that uses helper definitions to calculate\n an equation represented by nested lists of strings\n\n sample in - [['4', '+', '6'], '/', 'sqrt', [['2', '-', '1'], '/', ['1', '+', '2']]]\n out - 17.3205080757\n \"\"\"\n valid_funcs = ['sqrt', 'sin', 'cos', 'tan', 'ln']\n order_operations = {1: ['**'], \n 2: ['*', '/'],\n 3: ['+', '-']}\n\n # First evaluate the nested lists recursively\n for x in range(len(equation)):\n if type(equation[x]) is list:\n equation[x] = eq_eval(equation[x])\n \n equation = simplify_eq_funcs(equation, valid_funcs)\n for x in range(1, len(order_operations) + 1):\n equation = simplify_eq(equation, order_operations[x])\n \n return equation[0]\n\ndef simplify_eq_funcs(equation, funcs):\n \"\"\"\n Helper definition\n Loops through a list and calculates the math functions passed in\n \"\"\"\n # print 'funcs in', equation\n found = True\n eq = [x for x in equation]\n while found:\n temp_eq = []\n found = False\n for x in range(len(eq)):\n if eq[x] in funcs:\n temp_eq.append(eval_func(eq[x], eq[x + 1]))\n \n try:\n temp_eq.extend(eq[x + 2:])\n except:\n pass\n \n found = True\n break\n else:\n temp_eq.append(eq[x])\n \n eq = temp_eq\n # print 'funcs out', eq\n return eq\n\ndef simplify_eq(equation, ops):\n \"\"\"\n Helper definition\n Loops through a list and calculates the math operations passed in\n \"\"\"\n # print 'ops in', equation, ops\n found = True\n eq = [x for x in equation]\n while found:\n temp_eq = []\n found = False\n for x in range(len(eq)):\n if eq[x] in ops:\n temp_eq.append(eval_op(eq[x - 1], eq[x + 1], eq[x]))\n \n try:\n temp_eq.extend(eq[x + 2:])\n except:\n pass\n \n found = True\n break\n elif x < len(eq) - 1:\n if eq[x + 1] in ops:\n continue\n else:\n temp_eq.append(eq[x])\n else:\n temp_eq.append(eq[x])\n \n eq = temp_eq\n # print eq\n # print 'ops out', eq\n return eq\n\ndef eval_func(func, num):\n \"\"\"\n Computes the given mathmatical function\n \"\"\"\n if func == 'sqrt':\n return math.sqrt(float(num))\n elif func == 'sin':\n return math.sin(float(num))\n elif func == 'cos':\n return math.cos(float(num))\n elif func == 'tan':\n return math.tan(float(num))\n elif func == 'ln':\n return math.log(float(num))\n elif func == 'e':\n return math.exp(float(num))\n\ndef eval_op(num1, num2, op):\n \"\"\"\n Computes the given mathmatical operation\n \"\"\"\n if op == '+':\n return float(num1) + float(num2)\n elif op == '-':\n return float(num1) - float(num2)\n elif op == '*':\n return float(num1) * float(num2)\n elif op == '/':\n return float(num1) / float(num2)\n elif op == '**':\n return float(num1) ** float(num2)\n\ndef eval_string(eq_str):\n eq = 
NestedParser().parse(eq_str)\n return eq_eval(eq)\n\n# eval_string = '(4 + 6) / sqrt((2 - 1) / (1 + 2))'\n# # eval_string = '1 + 2 + 3 + 4 + 5'\n# q = NestedParser()\n# p = q.parse(eval_string)\n# print p\n#\n# print eq_eval(p)","sub_path":"String-Calc/string_calc.py","file_name":"string_calc.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"606719266","text":"# ------------------------------------------------------------\n# \"THE BEERWARE LICENSE\" (Revision 42):\n# and wrote this code.\n# As long as you retain this notice, you can do whatever you want\n# with this stuff. If we meet someday, and you think this stuff\n# is worth it, you can buy us a beer in return.\n# --Sergey Ovchinnikov and Peter Koo\n# ------------------------------------------------------------\n\n# IMPORTANT, only tested using PYTHON 3!\nfrom typing import Dict, Tuple, Optional\nimport contextlib\nimport os\nimport h5py\nimport gzip\nimport string\nimport numpy as np\nimport tensorflow as tf\nfrom scipy import stats\nfrom scipy.spatial.distance import pdist, squareform\n\n\n# ===============================================================================\n# Setup the alphabet\n# note: if you are modifying the alphabet\n# make sure last character is \"-\" (gap)\n# ===============================================================================\nalphabet = \"ARNDCQEGHILKMFPSTWYV-\"\ninvalid_state_index = alphabet.index('-')\nstates = len(alphabet)\na2n: Dict[str, int] = {a: n for n, a in enumerate(alphabet)}\n\n\n# ===============================================================================\n# Functions for prepping the MSA (Multiple sequence alignment) from fasta/a2m file\n# ===============================================================================\n\n\nclass SequenceLengthException(Exception):\n\n def __init__(self, protein_id: str):\n super().__init__(\"Sequence length was too long for protein {}\".format(protein_id))\n\n\nclass TooFewValidMatchesException(Exception):\n\n def __init__(self, protein_id: str = None):\n message = 'There were too few valid matches'\n if protein_id is not None:\n message += ' for protein {}'.format(protein_id)\n super().__init__(message)\n\n\ndef to_header_and_sequence(block):\n header, *seq = block.split('\\n')\n seq = ''.join(seq)\n return header, seq\n\n\ndef parse_fasta(filename: str, limit: int = -1, max_seq_len: Optional[int] = None) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Function to parse a fasta/a2m file.\n\n Args:\n filename (str): filename of fasta/a2m file to load\n sequence_at_end (bool): indicates whether the actual sequence is at the beginning/end of file\n limit (int): DEPRECATED, used to limit the number of sequence matches. 
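A quick use of the parser/evaluator pair just defined, reproducing the module's own docstring example:

print(eval_string('(4 + 6) / sqrt((2 - 1) / (1 + 2))'))   # 17.3205..., i.e. 10 * sqrt(3)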
Need to account for\n sequence_at_end argument to reintroduce.\n\n Returns:\n np.ndarray: array of headers\n np.ndarray: array of sequences\n \"\"\"\n\n filetype = os.path.basename(filename).split('.', maxsplit=1)[1]\n assert filetype in {'a2m', 'a2m.gz', 'fasta', 'fas', 'fasta.gz', 'fas.gz'}\n is_a2m = 'a2m' in filetype\n is_compressed = 'gz' in filetype\n\n def get_file_obj():\n return gzip.open(filename) if is_compressed else open(filename)\n\n delete_lowercase_trans = ''.maketrans('', '', string.ascii_lowercase) # type: ignore\n with get_file_obj() as f:\n fasta = f.read()\n if isinstance(fasta, bytes):\n fasta = fasta.decode()\n fasta = fasta.strip('>').translate(delete_lowercase_trans).split('>')\n\n if max_seq_len is not None:\n seqlen = len(to_header_and_sequence(fasta[0])[1])\n if seqlen > max_seq_len:\n raise SequenceLengthException(filename)\n\n if 0 < limit < len(fasta):\n headers_and_seqs = [to_header_and_sequence(block) for block in fasta[:limit]]\n if is_a2m:\n last = to_header_and_sequence(fasta[-1])\n headers_and_seqs = [last] + headers_and_seqs\n else:\n headers_and_seqs = [to_header_and_sequence(block) for block in fasta]\n if is_a2m:\n headers_and_seqs = headers_and_seqs[-1:] + headers_and_seqs[:-1]\n\n header, sequence = zip(*headers_and_seqs)\n\n return np.array(header), np.array(sequence)\n\n\ndef filt_gaps(msa: np.ndarray, gap_cutoff: float = 0.5) -> Tuple[np.ndarray, np.ndarray]:\n '''filters alignment to remove gappy positions'''\n non_gaps = np.where(np.mean(msa == 20, 0) < gap_cutoff)[0]\n return msa[:, non_gaps], non_gaps\n\n\ndef get_eff(msa: np.ndarray, eff_cutoff: float = 0.8) -> np.ndarray:\n '''compute effective weight for each sequence'''\n # pairwise identity\n msa_sm = 1.0 - squareform(pdist(msa, \"hamming\"))\n\n # weight for each sequence\n msa_w = 1 / np.sum(msa_sm >= eff_cutoff, -1)\n\n return msa_w\n\n\ndef mk_msa(seqs: np.ndarray, gap_cutoff: float = 0.5):\n '''converts list of sequences to msa'''\n\n assert all(len(seq) == len(seqs[0]) for seq in seqs)\n msa_ori_list = [[a2n.get(aa, invalid_state_index) for aa in seq] for seq in seqs]\n msa_ori = np.array(msa_ori_list)\n\n # remove positions with more than > 50% gaps\n msa, v_idx = filt_gaps(msa_ori, gap_cutoff)\n\n if len(v_idx) == 0:\n raise TooFewValidMatchesException()\n\n # compute effective weight for each sequence\n msa_weights = get_eff(msa, 0.8)\n\n # compute effective number of sequences\n ncol = msa.shape[1] # length of sequence\n w_idx = v_idx[np.stack(np.triu_indices(ncol, 1), -1)]\n\n return {\"msa_ori\": msa_ori,\n \"msa\": msa,\n \"weights\": msa_weights,\n \"neff\": np.sum(msa_weights),\n \"v_idx\": v_idx,\n \"w_idx\": w_idx,\n \"nrow\": msa.shape[0],\n \"ncol\": ncol,\n \"ncol_ori\": msa_ori.shape[1]}\n\n\n# ===============================================================================\n# GREMLIN\n# ===============================================================================\n\ndef sym_w(w):\n '''symmetrize input matrix of shape (x,y,x,y)'''\n x = w.shape[0]\n w = w * np.reshape(1 - np.eye(x), (x, 1, x, 1))\n w = w + tf.transpose(w, [2, 3, 0, 1])\n return w\n\n\ndef opt_adam(loss, name, var_list=None, lr=1.0, b1=0.9, b2=0.999, b_fix=False):\n # adam optimizer\n # Note: this is a modified version of adam optimizer. More specifically, we replace \"vt\"\n # with sum(g*g) instead of (g*g). 
Furthmore, we find that disabling the bias correction\n # (b_fix=False) speeds up convergence for our case.\n\n if var_list is None:\n var_list = tf.trainable_variables()\n\n gradients = tf.gradients(loss, var_list)\n if b_fix:\n t = tf.Variable(0.0, \"t\")\n opt = []\n for n, (x, g) in enumerate(zip(var_list, gradients)):\n if g is not None:\n ini = dict(initializer=tf.zeros_initializer, trainable=False)\n mt = tf.get_variable(name + \"_mt_\" + str(n), shape=list(x.shape), **ini)\n vt = tf.get_variable(name + \"_vt_\" + str(n), shape=[], **ini)\n\n mt_tmp = b1 * mt + (1 - b1) * g\n vt_tmp = b2 * vt + (1 - b2) * tf.reduce_sum(tf.square(g))\n lr_tmp = lr / (tf.sqrt(vt_tmp) + 1e-8)\n\n if b_fix:\n lr_tmp = lr_tmp * tf.sqrt(1 - tf.pow(b2, t)) / (1 - tf.pow(b1, t))\n\n opt.append(x.assign_add(-lr_tmp * mt_tmp))\n opt.append(vt.assign(vt_tmp))\n opt.append(mt.assign(mt_tmp))\n\n if b_fix:\n opt.append(t.assign_add(1.0))\n return(tf.group(opt))\n\n\ndef GREMLIN(msa, opt_type=\"adam\", opt_iter=100, opt_rate=1.0, batch_size=None):\n\n ##############################################################\n # SETUP COMPUTE GRAPH\n ##############################################################\n # kill any existing tensorflow graph\n tf.reset_default_graph()\n\n ncol = msa[\"ncol\"] # length of sequence\n\n # msa (multiple sequence alignment)\n MSA = tf.placeholder(tf.int32, shape=(None, ncol), name=\"msa\")\n\n # one-hot encode msa\n OH_MSA = tf.one_hot(MSA, states)\n\n # msa weights\n MSA_weights = tf.placeholder(tf.float32, shape=(None,), name=\"msa_weights\")\n\n # 1-body-term of the MRF\n V = tf.get_variable(name=\"V\",\n shape=[ncol, states],\n initializer=tf.zeros_initializer)\n\n # 2-body-term of the MRF\n W = tf.get_variable(name=\"W\",\n shape=[ncol, states, ncol, states],\n initializer=tf.zeros_initializer)\n\n # symmetrize W\n W = sym_w(W)\n\n def L2(x):\n return tf.reduce_sum(tf.square(x))\n\n ########################################\n # V + W\n ########################################\n VW = V + tf.tensordot(OH_MSA, W, 2)\n\n # hamiltonian\n H = tf.reduce_sum(tf.multiply(OH_MSA, VW), axis=2)\n # local Z (parition function)\n Z = tf.reduce_logsumexp(VW, axis=2)\n\n # Psuedo-Log-Likelihood\n PLL = tf.reduce_sum(H - Z, axis=1)\n\n # Regularization\n L2_V = 0.01 * L2(V)\n L2_W = 0.01 * L2(W) * 0.5 * (ncol - 1) * (states - 1)\n\n # loss function to minimize\n loss = -tf.reduce_sum(PLL * MSA_weights) / tf.reduce_sum(MSA_weights)\n loss = loss + (L2_V + L2_W) / msa[\"neff\"]\n\n ##############################################################\n # MINIMIZE LOSS FUNCTION\n ##############################################################\n if opt_type == \"adam\":\n opt = opt_adam(loss, \"adam\", lr=opt_rate)\n\n # generate input/feed\n def feed(feed_all=False):\n if batch_size is None or feed_all:\n return {MSA: msa[\"msa\"], MSA_weights: msa[\"weights\"]}\n else:\n idx = np.random.randint(0, msa[\"nrow\"], size=batch_size)\n return {MSA: msa[\"msa\"][idx], MSA_weights: msa[\"weights\"][idx]}\n\n # optimize!\n with tf.Session() as sess:\n # initialize variables V and W\n sess.run(tf.global_variables_initializer())\n\n # initialize V\n msa_cat = tf.keras.utils.to_categorical(msa[\"msa\"], states)\n pseudo_count = 0.01 * np.log(msa[\"neff\"])\n V_ini = np.log(np.sum(msa_cat.T * msa[\"weights\"], -1).T + pseudo_count)\n V_ini = V_ini - np.mean(V_ini, -1, keepdims=True)\n sess.run(V.assign(V_ini))\n\n # compute loss across all data\n def get_loss():\n round(sess.run(loss, feed(feed_all=True)) * msa[\"neff\"], 
2)\n # print(\"starting\", get_loss())\n\n if opt_type == \"lbfgs\":\n lbfgs = tf.contrib.opt.ScipyOptimizerInterface\n opt = lbfgs(loss, method=\"L-BFGS-B\", options={'maxiter': opt_iter})\n opt.minimize(sess, feed(feed_all=True))\n\n if opt_type == \"adam\":\n for i in range(opt_iter):\n sess.run(opt, feed())\n # if (i + 1) % int(opt_iter / 10) == 0:\n # print(\"iter\", (i + 1), get_loss())\n\n # save the V and W parameters of the MRF\n V_ = sess.run(V)\n W_ = sess.run(W)\n\n # only return upper-right triangle of matrix (since it's symmetric)\n tri = np.triu_indices(ncol, 1)\n W_ = W_[tri[0], :, tri[1], :]\n\n mrf = {\"v\": V_,\n \"w\": W_,\n \"v_idx\": msa[\"v_idx\"],\n \"w_idx\": msa[\"w_idx\"]}\n\n return mrf\n\n\n# ===============================================================================\n# Explore the contact map\n# ===============================================================================\n\n# For contact prediction, the W matrix is reduced from LxLx21x21 to LxL matrix\n# (by taking the L2norm for each of the 20x20). In the code below, you can access\n# this as mtx[\"raw\"]. Further correction (average product correction) is then performed\n# to the mtx[\"raw\"] to remove the effects of entropy, mtx[\"apc\"]. The relative\n# ranking of mtx[\"apc\"] is used to assess importance. When there are enough effective\n# sequences (>1000), we find that the top 1.0L contacts are ~90% accurate! When the\n# number of effective sequences is lower, NN can help clean noise and fill in missing\n# contacts.\n\n# Functions for extracting contacts from MRF\n###################\n\n\ndef normalize(x):\n x = stats.boxcox(x - np.amin(x) + 1.0)[0]\n x_mean = np.mean(x)\n x_std = np.std(x)\n return((x - x_mean) / x_std)\n\n\ndef get_mtx(mrf):\n '''get mtx given mrf'''\n # l2norm of 20x20 matrices (note: we ignore gaps)\n raw = np.sqrt(np.sum(np.square(mrf[\"w\"][:, :-1, :-1]), (1, 2)))\n raw_sq = squareform(raw)\n\n # apc (average product correction)\n ap_sq = np.sum(raw_sq, 0, keepdims=True) * np.sum(raw_sq, 1, keepdims=True) / np.sum(raw_sq)\n apc = squareform(raw_sq - ap_sq, checks=False)\n\n mtx = {\"i\": mrf[\"w_idx\"][:, 0],\n \"j\": mrf[\"w_idx\"][:, 1],\n \"raw\": raw,\n \"apc\": apc,\n \"zscore\": normalize(apc)}\n return mtx\n\n\ndef run_gremlin(input_file: str, output_file: Optional[h5py.File] = None, max_seq_len: int = 700):\n # ===============================================================================\n # PREP MSA\n # ===============================================================================\n names, seqs = parse_fasta(input_file, limit=1000, max_seq_len=700)\n\n try:\n msa = mk_msa(seqs)\n except TooFewValidMatchesException:\n try:\n names, seqs = parse_fasta(input_file)\n msa = mk_msa(seqs)\n except TooFewValidMatchesException:\n raise TooFewValidMatchesException(input_file)\n\n mrf = GREMLIN(msa)\n mtx = get_mtx(mrf)\n\n this_protein_id = os.path.basename(input_file).split('.')[0]\n\n if output_file is not None:\n protein_group = output_file.create_group(this_protein_id)\n for key in ['v', 'w', 'raw', 'apc', 'v_idx', 'w_idx']:\n if key in mrf:\n array = mrf[key]\n elif key in mtx:\n array = mtx[key]\n dtype = array.dtype\n if dtype in [np.float32, np.float64]:\n array = np.asarray(array, np.float32)\n dtype_str = 'f'\n elif dtype in [np.int32, np.int64]:\n array = np.asarray(array, np.int32)\n dtype_str = 'i'\n else:\n raise ValueError(\"Unknown dtype {}\".format(dtype))\n\n protein_group.create_dataset(\n key, dtype=dtype_str, data=array, compression='gzip')\n else:\n 
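# no output file given: return the computed objects to the caller\n        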
return msa, mrf, mtx\n\n\nif __name__ == '__main__':\n    import argparse\n    from glob import glob\n    from tqdm import tqdm\n\n    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\n\n    parser = argparse.ArgumentParser(description='Runs Gremlin_TF to output mrf from fasta/a2m file')\n    # parser.add_argument('input_file', type=str, help='input fasta file')\n    parser.add_argument('output_file', type=str, help='output h5py file')\n\n    args = parser.parse_args()\n\n    files = glob('/big/davidchan/roshan/raw/**a2m.gz')\n    with tqdm(total=len(files)) as progress_bar:\n        for shard in range(len(files) // 1000):\n            output_file = args.output_file.split('.')[0]\n            curr_out_file = output_file + f'_{shard}.h5'\n            if os.path.exists(curr_out_file):\n                progress_bar.update(1000)\n                continue\n\n            this_shard_files = files[1000 * shard:1000 * (shard + 1)]\n            with h5py.File(curr_out_file, \"a\") as outfile:\n                for input_file in this_shard_files:\n                    this_protein_id = os.path.basename(input_file).split('.')[0]\n                    if this_protein_id in outfile:\n                        progress_bar.update()\n                        continue\n\n                    with contextlib.suppress(SequenceLengthException):\n                        run_gremlin(input_file, outfile)\n\n                    progress_bar.update()\n","sub_path":"GREMLIN_TF.py","file_name":"GREMLIN_TF.py","file_ext":"py","file_size_in_byte":15356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"499169895","text":"import pandas as pd\nimport os\nimport time\npath = '/Users/zhouzhirui/data/zillow/'\nos.chdir(path)\n\n\nclass Load(object):\n    load_featinfo_flag = False\n\n    def load_train(self):\n        t1 = time.time()\n        print('load train.csv ...')\n        train = pd.read_csv('train_2016_v2.csv', parse_dates=['transactiondate'])\n        print('train datashape: %d X %d ,cost time: %.2fs'%(train.shape[0], train.shape[1], time.time() - t1))\n        return train\n    \n    def load_property(self):\n        if self.load_featinfo_flag:\n            t1 = time.time()\n            print('load properties_2016.csv ..')\n            feat = pd.read_csv('properties_2016.csv')\n            # rename\n            name_map_dict = dict(zip(self.featinfo.Feature, self.featinfo.map_name))\n            feat.columns = map(lambda x: name_map_dict[x], feat.columns)\n            feat = feat.drop('G_fips', axis=1)\n            print('properties datashape: %d X %d ,cost time: %.2fs'%(feat.shape[0], feat.shape[1], time.time() - t1))\n            return feat\n        else:\n            print('please call load_featinfo first')\n    \n    def load_submission(self):\n        t1 = time.time()\n        print('load sample_submission.csv ..')\n        submission = pd.read_csv('sample_submission.csv')\n        print('submission datashape: %d X %d ,cost time: %.2fs'%(submission.shape[0], submission.shape[1], time.time() - t1))\n        return submission\n\n    def load_featinfo(self):\n        t1 = time.time()\n        print('load featureInfo.csv ..')\n        featinfo = pd.read_csv('featureInfo.csv')\n        print('featinfo datashape: %d X %d ,cost time: %.2fs'%(featinfo.shape[0], featinfo.shape[1], time.time() - t1))\n        self.load_featinfo_flag = True\n        self.featinfo = featinfo\n        return featinfo","sub_path":"Kaggle-Zillow/pysrc/data_load.py","file_name":"data_load.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"77759600","text":"# -*- coding=UTF-8 -*-\n# pyright: strict, reportTypeCommentUsage=none\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport nuke\n\nfrom wulifang._util import cast_str, assert_isinstance, cast_text\n\n\ndef rename_all_nodes_by_backdrop():\n    \"\"\"Rename all nodes after the backdrop node they belong to.\"\"\"\n\n    for backdrop in 
nuke.allNodes(cast_str(\"BackdropNode\")):\n nodes = assert_isinstance(backdrop, nuke.BackdropNode).getNodes()\n title = (\n cast_text(backdrop[cast_str(\"label\")].value())\n .split((\"\\n\"))[0]\n .split((\" \"))[0]\n )\n if not title:\n continue\n for node in nodes:\n if \"_\" in cast_text(node.name()) or (\n nuke.exists(cast_str(cast_text(node.name()) + \".disable\"))\n and node[cast_str(\"disable\")].value()\n ):\n continue\n if cast_text(node.Class()) == \"Group\":\n name = cast_text(node.name()).rstrip(\"0123456789\")\n node.setName(\n cast_str(\"%s_%s_1\" % (name, title)), updateExpressions=True\n )\n else:\n node.setName(\n cast_str(\"%s_%s_1\" % (cast_text(node.Class()), title)),\n updateExpressions=True,\n )\n","sub_path":"wulifang/nuke/_rename_all_nodes_by_backdrop.py","file_name":"_rename_all_nodes_by_backdrop.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"209933061","text":"import json\nfrom flask import render_template\nfrom happex import app\nimport matplotlib\nimport io\n\nmatplotlib.use(\"agg\")\n\nfrom matplotlib import pyplot\n\n\ndef best_fit(X, Y):\n xbar = sum(X) / len(X)\n ybar = sum(Y) / len(Y)\n n = len(X) # or len(Y)\n numer = sum([xi * yi for xi, yi in zip(X, Y)]) - n * xbar * ybar\n denum = sum([xi ** 2 for xi in X]) - n * xbar ** 2\n b = numer / denum\n a = ybar - b * xbar\n return a, b\n\n\ndef line_plot(query, xlabel, ylabel):\n data = io.BytesIO()\n\n x, y = app.db.get_graph(query)\n a, b = best_fit(x, y)\n\n pyplot.scatter(x, y)\n yfit = [a + b * xi for xi in x]\n\n pyplot.plot(x, yfit)\n pyplot.xlabel(xlabel)\n pyplot.ylabel(ylabel)\n pyplot.savefig(data, format=\"png\")\n pyplot.clf()\n return data\n\n\n@app.route(\"/\")\ndef index():\n scores = [[x[0], float(x[1])] for x in app.db.get_all_scores()]\n return render_template(\"index.html\", data=scores)\n\n\n@app.route(\"/social_ladder\")\ndef social_ladder():\n return render_template(\"social_ladder.html\")\n\n\n@app.route(\"/social_ladder/graph.png\")\ndef get_ladder_graph():\n data = line_plot(\n \"select lifeladder, socialsupport from happy where lifeladder is not null and socialsupport is not null;\",\n \"Social Support\",\n \"Life Ladder\",\n )\n return data.getvalue(), 200, {\"Content-Type\": \"image/png\"}\n\n\n@app.route(\"/confidence\")\ndef confidence():\n return render_template(\"confidence.html\")\n\n\n@app.route(\"/confidence/graph.png\")\ndef get_conf_graph():\n data = line_plot(\n \"select lifeladder, confidence from happy where lifeladder is not null and confidence is not null;\",\n \"Confidence in Government\",\n \"Life Ladder\",\n )\n return data.getvalue(), 200, {\"Content-Type\": \"image/png\"}\n\n\n@app.route(\"/democracy\")\ndef democracy():\n return render_template(\"democracy.html\")\n\n\n@app.route(\"/democracy/graph.png\")\ndef get_dem_graph():\n data = line_plot(\n \"select lifeladder, democraticquality from happy where lifeladder is not null and democraticquality is not null;\",\n \"Democratic Quality\",\n \"Life Ladder\",\n )\n return data.getvalue(), 200, {\"Content-Type\": \"image/png\"}\n\n\n@app.route(\"/generosity\")\ndef generosity():\n return render_template(\"generosity.html\")\n\n\n@app.route(\"/generosity/graph.png\")\ndef get_gen_graph():\n data = line_plot(\n \"select paffect, generosity from happy where paffect is not null and generosity is not null;\",\n \"Positive Affect\",\n \"Generosity\",\n )\n return data.getvalue(), 200, {\"Content-Type\": 
\"image/png\"}\n\n\n@app.route(\"/naffect\")\ndef naffect():\n return render_template(\"naffect.html\")\n\n\n@app.route(\"/naffect/graph.png\")\ndef get_naffect_graph():\n data = line_plot(\n \"select naffect, generosity from happy where naffect is not null and generosity is not null;\",\n \"Negative Affect\",\n \"Generosity\",\n )\n return data.getvalue(), 200, {\"Content-Type\": \"image/png\"}\n\n\n@app.route(\"/country/\")\ndef get_country(ctry):\n rows = app.db.get_country(ctry)\n\n if not rows:\n return f\"
    No data available
    \"\n\n average = lambda i: round(\n sum([x[i] for x in rows if x[i] is not None])\n / len([x[i] for x in rows if x[i] is not None]),\n 2,\n )\n\n name = rows[0][0]\n\n # Add world averages for context?\n ladder = average(2)\n lifeexpect = average(3)\n socialsupport = average(4)\n generosity = average(5)\n paffect = average(6)\n delivery = average(7)\n corruption = average(8)\n\n return render_template(\n \"country.html\",\n code=ctry,\n name=name,\n ladder=ladder,\n lifeexpect=lifeexpect,\n socialsupport=socialsupport,\n generosity=generosity,\n paffect=paffect,\n deliveryquality=delivery,\n corruption=corruption,\n )\n\n\n@app.route(\"/country//graph.png\")\ndef get_country_graph(ctry):\n rows = app.db.get_country(ctry)\n\n data = io.BytesIO()\n\n pyplot.plot([x[1] for x in rows], [float(x[2]) for x in rows])\n pyplot.axis([2005, 2018, 0, 10])\n pyplot.savefig(data, format=\"png\")\n pyplot.clf()\n\n return data.getvalue(), 200, {\"Content-Type\": \"image/png\"}\n","sub_path":"happex/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"459056551","text":"import logging\nimport os.path\n\nimport requests\nfrom wand.image import Image\nfrom wand.exceptions import WandException\n\nfrom .backend import Base, db\n\n\nlog = logging.getLogger(__name__)\n\nFAVICON_SIZE = (16, 16)\nFAVICON_MEDIA_TYPES = ('image/vnd.microsoft.icon', 'image/x-icon')\nAUTO_DETECT = None\n\n\ndef favicon_format(response):\n \"\"\"Return favicon format from the response.\"\"\"\n content_type = response.headers.get('Content-Type')\n if content_type in FAVICON_MEDIA_TYPES:\n return 'ico'\n extension = os.path.splitext(response.url)[1].lstrip('.')\n if extension == 'ico':\n return 'ico'\n return AUTO_DETECT\n\n\nclass Favicon(Base):\n \"\"\"An icon of the feed.\"\"\"\n __tablename__ = 'favicons'\n\n feed_id = db.Column(db.Integer, db.ForeignKey('feeds.id'), nullable=False)\n #: Raw image data.\n image = db.Column(db.Binary)\n\n @classmethod\n def from_url(cls, url):\n \"\"\"Create a :class:`Favicon` instance from the given URL.\"\"\"\n log.debug('fetch favicon: %s', url)\n\n try:\n response = requests.get(url)\n response.raise_for_status()\n except requests.exceptions.RequestException as e:\n log.warning('failed to fetch favicon: %s %r', url, e)\n return None\n\n if response.status_code != 200:\n return None\n\n try:\n with Image(blob=response.content,\n format=favicon_format(response)) as image:\n image.sequence.current_index = 0\n image.resize(*FAVICON_SIZE)\n image.strip()\n blob = image.make_blob('png')\n return cls(image=blob)\n except WandException as e:\n log.warning('invalid favicon: %s %r', url, e)\n log.warning('favicon: %s %d %s',\n response.url,\n response.status_code,\n response.headers.get('Content-Type'))\n return None\n","sub_path":"feedvest/models/favicon.py","file_name":"favicon.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"453023021","text":"\"\"\"Common utils for Dyson tests.\"\"\"\n\nfrom typing import Optional, Type\nfrom unittest import mock\nfrom unittest.mock import MagicMock\n\nfrom libpurecool.dyson_device import DysonDevice\nfrom libpurecool.dyson_pure_cool import FanSpeed\n\nfrom homeassistant.core import HomeAssistant\n\nSERIAL = \"XX-XXXXX-XX\"\nNAME = \"Temp Name\"\nENTITY_NAME = \"temp_name\"\n\nBASE_PATH = \"homeassistant.components.dyson\"\n\n\ndef 
load_mock_device(device: DysonDevice) -> None:\n \"\"\"Load the mock with default values so it doesn't throw errors.\"\"\"\n device.serial = SERIAL\n device.name = NAME\n device.connect = mock.Mock(return_value=True)\n device.auto_connect = mock.Mock(return_value=True)\n device.state.hepa_filter_state = 0\n device.state.carbon_filter_state = 0\n device.state.speed = FanSpeed.FAN_SPEED_1.value\n device.state.oscillation_angle_low = \"000\"\n device.state.oscillation_angle_high = \"000\"\n device.state.filter_life = \"000\"\n device.state.heat_target = 200\n if hasattr(device, \"environmental_state\"):\n device.environmental_state.particulate_matter_25 = \"0000\"\n device.environmental_state.particulate_matter_10 = \"0000\"\n device.environmental_state.nitrogen_dioxide = \"0000\"\n device.environmental_state.volatil_organic_compounds = \"0000\"\n device.environmental_state.volatile_organic_compounds = \"0000\"\n device.environmental_state.temperature = 250\n\n\ndef get_basic_device(spec: Type[DysonDevice]) -> DysonDevice:\n \"\"\"Return a basic device with common fields filled out.\"\"\"\n device = MagicMock(spec=spec)\n load_mock_device(device)\n return device\n\n\nasync def async_update_device(\n hass: HomeAssistant, device: DysonDevice, state_type: Optional[Type] = None\n) -> None:\n \"\"\"Update the device using callback function.\"\"\"\n callback = device.add_message_listener.call_args[0][0]\n message = MagicMock(spec=state_type)\n await hass.async_add_executor_job(callback, message)\n await hass.async_block_till_done()\n","sub_path":"tests/components/dyson/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"544981871","text":"from docutils import nodes\nfrom sphinx.application import Sphinx\nfrom sphinx.domains import Domain\nfrom sphinx.roles import XRefRole\n\n\nclass RustDomain(Domain):\n \"\"\"\n Domain allowing easy references to Rust objects.\n \"\"\"\n name = 'rust'\n label = 'Rust'\n\n roles = {\n 'mod': XRefRole(),\n 'const': XRefRole(),\n 'fn': XRefRole(),\n 'struct': XRefRole(),\n }\n\n uri_base = '/apidocs/'\n\n def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):\n return []\n\n def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):\n parts = target.split('::')\n if parts[0][0] == '~':\n parts[0] = parts[0][1:]\n contnode.children[0] = nodes.Text(parts[-1])\n\n if typ == 'mod':\n uri = '/'.join(parts)\n elif typ == 'struct':\n uri = '/'.join(parts[:-1])\n uri += f'/struct.{parts[-1]}.html'\n elif typ == 'fn':\n uri = '/'.join(parts[:-1])\n uri += f'/fn.{parts[-1]}.html'\n\n uri = self.uri_base + uri\n\n node = nodes.reference('', '', classes=['rust', typ])\n node['refuri'] = uri\n node += contnode\n\n return node\n","sub_path":"bookdata/sphinx/rust.py","file_name":"rust.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"242965221","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport time\nimport json\nfrom flask import Flask, render_template, request\nfrom flask import redirect, url_for\nfrom flask import make_response\n\n\n\napp = Flask(__name__)\napp.debug = True\n# redis = redis.Redis()\n@app.route('/')\ndef hello():\n return('hello world')\n\n@app.route(\"/interpriter\", methods=[\"GET\"])\ndef login():\n global name,sex,occupation,language,place\n if (request.args.get(\"name\") and 
request.args.get(\"sex\") and request.args.get(\"occupation\") and request.args.get(\"lang\") and request.args.get(\"place\")):\n        name = request.args.get(\"name\")\n        sex = request.args.get(\"sex\")\n        occupation = request.args.get(\"occupation\")\n        language = request.args.get(\"lang\")\n        place = request.args.get(\"place\")# registration data arrives here\n        print(\"login:\", name)\n\n        with open('static/json/interpriter.json','rt') as fin:\n            interpriter_data = fin.read()\n\n        interpriter_dict = json.loads(interpriter_data)\n        interpriter_dict[name] = {'sex':sex,'occupation':occupation,'lang':language,'place':place}\n        interpriter_json = json.dumps(interpriter_dict)\n\n        with open('static/json/interpriter.json','wt') as fout:\n            fout.write(interpriter_json)\n\n        return redirect(url_for(\"interpriter\"))\n    return render_template(\"login.html\")\n\n@app.route(\"/visitor\", methods=[\"GET\"])\ndef login2():\n    global name,sex,occupation,language,place\n    if (request.args.get(\"name\") and request.args.get(\"sex\") and request.args.get(\"occupation\") and request.args.get(\"lang\") and request.args.get(\"place\")):\n        name = request.args.get(\"name\")\n        sex = request.args.get(\"sex\")\n        occupation = request.args.get(\"occupation\")\n        language = request.args.get(\"lang\")\n        place = request.args.get(\"place\")# registration data arrives here\n        print(\"login:\", name)\n\n        with open('static/json/client.json','rt') as fin:\n            client_data = fin.read()\n\n        client_dict = json.loads(client_data)\n        client_dict[name] = {'sex':sex,'occupation':occupation,'lang':language,'place':place}\n        client_json = json.dumps(client_dict)\n\n        with open('static/json/client.json','wt') as fout:\n            fout.write(client_json)\n\n        return redirect(url_for(\"visitor\"))\n    return render_template(\"login_visitor.html\")\n\n\n@app.route(\"/interpriter/index\")\ndef interpriter():\n    return render_template(\"index.html\"\n    )\n\n\n@app.route(\"/visitor/index\")\ndef visitor():\n    return render_template(\"index_visitor.html\",\n    )\n\napp.run(port = 9999, debug =True)\n","sub_path":"webapp/interpriter.py","file_name":"interpriter.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"531795095","text":"# Constants\nNORTH = 'n'\nEAST = 'e'\nSOUTH = 's'\nWEST = 'w'\n\ndef move(direction, col, row):\n    if direction == NORTH:\n        row += 1\n    elif direction == SOUTH:\n        row -= 1\n    elif direction == EAST:\n        col += 1\n    elif direction == WEST:\n        col -= 1\n    return(col, row) \n\ndef is_victory(col, row):\n    return col == 3 and row == 1 # (3,1)\n\ndef print_directions(directions_str):\n    print(\"You can travel: \", end='')\n    first = True\n    for ch in directions_str:\n        if not first:\n            print(\" or \", end='')\n        if ch == NORTH:\n            print(\"(N)orth\", end='')\n        elif ch == EAST:\n            print(\"(E)ast\", end='')\n        elif ch == SOUTH:\n            print(\"(S)outh\", end='')\n        elif ch == WEST:\n            print(\"(W)est\", end='')\n        first = False\n    print(\".\")\n    \ndef find_directions(col, row):\n    if col == 1 and row == 1: # (1,1)\n        valid_directions = NORTH\n    elif col == 1 and row == 2: # (1,2)\n        valid_directions = NORTH+EAST+SOUTH\n    elif col == 1 and row == 3: # (1,3)\n        valid_directions = EAST+SOUTH\n    elif col == 2 and row == 1: # (2,1)\n        valid_directions = NORTH\n    elif col == 2 and row == 2: # (2,2)\n        valid_directions = SOUTH+WEST\n    elif col == 2 and row == 3: # (2,3)\n        valid_directions = EAST+WEST\n    elif col == 3 and row == 2: # (3,2)\n        valid_directions = NORTH+SOUTH\n    elif col == 3 and row == 3: # (3,3)\n        valid_directions = SOUTH+WEST\n    return 
valid_directions\n\ndef isLeverTile(x, y):\n    ''' Returns whether the x, y tile is a lever tile or not'''\n    if y == 2 or (x == 2 and y == 3):\n        return True\n    else:\n        return False\n\ndef leverPull(coins):\n    ''' Prompts the player to pull the lever; if they do, 1 is added to their coin total. '''\n    answer = input(\"Pull a lever (y/n): \")\n    if answer.lower() == \"y\":\n        coins += 1\n        print(\"You received 1 coin, your total is now {}.\".format(coins))\n    return coins\n    \n\ndef play():\n    # The main program starts here\n    victory = False\n    row = 1\n    col = 1\n    coins = 0\n\n    valid_directions = NORTH\n    print_directions(valid_directions)\n\n    while not victory:\n        direction = input(\"Direction: \")\n        direction = direction.lower()\n        \n        if not direction in valid_directions:\n            print(\"Not a valid direction!\")\n        else:\n            col, row = move(direction, col, row)\n\n            if isLeverTile(col, row):\n                coins = leverPull(coins)\n\n\n            victory = is_victory(col, row)\n            if victory:\n                print(\"Victory!\")\n            else:\n                valid_directions = find_directions(col, row)\n                print_directions(valid_directions)\n    \nplay()","sub_path":"ttGitHub.py","file_name":"ttGitHub.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"169769847","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 29 21:50:59 2016\n\n@author: Muskan\n\"\"\"\n\nimport sys\nimport math\nimport random\nnum_hidden_units = 14\nnum_op_units = 10\nnum_ip_units = 28*28\n\ndef convert(imgf, labelf, outf, n):\n    f = open(imgf, \"rb\")\n    o = open(outf, \"w\")\n    l = open(labelf, \"rb\")\n\n    f.read(16)\n    l.read(8)\n    images = []\n\n    for i in range(n):\n        image = [ord(l.read(1))]\n        for j in range(num_ip_units):\n            image.append(ord(f.read(1)))\n        images.append(image)\n\n    for image in images:\n        o.write(\",\".join(str(pix) for pix in image)+\"\\n\")\n    f.close()\n    o.close()\n    l.close()\n\ndef main():\n\t\n\tconvert_flag = True\n\tif convert_flag:\n\t\tprint ('Start Conversion')\n\n\t\tconvert(\"Input/train-images.idx3-ubyte\", \"Input/train-labels.idx1-ubyte\",\n\t            \"Input/mnist_train.csv\", 60000)\n\t\t\n\t\tconvert(\"Input/t10k-images.idx3-ubyte\", \"Input/t10k-labels.idx1-ubyte\",\n\t            \"Input/mnist_test.csv\", 10000)\n\n\t\tprint ('Done Converting')\n\n\tsys.exit(1)\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"569758084","text":"from PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_CH_MAQUINA(object):\n    def setupUi(self, CH_MAQUINA):\n        CH_MAQUINA.setObjectName(\"CH_MAQUINA\")\n        CH_MAQUINA.resize(1366, 760)\n\n        self.centralwidget = QtWidgets.QWidget(CH_MAQUINA)\n        self.centralwidget.setObjectName(\"centralwidget\")\n        self.gridLayoutWidget = QtWidgets.QWidget(self.centralwidget)\n        self.gridLayoutWidget.setGeometry(QtCore.QRect(9, -1, 341, 98))\n        self.gridLayoutWidget.setObjectName(\"gridLayoutWidget\")\n        self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)\n        self.gridLayout.setContentsMargins(0, 0, 0, 0)\n        self.gridLayout.setObjectName(\"gridLayout\")\n        self.acumulador = QtWidgets.QLabel(self.gridLayoutWidget)\n        self.acumulador.setText(\"\")\n        self.acumulador.setObjectName(\"acumulador\")\n        self.gridLayout.addWidget(self.acumulador, 1, 1, 1, 1)\n        self.instrucci = QtWidgets.QLabel(self.gridLayoutWidget)\n        self.instrucci.setText(\"\")\n        self.instrucci.setObjectName(\"instrucci\")\n        
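# row 3, column 1 of the status grid: shows the current instruction\n        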
self.gridLayout.addWidget(self.instrucci, 3, 1, 1, 1)\n self.label_4 = QtWidgets.QLabel(self.gridLayoutWidget)\n self.label_4.setObjectName(\"label_4\")\n self.gridLayout.addWidget(self.label_4, 4, 0, 1, 1)\n self.totalMemoria = QtWidgets.QLabel(self.gridLayoutWidget)\n self.totalMemoria.setText(\"\")\n self.totalMemoria.setObjectName(\"totalMemoria\")\n self.gridLayout.addWidget(self.totalMemoria, 5, 1, 1, 1)\n self.label_2 = QtWidgets.QLabel(self.gridLayoutWidget)\n self.label_2.setObjectName(\"label_2\")\n self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)\n self.label_5 = QtWidgets.QLabel(self.gridLayoutWidget)\n self.label_5.setObjectName(\"label_5\")\n self.gridLayout.addWidget(self.label_5, 5, 0, 1, 1)\n self.pos_mem = QtWidgets.QLabel(self.gridLayoutWidget)\n self.pos_mem.setText(\"\")\n self.pos_mem.setObjectName(\"pos_mem\")\n self.gridLayout.addWidget(self.pos_mem, 2, 1, 1, 1)\n self.label_3 = QtWidgets.QLabel(self.gridLayoutWidget)\n self.label_3.setObjectName(\"label_3\")\n self.gridLayout.addWidget(self.label_3, 3, 0, 1, 1)\n spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\n self.gridLayout.addItem(spacerItem, 0, 0, 1, 1)\n self.valor = QtWidgets.QLabel(self.gridLayoutWidget)\n self.valor.setText(\"\")\n self.valor.setObjectName(\"valor\")\n self.gridLayout.addWidget(self.valor, 4, 1, 1, 1)\n self.label = QtWidgets.QLabel(self.gridLayoutWidget)\n self.label.setObjectName(\"label\")\n self.gridLayout.addWidget(self.label, 1, 0, 1, 1)\n self.pbTotalMen = QtWidgets.QProgressBar(self.gridLayoutWidget)\n self.pbTotalMen.setProperty(\"value\", 24)\n self.pbTotalMen.setObjectName(\"pbTotalMen\")\n self.gridLayout.addWidget(self.pbTotalMen, 6, 0, 1, 1)\n self.gridLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)\n self.gridLayoutWidget_2.setGeometry(QtCore.QRect(380, 10, 491, 411))\n self.gridLayoutWidget_2.setObjectName(\"gridLayoutWidget_2\")\n self.gridLayout_2 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)\n self.gridLayout_2.setContentsMargins(0, 0, 0, 0)\n self.gridLayout_2.setObjectName(\"gridLayout_2\")\n self.spinKernel = QtWidgets.QSpinBox(self.gridLayoutWidget_2)\n self.spinKernel.setMaximum(999)\n self.spinKernel.setProperty(\"value\", 79)\n self.spinKernel.setObjectName(\"spinKernel\")\n self.gridLayout_2.addWidget(self.spinKernel, 10, 0, 1, 1)\n self.label_11 = QtWidgets.QLabel(self.gridLayoutWidget_2)\n self.label_11.setObjectName(\"label_11\")\n self.gridLayout_2.addWidget(self.label_11, 9, 0, 1, 1)\n self.memoria = QtWidgets.QTableWidget(self.gridLayoutWidget_2)\n self.memoria.setObjectName(\"memoria\")\n self.memoria.setColumnCount(5)\n self.memoria.setRowCount(0)\n item = QtWidgets.QTableWidgetItem()\n self.memoria.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n self.memoria.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n self.memoria.setHorizontalHeaderItem(2, item)\n item = QtWidgets.QTableWidgetItem()\n self.memoria.setHorizontalHeaderItem(3, item)\n item = QtWidgets.QTableWidgetItem()\n self.memoria.setHorizontalHeaderItem(4, item)\n self.gridLayout_2.addWidget(self.memoria, 4, 0, 1, 1)\n self.label_9 = QtWidgets.QLabel(self.gridLayoutWidget_2)\n self.label_9.setObjectName(\"label_9\")\n self.gridLayout_2.addWidget(self.label_9, 0, 0, 1, 1)\n self.pbMemoria = QtWidgets.QProgressBar(self.gridLayoutWidget_2)\n self.pbMemoria.setProperty(\"value\", 24)\n self.pbMemoria.setObjectName(\"pbMemoria\")\n self.gridLayout_2.addWidget(self.pbMemoria, 8, 0, 
1, 1)\n self.pbKernel = QtWidgets.QProgressBar(self.gridLayoutWidget_2)\n self.pbKernel.setProperty(\"value\", 24)\n self.pbKernel.setObjectName(\"pbKernel\")\n self.gridLayout_2.addWidget(self.pbKernel, 11, 0, 1, 1)\n self.label_10 = QtWidgets.QLabel(self.gridLayoutWidget_2)\n self.label_10.setObjectName(\"label_10\")\n self.gridLayout_2.addWidget(self.label_10, 5, 0, 1, 1)\n self.spinMemoria = QtWidgets.QSpinBox(self.gridLayoutWidget_2)\n self.spinMemoria.setSuffix(\"\")\n self.spinMemoria.setMinimum(0)\n self.spinMemoria.setMaximum(9999)\n self.spinMemoria.setProperty(\"value\", 100)\n self.spinMemoria.setObjectName(\"spinMemoria\")\n self.gridLayout_2.addWidget(self.spinMemoria, 6, 0, 1, 1)\n self.label_16 = QtWidgets.QLabel(self.gridLayoutWidget_2)\n self.label_16.setObjectName(\"label_16\")\n self.gridLayout_2.addWidget(self.label_16, 7, 0, 1, 1)\n self.gridLayoutWidget_4 = QtWidgets.QWidget(self.centralwidget)\n self.gridLayoutWidget_4.setGeometry(QtCore.QRect(890, 10, 389, 85))\n self.gridLayoutWidget_4.setObjectName(\"gridLayoutWidget_4\")\n self.gridLayout_4 = QtWidgets.QGridLayout(self.gridLayoutWidget_4)\n self.gridLayout_4.setContentsMargins(0, 0, 0, 0)\n self.gridLayout_4.setObjectName(\"gridLayout_4\")\n self.label_22 = QtWidgets.QLabel(self.gridLayoutWidget_4)\n self.label_22.setObjectName(\"label_22\")\n self.gridLayout_4.addWidget(self.label_22, 1, 1, 1, 1)\n self.label_21 = QtWidgets.QLabel(self.gridLayoutWidget_4)\n self.label_21.setObjectName(\"label_21\")\n self.gridLayout_4.addWidget(self.label_21, 1, 0, 1, 1)\n\n self.encender = QtWidgets.QPushButton(self.gridLayoutWidget_4)\n self.encender.setMaximumSize(QtCore.QSize(57, 57))\n self.encender.setText(\"\")\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(\"on.png\"), QtGui.QIcon.Normal, QtGui.QIcon.On)\n self.encender.setIcon(icon1)\n self.encender.setIconSize(QtCore.QSize(57, 57))\n self.encender.setObjectName(\"encender\")\n self.gridLayout_4.addWidget(self.encender, 0, 0, 1, 1)\n\n self.apagar = QtWidgets.QPushButton(self.gridLayoutWidget_4)\n self.apagar.setMaximumSize(QtCore.QSize(57, 57))\n self.apagar.setAutoFillBackground(False)\n self.apagar.setText(\"\")\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(\"on.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.apagar.setIcon(icon2)\n self.apagar.setIconSize(QtCore.QSize(57, 57))\n self.apagar.setObjectName(\"apagar\")\n self.gridLayout_4.addWidget(self.apagar, 0, 1, 1, 1)\n\n self.imprimir = QtWidgets.QPushButton(self.gridLayoutWidget_4)\n self.imprimir.setText(\"\")\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(\"Imprimir.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.imprimir.setIcon(icon3)\n self.imprimir.setIconSize(QtCore.QSize(57, 57))\n self.imprimir.setObjectName(\"imprimir\")\n self.gridLayout_4.addWidget(self.imprimir, 0, 5, 1, 1)\n\n self.ejecutar = QtWidgets.QPushButton(self.gridLayoutWidget_4)\n self.ejecutar.setText(\"\")\n icon4 = QtGui.QIcon()\n icon4.addPixmap(QtGui.QPixmap(\"ejecutar.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.ejecutar.setIcon(icon4)\n self.ejecutar.setIconSize(QtCore.QSize(57, 57))\n self.ejecutar.setObjectName(\"ejecutar\")\n self.gridLayout_4.addWidget(self.ejecutar, 0, 2, 1, 1)\n\n self.label_25 = QtWidgets.QLabel(self.gridLayoutWidget_4)\n self.label_25.setObjectName(\"label_25\")\n self.gridLayout_4.addWidget(self.label_25, 1, 5, 1, 1)\n self.label_23 = QtWidgets.QLabel(self.gridLayoutWidget_4)\n self.label_23.setObjectName(\"label_23\")\n self.gridLayout_4.addWidget(self.label_23, 
1, 2, 1, 1)\n\n self.abrir = QtWidgets.QPushButton(self.gridLayoutWidget_4)\n self.abrir.setText(\"\")\n icon5 = QtGui.QIcon()\n icon5.addPixmap(QtGui.QPixmap(\"Abrir.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.abrir.setIcon(icon5)\n self.abrir.setIconSize(QtCore.QSize(57, 57))\n self.abrir.setObjectName(\"abrir\")\n self.gridLayout_4.addWidget(self.abrir, 0, 3, 1, 1)\n self.label_24 = QtWidgets.QLabel(self.gridLayoutWidget_4)\n self.label_24.setObjectName(\"label_24\")\n self.gridLayout_4.addWidget(self.label_24, 1, 3, 1, 1)\n self.label_12 = QtWidgets.QLabel(self.centralwidget)\n self.label_12.setGeometry(QtCore.QRect(890, 100, 91, 29))\n self.label_12.setObjectName(\"label_12\")\n self.label_13 = QtWidgets.QLabel(self.centralwidget)\n self.label_13.setGeometry(QtCore.QRect(890, 120, 350, 250))\n self.label_13.setMaximumSize(QtCore.QSize(350, 250))\n self.label_13.setText(\"\")\n self.label_13.setPixmap(QtGui.QPixmap(\"monitor.png\"))\n self.label_13.setScaledContents(True)\n self.label_13.setObjectName(\"label_13\")\n self.label_14 = QtWidgets.QLabel(self.centralwidget)\n self.label_14.setGeometry(QtCore.QRect(890, 360, 61, 41))\n self.label_14.setObjectName(\"label_14\")\n self.label_15 = QtWidgets.QLabel(self.centralwidget)\n self.label_15.setGeometry(QtCore.QRect(890, 380, 350, 250))\n self.label_15.setMaximumSize(QtCore.QSize(350, 250))\n self.label_15.setText(\"\")\n self.label_15.setPixmap(QtGui.QPixmap(\"impresora.png\"))\n self.label_15.setScaledContents(True)\n self.label_15.setObjectName(\"label_15\")\n self.monitor = QtWidgets.QLabel(self.centralwidget)\n self.monitor.setGeometry(QtCore.QRect(820, 680, 331, 161))\n self.monitor.setText(\"\")\n self.monitor.setObjectName(\"monitor\")\n self.impresora = QtWidgets.QLabel(self.centralwidget)\n self.impresora.setGeometry(QtCore.QRect(180, 700, 241, 81))\n self.impresora.setText(\"\")\n self.impresora.setObjectName(\"impresora\")\n self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 160, 329, 401))\n self.verticalLayoutWidget.setObjectName(\"verticalLayoutWidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.label_6 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_6.setObjectName(\"label_6\")\n self.verticalLayout.addWidget(self.label_6)\n self.procesos = QtWidgets.QTableWidget(self.verticalLayoutWidget)\n self.procesos.setObjectName(\"procesos\")\n self.procesos.setColumnCount(6)\n self.procesos.setRowCount(0)\n item = QtWidgets.QTableWidgetItem()\n self.procesos.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n self.procesos.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n self.procesos.setHorizontalHeaderItem(2, item)\n item = QtWidgets.QTableWidgetItem()\n self.procesos.setHorizontalHeaderItem(3, item)\n item = QtWidgets.QTableWidgetItem()\n self.procesos.setHorizontalHeaderItem(4, item)\n item = QtWidgets.QTableWidgetItem()\n self.procesos.setHorizontalHeaderItem(5, item)\n self.verticalLayout.addWidget(self.procesos)\n self.label_7 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_7.setObjectName(\"label_7\")\n self.verticalLayout.addWidget(self.label_7)\n self.variables = QtWidgets.QTableWidget(self.verticalLayoutWidget)\n self.variables.setObjectName(\"variables\")\n self.variables.setColumnCount(5)\n self.variables.setRowCount(0)\n item = 
QtWidgets.QTableWidgetItem()\n self.variables.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n self.variables.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n self.variables.setHorizontalHeaderItem(2, item)\n item = QtWidgets.QTableWidgetItem()\n self.variables.setHorizontalHeaderItem(3, item)\n item = QtWidgets.QTableWidgetItem()\n self.variables.setHorizontalHeaderItem(4, item)\n self.verticalLayout.addWidget(self.variables)\n self.label_8 = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.label_8.setObjectName(\"label_8\")\n self.verticalLayout.addWidget(self.label_8)\n self.etiquetas = QtWidgets.QTableWidget(self.verticalLayoutWidget)\n self.etiquetas.setObjectName(\"etiquetas\")\n self.etiquetas.setColumnCount(4)\n self.etiquetas.setRowCount(0)\n item = QtWidgets.QTableWidgetItem()\n self.etiquetas.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n self.etiquetas.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n self.etiquetas.setHorizontalHeaderItem(2, item)\n item = QtWidgets.QTableWidgetItem()\n self.etiquetas.setHorizontalHeaderItem(3, item)\n self.verticalLayout.addWidget(self.etiquetas)\n self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)\n self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(380, 420, 485, 211))\n self.verticalLayoutWidget_2.setObjectName(\"verticalLayoutWidget_2\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)\n self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.label_17 = QtWidgets.QLabel(self.verticalLayoutWidget_2)\n self.label_17.setObjectName(\"label_17\")\n self.verticalLayout_2.addWidget(self.label_17)\n self.editProgr = QtWidgets.QPlainTextEdit(self.verticalLayoutWidget_2)\n self.editProgr.setObjectName(\"editProgr\")\n self.verticalLayout_2.addWidget(self.editProgr)\n self.errores = QtWidgets.QTextEdit(self.verticalLayoutWidget_2)\n self.errores.setObjectName(\"errores\")\n self.verticalLayout_2.addWidget(self.errores)\n self.analizarSinta = QtWidgets.QPushButton(self.verticalLayoutWidget_2)\n self.analizarSinta.setObjectName(\"analizarSinta\")\n self.verticalLayout_2.addWidget(self.analizarSinta)\n CH_MAQUINA.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(CH_MAQUINA)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 680, 26))\n self.menubar.setObjectName(\"menubar\")\n self.menuArchivo = QtWidgets.QMenu(self.menubar)\n self.menuArchivo.setObjectName(\"menuArchivo\")\n self.menuEjecutar = QtWidgets.QMenu(self.menubar)\n self.menuEjecutar.setObjectName(\"menuEjecutar\")\n self.menuInformaci_n = QtWidgets.QMenu(self.menubar)\n self.menuInformaci_n.setObjectName(\"menuInformaci_n\")\n self.menuImprimir = QtWidgets.QMenu(self.menubar)\n self.menuImprimir.setObjectName(\"menuImprimir\")\n CH_MAQUINA.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(CH_MAQUINA)\n self.statusbar.setObjectName(\"statusbar\")\n CH_MAQUINA.setStatusBar(self.statusbar)\n self.actionEncender = QtWidgets.QAction(CH_MAQUINA)\n self.actionEncender.setCheckable(False)\n self.actionEncender.setObjectName(\"actionEncender\")\n self.actionApagar = QtWidgets.QAction(CH_MAQUINA)\n self.actionApagar.setObjectName(\"actionApagar\")\n self.actionCargar_Programa = QtWidgets.QAction(CH_MAQUINA)\n self.actionCargar_Programa.setObjectName(\"actionCargar_Programa\")\n self.actionPrograma = QtWidgets.QAction(CH_MAQUINA)\n 
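# action for the \"Ejecutar > Programa\" menu entry; its caption is set in retranslateUi\n        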
self.actionPrograma.setObjectName(\"actionPrograma\")\n self.actionSalir_del_S_O = QtWidgets.QAction(CH_MAQUINA)\n self.actionSalir_del_S_O.setObjectName(\"actionSalir_del_S_O\")\n self.actionPaso_a_paso = QtWidgets.QAction(CH_MAQUINA)\n self.actionPaso_a_paso.setObjectName(\"actionPaso_a_paso\")\n self.menuArchivo.addAction(self.actionEncender)\n self.menuArchivo.addAction(self.actionApagar)\n self.menuArchivo.addAction(self.actionCargar_Programa)\n self.menuArchivo.addAction(self.actionSalir_del_S_O)\n self.menuEjecutar.addAction(self.actionPrograma)\n self.menuEjecutar.addAction(self.actionPaso_a_paso)\n self.menubar.addAction(self.menuArchivo.menuAction())\n self.menubar.addAction(self.menuEjecutar.menuAction())\n self.menubar.addAction(self.menuInformaci_n.menuAction())\n self.menubar.addAction(self.menuImprimir.menuAction())\n\n self.retranslateUi(CH_MAQUINA)\n self.actionSalir_del_S_O.triggered.connect(CH_MAQUINA.close)\n self.actionEncender.triggered.connect(self.encender.click)\n self.actionApagar.triggered.connect(self.apagar.click)\n self.actionCargar_Programa.triggered.connect(self.abrir.click)\n QtCore.QMetaObject.connectSlotsByName(CH_MAQUINA)\n\n def retranslateUi(self, CH_MAQUINA):\n _translate = QtCore.QCoreApplication.translate\n CH_MAQUINA.setWindowTitle(_translate(\"CH_MAQUINA\", \"Sistema Operativo CH - MÁQUINA\"))\n self.label_4.setText(_translate(\"CH_MAQUINA\", \"Valor\"))\n self.label_2.setText(_translate(\"CH_MAQUINA\", \"Posición de memoria\"))\n self.label_5.setText(_translate(\"CH_MAQUINA\", \"Total de memoria\"))\n self.label_3.setText(_translate(\"CH_MAQUINA\", \"Instrucción\"))\n self.label.setText(_translate(\"CH_MAQUINA\", \"Acumulador\"))\n self.label_11.setText(_translate(\"CH_MAQUINA\", \"KERNEL\"))\n item = self.memoria.horizontalHeaderItem(0)\n item.setText(_translate(\"CH_MAQUINA\", \"POS\"))\n item = self.memoria.horizontalHeaderItem(1)\n item.setText(_translate(\"CH_MAQUINA\", \"PROGRAMA\"))\n item = self.memoria.horizontalHeaderItem(2)\n item.setText(_translate(\"CH_MAQUINA\", \"INSTRUCCIÓN\"))\n item = self.memoria.horizontalHeaderItem(3)\n item.setText(_translate(\"CH_MAQUINA\", \"ARGUMENTO\"))\n item = self.memoria.horizontalHeaderItem(4)\n item.setText(_translate(\"CH_MAQUINA\", \"VALOR\"))\n self.label_9.setText(_translate(\"CH_MAQUINA\", \"Memoria\"))\n self.label_10.setText(_translate(\"CH_MAQUINA\", \"MEMORIA\"))\n self.label_16.setText(_translate(\"CH_MAQUINA\", \"MEMORIA DISPONIBLE\"))\n self.label_22.setText(_translate(\"CH_MAQUINA\", \"OFF\"))\n self.label_21.setText(_translate(\"CH_MAQUINA\", \"ON\"))\n self.label_25.setText(_translate(\"CH_MAQUINA\", \"Impr\"))\n self.label_23.setText(_translate(\"CH_MAQUINA\", \"Run\"))\n self.label_24.setText(_translate(\"CH_MAQUINA\", \"Open\"))\n self.label_6.setText(_translate(\"CH_MAQUINA\", \"Procesos\"))\n item = self.procesos.horizontalHeaderItem(0)\n item.setText(_translate(\"CH_MAQUINA\", \"ID\"))\n item = self.procesos.horizontalHeaderItem(1)\n item.setText(_translate(\"CH_MAQUINA\", \"PROGRAMAS\"))\n item = self.procesos.horizontalHeaderItem(2)\n item.setText(_translate(\"CH_MAQUINA\", \"Nº INST.\"))\n item = self.procesos.horizontalHeaderItem(3)\n item.setText(_translate(\"CH_MAQUINA\", \"RB\"))\n item = self.procesos.horizontalHeaderItem(4)\n item.setText(_translate(\"CH_MAQUINA\", \"RCL\"))\n item = self.procesos.horizontalHeaderItem(5)\n item.setText(_translate(\"CH_MAQUINA\", \"RLP\"))\n self.label_7.setText(_translate(\"CH_MAQUINA\", \"Variables\"))\n item = 
self.variables.horizontalHeaderItem(0)\n        item.setText(_translate(\"CH_MAQUINA\", \"POS\"))\n        item = self.variables.horizontalHeaderItem(1)\n        item.setText(_translate(\"CH_MAQUINA\", \"PROG.\"))\n        item = self.variables.horizontalHeaderItem(2)\n        item.setText(_translate(\"CH_MAQUINA\", \"TIPO\"))\n        item = self.variables.horizontalHeaderItem(3)\n        item.setText(_translate(\"CH_MAQUINA\", \"VARIABLES\"))\n        item = self.variables.horizontalHeaderItem(4)\n        item.setText(_translate(\"CH_MAQUINA\", \"VALOR\"))\n        self.label_8.setText(_translate(\"CH_MAQUINA\", \"Etiquetas\"))\n        item = self.etiquetas.horizontalHeaderItem(0)\n        item.setText(_translate(\"CH_MAQUINA\", \"POS\"))\n        item = self.etiquetas.horizontalHeaderItem(1)\n        item.setText(_translate(\"CH_MAQUINA\", \"PROG.\"))\n        item = self.etiquetas.horizontalHeaderItem(2)\n        item.setText(_translate(\"CH_MAQUINA\", \"ETIQUETA\"))\n        item = self.etiquetas.horizontalHeaderItem(3)\n        item.setText(_translate(\"CH_MAQUINA\", \"ARGUMENTO\"))\n        self.label_17.setText(_translate(\"CH_MAQUINA\", \"EDITOR DE PROGRAMAS CH\"))\n        self.analizarSinta.setText(_translate(\"CH_MAQUINA\", \"Actualizar archivo\"))\n        self.menuArchivo.setTitle(_translate(\"CH_MAQUINA\", \"Archivo\"))\n        self.menuEjecutar.setTitle(_translate(\"CH_MAQUINA\", \"Ejecutar\"))\n        self.menuInformaci_n.setTitle(_translate(\"CH_MAQUINA\", \"Información\"))\n        self.menuImprimir.setTitle(_translate(\"CH_MAQUINA\", \"Imprimir\"))\n        self.actionEncender.setText(_translate(\"CH_MAQUINA\", \"Encender\"))\n        self.actionApagar.setText(_translate(\"CH_MAQUINA\", \"Apagar\"))\n        self.actionCargar_Programa.setText(_translate(\"CH_MAQUINA\", \"Cargar Programa\"))\n        self.actionPrograma.setText(_translate(\"CH_MAQUINA\", \"Programa\"))\n        self.actionSalir_del_S_O.setText(_translate(\"CH_MAQUINA\", \"Salir del S.O.\"))\n        self.actionPaso_a_paso.setText(_translate(\"CH_MAQUINA\", \"Paso a paso\"))\n\n","sub_path":"Interfaz_CH_MAQUINA.py","file_name":"Interfaz_CH_MAQUINA.py","file_ext":"py","file_size_in_byte":22531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"345122840","text":"# vim: expandtab:ts=4:sw=4\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom operator import itemgetter\nfrom time import time\nfrom bb_proc import get_iou, bb_update_vp2, ds_score, bb_update_vp, bb_pred, bb_pred_kcf,bb_pred_kcf_aff\nfrom num2fname import num2fname\nimport cv2\nimport os\n\nfrom deep_sort import nn_matching\nfrom deep_sort.detection import Detection\n\n# %% FP Filter\n# copy from shiyan4.py, with the cf app removed\n\n## motion model: LSTM + data association\n## appearance: uses IOU instead of a CNN. remove CNN\n\ndef gather_sequence_info(sequence_dir, detection_file):\n    \"\"\"Gather sequence information, such as image filenames, detections,\n    groundtruth (if available).\n\n    Parameters\n    ----------\n    sequence_dir : str\n        Path to the MOTChallenge sequence directory.\n    detection_file : str\n        Path to the detection file.\n\n    Returns\n    -------\n    Dict\n        A dictionary of the following sequence information:\n\n        * sequence_name: Name of the sequence\n        * image_filenames: A dictionary that maps frame indices to image\n          filenames.\n        * detections: A numpy array of detections in MOTChallenge format.\n        * groundtruth: A numpy array of ground truth in MOTChallenge format.\n        * image_size: Image size (height, width).\n        * min_frame_idx: Index of the first frame.\n        * max_frame_idx: Index of the last frame.\n\n    \"\"\"\n    image_dir = os.path.join(sequence_dir, \"img1\")\n    image_filenames = {\n        int(os.path.splitext(f)[0]): 
os.path.join(image_dir, f)\n for f in os.listdir(image_dir)}\n groundtruth_file = os.path.join(sequence_dir, \"gt/gt.txt\")\n\n detections = None\n if detection_file is not None:\n detections = np.load(detection_file)\n groundtruth = None\n if os.path.exists(groundtruth_file):\n groundtruth = np.loadtxt(groundtruth_file, delimiter=',')\n\n if len(image_filenames) > 0:\n image = cv2.imread(next(iter(image_filenames.values())),\n cv2.IMREAD_GRAYSCALE)\n image_size = image.shape\n else:\n image_size = None\n\n if len(image_filenames) > 0:\n min_frame_idx = min(image_filenames.keys())\n max_frame_idx = max(image_filenames.keys())\n else:\n min_frame_idx = int(detections[:, 0].min())\n max_frame_idx = int(detections[:, 0].max())\n\n info_filename = os.path.join(sequence_dir, \"seqinfo.ini\")\n if os.path.exists(info_filename):\n with open(info_filename, \"r\") as f:\n line_splits = [l.split('=') for l in f.read().splitlines()[1:]]\n info_dict = dict(\n s for s in line_splits if isinstance(s, list) and len(s) == 2)\n\n update_ms = 1000 / int(info_dict[\"frameRate\"])\n else:\n update_ms = None\n\n feature_dim = detections.shape[1] - 10 if detections is not None else 0\n seq_info = {\n \"sequence_name\": os.path.basename(sequence_dir),\n \"image_filenames\": image_filenames,\n \"detections\": detections,\n \"groundtruth\": groundtruth,\n \"image_size\": image_size,\n \"min_frame_idx\": min_frame_idx,\n \"max_frame_idx\": max_frame_idx,\n \"feature_dim\": feature_dim,\n \"update_ms\": update_ms\n }\n return seq_info\n\ndef create_detections(detection_mat, frame_idx, min_height=0):\n \"\"\"Create detections for given frame index from the raw detection matrix.\n\n Parameters\n ----------\n detection_mat : ndarray\n Matrix of detections. The first 10 columns of the detection matrix are\n in the standard MOTChallenge detection format. In the remaining columns\n store the feature vector associated with each detection.\n frame_idx : int\n The frame index.\n min_height : Optional[int]\n A minimum detection bounding box height. 
Detections that are smaller\n than this value are disregarded.\n\n Returns\n -------\n List[tracker.Detection]\n Returns detection responses at given frame index.\n\n \"\"\"\n frame_indices = detection_mat[:, 0].astype(np.int)\n mask = frame_indices == frame_idx\n\n detection_list = []\n for row in detection_mat[mask]:\n bbox, confidence, feature = row[2:6], row[6], row[10:]\n #if bbox[3] < min_height:\n # continue\n detection_list.append(Detection(bbox, confidence, feature))\n return detection_list\n# %%\n# %%\nif __name__ == \"__main__\":\n # MOT17\n\n # chosekind = 'FRCNN'\n chosekind = 'SDP'\n # chosekind = 'DPM'\n \n \n \n if chosekind =='FRCNN':\n print('current detection:%s\\n'% chosekind)\n # test = True\n test = False\n if test:\n # FRCNN test One 1\n print('Here we test!\\n') \n fpath = '../../MOT17/test/'\n foldername = ('MOT17-01-FRCNN', 'MOT17-03-FRCNN', 'MOT17-06-FRCNN',\n 'MOT17-07-FRCNN', 'MOT17-08-FRCNN', 'MOT17-12-FRCNN',\n 'MOT17-14-FRCNN')\n resolution = ((1920, 1080), (1920, 1080), (640, 480), (1920, 1080),\n (1920, 1080), (1920, 1080), (1920, 1080))\n length = (450, 1500, 1194, 500, 625, 900, 750)\n else:\n # FRCNN train\n print('Here we train!\\n')\n fpath = '../../MOT17/train/'\n foldername = ('MOT17-02-FRCNN', 'MOT17-04-FRCNN', 'MOT17-05-FRCNN',\n 'MOT17-09-FRCNN', 'MOT17-10-FRCNN', 'MOT17-11-FRCNN',\n 'MOT17-13-FRCNN')\n resolution = ((1920, 1080), (1920, 1080), (640, 480), (1920, 1080),\n (1920, 1080), (1920, 1080), (1920, 1080))\n length = (600, 1050, 837, 525, 654, 900, 750)\n \n threshold_l = 0 # low detection threshold\n threshold_h = 0.9 # high detection threshold\n threshold_s = 0.0377 # score threshold\n\n threshold_s2 = 0.4 # score threshold for id shorter than 7 frames\n n_init = 4 # time threshold\n\n elif chosekind =='SDP':\n print('current detection:%s\\n'% chosekind)\n # test = True\n test = False\n if test:\n # SDP test Two 2\n print('Here we test!\\n')\n fpath = '../../MOT17/test/'\n foldername = ('MOT17-01-SDP', 'MOT17-03-SDP', 'MOT17-06-SDP',\n 'MOT17-07-SDP', 'MOT17-08-SDP', 'MOT17-12-SDP',\n 'MOT17-14-SDP')\n resolution = ((1920, 1080), (1920, 1080), (640, 480), (1920, 1080),\n (1920, 1080), (1920, 1080), (1920, 1080))\n length = (450, 1500, 1194, 500, 625, 900, 750)\n else:\n # SDP train\n print('Here we train!\\n')\n fpath = '../../MOT17/train/'\n \n foldername = ('MOT17-02-SDP', 'MOT17-04-SDP', 'MOT17-05-SDP',\n 'MOT17-09-SDP', 'MOT17-10-SDP', 'MOT17-11-SDP',\n 'MOT17-13-SDP')\n resolution = ((1920, 1080), (1920, 1080), (640, 480), (1920, 1080),\n (1920, 1080), (1920, 1080), (1920, 1080))\n length = (600, 1050, 837, 525, 654, 900, 750)\n \n threshold_l = 0.3 # low detection threshold\n threshold_h = 0.5 # high detection threshold\n threshold_s = 0.0359 # score threshold\n\n threshold_s2 = 0.3 # score threshold for id shorter than 7 frames\n n_init = 5 # time threshold\n\n elif chosekind == 'DPM':\n print('current detection:%s\\n'% chosekind)\n # test = True\n test = False\n if test:\n # DPM test\n print('Here we test!\\n')\n fpath = '../../MOT17/test/'\n foldername = ('MOT17-01-DPM', 'MOT17-03-DPM', 'MOT17-06-DPM',\n 'MOT17-07-DPM', 'MOT17-08-DPM', 'MOT17-12-DPM',\n 'MOT17-14-DPM')\n resolution = ((1920, 1080), (1920, 1080), (640, 480), (1920, 1080),\n (1920, 1080), (1920, 1080), (1920, 1080))\n length = (450, 1500, 1194, 500, 625, 900, 750)\n else: \n # DPM train\n print('Here we train!\\n')\n fpath = '../../MOT17/train/'\n foldername = ('MOT17-02-DPM', 'MOT17-04-DPM', 'MOT17-05-DPM',\n 'MOT17-09-DPM', 'MOT17-10-DPM', 'MOT17-11-DPM',\n 
'MOT17-13-DPM')\n resolution = ((1920, 1080), (1920, 1080), (640, 480), (1920, 1080),\n (1920, 1080), (1920, 1080), (1920, 1080))\n length = (600, 1050, 837, 525, 654, 900, 750)\n \n threshold_l = -10 # low detection threshold\n threshold_h = -9 # high detection threshold\n threshold_s = 0.0155 # score threshold\n\n threshold_s2 = 0.36 # score threshold for id shorter than 7 frames\n # t_min = 7 # time threshold ,that is n_init\n n_init = 7\n\n else:\n print('please chose right sequence.\\n')\n\n\n time_cnt = 0\n# %%\n # start tracking\n for folder, res, l in zip(foldername, resolution, length):\n print('Processing sequence: %s...' % folder)\n\n # detection data ================================================\n fname_det = '%s%s/det/det.txt' % (fpath, folder)\n dets = np.loadtxt(fname_det, delimiter=',')\n dets = dets.astype('float32')\n\n detection_file = '%s%s/det/%s.npy' % (fpath, folder, folder)\n sequence_dir = '%s%s' % (fpath, folder)\n seq_info = gather_sequence_info(sequence_dir, detection_file)\n\n min_confidence = 0.3\n nms_max_overlap = 1.0\n min_detection_height = 0\n max_cosine_distance = 0.2\n\n p_thr = 0.3 #0.25\n\n nn_budget = 100\n\n metric = nn_matching.NearestNeighborDistanceMetric(\n \"cosine\", max_cosine_distance, nn_budget)\n\n start = time()\n\n id_active, id_inactive = [], []\n\n # for each frame\n for f_num in range(1, l + 1):\n curframe = f_num\n frame_idx = curframe\n print('current frame is %d\\n' % curframe)\n\n # the detections of current frame =================================\n dets_f = dets[dets[:, 0] == f_num, :]\n dets_f = dets_f[dets_f[:, 6] > threshold_l, :] # filter detection\n if dets_f.shape[0] == 0:\n continue\n\n # Load current frame generate detections.\n detections = create_detections(\n seq_info[\"detections\"], frame_idx, min_detection_height)\n detections = [d for d in detections if d.confidence > threshold_l]\n #detections = [d for d in detections if d.confidence >= min_confidence]\n\n # Run non-maxima suppression.\n boxes = np.array([d.tlwh for d in detections])\n scores = np.array([d.confidence for d in detections])\n # indices = preprocessing.non_max_suppression(\n # boxes, nms_max_overlap, scores)\n # detections = [detections[i] for i in indices] # detections type: detection.Detection\n\n\n\n # add feature costmatrix ==========================================\n\n match_scores = np.zeros(dets_f.shape[0], dtype='float32')\n matched_flag = np.zeros(dets_f.shape[0], dtype=bool)\n\n # match_cosdistance = np.zeros(dets_f.shape[0], dtype='float32') # zero initiation\n # match_eudistance = np.zeros(dets_f.shape[0], dtype='float32')\n \n match_cosdistance = np.ones(dets_f.shape[0], dtype='float32') # one iinitiation\n # match_eudistance = np.ones(dets_f.shape[0], dtype='float32')\n\n id_updated = []\n\n imgurl = '%s%s/img1/%s' % (fpath, folder, num2fname(f_num))\n image = cv2.imread(imgurl)\n\n for id_ in id_active:\n # if this id is too short to use lstm motion model\n if len(id_['bb']) < 7:\n\n # calculates the bb matching score\n # method1 iouscore max\n\n for det_num, det in enumerate(dets_f):\n if matched_flag[det_num] == True:\n match_scores[det_num] = 0\n else:\n match_scores[det_num] = get_iou(\n id_['bb'][-1], det[2:6])[0][0]\n '''\n\n # method2 metric learning min\n # using nn_matching\n for det_num, det in enumerate(dets_f):\n if matched_flag[det_num] == True:\n match_cosdistance[det_num] = 100 #inf\n match_scores[det_num] = 0\n else:\n updateindex = det_num\n detfeature = detections[updateindex].feature # (128,)\n\n trackfeature = 
id_['features'][-1] # (128,)\n\n # Here to find the nearliest distance detection\n dis_cos = nn_matching._nn_cosine_distance([trackfeature], [detfeature])\n # dis_eu = nn_matching._nn_euclidean_distance([trackfeature], [detfeature])\n match_cosdistance[det_num]=dis_cos\n '''\n\n\n # method1 max iou\n\n best_match = dets_f[match_scores.argmax()]\n best_match_score = match_scores.max()\n updateindex = match_scores.argmax()\n\n # method2 min distance\n '''\n minindex = match_cosdistance.argmin()\n best_match2 = dets_f[minindex]\n best_match_score2 = match_cosdistance.min()\n '''\n # matches the bb with highest score\n # matching successfully =========================================\n # method1\n\n if best_match_score >= threshold_s2:\n bb_update_vp2(id_, best_match[2:6], res)\n id_['bb'].append(best_match[2:6])\n id_['max_score'] = max(id_['max_score'], best_match[-1])\n id_['features'].append(detections[updateindex].feature)\n\n matched_flag[match_scores.argmax()] = True\n id_updated.append(id_)\n\n # finishes this id\n else:\n # if it's a valid id\n if (id_['max_score'] >= threshold_h and\n len(id_['bb']) >= n_init):\n id_inactive.append(id_)\n '''\n # method2\n if best_match_score2 < max_cosine_distance:\n bb_update_vp2(id_, best_match2[2:6], res)\n id_['bb'].append(best_match2[2:6])\n id_['max_score'] = max(id_['max_score'], best_match2[-1])\n id_['features'].append(detections[minindex].feature)\n\n matched_flag[minindex] = True\n id_updated.append(id_)\n\n # finishes this id\n else:\n # if it's a valid id\n if (id_['max_score'] >= threshold_h and\n len(id_['bb']) >= n_init):\n id_inactive.append(id_)\n '''\n# // length(track)<7\n\n else:\n # LSTM\n # calculates the bb matching score\n for det_num, det in enumerate(dets_f):\n if matched_flag[det_num] == True:\n match_scores[det_num] = 0\n else:\n match_scores[det_num] = ds_score(\n id_, det[2:6], res)[0][0]\n\n best_match = dets_f[match_scores.argmax()]\n best_match_score = match_scores.max()\n updateindex = match_scores.argmax()\n\n\n # matches the bb with highest score\n # matching successfully========================================\n if best_match_score >= threshold_s:\n\n #det = best_match[2:6]\n #peakvalue = bb_pred_kcf_aff(det, image)\n\n #detfeature = detections[updateindex].feature\n\n #trackfeature = id_['features'][-1] # (128,)\n #dis_cos = nn_matching._nn_cosine_distance([trackfeature], [detfeature])\n\n #if peakvalue > p_thr: # response 0.25\n bb_update_vp(id_, best_match[2:6], res)\n id_['bb'].append(best_match[2:6])\n id_['max_score'] = max(id_['max_score'], best_match[-1])\n\n id_['features'].append(detections[updateindex].feature)\n\n matched_flag[match_scores.argmax()] = True\n id_['pred'] = 0\n id_updated.append(id_)\n\n # the id was not updated, predict the next bb, fix frame option\n # elif id_['pred'] < 3: # 6 4 missing detections\n # print(\"KCFpredframe:%s\" % f_num) \n \n # if f_num==749 and folder == 'MOT17-11-SDP':\n #print(\"give up frame %s\" % f_num)\n #continue\n \n # peakvalue = bb_pred_kcf(id_, image)\n\n # not updating max_score here\n # bb_pred(id_, res) #original\n #id_updated.append(id_)\n\n # finishes this id\n else:\n # if it has pred, clear all pred. 
not clearing v, p list\n for i in range(id_['pred']):\n id_['bb'].pop()\n id_['pred'] = 0\n\n # if it's a valid id\n if (id_['max_score'] >= threshold_h and\n len(id_['bb']) >= n_init):\n id_inactive.append(id_)\n\n # creates new ids ==============================add detection feature # reid feature\n\n\n\n\n id_new = [{'bb': [det[2:6]],\n 'v_list': np.zeros((6, 2), dtype='float32'),\n 'max_score': det[6],\n 'f_start': f_num,\n 'pred': 0,\n 'features':[detections[det_num].feature]}\n for det_num, det in enumerate(dets_f)\n if matched_flag[det_num] == False]\n\n id_active = id_updated + id_new\n\n # =======================================================================================----------------\n # finishes the remained ids\n for id_ in id_active:\n # if it has pred, clear all pred. not clearing v, p list\n for i in range(id_['pred']):\n id_['bb'].pop()\n id_['pred'] = 0\n\n # if it's a valid id\n if id_['max_score'] >= threshold_h and len(id_['bb']) >= n_init:\n id_inactive.append(id_)\n\n end = time()\n time_cnt += end - start\n# %%\n # now id_inactive is the final tracking result\n result_bb = []\n for id_num, id_ in enumerate(id_inactive):\n for bb_num, bb in enumerate(id_['bb']):\n result_bb += [[id_['f_start'] + bb_num, id_num + 1, bb[0], bb[1],\n bb[2], bb[3], -1, -1, -1, -1]]\n result_bb.sort(key=itemgetter(1, 0))\n with open('./results/%s.txt' % folder, 'w') as rst_f:\n for bb in result_bb:\n rst_f.write(','.join([str(value) for value in bb]) + '\\n')\n\n print('Total tracking time consumption:', time_cnt, 's.')\n","sub_path":"py/shiyan7.py","file_name":"shiyan7.py","file_ext":"py","file_size_in_byte":20088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"406908479","text":"import time\r\nfrom openerp.osv import fields, osv\r\nfrom openerp.tools.translate import _\r\nimport openerp.addons.decimal_precision as dp\r\nimport logging\r\n_logger = logging.getLogger(__name__)\r\n\r\nclass dincelpurchase_pay_invoice(osv.osv_memory):\r\n\t_name = \"dincelpurchase.pay.invoice\"\r\n\t#_description = \"Sales Make MRP\"\r\n\tdef _amount_total(self, cr, uid, ids, field_name, arg, context=None):\r\n\t\tres = {}\r\n\t\t#_total=0.0\r\n\t\t#_logger.error(\"updatelink_order_dcs._amount_untaxed_amount_untaxed0000[\"+str(_total)+\"]\")\t\r\n\t\tfor record in self.browse(cr, uid, ids):\r\n\t\t\t#if record.amount:\r\n\t\t\t#\t_total=_total+record.amount\r\n\t\t\t#if record.amount_fee:\r\n\t\t\t#_total=record.amount+record.amount_fee\t\r\n\t\t\tres[record.id] = record.amount+record.amount_fee\t\r\n\t\treturn res\r\n\t\t\r\n\tdef onchange_fee_amt(self, cr, uid, ids, _amt, _fee, context=None):\r\n\t\tcontext = context or {}\r\n\t\treturn {'value': {'amount_total': (_amt+_fee)}}\t\r\n\t\t\r\n\tdef onchange_pay_lines(self, cr, uid, ids, payline_ids, paymethod_id, _amt, _fee, context=None):\r\n\t\tcontext = context or {}\r\n\t\t\r\n\t\tamt = 0.0\r\n\t\tamt_fee = 0.0 \r\n\t\t\r\n\t\tif payline_ids:\r\n\t\t\t\r\n\t\t\tline_ids = self.resolve_2many_commands(cr, uid, 'pay_lines', payline_ids, ['amount','reconcile'], context)\r\n\t\t\t\r\n\t\t\tfor line in line_ids:\r\n\t\t\t\tif line:\r\n\t\t\t\t\t#_logger.error(\"updatelink_order_dcs.onchange_pay_lineslinelineleine[\"+str(line)+\"]\")\t\r\n\t\t\t\t\tif line['amount']:\r\n\t\t\t\t\t\tamt += line['amount']\r\n\t\t\ttry:\r\n\t\t\t\tif paymethod_id:\r\n\t\t\t\t\tobj\t\t= self.pool.get('dincelaccount.paymethod').browse(cr,uid,paymethod_id,context=context)\r\n\t\t\t\t\tif 
obj.fee_purchase:\r\n\t\t\t\t\t\tamt_fee=amt*obj.fee_purchase*0.01\r\n\t\t\t\t\t\t#amt += amt_fee\r\n\t\t\texcept ValueError:\r\n\t\t\t\tpass\r\n\t\treturn {'value': {'amount': amt,'amount_total': (amt+amt_fee),'amount_fee':amt_fee}}\r\n\t\t\r\n\t_columns = {\r\n\t\t'date': fields.date('Payment Date'),\r\n\t\t'pay_lines':fields.one2many('dincelpurchase.pay.invoice.line', 'pay_invoice_id', 'Invoices'),\r\n\t\t'qty':fields.float(\"Qty test\"),\r\n\t\t'journal_id':fields.many2one('account.journal', 'Journal'),\r\n 'account_id':fields.many2one('account.account', 'Account'),\r\n\t\t'partner_id':fields.many2one('res.partner', 'Partner'),\r\n 'amount': fields.float('Total'),\r\n 'reference': fields.char('Ref #'),\r\n\t\t'comment': fields.char('Payment Description', size=18), #changed from 12 to 20 #as per Rita 20/3/2017 #changed back to 12 as per Felix (for aba file generation) [Dincel Const. Sys]\r\n\t\t'company_id': fields.many2one('res.company', 'Company'),\r\n\t\t'x_paymethod_id':fields.many2one('dincelaccount.paymethod', 'Pay Method'),\r\n\t\t'amount_fee': fields.float('Card Fee'),\r\n\t\t'amount_total': fields.function(_amount_total, digits_compute=dp.get_precision('Account'), string='Total'),\r\n\t}\r\n\t\r\n\tdef button_reset_total(self, cr, uid, ids, context=None):\r\n\t\treturn True\r\n\t\t\r\n\tdef make_payment_dcs(self, cr, uid, ids, context=None):\r\n\t\t_vobj = self.pool.get('account.voucher')\r\n\t\t_vobjline = self.pool.get('dincelaccount.voucher.payline')\r\n\t\t_obj = self.pool.get('dincelpurchase.pay.invoice').browse(cr, uid, ids[0], context=context)\r\n\t\ttot_amt = 0.0\r\n\t\tif _obj.pay_lines: \r\n\t\t\tfor line in _obj.pay_lines:\r\n\t\t\t\tif line.amount > 0.0:\r\n\t\t\t\t\ttot_amt += line.amount\r\n\t\t\r\n\t\t_objperiod \t= self.pool.get('account.period') \r\n\t\tperiod_id\t= _objperiod.find(cr, uid, _obj.date, context=context)[0]\r\n\t\tvals = {\r\n\t\t\t'journal_id':_obj.journal_id.id,\r\n\t\t\t'amount':tot_amt,\r\n\t\t\t'x_amount_xtra':0,\r\n\t\t\t'x_amount_base':tot_amt,\r\n\t\t\t'account_id':_obj.account_id.id,\r\n\t\t\t'reference':_obj.reference,\r\n\t\t\t'type':'payment',\r\n\t\t\t'state':'draft',\r\n\t\t\t#'partner_id':_obj.partner_id.id,\r\n\t\t\t'period_id':period_id,\r\n\t\t\t'date':_obj.date,\r\n\t\t\t'comment':_obj.comment,\r\n\t\t\t}\r\n\t\tif \t_obj.partner_id:\r\n\t\t\tvals['partner_id']=_obj.partner_id.id \r\n\t\tif \t_obj.x_paymethod_id:\r\n\t\t\tvals['x_paymethod_id']=_obj.x_paymethod_id.id \r\n\t\tif \t_obj.amount_fee:\r\n\t\t\tvals['x_amount_xtra']=_obj.amount_fee\r\n\t\t\t\r\n\t\t#_logger.error(\"make_payment_dcsmake_payment_dcs[\"+str(vals)+\"]\")\t\r\n\t\tvoucher_id =_vobj.create(cr, uid, vals, context=context)\r\n\t\t\t\r\n\t\tif tot_amt > 0.0:\r\n\t\t\tfor line in _obj.pay_lines:\r\n\t\t\t\tif line.amount > 0.0:\r\n\t\t\t\t\tvals = {\r\n\t\t\t\t\t\t'voucher_id':voucher_id,\r\n\t\t\t\t\t\t'amount':line.amount,\r\n\t\t\t\t\t\t'invoice_id':line.invoice_id.id,\r\n\t\t\t\t\t\t'partner_id':line.invoice_id.partner_id.id,\r\n\t\t\t\t\t\t'supplier_id':line.invoice_id.partner_id.id,\r\n\t\t\t\t\t\t'type':'pay_invoice',\r\n\t\t\t\t\t\t'ref_aba':line.invoice_id.number,\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t_vobjline.create(cr, uid, vals, context=context)\r\n\t\t\t\t\t\r\n\t\t\t\tif line.amount == line.amount_balance:\r\n\t\t\t\t\tself.pool.get('account.invoice').write(cr, uid, [line.invoice_id.id], {'state': 'paid'})\r\n\t\t\t_vobj.supplier_payment_validate_dcs(cr, uid, [voucher_id], context) #auto validate the payment...\r\n\t\telse:#refund invoice 
....\r\n\t\t\t#eg GIO /etc\r\n\t\t\tfor line in _obj.pay_lines:\r\n\t\t\t\tif line.amount:\r\n\t\t\t\t\tvals = {\r\n\t\t\t\t\t\t'voucher_id':voucher_id,\r\n\t\t\t\t\t\t'amount':line.amount,\r\n\t\t\t\t\t\t'invoice_id':line.invoice_id.id,\r\n\t\t\t\t\t\t'partner_id':line.invoice_id.partner_id.id,\r\n\t\t\t\t\t\t'supplier_id':line.invoice_id.partner_id.id,\r\n\t\t\t\t\t\t'type':'pay_invoice',\r\n\t\t\t\t\t\t'ref_aba':line.invoice_id.number,\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t_vobjline.create(cr, uid, vals, context=context)\r\n\t\t\t\t\t\r\n\t\t\t\tif line.amount == line.amount_balance:\r\n\t\t\t\t\tself.pool.get('account.invoice').write(cr, uid, [line.invoice_id.id], {'state': 'paid'})\r\n\t\t\t_vobj.supplier_payment_validate_dcs(cr, uid, [voucher_id], context) #auto validate the payment...\t\t\r\n\t\treturn True\r\n\t\t\r\n\t\r\n\tdef onchange_journal_dcs(self, cr, uid, ids, journal_id, context=None):\r\n\t\tif context is None:\r\n\t\t\tcontext = {}\r\n\t\tvals={}\t\r\n\t\tobj=self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)\r\n\t\tif obj:\r\n\t\t\taccount_id \t= obj.default_debit_account_id.id\r\n\t\t\tvals\t\t= {'account_id':account_id,'x_paymethod_id':None} #to clear the list for none setup\r\n\t\t\tif obj.x_paymethod_id:\r\n\t\t\t\tvals['x_paymethod_id']=obj.x_paymethod_id.id\r\n\t\t\t\t\r\n\t\treturn {'value':vals}\t\r\n\t\t\r\n\tdef onchange_account_id(self, cr, uid, ids, account_id, context=None):\r\n\t\treturn True\r\n\t\t\r\n\tdef on_change_qty(self, cr, uid, ids, product_qty, pay_lines, context=None):\r\n\t\t\r\n\t\tnew_pay_lines = []\r\n\t\t_partner_id=None\r\n\t\t_prev_partner=None\r\n\t\tif context and context.get('active_ids'):\r\n\t\t\t\r\n\t\t\t_ids=context.get('active_ids')\r\n\t\t\t\r\n\t\t\tfor o in self.pool.get('account.invoice').browse(cr, uid, _ids, context=context):\r\n\t\t\t\t\r\n\t\t\t\tif o.state==\"open\" and o.type==\"in_invoice\":\t#in_invoice=supplier invoice only\r\n\t\t\t\t\t#if not _partner_id:\r\n\t\t\t\t\t_partner_id= o.partner_id.id or False\r\n\t\t\t\t\tif not _prev_partner:\r\n\t\t\t\t\t\t_prev_partner=_partner_id\r\n\t\t\t\t\tif _partner_id:#== o.partner_id.id: #take only one partner\r\n\t\t\t\t\t\tamount_bal=o.amount_total\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tsql=\"select sum(amount) as tot from dincelaccount_voucher_payline where invoice_id=\" + str(o.id) #type='pay_invoice' and [this condition removed...]\r\n\t\t\t\t\t\t\tcr.execute(sql)\r\n\t\t\t\t\t\t\trows = cr.fetchall()\r\n\t\t\t\t\t\t\tif len(rows) > 0 and rows[0][0]:\r\n\t\t\t\t\t\t\t\tamount_bal = amount_bal-float(rows[0][0])\r\n\t\t\t\t\t\texcept ValueError:\r\n\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\tvals = {\r\n\t\t\t\t\t\t\t'invoice_id':o.id,\r\n\t\t\t\t\t\t\t'supplier_invoice_number':o.supplier_invoice_number,\r\n\t\t\t\t\t\t\t'amount_balance':amount_bal,\r\n\t\t\t\t\t\t\t'date_due':o.date_due or False,\r\n\t\t\t\t\t\t\t'date':o.date_due or False,\r\n\t\t\t\t\t\t\t'amount_original':o.amount_total,\r\n\t\t\t\t\t\t\t'amount':0,\r\n\t\t\t\t\t\t\t'reconcile':False,\r\n\t\t\t\t\t\t\t'partner_id': o.partner_id.id or False,\r\n\t\t\t\t\t\t\t'invoice_number':o.internal_number,\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tnew_pay_lines.append(vals)\r\n\t\t\t\t\t\t\r\n\t\tvals={'pay_lines': new_pay_lines}\t\r\n\t\tif _prev_partner==_partner_id:\r\n\t\t\t#_partner_id=None\r\n\t\t\tvals['partner_id']=_partner_id\r\n\t\telse:\t\r\n\t\t\tvals['partner_id']=None\r\n\t\t\t\r\n\t\treturn {'value':vals}\r\n\t\t\r\n \r\n\tdef _get_init_qty(self, cr, uid, context=None):\r\n\t\treturn 1\r\n\t_defaults 
= {\r\n\t\t'date': fields.date.context_today, #for getting local date...see...quotation (dincelcrm)\r\n\t\t'qty': _get_init_qty,\r\n\t\t#'date': lambda *a: time.strftime('%Y-%m-%d'), not getting local date...but gmt\r\n 'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.voucher',context=c),\r\n\t\t}\r\n\t\t\r\nclass dincelpurchase_pay_invoice_line(osv.osv_memory):\r\n\t_name = \"dincelpurchase.pay.invoice.line\"\r\n\tdef _amount_subtotal(self, cr, uid, ids, field_name, arg, context=None):\r\n\t\tres = {}\r\n\t\t#_total=0.0\r\n\t\t#_logger.error(\"updatelink_order_dcs._amount_untaxed_amount_untaxed0000[\"+str(_total)+\"]\")\t\r\n\t\tfor record in self.browse(cr, uid, ids):\r\n\t\t\t#if record.amount:\r\n\t\t\t#\t_total=_total+record.amount\r\n\t\t\t#if record.amount_fee:\r\n\t\t\t#_total=record.amount+record.amount_fee\t\r\n\t\t\tres[record.id] = record.amount+record.amount_fee\t\r\n\t\treturn res\r\n\t\t\r\n\t_columns = {\r\n\t\t'pay_invoice_id': fields.many2one('dincelpurchase.pay.invoice', 'Pay Reference'),\r\n\t\t'invoice_id': fields.many2one('account.invoice', 'Invoice'),\r\n\t\t'reconcile': fields.boolean('Full Reconcile'),\r\n\t\t'supplier_invoice_number':fields.char(\"Supplier Invoice No.\"),\r\n\t\t'amount': fields.float('Amount'),\r\n\t\t'amount_fee': fields.float('Card Fee'),\r\n\t\t'amount_balance': fields.float('Amount Balance'),\r\n\t\t'amount_subtotal': fields.function(_amount_subtotal, digits_compute=dp.get_precision('Account'), string='Subtotal'),\r\n\t\t'date':fields.date('Invoice Date'),\r\n\t\t'date_due':fields.date('Due Date'),\r\n\t\t'name':fields.char('Memo'),\r\n\t\t'paymethod_id':fields.many2one('dincelaccount.paymethod', 'Pay Method'),\r\n\t\t'amount_original': fields.related('invoice_id', 'amount_total', type='float', string='Invoice Value',store=False),\r\n\t\t'invoice_number': fields.related('invoice_id', 'internal_number', type='text', string='Number',store=False),\r\n\t\t'partner_id':fields.related('invoice_id', 'partner_id', type='many2one', relation='res.partner', string='Partner'),\r\n\t\t#'paymethod_id':fields.many2one('dincelaccount.paymethod', 'Pay Method'),\r\n\t}\r\n\t \r\n\t\t\r\n\tdef onchange_reconcile(self, cr, uid, ids, reconcile, amount, amount_unreconciled, context=None):\r\n\t\tamount=0.0\r\n\t\t#vals = {'amount': 0.0}\r\n\t\tif reconcile:\r\n\t\t\tamount=amount_unreconciled\r\n\t\t\r\n\t\tvals = { 'amount': amount}\r\n\t\treturn {'value': vals}\r\n\t\t\r\n\tdef onchange_amount(self, cr, uid, ids, amount, amount_unreconciled, context=None):\r\n\t\tvals = {}\r\n\t\tif amount:\r\n\t\t\tvals['reconcile'] = (amount == amount_unreconciled)\r\n\t\t\tvals['amount'] = amount#vals = { 'amount': amount}\r\n\t\t\t\r\n\t\treturn {'value': vals}\t\t\t \r\n\t ","sub_path":"dincelaccount/wizard/purchase_invoice_pay.py","file_name":"purchase_invoice_pay.py","file_ext":"py","file_size_in_byte":10251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"573311339","text":"from PyQt5 import QtGui\nfrom PyQt5.QtCore import Qt\nimport pyqtgraph as pg\nimport serial\nimport time\nimport os\nfrom hotfire_packet import ECParse\nimport struct\n\nparser = ECParse()\n\n# Gloabals\nmtr = ['mtr0', 'mtr1', 'mtr2', 'mtr3']\nmtr_enable = []\nmtr_disable = []\nmtr_setpoint = []\nmtr_position = []\nmtr_pwm = []\nmtr_send = []\nmtr_setpointfb = []\n\n\nrun_name = input(\"Enter run name: \")\n\nserial_log = open('data/'+run_name+\"_serial_log.csv\", \"w+\")\ninfo_log = 
open('data/'+run_name+\"_python_log.csv\", \"w+\")\ncommand_log = open('data/'+run_name+\"_command_log.csv\", \"w+\")\ndata_log = open('data/'+run_name+\"_datalog.csv\", \"w+\")\n\ncommand_log.write(\"Time, Command/info\\n\")\ndata_log.write(parser.csv_header)\n\n## Always start by initializing Qt (only once per application)\napp = QtGui.QApplication([])\n## Define a top-level widget to hold everything\nw = QtGui.QWidget()\nw.setWindowTitle('MASA Hotfire GUI - logging to '+run_name)\n## Create a grid layout to manage the widgets size and position\nlayout = QtGui.QGridLayout()\nw.setLayout(layout)\n# Zero Indexes for the gui layout (row, column)\nzr = 2\nzc = 2\n\n# Populate the alias dictionary\nalias = {}\nalias_file = open(\"devices.alias\")\nfor line in alias_file:\n\ts = line.split('\\t')\n\talias[s[0]] = s[1].rstrip('\\r\\n')\n\ninfo_log.write(\"Alias File\")\nalias_file.seek(0)  # rewind: the loop above already exhausted the file iterator\nfor line in alias_file:\n\tinfo_log.write(line)\ninfo_log.write(str(alias))\ninfo_log.write(\"\\n\")\n\ntry:\n\tif(\"STATE_N\" in alias.keys()):\n\t\tstate_dict = {}\n\t\tfor n in range(0, int(alias[\"STATE_N\"])):\n\t\t\tstate_dict[n] = alias[\"STATE\"+str(n)]\n\telse:\n\t\traise Exception(\"STATE_N definition not found in devices.alias file\")\nexcept Exception:\n\tprint(\"INVALID STATE ALIAS DEFINITIONS\")\n\n# Try to open the serial port\n\nser = serial.Serial(port=None, baudrate=4000000, timeout=0.5)\nser.port = alias[\"COM_PORT\"]\n\ntry:\n\tser.open()\n\tif(ser.is_open):\n\t\tser.readline()\nexcept:\n\tprint(\"Could not open Serial Port\")\n\n# Parse a line and update GUI fields\nwrite_csv_header = True\ndef parse_serial():\n\n\ttry:\n\t\tif(ser.is_open):\n\t\t\t\t# Read a packet\n\t\t\tpacket = ser.readline()\t\n\t\t\t# Unstuff the packet\n\t\t\tunstuffed = b''\n\t\t\tindex = int(packet[0])\n\t\t\tfor n in range(1, len(packet)):\n\t\t\t\ttemp = packet[n:n+1]\n\t\t\t\tif(n == index):\n\t\t\t\t\tindex = int(packet[n])+n\n\t\t\t\t\ttemp = b'\\n'\n\t\t\t\tunstuffed = unstuffed + temp\n\t\t\tpacket = unstuffed\n\t\t\t#line = str(line, 'ascii')\n\t\t\t#try:\n\t\t\t\t#split_line = line.split(',')\n\t\t\ttry:\n\t\t\t\tparser.parse_packet(packet)\n\t\t\texcept:\n\t\t\t\tprint(\"Parser error\")\n\t\t\t\tinfo_log.write(time.ctime()+\" parser error\\n\")\n\t\t\tdata_log.write(parser.log_string+'\\n')\n\t\t\tserial_log.write(\"%.3f,\" % time.clock())\n\t\t\tserial_log.write(str(packet)+'\\n')\n\t\t\t# except:\n\t\t\t# \tprint(\"Error\")\n\t\t\t# \tpass\n\n\t\t\tstate_label.setText(\"STATE = \"+state_dict[parser.STATE])\n\n\t\t\tlog_to_auto_label.setText(\"Logging to auto: \"+str(parser.LOG_TO_AUTO))\n\t\t\t# if(AUTOSTRING == \"0\"):\n\t\t\t# \tpass \t# No new string sent\n\t\t\t# else:\n\t\t\t# \ttemp = \"\"\n\t\t\t# \tsplit_auto = AUTOSTRING.split('|')\n\t\t\t# \tfor chunk in split_auto:\n\t\t\t# \t\ttemp = temp + chunk + \"\\n\"\n\t\t\t# \tautofeedback.setPlainText(temp)\n\t\t\t# \tprint(\"AUTOSTRING RECEIVED: \"+AUTOSTRING)\n\n\t\t\tmask = 1\n\t\t\trunning_autos_string = \"Running Autos: \"\n\t\t\t# Update auto state feedback\n\t\t\tfor n in range(0, 16):\n\t\t\t\tstate = 0\n\t\t\t\tif(mask & parser.auto_states):\n\t\t\t\t\trunning_autos_string += (str(n)+\", \")\n\t\t\t\tmask = mask << 1\n\n\t\t\t\trunning_autos_label.setText(running_autos_string)\n\t\t\t# print(\"Packet parsed\")\n\t\t\t# print(\"battery: \"+str(ebatt)+\" \\t and %.2f\" % time.clock())\n\n\t\t\tmask = 1\n\t\t\t# Update valve state feedback\n\t\t\tfor n in range(0, 16):\n\t\t\t\tstate = 0\n\t\t\t\tif(mask & parser.valve_states):\n\t\t\t\t\tstate = 
1\n\t\t\t\tvalve_buttons[n][2].setText(str(state))\n\t\t\t\tvalve_buttons[n][3].setText(str(parser.ivlv[n]))\n\t\t\t\tvalve_buttons[n][4].setText(str(parser.evlv[n]))\n\t\t\t\tmask = mask << 1\n\n\t\t\t\tpressure_labels[n][1].setText(str(parser.pressure[n])+\"psi\")\n\t\t\t# Update loop rates\n\t\t\tsamplerate_setpointfb.setText(str(parser.samplerate)+\"hz\")\n\t\t\ttelemrate_setpointfb.setText(str(parser.telemetry_rate)+\"hz\")\n\t\t\tfor mtrx in range(0, 4):\n\t\t\t\ttry:\n\t\t\t\t\tmtr_setpointfb[mtrx].setText(str(parser.motor_setpoint[mtrx]))\n\t\t\t\t\tmtr_position[mtrx].setText(str(parser.motor_position[mtrx]))\n\t\t\t\t\tmtr_pwm[mtrx].setText(\"PWM: \"+str(parser.motor_pwm[mtrx]))\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\n\t\t\t#main_cycle_rate.setText(str(round(1000000/main_cycle_time, 3)))\n\t\t\tmotor_cycle_rate.setText(str(round(1000000/parser.motor_cycle_time, 3)))\n\t\t\tadc_cycle_rate.setText(str(round(1000000/parser.adc_cycle_time, 3)))\n\t\t\ttelemetry_cycle_rate.setText(str(round(1000000/parser.telemetry_cycle_time, 3)))\n\n\t\t\t# Board health\n\t\t\tebatt_value.setText(str(parser.ebatt))\n\t\t\tibus_value.setText(str(parser.ibus))\n\n\t\t\t# motor gain feedback\n\t\t\tkpfb.setText(str(parser.motor_control_gain[0]))\n\t\t\tkifb.setText(str(parser.motor_control_gain[1]))\n\t\t\tkdfb.setText(str(parser.motor_control_gain[2]))\n\n\t\t\tcount1_label.setText(\"count1: \"+str(parser.count1))\n\t\t\tcount2_label.setText(\"count2: \"+str(parser.count2))\n\t\t\tcount3_label.setText(\"count3: \"+str(parser.count3))\n\n\t\t\tstate_label.setText(\"STATE = \"+state_dict[parser.STATE])\n\n\t\t\tthrust_load_label.setText(\"Thrust = \"+str(parser.thrust_load))\n\t\t\tfor n in range(0, 4):\n\t\t\t\tload_label[n].setText(str(n)+\": \"+str(parser.load[n]))\n\t\t\tfor n in range(0, 4):\n\t\t\t\ttc_label[n].setText(\"TC-\"+str(n)+\": \"+str(parser.thermocouple[n]))\n\texcept Exception as e:\n\t\tprint(e)\n\n\ndef command(device, command):\n\tcommand_string = \"command \"+str(device)+\" \"+str(command)\n\tsend(command_string)\n\ndef set(variable, value):\n\tcommand_string = \"set \"+str(variable)+\" \"+str(value)\n\tsend(command_string)\n\ndef motor_enable(motor_num, enable):\n\tcommand_string = \"\"\n\tif(enable):\n\t\tcommand_string += \"enable \"\n\telse:\n\t\tcommand_string += \"disable \"\n\tcommand_string += \"mtr\"+str(motor_num)+\" x\"\n\tsend(command_string)\n\ndef send(command_string):\n\tcommand_string = command_string + \" \\r\"\n\tprint(\"SENDING: \"+command_string.rstrip('\\n'))\n\tcommand_log.write(\"%.3f,\\tSENDING: \" % time.clock()+command_string)\n\tif(ser.is_open):\n\t\tser.write(command_string.encode('ascii'))\n\nvalve_buttons = []\npressure_labels = []\nfor n in range(0, 16):\n\n\t# Valve widgets init\n\ttemp = []\n\tvlv_id = 'vlv'+str(n)\n\tif vlv_id in alias.keys():\n\t\tvlv_id = alias[vlv_id]\n\ttemp.append(QtGui.QPushButton(str(vlv_id)+' = OFF'))\n\ttemp.append(QtGui.QPushButton(str(vlv_id)+' = ON'))\n\ttemp.append(QtGui.QLabel())\n\ttemp.append(QtGui.QLabel())\n\ttemp.append(QtGui.QLabel())\n\tvalve_buttons.append(temp)\n\n\t# Pressure reading widgets init\n\tptemp = []\n\tptemp.append(QtGui.QLabel())\n\tptemp.append(QtGui.QLabel())\n\tpressure_labels.append(ptemp)\n\tpress_id = \"pressure\"+str(n)\n\tif press_id in alias.keys():\n\t\tpress_id = alias[press_id]\n\tpressure_labels[n][0].setText(press_id+\":\")\n\tpressure_labels[n][1].setText(str(0)+\"psi\")\n\n\n## For some reason this doesn't work with a for loop, sorry\nvalve_buttons[0][0].clicked.connect(lambda: 
command(\"vlv0\", 0))\nvalve_buttons[1][0].clicked.connect(lambda: command(\"vlv1\", 0))\nvalve_buttons[2][0].clicked.connect(lambda: command(\"vlv2\", 0))\nvalve_buttons[3][0].clicked.connect(lambda: command(\"vlv3\", 0))\nvalve_buttons[4][0].clicked.connect(lambda: command(\"vlv4\", 0))\nvalve_buttons[5][0].clicked.connect(lambda: command(\"vlv5\", 0))\nvalve_buttons[6][0].clicked.connect(lambda: command(\"vlv6\", 0))\nvalve_buttons[7][0].clicked.connect(lambda: command(\"vlv7\", 0))\nvalve_buttons[8][0].clicked.connect(lambda: command(\"vlv8\", 0))\nvalve_buttons[9][0].clicked.connect(lambda: command(\"vlv9\", 0))\nvalve_buttons[10][0].clicked.connect(lambda: command(\"vlv10\", 0))\nvalve_buttons[11][0].clicked.connect(lambda: command(\"vlv11\", 0))\nvalve_buttons[12][0].clicked.connect(lambda: command(\"vlv12\", 0))\nvalve_buttons[13][0].clicked.connect(lambda: command(\"vlv13\", 0))\nvalve_buttons[14][0].clicked.connect(lambda: command(\"vlv14\", 0))\nvalve_buttons[15][0].clicked.connect(lambda: command(\"vlv15\", 0))\nvalve_buttons[0][1].clicked.connect(lambda: command(\"vlv0\", 1))\nvalve_buttons[1][1].clicked.connect(lambda: command(\"vlv1\", 1))\nvalve_buttons[2][1].clicked.connect(lambda: command(\"vlv2\", 1))\nvalve_buttons[3][1].clicked.connect(lambda: command(\"vlv3\", 1))\nvalve_buttons[4][1].clicked.connect(lambda: command(\"vlv4\", 1))\nvalve_buttons[5][1].clicked.connect(lambda: command(\"vlv5\", 1))\nvalve_buttons[6][1].clicked.connect(lambda: command(\"vlv6\", 1))\nvalve_buttons[7][1].clicked.connect(lambda: command(\"vlv7\", 1))\nvalve_buttons[8][1].clicked.connect(lambda: command(\"vlv8\", 1))\nvalve_buttons[9][1].clicked.connect(lambda: command(\"vlv9\", 1))\nvalve_buttons[10][1].clicked.connect(lambda: command(\"vlv10\", 1))\nvalve_buttons[11][1].clicked.connect(lambda: command(\"vlv11\", 1))\nvalve_buttons[12][1].clicked.connect(lambda: command(\"vlv12\", 1))\nvalve_buttons[13][1].clicked.connect(lambda: command(\"vlv13\", 1))\nvalve_buttons[14][1].clicked.connect(lambda: command(\"vlv14\", 1))\n#valve_buttons[15][1].clicked.connect(lambda: command(\"vlv15\", 1)) # This is the igniter channel\n\n# motor control\nfor mtrx in range(0, 4):\n\tmtr_enable.append(QtGui.QPushButton(mtr[mtrx]+\" ENABLE\"))\n\tmtr_disable.append(QtGui.QPushButton(mtr[mtrx]+\" DISABLE\"))\n\tmtr_setpoint.append(QtGui.QLineEdit())\n\tmtr_position.append(QtGui.QLabel(\"POSITION FB\"))\n\tmtr_pwm.append(QtGui.QLabel(\"pwm FB\"))\n\tmtr_send.append(QtGui.QPushButton(\"Command Setpoint\"))\n\tmtr_setpointfb.append(QtGui.QLabel(\"SETPOINT FB\"))\n\n\tif mtr[mtrx] in alias.keys():\n\t\tmtr_enable[mtrx].setText(alias[mtr[mtrx]]+\" ENABLE\")\n\t\tmtr_disable[mtrx].setText(alias[mtr[mtrx]]+\" DISABLE\")\n\n\tlayout.addWidget(mtr_disable[mtrx], zr+1+(2*mtrx), zc+5)\n\tlayout.addWidget(mtr_enable[mtrx], zr+1+(2*mtrx), zc+6)\n\tlayout.addWidget(mtr_send[mtrx],zr+2+(2*mtrx), zc+5)\n\tlayout.addWidget(mtr_pwm[mtrx], zr+1+(2*mtrx), zc+8)\t\n\tlayout.addWidget(mtr_setpoint[mtrx], zr+2+(2*mtrx), zc+6)\n\tlayout.addWidget(mtr_setpointfb[mtrx], zr+2+(2*mtrx), zc+7)\n\tlayout.addWidget(mtr_position[mtrx], zr+2+(2*mtrx), zc+8)\n\nmtr_send[0].clicked.connect(lambda: command('mtr0', mtr_setpoint[0].text()))\nmtr_send[1].clicked.connect(lambda: command('mtr1', mtr_setpoint[1].text()))\nmtr_send[2].clicked.connect(lambda: command('mtr2', mtr_setpoint[2].text()))\nmtr_send[3].clicked.connect(lambda: command('mtr3', mtr_setpoint[3].text()))\nmtr_enable[0].clicked.connect(lambda: motor_enable(0, 
1))\nmtr_enable[1].clicked.connect(lambda: motor_enable(1, 1))\nmtr_enable[2].clicked.connect(lambda: motor_enable(2, 1))\nmtr_enable[3].clicked.connect(lambda: motor_enable(3, 1))\nmtr_disable[0].clicked.connect(lambda: motor_enable(0, 0))\nmtr_disable[1].clicked.connect(lambda: motor_enable(1, 0))\nmtr_disable[2].clicked.connect(lambda: motor_enable(2, 0))\nmtr_disable[3].clicked.connect(lambda: motor_enable(3, 0))\n\n\n# Samplerate Set\nsamplerate_setpoint = QtGui.QLineEdit()\nsamplerate_setpointfb = QtGui.QLabel(\"SAMPLERATE FB\")\nsamplerate_send = QtGui.QPushButton(\"Update samplerate (Hz)\")\nsamplerate_send.clicked.connect(lambda: set(\"samplerate\", samplerate_setpoint.text()))\nlayout.addWidget(samplerate_send, zr+7, zc+10)\nlayout.addWidget(samplerate_setpoint, zr+7, zc+11)\nlayout.addWidget(samplerate_setpointfb, zr+7, zc+12)\n# Telemrate set\ntelemrate_setpoint = QtGui.QLineEdit()\ntelemrate_setpointfb = QtGui.QLabel(\"TELEMRATE FB\")\ntelemrate_send = QtGui.QPushButton(\"Update telemrate (Hz)\")\ntelemrate_send.clicked.connect(lambda: set(\"telemrate\", (\"rs422 \"+telemrate_setpoint.text())))\n\nlayout.addWidget(telemrate_send, zr+8, zc+10)\nlayout.addWidget(telemrate_setpoint, zr+8, zc+11)\nlayout.addWidget(telemrate_setpointfb, zr+8, zc+12)\n\n# Motor gains set\nMOTOR_GAINS_LABEL = QtGui.QLabel(\"Motor Gains\")\nkp_set = QtGui.QPushButton(\"Update Kp\")\nki_set = QtGui.QPushButton(\"Update Ki\")\nkd_set = QtGui.QPushButton(\"Update Kd\")\nkp_set.clicked.connect(lambda: set(\"gain\", \"0 \"+str(kp_input.text())))\nki_set.clicked.connect(lambda: set(\"gain\", \"1 \"+str(ki_input.text())))\nkd_set.clicked.connect(lambda: set(\"gain\", \"2 \"+str(kd_input.text())))\nkp_input = QtGui.QLineEdit()\nki_input = QtGui.QLineEdit()\nkd_input = QtGui.QLineEdit()\nkpfb = QtGui.QLabel(\"kpfb\")\nkifb = QtGui.QLabel(\"kifb\")\nkdfb = QtGui.QLabel(\"kdfb\")\n\nlayout.addWidget(kp_set, zr+9, zc+5)\nlayout.addWidget(ki_set, zr+10, zc+5)\nlayout.addWidget(kd_set, zr+11, zc+5)\nlayout.addWidget(kp_input, zr+9, zc+6)\nlayout.addWidget(ki_input, zr+10, zc+6)\nlayout.addWidget(kd_input, zr+11, zc+6)\nlayout.addWidget(kpfb, zr+9, zc+7)\nlayout.addWidget(kifb, zr+10, zc+7)\nlayout.addWidget(kdfb, zr+11, zc+7)\n\n# State Feedback\nstate_label = QtGui.QLabel(\"STATE = N/A\")\narm_button = QtGui.QPushButton(\"ARM\")\ndisarm_button = QtGui.QPushButton(\"DISARM\")\nhotfire_button = QtGui.QPushButton(\"HOTFIRE\")\narm_button.clicked.connect(lambda: send(\"arm\"))\ndisarm_button.clicked.connect(lambda: send(\"disarm\"))\nhotfire_button.clicked.connect(lambda: send(\"hotfire\"))\n\nlayout.addWidget(state_label, zr+12, zc+5)\nlayout.addWidget(arm_button, zr+13, zc+5)\nlayout.addWidget(disarm_button, zr+14, zc+5)\nlayout.addWidget(hotfire_button, zr+15, zc+5)\n\n# Loads\nthrust_load_label = QtGui.QLabel(\"NET THRUST\")\nthrust_load_label.setAlignment(Qt.AlignCenter)\nload_label = []\nfor n in range(0, 4):\n\tload_label.append(QtGui.QLabel(\"LOAD \"+str(n)))\n\nlayout.addWidget(thrust_load_label, zr+12, zc+6, 1, 2)\nlayout.addWidget(load_label[0], zr+13, zc+6)\nlayout.addWidget(load_label[1], zr+13, zc+7)\nlayout.addWidget(load_label[2], zr+14, zc+6)\nlayout.addWidget(load_label[3], zr+14, zc+7)\n\n# Thermoucouples\ntc_label = []\nfor n in range(0, 4):\n\ttc_label.append(QtGui.QLabel(\"TC-\"+str(n)))\n\nlayout.addWidget(tc_label[0], zr+12, zc+11)\nlayout.addWidget(tc_label[1], zr+13, zc+11)\nlayout.addWidget(tc_label[2], zr+14, zc+11)\nlayout.addWidget(tc_label[3], zr+15, zc+11)\n\n\n\n# Raw Command\ndef 
raw_command():\n\tsend(raw_command_input.text())\n\traw_command_input.setText(\"\")\nraw_command_input = QtGui.QLineEdit('command entry')\nraw_command_send = QtGui.QPushButton(\"Send Command\")\nraw_command_send.clicked.connect(raw_command)\nraw_command_input.returnPressed.connect(raw_command)\n\nlayout.addWidget(raw_command_input, zr+16, zc+5, 1, 2)\nlayout.addWidget(raw_command_send, zr+16, zc+7)\n\nlog_to_auto_label = QtGui.QLabel(\"LOG_TO_AUTO\")\nautofeedback = QtGui.QPlainTextEdit(\"Autosequence feedback\")\nrunning_autos_label = QtGui.QLabel(\"RUNNING_AUTOS\")\nlayout.addWidget(autofeedback, zr+1, zc+10, 4, 3)\nlayout.addWidget(log_to_auto_label, zr+5, zc+10)\nlayout.addWidget(running_autos_label, zr+6, zc+10, 1, 2)\n\n# Board Health\nBOARD_HEALTH_LABEL = QtGui.QLabel(\"Board Health\")\nebatt_label = QtGui.QLabel(\"BATT\")\nibus_label = QtGui.QLabel(\"I-BUS\")\nebatt_value = QtGui.QLabel(\"EBATT\")\nibus_value = QtGui.QLabel(\"IBUS\")\n\nlayout.addWidget(BOARD_HEALTH_LABEL, zr+9, zc+8)\nlayout.addWidget(ebatt_label, zr+10, zc+8)\nlayout.addWidget(ibus_label, zr+11, zc+8)\nlayout.addWidget(ebatt_value, zr+10, zc+9)\nlayout.addWidget(ibus_value, zr+11, zc+9)\n\n# Loop times\nLOOP_RATE_LABEL = QtGui.QLabel(\"Loop rates (hz)\")\nmotor_cycle_rate = QtGui.QLabel(\"MCR\")\nmain_cycle_rate = QtGui.QLabel(\"MCR\")\nadc_cycle_rate = QtGui.QLabel(\"ACR\")\ntelemetry_cycle_rate = QtGui.QLabel(\"TCR\")\nmotor_cycle_rate_label = QtGui.QLabel(\"Motor:\")\nmain_cycle_rate_label = QtGui.QLabel(\"Main:\")\nadc_cycle_rate_label = QtGui.QLabel(\"ADC\")\ntelemetry_cycle_rate_label = QtGui.QLabel(\"Telem\")\n# Label place\n\nlayout.addWidget(LOOP_RATE_LABEL, zr+12, zc+8)\nlayout.addWidget(main_cycle_rate_label, zr+13, zc+8)\nlayout.addWidget(motor_cycle_rate_label, zr+14, zc+8)\nlayout.addWidget(adc_cycle_rate_label, zr+15, zc+8)\nlayout.addWidget(telemetry_cycle_rate_label, zr+16, zc+8)\n# Readout place\n\nlayout.addWidget(main_cycle_rate, zr+13, zc+9)\nlayout.addWidget(motor_cycle_rate, zr+14, zc+9)\nlayout.addWidget(adc_cycle_rate, zr+15, zc+9)\nlayout.addWidget(telemetry_cycle_rate, zr+16, zc+9)\n\n# counts things for debugging\ncount1_label = QtGui.QLabel(\"COUNT1\")\ncount2_label = QtGui.QLabel(\"COUNT2\")\ncount3_label = QtGui.QLabel(\"COUNT3\")\n\nlayout.addWidget(count1_label, zr+12, zc+10)\nlayout.addWidget(count2_label, zr+13, zc+10)\nlayout.addWidget(count3_label, zr+14, zc+10)\n\n\ncolumns = 12\ncol_label = []\nfor n in range(0, columns+1):\n\tcol_label.append(QtGui.QLabel())\n\ncol_label[0].setText(\"Valve OFF\")\ncol_label[1].setText(\"Valve ON\")\ncol_label[2].setText(\"State\")\n\ncol_label[3].setText(\"Current\")\ncol_label[4].setText(\"Voltage\")\ncol_label[5].setText(\"Pressure\")\ncol_label[7].setText(\"Value\")\ncol_label[8].setText(\"Set Values\")\ncol_label[9].setText(\"Feedback\")\ncol_label[10].setText(\"Actual\")\ncol_label[11].setText(\"Actual\")\n\n\n\n\nlayout.addWidget(col_label[0], zr+0, 0)\nlayout.addWidget(col_label[1], zr+0, 1)\nlayout.addWidget(col_label[2], zr+0, 2)\nlayout.addWidget(col_label[3], zr+0, 3)\nlayout.addWidget(col_label[4], zr+0, 4)\nlayout.addWidget(col_label[5], zr+0, 5)\nlayout.addWidget(col_label[6], zr+0, 6)\nlayout.addWidget(col_label[7], zr+0, 7)\nlayout.addWidget(col_label[8], zr+0, 8)\n\ndef death():\n\tcommand_log.close()\n\tinfo_log.close()\n\tserial_log.close()\n\tdata_log.close()\n\tapp.quit()\n\nKILL = QtGui.QPushButton(\"KILL\")\nKILL.clicked.connect(death)\n\nlayout.addWidget(KILL, zr+0, zc+10)\n\n# Valve buttons and labels\nfor n in 
range(0, 16):\n\n\tlayout.addWidget(valve_buttons[n][0], zr+n+1, zc+0-2)\n\tlayout.addWidget(valve_buttons[n][1], zr+n+1, zc+1-2)\n\tlayout.addWidget(valve_buttons[n][2], zr+n+1, zc+2-2)\n\tlayout.addWidget(valve_buttons[n][3], zr+n+1, zc+3-2)\n\tlayout.addWidget(valve_buttons[n][4], zr+n+1, zc+4-2)\n\tlayout.addWidget(pressure_labels[n][0], zr+n+1, zc+3)\n\tlayout.addWidget(pressure_labels[n][1], zr+n+1, zc+4)\n\nif(1):\n\t# Add image\n\n\t#logo = QtGui.QLabel(w)\n\t#logo.setGeometry(1000, 250, 800, 250)\n\t#use full ABSOLUTE path to the image, not relative\n\n\t#logo.setPixmap(QtGui.QPixmap(os.getcwd() + \"/masa2.png\"))\n\tpass\n\nif(0):\n\tp = w.palette()\n\tp.setColor(w.backgroundRole(), Qt.black)\n\tw.setPalette(p)\n\n## Display the widget as a new window\nw.show()\n\ntimer = pg.QtCore.QTimer()\ntimer.timeout.connect(parse_serial)\ntimer.start(10) # 100hz\n\n## Start the Qt event loop\napp.exec_()","sub_path":"gui/archived/hotfire_v0.py","file_name":"hotfire_v0.py","file_ext":"py","file_size_in_byte":17807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"334552087","text":"from parameterized import parameterized\nimport unittest\nfrom woniuboss.lib.woniuboss_api.personnel_management.personnel_management import PersonnelManagement\nfrom woniuboss.tools.woniuboss_api.utility import Utility\n# When debugging from this file, test_config_info needs one extra \"..\\\\\" level to step out, and testdata also needs one extra level\n# Prepare the test data\ntest_config_info=Utility.get_json('..\\\\..\\\\conf\\\\woniuboss_api\\\\PM\\\\testdata.conf')\npm_page_info = Utility.get_excel_to_tuple(test_config_info[0])\npm_new_info = Utility.get_excel_to_tuple(test_config_info[1])\npm_modify_info = Utility.get_excel_to_tuple(test_config_info[2])\n# ('http://192.168.75.128:8080/WoniuBoss2.5/user/login', 'POST',\n# {'username': 'WNCD000', 'password': 'woniu123', 'verifycode': '0000'}, 200, 'employment_management-pass'),\n# The [] entries in login_info should be selected with reference to the example above\n\n\nclass PersonnelManagementTest(unittest.TestCase):\n\n\tdef setUp(self):\n\t\tprint(\"test start\")\n\n\tdef tearDown(self):\n\t\tprint(\"test end\")\n\n\t@parameterized.expand(pm_page_info)\n\t# PM_001\n\tdef test_pm_page(self, url, action, data, code, expect):\n\t\tpm_page_url = f'{url}'\n\t\tpm_page_resp = PersonnelManagement().do_pm_query(pm_page_url, data)\n\t\tpm_page_code = pm_page_resp.status_code\n\t\tif pm_page_code == code:\n\t\t\tactual = 'PM-succeed'\n\t\telse:\n\t\t\tactual = 'PM-failed'\n\n\t\t# Assert\n\t\tself.assertEqual(actual, expect)\n\n\n\n\t@parameterized.expand(pm_new_info)\n\t# PM_002\n\tdef test_pm_new_page(self, url, action, data, code, expect):\n\t\tpm_new_url = f'{url}'\n\t\tpm_new_resp = PersonnelManagement().do_pm_new(pm_new_url, data)\n\t\tpm_new_code = pm_new_resp.json()\n\t\tif pm_new_code:\n\t\t\tactual = 'PM-succeed'\n\t\telse:\n\t\t\tactual = 'PM-failed'\n\n\t\t# Assert\n\t\tself.assertEqual(actual, expect)\n\n\t@parameterized.expand(pm_modify_info)\n\t# PM_003\n\tdef test_pm_modify_page(self, url, action, data, code, expect):\n\t\tpm_modify_url = f'{url}'\n\t\tpm_modify_resp = PersonnelManagement().do_pm_modify(pm_modify_url, data)\n\t\tpm_modify_code = pm_modify_resp.json()\n\t\tif pm_modify_code:\n\t\t\tactual = 'PM-succeed'\n\t\telse:\n\t\t\tactual = 'PM-failed'\n\n\t\t# Assert\n\t\tself.assertEqual(actual, expect)\n\nif __name__ == '__main__':\n\n\tunittest.main(verbosity=2)\n","sub_path":"yanlongbin/Project 
code/woniuboss/bin/woniuboss_api/personnel_management_test/test_personnel_management.py","file_name":"test_personnel_management.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"443149135","text":"\"\"\"\n=================================\nAuthor: Flora Chen\nTime: 2021/2/3 20:42\n-_- -_- -_- -_- -_- -_- -_- -_-\n=================================\n\"\"\"\nimport pytest, json\nfrom middleware.handler import MidHandler\nfrom requests import request\nfrom jsonpath import jsonpath\n\n\nclass TestAudit:\n \"\"\"\n Test cases for the project-audit API:\n the borrower logs in and creates a project,\n then the administrator logs in and audits it\n \"\"\"\n test_data = MidHandler.excel.read(\"audit\")\n\n @pytest.mark.parametrize(\"data\", test_data)\n def test_audit(self, data, admin_login, add_loan, loan_login, db):\n # Dynamically set the class attributes required by the test data\n setattr(MidHandler, \"admin_token\", admin_login[\"authorization\"])\n setattr(MidHandler, \"loan_token\", loan_login[\"authorization\"])\n setattr(MidHandler, \"loan_id\", str(add_loan[\"id\"]))\n\n # Replace placeholders in the data using regular expressions\n data = MidHandler.replace_data(json.dumps(data))\n data = json.loads(data)\n\n url = MidHandler.conf_data[\"ENV\"][\"BASE_URL\"] + data[\"url\"]\n method = data[\"method\"]\n header = json.loads(data[\"header\"])\n\n case = data[\"data\"]\n expected = json.loads(data[\"expected\"])\n\n # Change the project status; a project whose status does not meet the requirements cannot pass the audit\n if '项目状态为2' in data['title']:\n sql = 'update loan set status=2 where id={}'.format(getattr(MidHandler, \"loan_id\"))\n db.update(sql)\n elif '项目状态为5' in data['title']:\n sql = 'update loan set status=5 where id={}'.format(getattr(MidHandler, \"loan_id\"))\n db.update(sql)\n elif '项目状态为3' in data['title']:\n sql = 'update loan set status=3 where id={}'.format(getattr(MidHandler, \"loan_id\"))\n db.update(sql)\n elif '项目状态为4' in data['title']:\n sql = 'update loan set status=4 where id={}'.format(getattr(MidHandler, \"loan_id\"))\n db.update(sql)\n\n response = request(url=url, method=method, json=json.loads(case), headers=header)\n response_data = response.json()\n\n try:\n for key, value in expected.items():\n assert jsonpath(response_data, key)[0] == value\n except AssertionError as e:\n MidHandler.log.error(e)\n MidHandler.log.info(\n \"\\ncaseid: {}, title: {}\\nurl: {}\\nmethod: {}\\nheader: {}\\ncase_data: {}\\nresponse: {}\\nresult: {}\\n\".format(\n data[\"case_id\"], data[\"title\"], url, method, header, data,\n response_data, \"Failed\"))\n raise e\n else:\n MidHandler.log.info(\n \"\\ncaseid: {}, title: {}\\nurl: {}\\nmethod: {}\\nheader: {}\\ncase_data: {}\\nresponse: {}\\nresult: {}\\n\".format(\n data[\"case_id\"], data[\"title\"], url, method, header, data,\n response_data, \"Passed\"))\n\n\nif __name__ == \"__main__\":\n pytest.main([\"test_audit.py\"])\n","sub_path":"tests/test_audit.py","file_name":"test_audit.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"564674918","text":"from char_rbm.simple import CharRBM\n\n\ndataset = \"data/black_metal_bands.txt\"\nrbm = CharRBM()\ntrain = True\nif train:\n rbm.train(dataset, preserve_case=True)\n rbm.save()\nelse:\n model_path = \"models/heavy_metal_bands_.pickle\"\n rbm.load(model_path)\n\nsamples = rbm.sample()\nprint(samples)\n","sub_path":"examples/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"650552499","text":"from pandas_datareader import 
data\nfrom datetime import datetime, timedelta\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.embed import components\nfrom bokeh.resources import CDN\n\n\ndef generate_plot(start=(datetime.today()-timedelta(days=60)), end=datetime.today(), company=\"AAPL\"):\n\n start = start\n end = end\n company = data.DataReader(name=company, data_source=\"yahoo\", start=start, end=end)\n\n p = figure(x_axis_type='datetime', width=1000, height=400, sizing_mode=\"scale_width\")\n p.title.text = \"Candlestick chart\"\n p.grid.grid_line_alpha = 0.5\n\n hossa = company.index[company.Close > company.Open]\n bessa = company.index[company.Close < company.Open]\n stable = company.index[company.Close == company.Open]\n\n for day in hossa:\n p.segment(x0=day, y0=company.loc[day].High, x1=day, y1=company.loc[day].Low, color='black')\n p.quad(left=day - timedelta(hours=6), right=day + timedelta(hours=6),\n top=company.loc[day].Close, bottom=company.loc[day].Open, fill_color='green',\n line_color='black')\n\n for day in bessa:\n p.segment(x0=day, y0=company.loc[day].High, x1=day, y1=company.loc[day].Low, color='black')\n p.quad(left=day - timedelta(hours=6), right=day + timedelta(hours=6),\n top=company.loc[day].Open, bottom=company.loc[day].Close, fill_color='red',\n line_color='black')\n\n for day in stable:\n p.segment(x0=day, y0=company.loc[day].High, x1=day, y1=company.loc[day].Low, color='black')\n p.quad(left=day - timedelta(hours=6), right=day + timedelta(hours=6),\n top=company.loc[day].Open, bottom=company.loc[day].Close, fill_color='grey',\n line_color='black')\n\n js_script, div = components(p)\n cdn_js = CDN.js_files\n cdn_css = CDN.css_files\n\n return js_script, div, cdn_js, cdn_css\n","sub_path":"App8_FinancialGraph/chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"639816362","text":"import numpy as np\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data\nimport torch\nfrom sklearn import metrics\nimport argparse\nimport timeit\nimport os\n\nfrom model import CPIModel\n\ndef train(net, dataset, optimizer, loss_function, epoch):\n net.train()\n train_loss = 0\n for index, (fingerprints, adjacency, words, interaction) in enumerate(dataset, 1):\n optimizer.zero_grad()\n output = net.forward(fingerprints, adjacency, words)\n loss = loss_function(output, interaction)\n train_loss += loss.item()\n loss.backward()\n optimizer.step()\n \n print('epoch %3d batch %4d/%4d train_loss %5.3f' % (epoch, index, len(dataset), train_loss / index), end='')\n\ndef test(net, dataset, loss_function):\n net.eval()\n test_loss = 0\n y_score, y_true = [], []\n for index, (fingerprints, adjacency, words, interaction) in enumerate(dataset, 1):\n with torch.no_grad():\n output = net.forward(fingerprints, adjacency, words)\n\n loss = loss_function(output, interaction)\n test_loss += loss.item()\n score = F.softmax(output, 1).cpu()\n y_score.append(score)\n y_true.append(interaction.cpu())\n\n y_score = np.concatenate(y_score)\n y_pred = [np.argmax(x) for x in y_score]\n y_true = np.concatenate(y_true)\n\n if np.sum(y_pred) != 0:\n acc = metrics.accuracy_score(y_true, y_pred)\n auc = metrics.roc_auc_score(y_true, y_score[:,1])\n prec = metrics.precision_score(y_true, y_pred)\n recall = metrics.recall_score(y_true, y_pred)\n\n print(' test_loss %5.3f test_auc %5.3F test_prec %5.3f test_recall %5.3f' % (test_loss / index, auc, prec, recall), end='')\n 
else:\n print(' test_loss %5.3f' % (test_loss / index), end='')\n\n return test_loss / index\n\ndef main(args):\n np.random.seed(args.random_seed)\n torch.manual_seed(args.random_seed)\n \n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print('Using %s device.' % (device))\n\n # Load preprocessed data.\n data = torch.load('data/%s.pth' % (args.dataset))\n dataset_train = data['dataset_train']\n dataset_test = data['dataset_test']\n n_fingerprint = data['n_fingerprint']\n n_word = data['n_word']\n\n for i in range(len(dataset_train)):\n dataset_train[i] = [\n torch.LongTensor(dataset_train[i][0]).to(device),\n torch.FloatTensor(dataset_train[i][1]).to(device),\n torch.LongTensor(dataset_train[i][2]).to(device),\n torch.LongTensor(dataset_train[i][3]).to(device)]\n\n for i in range(len(dataset_test)):\n dataset_test[i] = [\n torch.LongTensor(dataset_test[i][0]).to(device),\n torch.FloatTensor(dataset_test[i][1]).to(device),\n torch.LongTensor(dataset_test[i][2]).to(device),\n torch.LongTensor(dataset_test[i][3]).to(device)]\n\n print('train %d batches' % (len(dataset_train)))\n print('test %d batches' % (len(dataset_test)))\n \n # Set a model\n net = CPIModel(n_fingerprint, n_word, args)\n print('# of model parameters:', sum([np.prod(p.size()) for p in net.parameters()]))\n\n if os.path.exists('model/%s.pth' % (args.dataset)):\n net.load_state_dict(torch.load('model/%s.pth' % (args.dataset)))\n\n net = net.to(device)\n\n optimizer = optim.Adam(net.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n loss_function = F.cross_entropy\n\n test_losses = []\n\n # Start training\n for epoch in range(args.epochs):\n epoch_start = timeit.default_timer()\n\n if epoch % args.decay_interval == 0:\n optimizer.param_groups[0]['lr'] *= args.lr_decay\n\n train(net, dataset_train, optimizer, loss_function, epoch)\n test_loss = test(net, dataset_test, loss_function)\n\n print(' %5.2fsec' % (timeit.default_timer() - epoch_start))\n\n test_losses.append(test_loss)\n\n os.makedirs('model', exist_ok=True)\n torch.save(net.state_dict(), 'model/%s.pth' % (args.dataset))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--random_seed', default=123, type=int)\n parser.add_argument('--dataset', default='human', choices=['human', 'celegans'])\n parser.add_argument('--radius', default=2, choices=[1, 2, 3])\n parser.add_argument('--ngram', default=3, choices=[2, 3])\n parser.add_argument('--dim', default=10, type=int)\n parser.add_argument('--layer_gnn', default=3, type=int)\n parser.add_argument('--side', default=5, type=int)\n parser.add_argument('--window', default=2*5+1, type=int) # 2*side+1\n parser.add_argument('--layer_cnn', default=3, type=int)\n parser.add_argument('--layer_output', default=3, type=int)\n parser.add_argument('--lr', default=1e-3, type=float)\n parser.add_argument('--lr_decay', default=0.5, type=float)\n parser.add_argument('--decay_interval', default=10, type=int)\n parser.add_argument('--weight_decay', default=1e-6, type=float)\n parser.add_argument('--epochs', default=100, type=int)\n args = parser.parse_args()\n print(vars(args))\n\n main(args)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"204169068","text":"import uuid\nfrom datetime import datetime\n\nimport inflect\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\n\nfrom .base import 
BaseFormView, BaseTemplateView\nfrom application.services.address_helper import *\nfrom ..forms.childcare_address import *\nfrom ..utils import *\n\n\ndef get_address_number(app_id, add):\n \"\"\"\n A function that uses the API to find the number of complete and incomplete childcare addresses within the postcode\n lookup page.\n :param app_id: The ID of the current applicant\n :param add: A flag that is appended to the URL should a user use the 'add another address' button on the address\n details page.\n *** This can be abused by using the 'add another' button, completing a second address, then using the\n back button to return to the postcode lookup page\n :return: Return tuple of the address number and the ordinal string for adaptable html content\n \"\"\"\n formatter = inflect.engine()\n api_response = NannyGatewayActions().list('childcare-address', params={'application_id': app_id})\n\n # API records do not exist while on the postcode lookup page for the first time so the ordinal needs to be defined\n # outside of the address number\n if api_response.status_code == 404:\n return '1', 'First'\n\n complete_addresses = [address for address in api_response.record if address['street_line1'] is not None]\n\n # catch first time journey users or those using the back button from the address lookup page on the first journey\n if len(complete_addresses) == 0:\n addr_num = 1\n else:\n addr_num = len(complete_addresses) + 1 if add else len(complete_addresses)\n\n return str(addr_num), formatter.number_to_words(formatter.ordinal(addr_num)).title()\n\n\nclass ChildcareAddressPostcodeView(BaseFormView):\n \"\"\"\n Class containing the view(s) for handling the GET requests to the childcare address postcode page.\n \"\"\"\n template_name = 'childcare-address-postcode.html'\n success_url = 'Childcare-Address-Lookup'\n form_class = ChildcareAddressForm\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Override base BaseFormView method to add 'fields' key to context for rendering in template.\n \"\"\"\n app_id = self.request.GET['id']\n add = self.request.GET.get('add') # Returns none if 'add another' button is not used - User using back button\n kwargs['add'] = add\n childcare_address_id = self.request.GET[\n 'childcare_address_id'] if 'childcare_address_id' in self.request.GET else None\n self.initial = {\n 'id': app_id\n }\n\n api_response = NannyGatewayActions().list('childcare-address',\n params={'application_id': app_id})\n\n if api_response.status_code == 200:\n api_response.record = [address for address in api_response.record if address['street_line1'] is not None]\n\n if 'childcare_address_id' in self.request.GET:\n self.initial['childcare_address_id'] = childcare_address_id\n\n if 'form' not in kwargs:\n kwargs['form'] = self.get_form()\n\n kwargs['fields'] = [kwargs['form'].render_field(name, field) for name, field in kwargs['form'].fields.items()]\n kwargs['id'] = app_id\n kwargs['addr_num'], kwargs['ordinal'] = get_address_number(app_id, add)\n return super(ChildcareAddressPostcodeView, self).get_context_data(**kwargs)\n\n def form_valid(self, form):\n \"\"\"\n Re-route the user if the postcode given is accurate.\n \"\"\"\n app_id = self.request.GET['id']\n childcare_address_id = self.request.GET[\n 'childcare_address_id'] if 'childcare_address_id' in self.request.GET else None\n postcode = form.cleaned_data['postcode']\n add_another = self.request.GET.get('add')\n\n if childcare_address_id:\n # update postcode of address\n api_response = NannyGatewayActions().read('childcare-address',\n 
params={'childcare_address_id': childcare_address_id})\n api_response.record['postcode'] = postcode\n NannyGatewayActions().put('childcare-address', params=api_response.record) # Update entire record.\n\n else:\n api_response = NannyGatewayActions().create(\n 'childcare-address',\n params={\n 'date_created': datetime.today(),\n 'application_id': app_id,\n 'childcare_address_id': uuid.uuid4(),\n 'postcode': postcode\n }\n )\n if api_response.status_code == 201:\n childcare_address_id = api_response.record['childcare_address_id']\n\n return HttpResponseRedirect(build_url('Childcare-Address-Lookup', get={\n 'id': app_id,\n 'childcare_address_id': childcare_address_id,\n 'add': add_another\n }))\n\n\nclass ChildcareAddressLookupView(BaseFormView):\n \"\"\"\n Class containing the view(s) for handling the GET requests to the childcare address lookup page.\n \"\"\"\n template_name = 'childcare-address-lookup.html'\n success_url = 'Childcare-Address-Details'\n form_class = ChildcareAddressLookupForm\n\n def form_valid(self, form):\n \"\"\"\n Re-route the user if the address selected is valid.\n \"\"\"\n app_id = self.request.GET['id']\n childcare_address_id = self.request.GET[\n 'childcare_address_id'] if 'childcare_address_id' in self.request.GET else None\n selected_address_index = form.cleaned_data['address']\n\n if childcare_address_id:\n # update postcode of address\n api_response = NannyGatewayActions().read('childcare-address',\n params={'childcare_address_id': childcare_address_id})\n record = api_response.record\n selected_address = AddressHelper.get_posted_address(selected_address_index, record['postcode'])\n record['street_line1'] = selected_address['line1']\n record['street_line2'] = selected_address['line2']\n record['town'] = selected_address['townOrCity']\n record['postcode'] = selected_address['postcode']\n record['home_address'] = False\n NannyGatewayActions().put('childcare-address', params=record)\n\n return HttpResponseRedirect(build_url('Childcare-Address-Details', get={\n 'id': app_id\n }))\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Override base BaseFormView method to add 'fields' key to context for rendering in template.\n \"\"\"\n app_id = self.request.GET['id']\n childcare_address_id = self.request.GET[\n 'childcare_address_id'] if 'childcare_address_id' in self.request.GET else None\n\n self.initial = {\n 'id': app_id\n }\n kwargs['id'] = app_id\n kwargs['childcare_address_id'] = childcare_address_id\n\n api_response = NannyGatewayActions().list('childcare-address',\n params={'application_id': app_id})\n\n add = self.request.GET.get('add') # Returns none if 'add another' button is not used - User using back button\n kwargs['add'] = add\n\n if api_response.status_code == 200:\n api_response.record = [address for address in api_response.record if address['street_line1'] is not None]\n\n if childcare_address_id:\n api_response = NannyGatewayActions().read('childcare-address',\n params={'childcare_address_id': childcare_address_id})\n postcode = api_response.record['postcode']\n kwargs['postcode'] = postcode\n addresses = AddressHelper.create_address_lookup_list(postcode)\n\n self.initial['choices'] = addresses\n\n kwargs['addr_num'], kwargs['ordinal'] = get_address_number(app_id, add)\n if 'form' not in kwargs:\n kwargs['form'] = self.get_form()\n\n kwargs['fields'] = [kwargs['form'].render_field(name, field) for name, field in kwargs['form'].fields.items()]\n\n return super(ChildcareAddressLookupView, self).get_context_data(**kwargs)\n\n def post(self, request, 
*args, **kwargs):\n \"\"\"\n Handles POST requests for lookup form\n \"\"\"\n self.get_context_data()\n form = self.get_form()\n\n # Clear incomplete addresses from the API record\n app_id = self.request.GET['id']\n api_response = NannyGatewayActions().list('childcare-address', params={'application_id': app_id})\n if api_response.status_code == 200:\n api_response.record = [address for address in api_response.record if address['street_line1'] is not None]\n\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n\nclass ChildcareAddressManualView(BaseFormView):\n \"\"\"\n Class containing the view(s) for handling the GET requests to the childcare address manual entry page.\n \"\"\"\n template_name = 'childcare-address-manual.html'\n success_url = 'Childcare-Address-Details'\n form_class = ChildcareAddressManualForm\n\n def form_valid(self, form):\n \"\"\"\n Re-route the user if the manual address given is valid.\n \"\"\"\n app_id = self.request.GET['id']\n childcare_address_id = self.request.GET[\n 'childcare_address_id'] if 'childcare_address_id' in self.request.GET else None\n street_line1 = form.cleaned_data['street_line1']\n street_line2 = form.cleaned_data['street_line2']\n town = form.cleaned_data['town']\n county = form.cleaned_data['county']\n postcode = form.cleaned_data['postcode']\n\n if childcare_address_id:\n # update postcode of address\n api_response = NannyGatewayActions().read('childcare-address',\n params={'childcare_address_id': childcare_address_id})\n record = api_response.record\n record['street_line1'] = street_line1\n record['street_line2'] = street_line2\n record['town'] = town\n record['county'] = county\n record['postcode'] = postcode\n record['home_address'] = False\n NannyGatewayActions().patch('childcare-address', params=record)\n\n else:\n NannyGatewayActions().create(\n 'childcare-address',\n params={\n 'date_created': datetime.today(),\n 'application_id': app_id,\n 'street_line1': street_line1,\n 'street_line2': street_line2,\n 'town': town,\n 'county': county,\n 'postcode': postcode,\n 'home_address': False\n }\n )\n\n # Redefine API response so that incorrect address records can be removed\n api_response = NannyGatewayActions().list('childcare-address', params={'application_id': app_id})\n incomplete_addresses = [address for address in api_response.record if address['street_line1'] is None]\n\n # Delete record that have not being completed - only invalid\n for address in incomplete_addresses:\n NannyGatewayActions().delete('childcare-address', params=address)\n\n return HttpResponseRedirect(build_url('Childcare-Address-Details', get={\n 'id': app_id,\n 'add': 0\n }))\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Override base BaseFormView method to add 'fields' key to context for rendering in template.\n \"\"\"\n app_id = self.request.GET['id']\n\n childcare_address_id = self.request.GET[\n 'childcare_address_id'] if 'childcare_address_id' in self.request.GET else None\n\n add = self.request.GET.get('add') # Returns none if 'add another' button is not used - User using back button\n\n self.initial = {\n 'id': app_id\n }\n\n kwargs['id'] = app_id\n kwargs['add'] = add\n api_response = NannyGatewayActions().list('childcare-address',\n params={'application_id': app_id})\n\n if api_response.status_code == 200:\n api_response.record = [address for address in api_response.record if address['street_line1'] is not None]\n\n if childcare_address_id:\n self.initial['childcare_address_id'] = childcare_address_id\n 
kwargs['childcare_address_id'] = childcare_address_id\n\n if 'form' not in kwargs:\n kwargs['form'] = self.get_form()\n\n kwargs['fields'] = [kwargs['form'].render_field(name, field) for name, field in kwargs['form'].fields.items()]\n kwargs['addr_num'], kwargs['ordinal'] = get_address_number(app_id, add)\n\n return super(ChildcareAddressManualView, self).get_context_data(**kwargs)\n\n def post(self, request, *args, **kwargs):\n \"\"\"\n Handles POST requests for lookup form\n \"\"\"\n self.get_context_data()\n form = self.get_form()\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n# TODO: Check field names how will flagging work here???\nclass ChildcareAddressDetailsView(BaseTemplateView):\n \"\"\"\n Class containing the view(s) for handling the GET requests to the childcare address details page.\n \"\"\"\n template_name = 'childcare-address-details.html'\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"\n Method to redirect to Where you work page when the applicant removes all addresses\n :return: HTTP response redirect\n \"\"\"\n\n app_id = self.request.GET['id']\n\n api_response = NannyGatewayActions().list('childcare-address', params={'application_id': app_id})\n\n # If the number of childcare addresses equal to 1 and the applicant clicks the Remove this address link\n if 'childcare-address-id' in self.request.GET:\n\n if api_response.status_code == 200 and len(api_response.record) <= 1:\n\n # Check if childcare address exists (to handle page reloads)\n childcare_address_id = self.request.GET['childcare-address-id']\n last_childcare_address = NannyGatewayActions().list('childcare-address',\n params={\n 'childcare_address_id': childcare_address_id})\n\n if last_childcare_address.status_code == 200:\n\n if len(last_childcare_address.record) > 0:\n # Delete the childcare address\n childcare_address_id = self.request.GET['childcare-address-id']\n NannyGatewayActions().delete('childcare-address',\n params={'childcare_address_id': childcare_address_id})\n\n # Set Where you work default response to No\n application_response = NannyGatewayActions().read('application',\n params={'application_id': app_id})\n\n record = application_response.record\n record['address_to_be_provided'] = False\n NannyGatewayActions().put('application', params=record)\n\n # set childcare location to false\n ha_response = NannyGatewayActions().read('applicant-home-address',\n params={'application_id': app_id})\n if ha_response.status_code == 200:\n ha_record = ha_response.record\n ha_record['childcare_address'] = False\n NannyGatewayActions().put('applicant-home-address', ha_record)\n\n\n # Redirect to Where you work page\n return HttpResponseRedirect(build_url('Childcare-Address-Where-You-Work', get={\n 'id': app_id,\n }))\n\n return super(ChildcareAddressDetailsView, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Override base BaseTemplateView method to add 'fields' key to context for rendering in template.\n \"\"\"\n app_id = self.request.GET['id']\n kwargs['id'] = app_id\n\n # Redefine API response so that incorrect address records can be removed\n api_response = NannyGatewayActions().list('childcare-address', params={'application_id': app_id})\n if api_response.status_code == 200:\n incomplete_addresses = [address for address in api_response.record if address['street_line1'] is None]\n\n # Delete record that have not being completed\n for address in incomplete_addresses:\n 
NannyGatewayActions().delete('childcare-address', params=address)\n\n # If clicking on Remove this address link\n if 'childcare-address-id' in self.request.GET:\n childcare_address_id = self.request.GET['childcare-address-id']\n childcare_address_api_response = NannyGatewayActions().read('childcare-address', params = {'childcare_address_id': childcare_address_id})\n home_address_response = NannyGatewayActions().read('applicant-home-address', params = {'application_id': app_id})\n NannyGatewayActions().delete('childcare-address', params={'childcare_address_id': childcare_address_id})\n if childcare_address_api_response.status_code == 200 and home_address_response.status_code == 200:\n ca_record = childcare_address_api_response.record\n ha_record = home_address_response.record\n if ca_record['home_address'] == True:\n ha_record['childcare_address'] = False\n NannyGatewayActions().put('applicant-home-address', params=ha_record)\n\n # Generate list of childcare addresses and display in through page context\n api_response = NannyGatewayActions().list('childcare-address', params={'application_id': app_id})\n\n addresses = {}\n count = 1\n if api_response.status_code == 200:\n for address in api_response.record:\n addresses[str(count)] = {\n \"address\": AddressHelper.format_address(address, \", \"),\n \"childcare_address_id\": address['childcare_address_id']\n }\n count += 1\n kwargs['childcare_addresses'] = sorted(addresses.items())\n\n return super(ChildcareAddressDetailsView, self).get_context_data(**kwargs)\n\n def post(self, request):\n \"\"\"\n Handle post requests to the details page.\n \"\"\"\n app_id = request.GET['id']\n if 'add_another' in request.POST:\n api_response = NannyGatewayActions().list('childcare-address', params={'application_id': app_id})\n if api_response.status_code == 200 and len(api_response.record) > 4:\n context = self.get_context_data()\n context['non_field_errors'] = [\"You can only enter up to 5 childcare addresses\"]\n context['error_summary_title'] = \"There was a problem\"\n return render(request, self.template_name, context)\n\n # Return the URL with a flag that is used to increment the number of addresses\n return HttpResponseRedirect(build_url('Childcare-Address-Postcode-Entry', get={\n 'id': app_id,\n 'add': 1\n }))\n else:\n return HttpResponseRedirect(build_url('Childcare-Address-Summary', get={\n 'id': app_id,\n }))\n","sub_path":"application/presentation/childcare_address/views/childcare_address_entry.py","file_name":"childcare_address_entry.py","file_ext":"py","file_size_in_byte":20113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"118743047","text":"def conta_letras(frase,contar=\"vogais\"):\n lst_vogais = [65,69,73,79,85]\n i=0\n frase = frase.upper().replace(\" \",\"\")\n contador = 0\n while i < len(frase):\n if(contar == \"consoantes\"):\n flag = True\n for ascii_vogal in lst_vogais:\n ascii_palavra_frase = ord(frase[i])\n if(ascii_palavra_frase == ascii_vogal):\n flag = False\n \n if(flag):\n contador += 1\n else:\n for ascii_vogal in lst_vogais:\n ascii_palavra_frase = ord(frase[i])\n if (ascii_palavra_frase == ascii_vogal ):\n contador += 1\n break\n \n i += 1 \n# print(contador)\n return contador\n\"\"\"\"\ndef main():\n# conta_letras('programamos em python')\n # deve devolver 6\n\n# conta_letras('programamos em python', 'vogais')\n # deve devolver 6\n\n# conta_letras('programamos em python', 'consoantes')\n # deve devolver 13\n conta_letras(\"a b c d e f g h i j k l m n o p q r s t u 
v w x y z\",\"consoantes\")\n print(conta_letras(\"oi\"))\nmain()\n\"\"\"","sub_path":"semana2/conta_char.py","file_name":"conta_char.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"336784544","text":"\"\"\"\r\nLinearly interpolates absorbance between each wavenumber,\r\nand outputs a constant data point spacing of 1.929 cm^-1 for a wavenumber range of 450 - 4000 cm^-1.\r\n\r\nAuthor: Rasmus Vest Nielsen\r\n\"\"\"\r\n\r\nfrom scipy.interpolate import interp1d\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\nimport glob\r\n\r\n# Load spectra data.\r\nspectra_file_names = glob.glob(\"spectra_csv/*.csv\")\r\n\r\nspectra_counter = 0\r\ndf_list = [] # List containing all spectra.\r\nfor spectra_file_name in spectra_file_names:\r\n\r\n df_spectra = pd.read_csv(spectra_file_name,\r\n usecols = [1, 2])\r\n \r\n # Due to inaccurate estimation of x-axis,\r\n # the start and end wavenumber is forced to\r\n # match the OMNIC library specifications.\r\n # Typically, we have an error of 1 to 2\r\n # cm^-1 dependent on how precise the \r\n # operator of graphffer.py set the axis start\r\n # and end point.\r\n df_spectra[\"wavenumbers\"].iloc[0] = 450\r\n df_spectra[\"wavenumbers\"].iloc[-1] = 4000\r\n\r\n #print(df_spectra)\r\n\r\n wavenumbers = df_spectra[\"wavenumbers\"]\r\n absorbance = df_spectra[\"absorbance\"]\r\n\r\n # Perform linear interpolation between each\r\n # wavenumber.\r\n f = interp1d(wavenumbers, absorbance)\r\n\r\n # Generate new interval.\r\n constant_rate_wavenumbers = np.arange(450, \r\n 4001, 1.929)\r\n #constant_rate_wavenumbers[-1] = 4000\r\n #print(constant_rate_wavenumbers)\r\n\r\n constant_rate_absorbance = f(\r\n constant_rate_wavenumbers)\r\n #print(constant_rate_absorbance)\r\n\r\n out_dir = \"constant_rate_spectra\"\r\n if not os.path.isdir(out_dir):\r\n os.makedirs(out_dir)\r\n\r\n # Get index of spectra.\r\n spectra_idx = spectra_file_name.split(\r\n \"\\\\\")[1].split(\".\")[0]\r\n\r\n d = {\"wavenumber\": constant_rate_wavenumbers,\r\n spectra_idx + \"_absorbance\": \r\n constant_rate_absorbance}\r\n\r\n df_tmp = pd.DataFrame(data = d)\r\n df_tmp = df_tmp.set_index(\"wavenumber\")\r\n df_list.append(df_tmp)\r\n\r\n# Concatenate each of spectra.\r\ndf = pd.concat(df_list, axis = 1)\r\ndf.to_csv(out_dir + \"/spectra.csv\")\r\n","sub_path":"interpolate_spectra.py","file_name":"interpolate_spectra.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"545959289","text":"#!/usr/bin/env python3\n\nfrom ubloxMessage import UbloxMessage, CLIDPAIR\nimport serial\nimport serial.threaded\nimport time\nimport traceback\nimport logging\n\nimport datetime\n\nclass UbloxReader(serial.threaded.Protocol):\n def __init__(self):\n self.buffer = b''\n self.start = 0\n self.pollResult = None\n self.pollTarget = None\n self.printMessageFlag = False\n self.printMessageFilter = None\n self.saveStreamFlag = False\n self.saveStreamFilter = None\n self.saveFormat = 'ubx'\n self.saveFile = None\n self.saveFileName = 'ublox'\n # self.userHandler = None\n self.lastFlushTime = None\n self.logger = logging\n\n def userHandler(self, msgTime, msgFormat, msgData, rawMessage):\n pass\n\n # Required for serial.threaded.Protocol\n def connection_made(self, transport):\n super(UbloxReader, self).connection_made(transport)\n self.logger.debug('Serial port opened\\n')\n\n # Required for 
serial.threaded.Protocol\n def data_received(self, data):\n self.logger.debug('Received {} bytes'.format(len(data)))\n self.buffer = self.buffer + data\n self.logger.debug('Buffer size: {} bytes'.format(len(self.buffer)))\n self.parse()\n\n # Required for serial.threaded.Protocol\n def connection_lost(self, exc):\n if exc:\n self.logger.error('*** EXCEPTION *** {}'.format(exc))\n self.logger.debug('Serial port closed.')\n if self.saveFile is not None:\n self.saveFile.close()\n self.saveFile = None\n self.logger.debug('Save file closed.')\n\n # Parse buffer looking for messages\n def parse(self):\n self.logger.debug('in UbloxReader.parse()')\n if len(self.buffer) < 8:\n self.logger.debug('UbloxReader.parse(): not enough data in buffer')\n return\n index = self.buffer.find(b'\\xb5\\x62')\n if index >= 0:\n self.start += index\n msgTime = time.time()\n self.logger.debug('UbloxReader.parse(): sending for validation')\n result = UbloxMessage.validate(self.buffer[self.start:])\n if result['valid']:\n rawMessage = self.buffer[self.start:]\n self.logger.debug('UbloxReader.parse(): sending to UbloxMessage.parse()')\n msgFormat, msgData, remainder = UbloxMessage.parse(rawMessage)\n rawMessage = rawMessage[:len(rawMessage) - len(remainder)] if remainder is not None else rawMessage[:len(rawMessage)]\n self.buffer = remainder if remainder is not None else b''\n self.start = 0\n if msgFormat is not None:\n self.logger.debug('UbloxReader.parse(): sending to UbloxReader.handleMessage()')\n self.handleMessage(msgTime, msgFormat, msgData, rawMessage)\n return\n else:\n # Invalid message, move past sync bytes\n if result['lengthMatch'] or ((result['length'] is not None) and (result['length'] > 4096)):\n if result['lengthMatch']:\n self.logger.debug('UbloxReader.parse(): invalid message in buffer, moving past sync')\n else:\n self.logger.debug('UbloxReader.parse(): invalid length ({}) - enforcing max length of 4096 bytes'.format(result['length']))\n self.buffer = self.buffer[self.start+2:]\n return\n else:\n self.logger.debug('Ublox.parse(): Header indicates a message of length {}, buffer only has {} bytes'.format(result['length'], len(self.buffer)))\n return\n # Discard all but the last byte\n else:\n self.logger.debug('UbloxReader.parse(): could not find sync in buffer, discarding all but the last byte')\n self.buffer = self.buffer[-1:]\n self.start = 0\n return\n\n # Handle a received message\n def handleMessage(self, msgTime, msgFormat, msgData, rawMessage):\n # This is a polled message\n if (self.pollTarget is not None) and (msgFormat in self.pollTarget):\n self.pollResult = (msgFormat, msgData)\n self.pollTarget = None\n\n # Save message\n if self.saveStreamFlag and (self.saveStreamFilter is None or msgFormat in self.saveStreamFilter):\n self.saveMessage(msgTime, msgFormat, msgData, rawMessage)\n\n # Print message to screen\n if self.printMessageFlag and (self.printMessageFilter is None or msgFormat in self.printMessageFilter):\n self.printMessage(msgTime, msgFormat, msgData)\n\n # Call user handler\n if self.userHandler is not None:\n self.userHandler(msgTime, msgFormat, msgData, rawMessage)\n\n def printMessage(self, msgTime, msgFormat, msgData):\n UbloxMessage.printMessage(msgFormat, msgData, msgTime, fmt='short')\n\n def saveMessage(self, msgTime, msgFormat, msgData, rawMessage):\n if self.saveInterval is not None:\n if msgFormat == 'NAV-PVT':\n year = msgData[0]['Year']\n month = msgData[0]['Month']\n day = msgData[0]['Day']\n hour = msgData[0]['Hour']\n minute = msgData[0]['Min']\n second = 
msgData[0]['Sec']\n newFile = False\n if self.saveInterval == 'hourly' and hour != self.curInterval:\n self.curInterval = hour\n newFile = True\n elif self.saveInterval == 'daily' and day != self.curInterval:\n self.curInterval = day\n newFile = True\n\n if newFile:\n if self.saveFile is not None:\n self.saveFile.close()\n dt = datetime.datetime(year, month, day, hour, minute, second)\n filename = '{}_{}.{}'.format(self.saveFileName, dt.strftime('%Y%m%dT%H%M%SZ'), self.saveFormat)\n self.logger.info('*** Opening save file {} for write'.format(filename))\n self.saveFile = open(filename, 'wb')\n self.lastFlushTime = time.time()\n else:\n if self.saveFile is None:\n filename = '{}_{}.{}'.format(self.saveFileName, datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ'), self.saveFormat)\n self.logger.info('*** Opening save file {} for write'.format(filename))\n self.saveFile = open(filename, 'wb')\n self.lastFlushTime = time.time()\n if self.saveFormat == 'ubx' and self.saveFile is not None:\n self.logger.debug('Saving {} message of raw length {}'.format(msgFormat, len(rawMessage)))\n self.saveFile.write(rawMessage)\n if (time.time() - self.lastFlushTime) > 60:\n self.saveFile.flush()\n self.lastFlushTime = time.time()\n\n def poll(self, ser, msgFormat, length=0, data=[], timeout=0.5, maxRetries=20):\n retries = 0\n while retries < maxRetries: \n self.pollResult = None\n self.pollTarget = [msgFormat]\n\n self.logger.info('Polling for {} (attempt {})'.format(msgFormat, retries+1))\n self.sendMessage(ser, msgFormat, length, data)\n\n startTime = time.time()\n while self.pollResult is None:\n time.sleep(0.01)\n if (time.time() - startTime) > timeout:\n self.logger.warn('Timeout waiting for response!')\n break\n\n if self.pollResult is not None:\n return self.pollResult\n\n retries += 1\n\n raise Exception('Failed to get response!')\n\n def sendConfig(self, ser, msgFormat, length, data, timeout=0.5, maxRetries=20):\n retries = 0\n while retries < maxRetries:\n self.pollResult = None\n self.pollTarget = ['ACK-ACK', 'ACK-NACK']\n\n self.logger.info('Sending config message {} (attempt {})'.format(msgFormat, retries+1))\n self.sendMessage(ser, msgFormat, length, data)\n\n startTime = time.time()\n while self.pollResult is None:\n time.sleep(0.01)\n if (time.time() - startTime) > timeout:\n self.logger.warn('Timeout waiting for ACK')\n break\n\n if self.pollResult is not None:\n if self.checkAck(self.pollResult[0], self.pollResult[1], msgFormat):\n self.logger.info('Config message ACKed by ublox')\n return\n\n retries += 1\n\n raise Exception('Failed to set configuration!')\n\n def sendMessage(self, ser, msgFormat, length, data):\n message = UbloxMessage.buildMessage(msgFormat, length, data)\n ser.write(message)\n\n def checkAck(self, ackMessageType, ackData, cfgMessageType):\n if ackMessageType == 'ACK-NACK':\n raise Exception('ublox receiver responded with ACK-NACK!')\n\n if ackMessageType != 'ACK-ACK':\n raise ValueError('This is not an ACK-ACK or ACK-NACK message! 
({})\\n{}'.format(ackMessageType, ackData))\n\n clsId, msgId = CLIDPAIR[cfgMessageType]\n if ackData[0]['ClsID'] != clsId or ackData[0]['MsgID'] != msgId:\n raise ValueError('ublox receiver ACKed a different message ({}, {})!'.format(ackData[0]['ClsID'], ackData[0]['MsgID']))\n\n return True\n\n def setSaveInterval(self, interval):\n if interval not in ['daily', 'hourly', None]:\n raise Exception('Invalid save interval!')\n self.saveInterval = interval\n self.curInterval = None\n\n\n\nif __name__=='__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--device', '-d', default='/dev/ttyHS1', help='Specify the serial port device to communicate with. e.g. /dev/ttyO5')\n parser.add_argument('--loop', '-l', action='store_true', help='Keep sending requests in a loop')\n parser.add_argument('--verbose', '-v', action='store_true')\n parser.add_argument('--debug', action='store_true')\n args = parser.parse_args()\n\n if args.debug:\n logging.basicConfig(level=logging.DEBUG)\n elif args.verbose:\n logging.basicConfig(level=logging.INFO)\n else:\n logging.basicConfig(level=logging.ERROR)\n\n ser = serial.Serial(args.device, 115200, timeout=1)\n\n with serial.threaded.ReaderThread(ser, UbloxReader) as protocol:\n while True:\n try:\n msgFormat, msgData = protocol.poll(ser, 'MON-VER')\n UbloxMessage.printMessage(msgFormat, msgData, header=datetime.datetime.now().strftime('[%Y-%m-%d %H:%M:%S.%f]\\n'))\n time.sleep(0.1)\n except KeyboardInterrupt:\n break\n","sub_path":"ublox2.py","file_name":"ublox2.py","file_ext":"py","file_size_in_byte":10838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"169109121","text":" # coding=utf-8\nfrom __future__ import print_function\n\nfrom nose.tools import *\nfrom .utils import *\nfrom unittest import TestCase\ntry:\n from unittest import mock\nexcept ImportError:\n import mock\n\nfrom types import MethodType\n\nimport json\nimport subprocess\nimport scuba.compat\nimport scuba.dockerutil as uut\nimport scuba.dockerutil\n\ncontainer_inspect_string = '''[\n {\n \"Id\": \"4c3d00d1870c72d99b4f6c0828ab1456e37712d0fafc1ac6d2507bc6e39c3987\",\n \"Created\": \"2018-03-27T11:41:26.744529362Z\",\n \"Path\": \"bash\",\n \"Args\": [],\n \"State\": {\n \"Status\": \"exited\",\n \"Running\": false,\n \"Paused\": false,\n \"Restarting\": false,\n \"OOMKilled\": false,\n \"Dead\": false,\n \"Pid\": 0,\n \"ExitCode\": 0,\n \"Error\": \"\",\n \"StartedAt\": \"2018-03-27T11:41:27.196032955Z\",\n \"FinishedAt\": \"2018-03-27T11:41:29.22813183Z\"\n },\n \"Image\": \"sha256:c648cd6a73969d01003f84dcb558aa19f153fdbb63f6e7bc096cf204c1d46280\",\n \"ResolvConfPath\": \"/var/lib/docker/containers/4c3d00d1870c72d99b4f6c0828ab1456e37712d0fafc1ac6d2507bc6e39c3987/resolv.conf\",\n \"HostnamePath\": \"/var/lib/docker/containers/4c3d00d1870c72d99b4f6c0828ab1456e37712d0fafc1ac6d2507bc6e39c3987/hostname\",\n \"HostsPath\": \"/var/lib/docker/containers/4c3d00d1870c72d99b4f6c0828ab1456e37712d0fafc1ac6d2507bc6e39c3987/hosts\",\n \"LogPath\": \"/var/lib/docker/containers/4c3d00d1870c72d99b4f6c0828ab1456e37712d0fafc1ac6d2507bc6e39c3987/4c3d00d1870c72d99b4f6c0828ab1456e37712d0fafc1ac6d2507bc6e39c3987-json.log\",\n \"Name\": \"/keen_meninsky\",\n \"RestartCount\": 0,\n \"Driver\": \"overlay2\",\n \"Platform\": \"linux\",\n \"MountLabel\": \"\",\n \"ProcessLabel\": \"\",\n \"AppArmorProfile\": \"docker-default\",\n \"ExecIDs\": null,\n \"HostConfig\": {\n \"Binds\": null,\n \"ContainerIDFile\": \"\",\n \"LogConfig\": {\n 
\"Type\": \"json-file\",\n \"Config\": {}\n },\n \"NetworkMode\": \"default\",\n \"PortBindings\": {},\n \"RestartPolicy\": {\n \"Name\": \"no\",\n \"MaximumRetryCount\": 0\n },\n \"AutoRemove\": false,\n \"VolumeDriver\": \"\",\n \"VolumesFrom\": null,\n \"CapAdd\": null,\n \"CapDrop\": null,\n \"Dns\": [],\n \"DnsOptions\": [],\n \"DnsSearch\": [],\n \"ExtraHosts\": null,\n \"GroupAdd\": null,\n \"IpcMode\": \"shareable\",\n \"Cgroup\": \"\",\n \"Links\": null,\n \"OomScoreAdj\": 0,\n \"PidMode\": \"\",\n \"Privileged\": false,\n \"PublishAllPorts\": false,\n \"ReadonlyRootfs\": false,\n \"SecurityOpt\": null,\n \"UTSMode\": \"\",\n \"UsernsMode\": \"\",\n \"ShmSize\": 67108864,\n \"Runtime\": \"runc\",\n \"ConsoleSize\": [\n 0,\n 0\n ],\n \"Isolation\": \"\",\n \"CpuShares\": 0,\n \"Memory\": 0,\n \"NanoCpus\": 0,\n \"CgroupParent\": \"\",\n \"BlkioWeight\": 0,\n \"BlkioWeightDevice\": [],\n \"BlkioDeviceReadBps\": null,\n \"BlkioDeviceWriteBps\": null,\n \"BlkioDeviceReadIOps\": null,\n \"BlkioDeviceWriteIOps\": null,\n \"CpuPeriod\": 0,\n \"CpuQuota\": 0,\n \"CpuRealtimePeriod\": 0,\n \"CpuRealtimeRuntime\": 0,\n \"CpusetCpus\": \"\",\n \"CpusetMems\": \"\",\n \"Devices\": [],\n \"DeviceCgroupRules\": null,\n \"DiskQuota\": 0,\n \"KernelMemory\": 0,\n \"MemoryReservation\": 0,\n \"MemorySwap\": 0,\n \"MemorySwappiness\": null,\n \"OomKillDisable\": false,\n \"PidsLimit\": 0,\n \"Ulimits\": null,\n \"CpuCount\": 0,\n \"CpuPercent\": 0,\n \"IOMaximumIOps\": 0,\n \"IOMaximumBandwidth\": 0\n },\n \"GraphDriver\": {\n \"Data\": {\n \"LowerDir\": \"/var/lib/docker/overlay2/3c2ffa08bf46b885be9debff94b97ea1048b7b0f9792b68b3ba9aa20f7460c50-init/diff:/var/lib/docker/overlay2/a737d69e224bcbf4dc775b0ba363a4e93a6d3f511a45f2e2b24cc80c40cdaa8f/diff:/var/lib/docker/overlay2/f025bc430ffd1d9adeb41427bdcbd522d889cb1be808d8287755b5628f4caf2a/diff\",\n \"MergedDir\": \"/var/lib/docker/overlay2/3c2ffa08bf46b885be9debff94b97ea1048b7b0f9792b68b3ba9aa20f7460c50/merged\",\n \"UpperDir\": \"/var/lib/docker/overlay2/3c2ffa08bf46b885be9debff94b97ea1048b7b0f9792b68b3ba9aa20f7460c50/diff\",\n \"WorkDir\": \"/var/lib/docker/overlay2/3c2ffa08bf46b885be9debff94b97ea1048b7b0f9792b68b3ba9aa20f7460c50/work\"\n },\n \"Name\": \"overlay2\"\n },\n \"Mounts\": [\n {\n \"Type\": \"volume\",\n \"Name\": \"testvolume\",\n \"Source\": \"/var/lib/docker/volumes/testvolume/_data\",\n \"Destination\": \"/tester\",\n \"Driver\": \"local\",\n \"Mode\": \"z\",\n \"RW\": true,\n \"Propagation\": \"\"\n },\n {\n \"Type\": \"bind\",\n \"Source\": \"/home/user2\",\n \"Destination\": \"/usermount\",\n \"Mode\": \"\",\n \"RW\": true,\n \"Propagation\": \"rprivate\"\n }\n ],\n \"Config\": {\n \"Hostname\": \"4c3d00d1870c\",\n \"Domainname\": \"\",\n \"User\": \"\",\n \"AttachStdin\": true,\n \"AttachStdout\": true,\n \"AttachStderr\": true,\n \"Tty\": true,\n \"OpenStdin\": true,\n \"StdinOnce\": true,\n \"Env\": null,\n \"Cmd\": [\n \"bash\"\n ],\n \"Image\": \"debian:8.2\",\n \"Volumes\": null,\n \"WorkingDir\": \"\",\n \"Entrypoint\": null,\n \"OnBuild\": null,\n \"Labels\": {}\n },\n \"NetworkSettings\": {\n \"Bridge\": \"\",\n \"SandboxID\": \"9f08edac4afb92186d5ad7598efb3d56e44ca756446ebd08b48a6efa3b457a04\",\n \"HairpinMode\": false,\n \"LinkLocalIPv6Address\": \"\",\n \"LinkLocalIPv6PrefixLen\": 0,\n \"Ports\": {},\n \"SandboxKey\": \"/var/run/docker/netns/9f08edac4afb\",\n \"SecondaryIPAddresses\": null,\n \"SecondaryIPv6Addresses\": null,\n \"EndpointID\": \"\",\n \"Gateway\": \"\",\n \"GlobalIPv6Address\": \"\",\n 
\"GlobalIPv6PrefixLen\": 0,\n \"IPAddress\": \"\",\n \"IPPrefixLen\": 0,\n \"IPv6Gateway\": \"\",\n \"MacAddress\": \"\",\n \"Networks\": {\n \"bridge\": {\n \"IPAMConfig\": null,\n \"Links\": null,\n \"Aliases\": null,\n \"NetworkID\": \"8c235ddd93b4786b2f91bef9c1ff85dc5d2727978b75b9b6d9bcaad64b15dea6\",\n \"EndpointID\": \"\",\n \"Gateway\": \"\",\n \"IPAddress\": \"\",\n \"IPPrefixLen\": 0,\n \"IPv6Gateway\": \"\",\n \"GlobalIPv6Address\": \"\",\n \"GlobalIPv6PrefixLen\": 0,\n \"MacAddress\": \"\",\n \"DriverOpts\": null\n }\n }\n }\n }\n]\n'''\ncontainer_inspect_data = json.loads(container_inspect_string)\n\nclass TestDockerutil(TestCase):\n def test_get_image_command_success(self):\n '''get_image_command works'''\n assert_true(uut.get_image_command('debian:8.2'))\n\n def test_get_image_command_bad_image(self):\n '''get_image_command raises an exception for a bad image name'''\n assert_raises(uut.DockerError, uut.get_image_command, 'nosuchimageZZZZZZZZ')\n\n def test_get_image_no_docker(self):\n '''get_image_command raises an exception if docker is not installed'''\n\n real_Popen = subprocess.Popen\n def mocked_popen(popen_args, *args, **kw):\n assert_equal(popen_args[0], 'docker')\n popen_args[0] = 'dockerZZZZ'\n return real_Popen(popen_args, *args, **kw)\n\n with mock.patch('subprocess.Popen', side_effect=mocked_popen) as popen_mock:\n assert_raises(uut.DockerError, uut.get_image_command, 'n/a')\n\n def test__get_image_command__pulls_image_if_missing(self):\n '''get_image_command pulls an image if missing'''\n image = 'busybox:latest'\n\n # First remove the image\n subprocess.call(['docker', 'rmi', image])\n\n # Now try to get the image's Command\n result = uut.get_image_command(image)\n\n # Should return a non-empty string\n self.assertTrue(result)\n\n def test_get_image_entrypoint(self):\n '''get_image_entrypoint works'''\n result = uut.get_image_entrypoint('scuba/entrypoint-test')\n self.assertEqual(1, len(result))\n assert_str_equalish('/entrypoint.sh', result[0])\n\n def test_get_image_entrypoint__none(self):\n '''get_image_entrypoint works for image with no entrypoint'''\n result = uut.get_image_entrypoint('debian')\n self.assertEqual(None, result)\n\n\n def test_make_vol_opt_no_opts(self):\n assert_equal(\n uut.make_vol_opt('/hostdir', '/contdir'),\n '--volume=/hostdir:/contdir'\n )\n\n def test_make_vol_opt_one_opt(self):\n assert_equal(\n uut.make_vol_opt('/hostdir', '/contdir', 'ro'),\n '--volume=/hostdir:/contdir:ro'\n )\n\n def test_make_vol_opt_multi_opts(self):\n assert_equal(\n uut.make_vol_opt('/hostdir', '/contdir', ['ro', 'z']),\n '--volume=/hostdir:/contdir:ro,z'\n )\n\n def test_inspect_container_success(self):\n '''Inspect container with success'''\n\n container_id = container_inspect_data[0]['Id']\n def mocked_popen(popen_args, stdout = None, stderr = None):\n assert_equal(len(popen_args), 3)\n assert_equal(popen_args[0], 'docker')\n assert_equal(popen_args[1], 'inspect')\n assert_equal(popen_args[2], container_id)\n\n def mocked_communicate(self, stdin = None):\n return (self.stdout.read(), self.stderr.read())\n\n ret = type('popen_mock_obj', (), {})()\n ret.stdout = scuba.compat.StringIO(container_inspect_string)\n ret.stderr = scuba.compat.StringIO()\n ret.returncode = 0\n ret.communicate = MethodType(mocked_communicate, ret)\n return ret\n\n with mock.patch('scuba.dockerutil.Popen', mocked_popen) as popen_mock:\n out = uut.docker_inspect_container(container_id)\n assert_equal(out['Id'], container_id)\n\n def test_inspect_container_no_docker(self):\n 
'''Inspect container with missing docker'''\n\n container_id = '4c3d00d1870c72d99b4f6c0828ab1456e37712d0fafc1ac6d2507bc6e39c3987'\n real_Popen = subprocess.Popen\n def mocked_popen(popen_args, *args, **kw):\n assert_equal(popen_args[0], 'docker')\n popen_args[0] = 'dockerZZZZ'\n return real_Popen(popen_args, *args, **kw)\n\n with mock.patch('subprocess.Popen', side_effect=mocked_popen) as popen_mock:\n assert_raises(uut.DockerError, uut.docker_inspect_container, 'n/a')\n\n\n def test_inspect_container_no_container(self):\n '''Inspect missing container'''\n\n container_id = 'fd670db82f3a02a7fd15869d1bab235698dc23ca3b132f01e8fec5655f33af4d'\n def mocked_popen(popen_args, stdout = None, stderr = None):\n assert_equal(len(popen_args), 3)\n assert_equal(popen_args[0], 'docker')\n assert_equal(popen_args[1], 'inspect')\n assert_equal(popen_args[2], container_id)\n\n def mocked_communicate(self, stdin = None):\n print(\"mocked_communicate\")\n return (self.stdout.read(), self.stderr.read())\n\n ret = type('popen_mock_obj', (), {})()\n ret.stdout = scuba.compat.StringIO('[]')\n ret.stderr = scuba.compat.StringIO('Error: No such object: {0}'.format(container_id))\n ret.returncode = 1\n ret.communicate = MethodType(mocked_communicate, ret)\n return ret\n\n with mock.patch('scuba.dockerutil.Popen', mocked_popen) as popen_mock:\n assert_raises(uut.NoSuchContainerError, uut.docker_inspect_container, container_id)\n\n def test_inspect_container_other_fail(self):\n '''Inspect failure'''\n\n container_id = 'fd670db82f3a02a7fd15869d1bab235698dc23ca3b132f01e8fec5655f33af4d'\n def mocked_popen(popen_args, stdout = None, stderr = None):\n assert_equal(len(popen_args), 3)\n assert_equal(popen_args[0], 'docker')\n assert_equal(popen_args[1], 'inspect')\n assert_equal(popen_args[2], container_id)\n\n def mocked_communicate(self, stdin = None):\n print(\"mocked_communicate\")\n return (self.stdout.read(), self.stderr.read())\n\n ret = type('popen_mock_obj', (), {})()\n ret.stdout = scuba.compat.StringIO('[]')\n ret.stderr = scuba.compat.StringIO('Error: Failed')\n ret.returncode = 1\n ret.communicate = MethodType(mocked_communicate, ret)\n return ret\n\n with mock.patch('scuba.dockerutil.Popen', mocked_popen) as popen_mock:\n assert_raises(uut.DockerError, uut.docker_inspect_container, container_id)\n\n def test_get_my_container_id_no_container(self):\n '''Get container ID when not in docker'''\n cgroup_contents = '''12:devices:/user.slice\n11:hugetlb:/\n10:perf_event:/\n9:net_cls,net_prio:/\n8:cpuset:/\n7:freezer:/\n6:rdma:/\n5:blkio:/user.slice\n4:cpu,cpuacct:/user.slice\n3:pids:/user.slice/user-1026.slice\n2:memory:/user.slice\n1:name=systemd:/user.slice/user-1026.slice/session-c1.scope\n'''\n mocked_open = mock.mock_open(read_data=cgroup_contents)\n\n with mock.patch('__builtin__.open', mocked_open):\n assert_equal(uut.get_my_container_id(), None)\n\n def test_get_my_container_id_in_container(self):\n '''Get container ID when in docker'''\n cgroup_contents = 
'''12:devices:/docker/ebfaf125efecfe74d3a99b2d4ef584c3dc23fc290e834ff052ad87b16f5f0dbb\n11:hugetlb:/docker/ebfaf125efecfe74d3a99b2d4ef584c3dc23fc290e834ff052ad87b16f5f0dbb\n10:perf_event:/docker/ebfaf125efecfe74d3a99b2d4ef584c3dc23fc290e834ff052ad87b16f5f0dbb\n9:net_cls,net_prio:/docker/ebfaf125efecfe74d3a99b2d4ef584c3dc23fc290e834ff052ad87b16f5f0dbb\n8:cpuset:/docker/ebfaf125efecfe74d3a99b2d4ef584c3dc23fc290e834ff052ad87b16f5f0dbb\n7:freezer:/docker/ebfaf125efecfe74d3a99b2d4ef584c3dc23fc290e834ff052ad87b16f5f0dbb\n6:rdma:/\n5:blkio:/docker/ebfaf125efecfe74d3a99b2d4ef584c3dc23fc290e834ff052ad87b16f5f0dbb\n4:cpu,cpuacct:/docker/ebfaf125efecfe74d3a99b2d4ef584c3dc23fc290e834ff052ad87b16f5f0dbb\n3:pids:/docker/ebfaf125efecfe74d3a99b2d4ef584c3dc23fc290e834ff052ad87b16f5f0dbb\n2:memory:/docker/ebfaf125efecfe74d3a99b2d4ef584c3dc23fc290e834ff052ad87b16f5f0dbb\n1:name=systemd:/docker/ebfaf125efecfe74d3a99b2d4ef584c3dc23fc290e834ff052ad87b16f5f0dbb\n'''\n mocked_open = mock.mock_open(read_data=cgroup_contents)\n\n with mock.patch('__builtin__.open', mocked_open):\n assert_equal(uut.get_my_container_id(), 'ebfaf125efecfe74d3a99b2d4ef584c3dc23fc290e834ff052ad87b16f5f0dbb')\n\n def test_get_path_mount_no_container(self):\n '''Get path mount when not in docker'''\n cgroup_contents = '''12:devices:/user.slice\n11:hugetlb:/\n10:perf_event:/\n9:net_cls,net_prio:/\n8:cpuset:/\n7:freezer:/\n6:rdma:/\n5:blkio:/user.slice\n4:cpu,cpuacct:/user.slice\n3:pids:/user.slice/user-1026.slice\n2:memory:/user.slice\n1:name=systemd:/user.slice/user-1026.slice/session-c1.scope\n'''\n mocked_open = mock.mock_open(read_data=cgroup_contents)\n\n with mock.patch('__builtin__.open', mocked_open):\n test_path = '/tmp/test/path'\n host_path, mount_path, rel_path, mount_options = uut.get_path_mount(test_path)\n assert_equal(host_path, test_path)\n assert_equal(mount_path, test_path)\n assert_equal(rel_path, None)\n assert_equal(mount_options, None)\n\n def test_get_path_mount_volume_mount(self):\n '''Get path mount for a named volume'''\n \n container_id = container_inspect_data[0]['Id']\n def mocked_popen(popen_args, stdout = None, stderr = None):\n assert_equal(len(popen_args), 3)\n assert_equal(popen_args[0], 'docker')\n assert_equal(popen_args[1], 'inspect')\n assert_equal(popen_args[2], container_id)\n\n def mocked_communicate(self, stdin = None):\n return (self.stdout.read(), self.stderr.read())\n\n ret = type('popen_mock_obj', (), {})()\n ret.stdout = scuba.compat.StringIO(container_inspect_string)\n ret.stderr = scuba.compat.StringIO()\n ret.returncode = 0\n ret.communicate = MethodType(mocked_communicate, ret)\n return ret\n\n with mock.patch('scuba.dockerutil.Popen', mocked_popen):\n host_path, mount_path, rel_path, mount_options = uut.get_path_mount(\"/tester/workspace\", container_id)\n assert_equal(host_path, \"testvolume\")\n assert_equal(mount_path, \"/tester\")\n assert_equal(rel_path, \"workspace\")\n assert_equal(mount_options, [\"z\"])\n\n def test_get_path_mount_bind_mount(self):\n '''Get path mount for a host volume'''\n \n container_id = container_inspect_data[0]['Id']\n def mocked_popen(popen_args, stdout = None, stderr = None):\n assert_equal(len(popen_args), 3)\n assert_equal(popen_args[0], 'docker')\n assert_equal(popen_args[1], 'inspect')\n assert_equal(popen_args[2], container_id)\n\n def mocked_communicate(self, stdin = None):\n return (self.stdout.read(), self.stderr.read())\n\n ret = type('popen_mock_obj', (), {})()\n ret.stdout = scuba.compat.StringIO(container_inspect_string)\n ret.stderr = 
scuba.compat.StringIO()\n ret.returncode = 0\n ret.communicate = MethodType(mocked_communicate, ret)\n return ret\n\n with mock.patch('scuba.dockerutil.Popen', mocked_popen):\n host_path, mount_path, rel_path, mount_options = uut.get_path_mount(\"/usermount/hostwork\", container_id)\n assert_equal(host_path, \"/home/user2\")\n assert_equal(mount_path, \"/usermount\")\n assert_equal(rel_path, \"hostwork\")\n assert_equal(mount_options, [\"\"])\n\n","sub_path":"tests/test_dockerutil.py","file_name":"test_dockerutil.py","file_ext":"py","file_size_in_byte":18783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"236237027","text":"\"\"\"djangoframe URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url,include\nfrom django.contrib import admin\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'book/',include('book.urls'),{'switch':'true'}),\n url(r'music/',include('music.urls')),\n # url(r'^hello/$', views.index),\n # url(r'^hello/python/$',views.hello_python),\n # url(r'^hello/take/$',views.hello_taka),\n # url(r'^hello/([a-z]+)/(/d+)$',views.hello_course),\n # url(r'^hahaha/(?P\\w+)/(?P\\d+)$',views.hello_django),\n # url(r'^hello/(\\d+)/(\\d+)/$',views.add),\n]\n","sub_path":"djangoframe/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"639586750","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2018/6/5 21:28\n# @Author : chen\n# @File : 课后作业.py\n\n\n# class Student:\n# school = 'Luffycity'\n# count = 0\n#\n# def __init__(self, name, age, sex):\n# self.name = name\n# self.age = age\n# self.sex = sex\n# self.count += 1\n#\n# def learn(self):\n# print('%s is learning' % self.name)\n#\n# stu1 = Student('alex', 'male', 38)\n# stu2 = Student('jinxin', 'female', 78)\n# stu3 = Student('Egon', 'male', 18)\n#\n# print(Student.count)\n# print(stu1.count)\n# print(stu2.count)\n# print(stu3.count)\n#\n# print(stu1.__dict__)\n# print(stu2.__dict__)\n# print(stu3.__dict__)\n\n\nclass Hero:\n def __init__(self, nick_name, life_value, aggresivity):\n self.nick_name = nick_name\n self.life_value = life_value\n self.aggresivity = aggresivity\n\n def attack(self, enemy):\n enemy.life_value -= self.aggresivity\n\n\n# r1 = Hero('可爱的锐雯雯', 80, 50)\n# g1 = Hero('草丛伦', 100, 30)\n# print(r1.life_value)\n# g1.attack(r1)\n# print(r1.life_value)\n\n\nclass Riven(Hero):\n camp = 'Noxus'\n\n # def __init__(self, nick_name, life_value, aggresivity):\n # super(Riven, self).__init__(nick_name, life_value, aggresivity)\n\n\n# class Garen(Hero):\n# camp = 'Demacia'\n#\n# # def __init__(self, nick_name, life_value, aggresivity):\n# # super(Garen, self).__init__(nick_name, life_value, aggresivity)\n\n\n# class Garen(Hero):\n# camp = 'Demacia'\n#\n# def __init__(self, n, nick_name, life_value, aggresivity):\n# self.n =n\n# 
super(Garen, self).__init__(nick_name, life_value, aggresivity)\n\nclass Garen(Hero):\n    camp = 'Demacia'\n\n    def sss(self):\n        pass\n\n\nr1 = Riven('可爱的锐雯雯', 80, 50)\ng1 = Garen('草丛伦', 100, 30)\nprint(r1.life_value)\ng1.attack(r1)\nprint(r1.life_value)\nprint(r1.camp)\nprint(g1.camp)\nprint(r1.__dict__)\nprint(r1.__dir__())\nprint(g1.__dict__)\n# Creating class B through inheritance from A means B 'inherits' all of A's attributes (data attributes and function attributes), enabling code reuse\n","sub_path":"类与网络编程/课后作业.py","file_name":"课后作业.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"460859166","text":"from jr.parser import Parser\nfrom jr.extension import Extension\nfrom jr.common import Node\nfrom jr.render import Render\nimport yaml\nimport json\nimport os\n\n\nclass PipelineExtension(Extension):\n\n    def __init__(self, template_path=None):\n        self.source = None\n        self.data = None\n        self.template_path = template_path\n\n    def preprocess(self, source):\n        \"\"\"\n        DSL preprocessing hook: preprocess the input source. The base implementation returns it unchanged; here the YAML input is re-serialised as a JSON string.\n        :param source:\n        :return:\n        \"\"\"\n        self.data = yaml.load(source)\n        source = json.dumps(self.data)\n        return source\n\n    def semantic_parse(self, grammar_model):\n        \"\"\"Semantic analysis.\n        \"\"\"\n\n        global_member = self.get_global_member(self.data)\n        pipeline_member = self.get_pipeline_member(self.data)\n        g_model = pipeline_member.copy()\n        local_data = self.parse_model(pipeline_member, 'g_model', list())\n\n        while local_data[0][1] != 'pipeline':\n            for ae in local_data:\n                node = Node(name=ae[1], member=ae[2], global_member=global_member)\n                code = self.render_node([node])\n                locals()['code'] = code\n                m_str = ae[0] + \"['\" + ae[1] + \"'] = code\"\n                exec(m_str)\n            g_model = locals()['g_model']\n            local_data = self.parse_model(g_model, 'g_model', list())\n\n        pipeline_code = self.get_pipeline_code(local_data[-1][-1])\n        g_model = [Node(name='pipeline', member={'pipeline': pipeline_code}, global_member=global_member)]\n        return g_model\n\n    def code_optimize(self, out_code):\n        \"\"\"Code optimisation (currently a no-op).\n        \"\"\"\n        return out_code\n\n    @staticmethod\n    def get_pipeline_code(model):\n        pipeline_code = ''\n        for k, v in model.items():\n            pipeline_code = pipeline_code + '\\n' + v\n        return pipeline_code\n\n    @staticmethod\n    def get_pipeline_member(model):\n        pipeline_member = {'pipeline': model.get('pipeline')}\n        return pipeline_member\n\n    @staticmethod\n    def get_global_member(model):\n        global_member = dict()\n        for k, v in model.items():\n            if isinstance(v, str):\n                global_member.update({k: v})\n        return global_member\n\n    def render_node(self, nodes):\n        render = Render(self.template_path)\n        return render.run(nodes)\n\n    def parse_model(self, model, path, result_data=list()):\n        if isinstance(model, dict):\n            for first_k, first_v in model.items():\n                if isinstance(first_v, dict) or isinstance(first_v, list):\n                    is_last_node = True\n\n                    if isinstance(first_v, dict):\n                        for second_k, second_v in first_v.items():\n                            if not isinstance(second_v, str):\n                                is_last_node = False\n                                break\n                    elif isinstance(first_v, list):\n                        for ele in first_v:\n                            if not (len(ele) == 1 and\n                                    isinstance(ele, dict)\n                                    and isinstance(ele[list(ele.keys())[0]], str)):\n                                is_last_node = False\n                                break\n\n                    if is_last_node:\n                        result_data.append((path, first_k, first_v))\n                    else:\n                        next_path = path + \"['\" + first_k + \"']\"\n                        self.parse_model(first_v, next_path, result_data)\n\n            return result_data\n        elif isinstance(model, list):\n            for index, model_ele in enumerate(model):\n                self.parse_model(model_ele, path + \"[\" + str(index) + \"]\", result_data)\n\n\nif __name__ == \"__main__\":\n    
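# demo driver: parse the sample DSL with the pipeline extension and print the generated code\n    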
my_path = os.path.dirname(os.path.abspath(__file__))\n    my_template_path = os.path.join(my_path, 'template/')\n    lang_tx_str = open('./rule/pipeline.tx').read()\n    dsl_str = open('./input/demo_dsl.yaml').read()\n    my_parser = Parser(dsl_str, lang_tx_str, my_template_path, PipelineExtension(template_path=my_template_path))\n    print(my_parser.parse())\n","sub_path":"demo/pipeline/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"305413988","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('lit', '0009_auto_20141208_2040'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='publication',\n            name='_type',\n            field=models.ForeignKey(to='lit.Type', null=True),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"other/django/mysite/lit/migrations/0010_auto_20141208_2043.py","file_name":"0010_auto_20141208_2043.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"342749491","text":"print(\"Numero mas repetido\\n\")\r\n\r\nrepetido=[1,3,1,4,6]\r\nr=repetido[0]\r\n# keep the value that occurs most often in the list (the mode)\r\nfor n in repetido:\r\n    if repetido.count(n)>repetido.count(r):\r\n        r=n\r\n\r\nprint(r)\r\n","sub_path":"A_Programas/Programas iniciales/inicios_progra/Moda(num).py","file_name":"Moda(num).py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"88977454","text":"import pygame,sys, os, game,random\nclass Character:\n    def __init__(self, name, portrait, int, cha, str, agi, ste, com, imm, app, hp, xp, lvl, nextlvlxp,active, points):\n        self.name= name\n        self.portrait= portrait\n        self.int = int\n        self.cha = cha\n        self.str = str\n        self.agi = agi\n        self.ste = ste\n        self.com = com\n        self.imm = imm\n        self.app = app\n        self.hp= hp\n        self.xp = xp\n        self.lvl = lvl\n        self.nextlvlxp = nextlvlxp\n        self.active = active\n        self.points = points\nclass Map:\n    def __init__(self, terrain, infrastructure, threat,posx,posy):\n        self.terrain= terrain\n        ##### 1.lake\n        ##### 2.plain\n        ##### 3.forest\n        ##### 4.hills\n        ##### 5.mountains\n\n        self.infrastructure=infrastructure\n        self.threat=threat\n        self.posx=posx\n        self.posy=posy\ndef newgamef(window, box1, box2,logo,window_height,window_width,font,ticks,time):\n    while True:\n        pos = pygame.mouse.get_pos()\n        pygame.draw.rect(window, (19, 31, 109), pygame.Rect(0, 0, window_width, window_height))\n        selectdificulity = font.render('Select difficulty level: ', True, [255, 255, 255])\n        easy = font.render('Easy', True, [255, 255, 255])\n        normal = font.render('Normal', True, [0, 0, 0])\n        hard = font.render('Hard', True, [255, 0, 0])\n        back = font.render('Back', True, [0, 0, 0])\n        easybox = pygame.Rect(window_width / 2 - normal.get_rect().width / 2 - 5, (window_height / 2) - 105,\n                              normal.get_rect().width + 10, 50)\n        normalbox = pygame.Rect(window_width / 2 - normal.get_rect().width / 2 - 5, (window_height / 2) -5,\n                              normal.get_rect().width + 10, 50)\n        hardbox = pygame.Rect(window_width / 2 - normal.get_rect().width / 2 - 5,\n                              (window_height / 2) + 95, normal.get_rect().width + 10, 50)\n        backbox = pygame.Rect(window_width / 2 - normal.get_rect().width / 2 - 5,\n                              (window_height / 2) + 195, normal.get_rect().width + 10, 50)\n        
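# draw the four menu buttons in their idle colour; hover highlighting is applied further down\n        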
pygame.draw.rect(window, (0, 178, 255), easybox)\n pygame.draw.rect(window, (0, 178, 255), normalbox)\n pygame.draw.rect(window, (0, 178, 255), hardbox)\n pygame.draw.rect(window, (0, 178, 255), backbox)\n window.blit(selectdificulity, (window_width / 2 - selectdificulity.get_rect().width / 2, window_height / 2 - 200))\n window.blit(easy, (window_width / 2 - easy.get_rect().width / 2, window_height / 2 - 100))\n window.blit(normal, (window_width / 2 - normal.get_rect().width / 2, window_height / 2))\n window.blit(hard, (window_width / 2 - hard.get_rect().width / 2, window_height / 2 + 100))\n window.blit(back, (window_width / 2 - back.get_rect().width / 2, window_height / 2 + 200))\n window.blit(logo, (window_width / 2 - logo.get_rect().width / 2, 0))\n pygame.draw.rect(window, (0, 178, 255), box1)\n pygame.draw.rect(window, (0, 178, 255), box2)\n\n if easybox.collidepoint(pos):\n pygame.draw.rect(window, (240, 240, 0), easybox)\n window.blit(easy, (window_width / 2 - easy.get_rect().width / 2, window_height / 2 - 100))\n elif normalbox.collidepoint(pos):\n pygame.draw.rect(window, (240, 240, 0), normalbox)\n window.blit(normal, (window_width / 2 - normal.get_rect().width / 2, window_height / 2 ))\n elif hardbox.collidepoint(pos):\n pygame.draw.rect(window, (240, 240, 0), hardbox)\n window.blit(hard, (window_width / 2 - hard.get_rect().width / 2, window_height / 2 + 100))\n elif backbox.collidepoint(pos):\n pygame.draw.rect(window, (240, 240, 0), backbox)\n window.blit(back, (window_width / 2 - back.get_rect().width / 2, window_height / 2 + 200))\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit(0)\n if event.type == pygame.MOUSEBUTTONDOWN:\n if pos[1] > window_height / 2 + 200 and pos[1] < window_height / 2 + 250:\n return 0\n elif pos[1] > window_height / 2 - 100 and pos[1] < window_height / 2 - 50:\n return 1\n elif pos[1] > window_height / 2 and pos[1] < window_height / 2 + 50:\n return 2\n elif pos[1] > window_height / 2 + 100 and pos[1] < window_height / 2 + 150:\n return 3\ndef createpartyf(window,box1, box2,window_height,window_width,clock, font,maxfps, diff):\n images=os.listdir(\"graphics/images\")\n time=0\n ticks=0\n p1=0\n p2=0\n p3=0\n active1 = 0\n active2 = 0\n active3 = 0\n name1text= ''\n name2text = ''\n name3text = ''\n fontsmall=pygame.font.SysFont(\"Times New Roman, Arial\", 24)\n inteligence = [3,3,3]\n charisma = [3,3,3]\n strenght = [3,3,3]\n agility = [3,3,3]\n stealth = [3,3,3]\n combat = [3,3,3]\n immunity = [3,3,3]\n apperance = [3,3,3]\n points = [10,10,10]\n pluswhite=pygame.image.load('graphics/plus_white.png')\n plusorange = pygame.image.load('graphics/plus_orange.png')\n minuswhite = pygame.image.load('graphics/minus_white.png')\n minusorange = pygame.image.load('graphics/minus_orange.png')\n pluswhite = pygame.transform.scale(pluswhite, (24,24))\n plusorange = pygame.transform.scale(plusorange, (24, 24))\n minuswhite = pygame.transform.scale(minuswhite, (24, 24))\n minusorange = pygame.transform.scale(minusorange, (24, 24))\n rightarrow = pygame.image.load('graphics/rightarrow.png')\n leftarrow = pygame.image.load('graphics/leftarrow.png')\n rightarrow = pygame.transform.scale(rightarrow, (30, 60))\n leftarrow = pygame.transform.scale(leftarrow, (30, 60))\n while True:\n pos = pygame.mouse.get_pos()\n time += clock.tick() / 1000.0\n pygame.draw.rect(window, (19, 31, 109), pygame.Rect(0, 0, window_width, window_height))\n while time > 1 / maxfps and ticks <= window_height / 4:\n 
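# fixed-timestep update: consume accumulated frame time in 1/maxfps slices while the panels slide into place\n            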
pygame.event.set_grab(False)\n time -= 1 / maxfps\n ticks += 1\n # Visual Evects\n box1.width -= 2\n box2.y += 2\n box2.x += 2\n\n back = font.render('Back', True, [0, 0, 0])\n play = font.render('Play', True, [0, 0, 0])\n playbox = pygame.Rect(window_width - 120, window_height - 70, play.get_rect().width + 10, 50)\n backbox = pygame.Rect(window_width-260,window_height-70, back.get_rect().width + 10, 50)\n pygame.draw.rect(window, (0, 178, 255), box1)\n pygame.draw.rect(window, (0, 178, 255), box2)\n name = font.render('Name', True, [0, 0, 0])\n intfont=fontsmall.render('INT',True, [0, 0, 0])\n chafont = fontsmall.render('CHA',True, [0, 0, 0])\n strfont = fontsmall.render('STR',True, [0, 0, 0])\n agifont = fontsmall.render('AGI',True, [0, 0, 0])\n stefont = fontsmall.render('STE',True, [0, 0, 0])\n comfont = fontsmall.render('COM',True, [0, 0, 0])\n immfont = fontsmall.render('IMM',True, [0, 0, 0])\n appfont = fontsmall.render('APP',True, [0, 0, 0])\n pointsfont = fontsmall.render('POINTS',True, [0, 0, 0])\n portrait1 = pygame.image.load('graphics/images/{}'.format(images[p1]))\n portrait2 = pygame.image.load('graphics/images/{}'.format(images[p2]))\n portrait3 = pygame.image.load('graphics/images/{}'.format(images[p3]))\n portrait1 = pygame.transform.scale(portrait1,(int(4 * window_width / 16 - 60), int(4 * window_width / 16 - 60)))\n portrait2 = pygame.transform.scale(portrait2,(int(4 * window_width / 16 - 60), int(4 * window_width / 16 - 60)))\n portrait3 = pygame.transform.scale(portrait3,(int(4 * window_width / 16 - 60), int(4 * window_width / 16 - 60)))\n if ticks>=window_height/4:\n pygame.draw.rect(window, (0, 178, 255), backbox)\n name1box = pygame.Rect(window_width / 16+portrait1.get_rect().width/2-window_width/8, 4 * window_width / 16 + 50, 4*window_width / 16, 30)\n name2box = pygame.Rect(6*window_width / 16 + portrait1.get_rect().width / 2 - window_width/8,4 * window_width / 16 + 50, 4*window_width / 16, 30)\n name3box = pygame.Rect(11*window_width / 16 + portrait1.get_rect().width / 2 - window_width/8,4 * window_width / 16 + 50, 4*window_width / 16, 30)\n pygame.draw.rect(window, (0, 178, 255), name1box)\n pygame.draw.rect(window, (0, 178, 255), name2box)\n pygame.draw.rect(window, (0, 178, 255), name3box)\n window.blit(name, (window_width / 16+portrait1.get_rect().width/2-name.get_rect().width/2, 4 * window_width / 16))\n window.blit(name, (6*window_width / 16 + portrait1.get_rect().width / 2 - name.get_rect().width / 2, 4 * window_width / 16))\n window.blit(name, (11*window_width / 16 + portrait1.get_rect().width / 2 - name.get_rect().width / 2, 4 * window_width / 16))\n window.blit(back, (window_width-255, window_height -70))\n pygame.draw.rect(window, (0, 178, 255), playbox)\n window.blit(play, (window_width - 115, window_height - 70))\n #######STATS#$$$$$\n positionx=window_width/16\n for i in range(0,3):\n ##int#######################################################################################################################################\n window.blit(intfont, (positionx-30, 4 * window_width / 16 + 110))\n if inteligence[i]==0:\n window.blit(minusorange, (positionx - 25+chafont.get_rect().width, 4 * window_width / 16 + 110))\n else:\n window.blit(minuswhite, (positionx - 25 + chafont.get_rect().width, 4 * window_width / 16 + 110))\n if inteligence[i]==10 or points[i]==0:\n window.blit(plusorange, (positionx + 25 + chafont.get_rect().width, 4 * window_width / 16 + 110))\n else:\n window.blit(pluswhite, (positionx + 25 + chafont.get_rect().width, 4 * window_width / 
16 + 110))\n inteligence0=str(inteligence[i])\n intval=fontsmall.render(inteligence0,True, [0, 0, 0])\n window.blit(intval,(positionx+chafont.get_rect().width+minuswhite.get_rect().width-12-(intval.get_rect().width/2), 4 * window_width / 16 + 110))\n ##char#######################################################################################################################################\n window.blit(chafont, (positionx - 30, 4 * window_width / 16 + 150))\n if charisma[i] == 0:\n window.blit(minusorange, (positionx - 25 + chafont.get_rect().width, 4 * window_width / 16 + 150))\n else:\n window.blit(minuswhite, (positionx - 25 + chafont.get_rect().width, 4 * window_width / 16 + 150))\n if charisma[i]==10 or points[i]==0:\n window.blit(plusorange, (positionx + 25 + chafont.get_rect().width, 4 * window_width / 16 + 150))\n else:\n window.blit(pluswhite, (positionx + 25 + chafont.get_rect().width, 4 * window_width / 16 + 150))\n charisma0 = str(charisma[i])\n chaval = fontsmall.render(charisma0, True, [0, 0, 0])\n window.blit(chaval, (positionx+chafont.get_rect().width+minuswhite.get_rect().width-12-(chaval.get_rect().width/2), 4 * window_width / 16 + 150))\n #str#######################################################################################################################################\n window.blit(strfont, (positionx - 30, 4 * window_width / 16 + 190))\n if strenght[i]==0:\n window.blit(minusorange, (positionx - 25 + chafont.get_rect().width, 4 * window_width / 16 + 190))\n else:\n window.blit(minuswhite, (positionx - 25 + chafont.get_rect().width, 4 * window_width / 16 + 190))\n if strenght[i]==10 or points[i]==0:\n window.blit(plusorange, (positionx + 25 + chafont.get_rect().width, 4 * window_width / 16 + 190))\n else:\n window.blit(pluswhite, (positionx + 25 + chafont.get_rect().width, 4 * window_width / 16 + 190))\n strenght0 = str(strenght[i])\n strval = fontsmall.render(strenght0, True, [0, 0, 0])\n window.blit(strval, (positionx + chafont.get_rect().width + minuswhite.get_rect().width - 12 - (strval.get_rect().width / 2), 4 * window_width / 16 + 190))\n ##agi#######################################################################################################################################\n window.blit(agifont, (positionx - 30, 4 * window_width / 16 + 230))\n if agility[i]==0:\n window.blit(minusorange, (positionx - 25 + chafont.get_rect().width, 4 * window_width / 16 + 230))\n else:\n window.blit(minuswhite, (positionx - 25 + chafont.get_rect().width, 4 * window_width / 16 + 230))\n if agility[i]==10 or points[i]==0:\n window.blit(plusorange, (positionx + 25 + chafont.get_rect().width, 4 * window_width / 16 + 230))\n else:\n window.blit(pluswhite, (positionx + 25 + chafont.get_rect().width, 4 * window_width / 16 + 230))\n agility0 = str(agility[i])\n agival = fontsmall.render(agility0, True, [0, 0, 0])\n window.blit(agival, (positionx + chafont.get_rect().width + minuswhite.get_rect().width - 12 - (agival.get_rect().width / 2), 4 * window_width / 16 + 230))\n ##ste#######################################################################################################################################\n window.blit(stefont, (positionx+2*window_width / 16 - 30, 4 * window_width / 16 + 110))\n if stealth[i]==0:\n window.blit(minusorange, (positionx + 2 * window_width / 16 - 25 + chafont.get_rect().width, 4 * window_width / 16 + 110))\n else:\n window.blit(minuswhite, (positionx+2*window_width / 16 - 25 + chafont.get_rect().width, 4 * window_width / 16 + 110))\n if 
stealth[i]==10 or points[i]==0:\n window.blit(plusorange, (positionx + 2 * window_width / 16 + 25 + chafont.get_rect().width, 4 * window_width / 16 + 110))\n else:\n window.blit(pluswhite, (positionx+2*window_width / 16 + 25 + chafont.get_rect().width, 4 * window_width / 16 + 110))\n stealth0 = str(stealth[i])\n steval = fontsmall.render(stealth0, True, [0, 0, 0])\n window.blit(steval, (positionx+2*window_width / 16 + chafont.get_rect().width + minuswhite.get_rect().width - 12 - (steval.get_rect().width / 2), 4 * window_width / 16 + 110))\n ##com#######################################################################################################################################\n window.blit(comfont, (positionx + 2 * window_width / 16 - 30, 4 * window_width / 16 + 150))\n if combat[i]==0:\n window.blit(minusorange, (positionx + 2 * window_width / 16 - 25 + chafont.get_rect().width, 4 * window_width / 16 + 150))\n else:\n window.blit(minuswhite, (positionx + 2 * window_width / 16 - 25 + chafont.get_rect().width, 4 * window_width / 16 + 150))\n if combat[i]==10 or points[i]==0:\n window.blit(plusorange, (positionx + 2 * window_width / 16 + 25 + chafont.get_rect().width, 4 * window_width / 16 + 150))\n else:\n window.blit(pluswhite, (positionx + 2 * window_width / 16 + 25 + chafont.get_rect().width, 4 * window_width / 16 + 150))\n combat0 = str(combat[i])\n comval = fontsmall.render(combat0, True, [0, 0, 0])\n window.blit(comval, (positionx + 2 * window_width / 16 + chafont.get_rect().width + minuswhite.get_rect().width - 12 - (comval.get_rect().width / 2), 4 * window_width / 16 + 150))\n ##imm#######################################################################################################################################\n window.blit(immfont, (positionx + 2 * window_width / 16 - 30, 4 * window_width / 16 + 190))\n if immunity[i]==0:\n window.blit(minusorange, (positionx + 2 * window_width / 16 - 25 + chafont.get_rect().width, 4 * window_width / 16 + 190))\n else:\n window.blit(minuswhite, (positionx + 2 * window_width / 16 - 25 + chafont.get_rect().width, 4 * window_width / 16 + 190))\n if immunity[i]==10 or points[i]==0:\n window.blit(plusorange, (positionx + 2 * window_width / 16 + 25 + chafont.get_rect().width, 4 * window_width / 16 + 190))\n else:\n window.blit(pluswhite, (positionx + 2 * window_width / 16 + 25 + chafont.get_rect().width, 4 * window_width / 16 + 190))\n immunity0 = str(immunity[i])\n immval = fontsmall.render(immunity0, True, [0, 0, 0])\n window.blit(immval, (positionx + 2 * window_width / 16 + chafont.get_rect().width + minuswhite.get_rect().width - 12 - (immval.get_rect().width / 2), 4 * window_width / 16 + 190))\n ##app#######################################################################################################################################\n window.blit(appfont, (positionx + 2 * window_width / 16 - 30, 4 * window_width / 16 + 230))\n if apperance[i]==0:\n window.blit(minusorange, (positionx + 2 * window_width / 16 - 25 + chafont.get_rect().width, 4 * window_width / 16 + 230))\n else:\n window.blit(minuswhite, (positionx + 2 * window_width / 16 - 25 + chafont.get_rect().width, 4 * window_width / 16 + 230))\n if apperance[i]==10 or points[i]==0:\n window.blit(plusorange, (positionx + 2 * window_width / 16 + 25 + chafont.get_rect().width, 4 * window_width / 16 + 230))\n else:\n window.blit(pluswhite, (positionx + 2 * window_width / 16 + 25 + chafont.get_rect().width, 4 * window_width / 16 + 230))\n apperance0 = str(apperance[i])\n appval = 
fontsmall.render(apperance0, True, [0, 0, 0])\n window.blit(appval, (positionx + 2 * window_width / 16 + chafont.get_rect().width + minuswhite.get_rect().width - 12 - (appval.get_rect().width / 2), 4 * window_width / 16 + 230))\n ##points#######################################################################################################################################\n window.blit(pointsfont, (positionx - 30, 4* window_width / 16 + 280))\n points0 = str(points[i])\n poival = fontsmall.render(points0, True, [0, 0, 0])\n window.blit(poival, (positionx + chafont.get_rect().width+30, 4 * window_width / 16 + 280))\n positionx += 5 * window_width / 16\n playdescription = fontsmall.render('You have to use all your points and name characters first!', True, [0, 0, 0])\n intdescription = fontsmall.render('Inteligence - ability to think logically and draw conclusions.', True, [0, 0, 0])\n chadescription = fontsmall.render('Charisma - ability to persuade others to your opinions.', True, [0, 0, 0])\n strdescription = fontsmall.render('Strenght - measurement of physical power.', True, [0, 0, 0])\n agidescription = fontsmall.render('Agility - ability to perform actions quickly and easily.', True, [0, 0, 0])\n stedescription = fontsmall.render('Stealth - ability to move around unnoticed.', True, [0, 0, 0])\n comdescription = fontsmall.render('Combat - proficiency in using weapons/experience in fighting.', True, [0, 0, 0])\n immdescription = fontsmall.render('Immunity - natural resistance to injuries, weather and illnesses.', True, [0, 0, 0])\n appdescription = fontsmall.render('Appearance - how character look - it can affect others opinions', True, [0, 0, 0])\n infobox = pygame.Rect(window_width/4, 9*window_height/10, window_width/2, intdescription.get_rect().height)\n positionx = window_width / 16\n for i in range(0, 3):\n if pos[0]>positionx-30 and pos[0]4*window_width / 16 + 110 and pos[1]<4*window_width / 16 + 110+chafont.get_rect().height:\n pygame.draw.rect(window, (0, 178, 255), infobox)\n window.blit(intdescription, (window_width/2-intdescription.get_rect().width/2, 9*window_height/10))\n elif pos[1]>4*window_width / 16 + 150 and pos[1]<4*window_width / 16 + 150+chafont.get_rect().height:\n pygame.draw.rect(window, (0, 178, 255), infobox)\n window.blit(chadescription, (window_width/2-chadescription.get_rect().width/2, 9*window_height/10))\n elif pos[1]>4*window_width / 16 + 190 and pos[1]<4*window_width / 16 + 190+chafont.get_rect().height:\n pygame.draw.rect(window, (0, 178, 255), infobox)\n window.blit(strdescription, (window_width/2-strdescription.get_rect().width/2, 9*window_height/10))\n elif pos[1]>4*window_width / 16 + 230 and pos[1]<4*window_width / 16 + 230+chafont.get_rect().height:\n pygame.draw.rect(window, (0, 178, 255), infobox)\n window.blit(agidescription, (window_width/2-agidescription.get_rect().width/2, 9*window_height/10))\n elif pos[0]>positionx-30+2*window_width/16 and pos[0] 4 * window_width / 16 + 110 and pos[1] < 4 * window_width / 16 + 110 + chafont.get_rect().height:\n pygame.draw.rect(window, (0, 178, 255), infobox)\n window.blit(stedescription,(window_width / 2 - stedescription.get_rect().width / 2, 9 * window_height / 10))\n elif pos[1] > 4 * window_width / 16 + 150 and pos[1] < 4 * window_width / 16 + 150 + chafont.get_rect().height:\n pygame.draw.rect(window, (0, 178, 255), infobox)\n window.blit(comdescription,(window_width / 2 - comdescription.get_rect().width / 2, 9 * window_height / 10))\n elif pos[1] > 4 * window_width / 16 + 190 and pos[1] < 4 * 
 elif pos[1] > 4 * window_width / 16 + 190 and pos[1] < 4 * window_width / 16 + 190 + chafont.get_rect().height:\n pygame.draw.rect(window, (0, 178, 255), infobox)\n window.blit(immdescription,(window_width / 2 - immdescription.get_rect().width / 2, 9 * window_height / 10))\n elif pos[1] > 4 * window_width / 16 + 230 and pos[1] < 4 * window_width / 16 + 230 + chafont.get_rect().height:\n pygame.draw.rect(window, (0, 178, 255), infobox)\n window.blit(appdescription,(window_width / 2 - appdescription.get_rect().width / 2, 9 * window_height / 10))\n positionx += 5 * window_width / 16\n if playbox.collidepoint(pos) and (points[0]+points[1]+points[2]!=0 or (name1text=='' or name2text=='' or name3text=='')):\n pygame.draw.rect(window, (0, 178, 255), infobox)\n window.blit(playdescription, (window_width/2-playdescription.get_rect().width/2, 9*window_height/10))\n\n window.blit(portrait1, (window_width / 16, 20))\n window.blit(portrait2, (6*window_width / 16, 20))\n window.blit(portrait3, (11*window_width / 16, 20))\n name1 = fontsmall.render(name1text, True, [0, 0, 0])\n name2 = fontsmall.render(name2text, True, [0, 0, 0])\n name3 = fontsmall.render(name3text, True, [0, 0, 0])\n window.blit(name1, (window_width / 16 + portrait1.get_rect().width / 2 - name1.get_rect().width / 2, 4 * window_width / 16 +50))\n window.blit(name2, (6*window_width / 16 + portrait1.get_rect().width / 2 - name2.get_rect().width / 2,\n 4 * window_width / 16 + 50))\n window.blit(name3, (11*window_width / 16 + portrait1.get_rect().width / 2 - name3.get_rect().width / 2,\n 4 * window_width / 16 + 50))\n window.blit(leftarrow, (window_width / 16-30, -10+portrait1.get_rect().height/2))\n window.blit(leftarrow, (6*window_width / 16 - 30, -10 + portrait2.get_rect().height / 2))\n window.blit(leftarrow, (11*window_width / 16 - 30, -10 + portrait3.get_rect().height / 2))\n window.blit(rightarrow, (window_width / 16+portrait1.get_rect().width, -10 + portrait1.get_rect().height / 2))\n window.blit(rightarrow, (6*window_width / 16+portrait2.get_rect().width, -10 + portrait2.get_rect().height / 2))\n window.blit(rightarrow, (11*window_width / 16+portrait3.get_rect().width, -10 + portrait3.get_rect().height / 2))\n if backbox.collidepoint(pos) and ticks>=window_height/4:\n pygame.draw.rect(window, (240, 240, 0), backbox)\n window.blit(back, (window_width-255, window_height - 70))\n if playbox.collidepoint(pos) and ticks>=window_height/4:\n pygame.draw.rect(window, (240, 240, 0), playbox)\n window.blit(play, (window_width - 115, window_height - 70))\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if active1:\n if event.key == pygame.K_BACKSPACE:\n name1text = name1text[:-1]\n elif len(name1text)<20:\n name1text += event.unicode\n if active2:\n if event.key == pygame.K_BACKSPACE:\n name2text = name2text[:-1]\n elif len(name2text) < 20:\n name2text += event.unicode\n if active3:\n if event.key == pygame.K_BACKSPACE:\n name3text = name3text[:-1]\n elif len(name3text) < 20:\n name3text += event.unicode\n\n if event.type == pygame.QUIT:\n sys.exit(0)\n if event.type == pygame.MOUSEBUTTONDOWN:\n if backbox.collidepoint(event.pos):\n return 0\n # start only when every character is named and every point is spent, as the tooltip above promises\n if playbox.collidepoint(event.pos) and points[0]==0 and points[1]==0 and points[2]==0 and name1text!='' and name2text!='' and name3text!='':\n char1 = Character(name1text, images[p1], inteligence[0], charisma[0],strenght[0],agility[0],stealth[0],combat[0],immunity[0],apperance[0],10+immunity[0], 0, 1, 100,1,0)\n
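 # Character() is called positionally here: name, portrait, the eight stats in display order, then 10+immunity[i] (which looks like starting health) and five constants (0, 1, 100, 1, 0) whose meaning is set by the Character class defined elsewhere in this project.\n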
 char2 = Character(name2text, images[p2], inteligence[1], charisma[1], strenght[1], agility[1], stealth[1], combat[1], immunity[1], apperance[1], 10 + immunity[1], 0, 1, 100, 1, 0)\n char3 = Character(name3text, images[p3], inteligence[2], charisma[2], strenght[2], agility[2], stealth[2], combat[2], immunity[2], apperance[2], 10 + immunity[2], 0, 1, 100, 1, 0)\n char4 = Character('', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)\n gamemap=[]\n for i in range(0,40):\n for j in range(0,25):\n terrain=random.randint(1,50)\n threat=random.randint(1,50)\n infrastructure=random.randint(0,4)\n if terrain<=5:\n terrain=1\n elif terrain<20:\n terrain=2\n elif terrain<35:\n terrain=3\n elif terrain<45:\n terrain=4\n else:\n terrain=5\n if terrain==1:\n infrastructure=0\n if terrain==2 or terrain==3 or terrain==4:\n if threat<=5:\n threat=0\n elif threat <= 10:\n threat = 1\n elif threat<=20:\n threat=2\n elif threat<=30:\n threat=3\n elif threat<=40:\n threat=4\n else:\n threat=5\n elif terrain==5 or terrain==1:\n if threat <= 5:\n threat = 0\n elif threat <= 15:\n threat = 1\n elif threat <= 30:\n threat = 2\n elif threat <= 45:\n threat = 3\n elif threat <= 48:\n threat = 4\n else:\n threat = 5\n gamemap.append(Map(terrain, infrastructure, threat, i, j))\n ok=0\n while ok==0:\n randx=random.randint(17,22)\n randy=random.randint(9,14)\n for i in gamemap:\n if i.posx==randx and i.posy==randy:\n if i.terrain!=1:\n i.infrastructure=5\n ok=1\n mainmenu=game.gamef(window,window_height,window_width,char1, char2, char3,char4, diff, gamemap,[randx, randy])\n return 0\n if name1box.collidepoint(event.pos):\n active1=1\n else:\n active1=0\n if name2box.collidepoint(event.pos):\n active2=1\n else:\n active2=0\n if name3box.collidepoint(event.pos):\n active3=1\n else:\n active3=0\n positionx = window_width / 16\n for i in range(0, 3):\n #stats minus\n if pos[0]>positionx-25+chafont.get_rect().width and pos[0]<positionx-25+chafont.get_rect().width+minuswhite.get_rect().width:\n if pos[1]>4 * window_width / 16 + 110 and pos[1]<4 * window_width / 16 + 110+minuswhite.get_rect().height and inteligence[i]>0:\n inteligence[i]-=1\n points[i]+=1\n elif pos[1]>4 * window_width / 16 + 150 and pos[1]<4 * window_width / 16 + 150+minuswhite.get_rect().height and charisma[i]>0:\n charisma[i]-=1\n points[i]+=1\n elif pos[1]>4 * window_width / 16 + 190 and pos[1]<4 * window_width / 16 + 190+minuswhite.get_rect().height and strenght[i]>0:\n strenght[i]-=1\n points[i]+=1\n elif pos[1]>4 * window_width / 16 + 230 and pos[1]<4 * window_width / 16 + 230+minuswhite.get_rect().height and agility[i]>0:\n agility[i]-=1\n points[i]+=1\n elif pos[0]>positionx+2 * window_width / 16-30+chafont.get_rect().width and pos[0]<positionx+2 * window_width / 16-30+chafont.get_rect().width+minuswhite.get_rect().width:\n if pos[1]>4 * window_width / 16 + 110 and pos[1]<4 * window_width / 16 + 110+minuswhite.get_rect().height and stealth[i]>0:\n stealth[i]-=1\n points[i]+=1\n elif pos[1]>4 * window_width / 16 + 150 and pos[1]<4 * window_width / 16 + 150+minuswhite.get_rect().height and combat[i]>0:\n combat[i]-=1\n points[i]+=1\n elif pos[1]>4 * window_width / 16 + 190 and pos[1]<4 * window_width / 16 + 190+minuswhite.get_rect().height and immunity[i]>0:\n immunity[i]-=1\n points[i]+=1\n elif pos[1]>4 * window_width / 16 + 230 and pos[1]<4 * window_width / 16 + 230+minuswhite.get_rect().height and apperance[i]>0:\n apperance[i]-=1\n points[i]+=1\n ##plus stats\n elif pos[0]>positionx+25+chafont.get_rect().width and pos[0]<positionx+25+chafont.get_rect().width+pluswhite.get_rect().width:\n if pos[1]>4 * window_width / 16 + 110 and pos[1]<4 * window_width / 16 + 110+minuswhite.get_rect().height and inteligence[i]<10 and points[i]>0:\n inteligence[i]+=1\n points[i]-=1\n elif pos[1]>4 * window_width / 16 + 150 and pos[1]<4 * window_width / 16 + 150+minuswhite.get_rect().height and charisma[i]<10 and points[i]>0:\n charisma[i]+=1\n
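 # each successful + click spends one pool point (the points[i]-=1 just below); the - branches above refund one, so stats stay clamped to the 0-10 range while the pool stays balanced\n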
 points[i]-=1\n elif pos[1]>4 * window_width / 16 + 190 and pos[1]<4 * window_width / 16 + 190+minuswhite.get_rect().height and strenght[i]<10 and points[i]>0:\n strenght[i]+=1\n points[i]-=1\n elif pos[1]>4 * window_width / 16 + 230 and pos[1]<4 * window_width / 16 + 230+minuswhite.get_rect().height and agility[i]<10 and points[i]>0:\n agility[i]+=1\n points[i]-=1\n elif pos[0]>positionx+2 * window_width / 16+25+chafont.get_rect().width and pos[0]<positionx+2 * window_width / 16+25+chafont.get_rect().width+pluswhite.get_rect().width:\n if pos[1]>4 * window_width / 16 + 110 and pos[1]<4 * window_width / 16 + 110+minuswhite.get_rect().height and stealth[i]<10 and points[i]>0:\n stealth[i]+=1\n points[i]-=1\n elif pos[1]>4 * window_width / 16 + 150 and pos[1]<4 * window_width / 16 + 150+minuswhite.get_rect().height and combat[i]<10 and points[i]>0:\n combat[i]+=1\n points[i]-=1\n elif pos[1]>4 * window_width / 16 + 190 and pos[1]<4 * window_width / 16 + 190+minuswhite.get_rect().height and immunity[i]<10 and points[i]>0:\n immunity[i]+=1\n points[i]-=1\n elif pos[1]>4 * window_width / 16 + 230 and pos[1]<4 * window_width / 16 + 230+minuswhite.get_rect().height and apperance[i]<10 and points[i]>0:\n apperance[i]+=1\n points[i]-=1\n positionx +=5*window_width/16\n\n\n\n\n if pos[1]>-10 + portrait1.get_rect().height / 2 and pos[1]<50 + portrait1.get_rect().height / 2:\n if pos[0] > window_width / 16 - 30 and pos[0] < window_width / 16:\n if p1==0:\n p1=len(images)-1\n elif p1>0:\n p1-=1\n elif pos[0] > 6*window_width / 16 - 30 and pos[0] < 6*window_width / 16:\n if p2==0:\n p2=len(images)-1\n elif p2>0:\n p2-=1\n elif pos[0] > 11*window_width / 16 - 30 and pos[0] < 11*window_width / 16:\n if p3==0:\n p3=len(images)-1\n elif p3>0:\n p3-=1\n elif pos[0] > window_width / 16+portrait2.get_rect().width and pos[0] < window_width / 16+portrait2.get_rect().width+30:\n if p1 == len(images)-1:\n p1 = 0\n elif p1 < len(images)-1:\n p1 += 1\n elif pos[0] > 6*window_width / 16+portrait2.get_rect().width and pos[0] < 6*window_width / 16+portrait2.get_rect().width+30:\n if p2 == len(images) - 1:\n p2 = 0\n elif p2 < len(images) - 1:\n p2 += 1\n elif pos[0] > 11*window_width / 16+portrait2.get_rect().width and pos[0] < 11*window_width / 16+portrait2.get_rect().width+30:\n if p3 == len(images) - 1:\n p3 = 0\n elif p3 < len(images) - 1:\n p3 += 1\n\n\n\n\n pygame.display.update()","sub_path":"ng.py","file_name":"ng.py","file_ext":"py","file_size_in_byte":37429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"360843096","text":"#Print out all of the ages of your friends/family that are less than 30 (or any number where some ages will not be printed!).\n#Find and output the age of the oldest person in your friends/family list.\n#Count how many times you flipped 'heads' using the coin flips list.\n#You realize one of the performing artists in your list is no longer a favourite.
 Remove one of them from the list.\n#Pick a city in your city population dictionary and change its population.\n\n\n\nages_of_siblings = [32, 27, 22, 21]\nfor age in ages_of_siblings:\n if (age < 30):\n print(age)\n\nprint(max(ages_of_siblings))\n\nheads_coin_flip = ['yes', 'yes', 'yes', 'no', 'yes']\ncount = 0\nfor heads in heads_coin_flip:\n if (heads == 'yes'):\n count += 1\nprint(count)\n\n# fav_artists was never defined in the original; a small example list is assumed here\nfav_artists = ['Creed', 'Queen', 'Abba']\nfav_artists.remove('Creed')\n\ncities_and_populations = {\n 'Rome': 2873000,\n 'London': 8136000,\n 'Berlin': 3575000\n }\ncities_and_populations['London'] = 15\n","sub_path":"collectionsANDiteration-part1/Collection-IterationP1ex4.py","file_name":"Collection-IterationP1ex4.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"190761096","text":"#!flask/bin/python\r\nfrom flask import Flask, jsonify, abort\r\nimport random\r\napp = Flask(__name__)\r\n\r\ntasks = [\r\n {\r\n 'id': 1,\r\n 'Adi': u'Bilal',\r\n 'Soyadi': u'TAS',\r\n 'Mail': u'bilaltas9@gmail.com',\r\n 'Sifre': u'1234',\r\n 'SifreTekrar': u'1234',\r\n 'Numara': u'532797210',\r\n 'Cinsiyet': u'Erkek',\r\n 'DTarihi': u'13/06/1996'\r\n },\r\n {\r\n 'id': 2,\r\n 'Adi': u'Ayşe',\r\n 'Soyadi': u'Çelik',\r\n 'Mail': u'berkay@gmail.com',\r\n 'Sifre': u'1234',\r\n 'SifreTekrar': u'1234',\r\n 'Numara': u'123456789',\r\n 'Cinsiyet': u'Kadin',\r\n 'DTarihi': u'22/01/2000'\r\n },\r\n {\r\n 'id': 3,\r\n 'Adi': u'Ahmet',\r\n 'Soyadi': u'Demir',\r\n 'Mail': u'Çelikel@gmail.com',\r\n 'Sifre': u'12345',\r\n 'SifreTekrar': u'12345',\r\n 'Numara': u'9876543',\r\n 'Cinsiyet': u'Erkek',\r\n 'DTarihi': u'13/06/1996'\r\n },\r\n {\r\n 'id': 4,\r\n 'Adi': u'Duygu',\r\n 'Soyadi': u'Manarga',\r\n 'Mail': u'duygu@gmail.com',\r\n 'Sifre': u'1234',\r\n 'SifreTekrar': u'1234',\r\n 'Numara': u'555666777',\r\n 'Cinsiyet': u'Kadin',\r\n 'DTarihi': u'13/06/1996'\r\n },\r\n {\r\n 'id': 5,\r\n 'Adi': u'Erhan',\r\n 'Soyadi': u'Güneş',\r\n 'Mail': u'erhan@gmail.com',\r\n 'Sifre': u'1111',\r\n 'SifreTekrar': u'1111',\r\n 'Numara': u'56712390',\r\n 'Cinsiyet': u'Erkek',\r\n 'DTarihi': u'13/06/1996'\r\n }\r\n]\r\n\r\n@app.route('/get/signUpData', methods=['GET'])\r\ndef get_task():\r\n rand_id = random.randint(1, 5) # draw the id once so every element is tested against the same value\r\n task = [task for task in tasks if task['id'] == rand_id]\r\n if len(task) == 0:\r\n abort(404)\r\n return jsonify({'task': task[0]})\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n\r\n","sub_path":"SignUpData.py","file_name":"SignUpData.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"495658286","text":"import requests\nimport re\nimport json\nimport xlsxwriter\nimport threading\n\n\nclass cbghandle(object):\n def __init__(self, serStart, serLenth, sheetName):\n self.headers = {\n \"connection\": \"close\",\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"cache-control\": \"max-age=0\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36\"\n }\n # self.url = \"https://jianghu.cbg.163.com/cgi/api/query?view_loc=equip_list&search_type=role&order_by=&page=\"\n # self.url =\n # \"https://jianghu.cbg.163.com/cgi/api/query?search_type=role&order_by=&page=\"\n
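 # the two commented URLs above appear to be earlier page-only variants of the same query API, kept for reference; the live endpoint below is parameterised by server id instead\n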
\"https://jianghu.cbg.163.com/cgi/api/query?search_type=role&serverid=\"\n self.thisPage = 1\n self.soup = \"\"\n self.result = [1]\n self.is_last_page = False # 识别是否到底\n self.OrdList = [] # 用于存储id\n self.serverid = \"\" # 服务器id\n self.ordersn = \"\" # 账号id\n self.area_name = \"\" # \"服务器\": usitem[\"area_name\"],\n self.format_equip_name = \"\" # \"门派\": usitem[\"format_equip_name\"],\n self.server_name = \"\" # \"区服\": usitem[\"server_name\"],\n self.price = \"\" # \"价格\": usitem[\"price\"]\n self.basic_attrs = \"\" # 修为\n self.level_desc = \"\" # 等级\n self.count = 0 # 计数器\n self.workbook = \"\"\n self.worksheet = \"\"\n self.row = 0\n self.s = \"\"\n self.xiezi = \"无\"\n # 判断各类物品\n self.res_feng = \"无\"\n self.res_chui = \"无\"\n self.res_man = \"无\"\n self.res_juan = \"无\"\n self.res_tao = \"无\"\n self.res_deng = \"无\"\n self.res_qian = \"无\"\n self.res_han = \"无\"\n self.res_rui = \"无\"\n self.res_wan = \"无\"\n self.res_hua = \"无\"\n self.res_tian = \"无\"\n self.res_chang = \"无\"\n self.res_yhuo = \"无\"\n self.res_bjing = \"无\"\n self.res_hxie = \"无\"\n self.res_chuchen = \"否\"\n self.res_yyun = \"否\"\n self.res_lfeng = \"否\"\n self.res_jhong = \"否\"\n self.res_zyou = \"否\"\n self.res_wuhua = \"否\"\n self.urlstr = \"\"\n self.serLenth = serLenth\n self.serStart = serStart\n self.sheetName = sheetName\n self.res_caicao = \"\"\n self.res_wakuang = \"\"\n self.res_famu = \"\"\n self.res_shenghuo = \"\"\n self.res_dazao = \"\"\n self.res_miyao = \"\"\n self.platform = \"\" # 平台\n self.fairShow = \"\" # 公示\n self.res_chuanw = \"无\"\n\n def sendUrl(self):\n # 发送请求\n print(\"正在采集第\" +\n str(self.thisPage) +\n \"页藏宝阁数据...服务器id为:\" +\n str(self.serStart))\n response = requests.get(self.url + str(self.serStart) + \"&order_by=&page=\" +\n str(self.thisPage), headers=self.headers)\n response.encoding = 'unicode-escape'\n res = json.loads(response.text)\n print(res)\n if len(res[\"result\"]) == 0:\n self.serStart += 1\n self.thisPage = 0\n if res[\"paging\"][\"is_last_page\"] == True:\n self.is_last_page = True\n self.result = res[\"result\"]\n self.getOrdersn()\n\n def getOrdersn(self):\n # 获取账号id 写入OrdList\n lengths = len(self.result)\n res = self.result\n for index in range(0, lengths):\n print(\"正在写入藏宝阁第\" + str(self.thisPage) +\n \"页第\" + str(index) + \"条数据...\")\n self.ordersn = res[index][\"game_ordersn\"]\n self.serverid = res[index][\"serverid\"]\n self.area_name = res[index][\"area_name\"]\n self.format_equip_name = res[index][\"format_equip_name\"]\n self.server_name = res[index][\"server_name\"]\n self.price = res[index][\"price\"]\n self.basic_attrs = res[index][\"other_info\"][\"basic_attrs\"]\n self.level_desc = res[index][\"level_desc\"]\n\n self.fairShow = \"\"\n self.platform = \"\"\n if res[index][\"pass_fair_show\"] == 0:\n self.fairShow = \"公示中\"\n else:\n self.fairShow = \"在售\"\n if res[index][\"platform_type\"] == 1:\n self.platform = \"iPhone\"\n else:\n self.platform = \"Android\"\n # 获取账号详细参数\n self.getUserInfo()\n\n def shenghuojin(self):\n # 生活技能\n pat_caicao = re.compile(\n r'\"1\": {\"level\": (.*?), \"name\": \"采草\", \"icon\": \"life_caiyao_icon_black2', re.S)\n self.res_caicao = re.findall(pat_caicao, self.s)\n if len(self.res_caicao) == 0:\n self.res_caicao = [0]\n\n pat_wakuang = re.compile(\n r'\"3\": {\"level\": (.*?), \"name\": \"挖矿\", \"icon\": \"life_wakuang_icon_black2', re.S)\n self.res_wakuang = re.findall(pat_wakuang, self.s)\n\n if len(self.res_wakuang) == 0:\n self.res_wakuang = [0]\n\n pat_famu = re.compile(\n r'\"2\": {\"level\": (.*?), \"name\": 
\"伐木\", \"icon\": \"life_fawu_icon_black2', re.S)\n self.res_famu = re.findall(pat_famu, self.s)\n\n if len(self.res_famu) == 0:\n self.res_famu = [0]\n\n pat_shenghuo = re.compile(\n r'anqi_icon_black2\"}, \"3\": {\"level\": (.*?), \"name\": \"生活装备\", \"icon\": \"life_gongju_icon_black2', re.S)\n self.res_shenghuo = re.findall(pat_shenghuo, self.s)\n\n if len(self.res_shenghuo) == 0:\n self.res_shenghuo = [0]\n\n pat_miyao = re.compile(\n r'ngju_icon_black2\"}, \"2\": {\"level\": (.*?), \"name\": \"秘药炼制\", \"icon\": \"life_duyao_icon_black2\"', re.S)\n self.res_miyao = re.findall(pat_miyao, self.s)\n\n if len(self.res_miyao) == 0:\n self.res_miyao = [0]\n\n pat_dazao = re.compile(\n r'yao_icon_black2\"}, \"4\": {\"level\": (.*?), \"name\": \"打造台制作\", \"icon\": \"life_dazao_icon_black2\"', re.S)\n self.res_dazao = re.findall(pat_dazao, self.s)\n\n if len(self.res_dazao) == 0:\n self.res_dazao = [0]\n\n def checkW(self):\n self.chuanw()\n self.xieZi()\n # 判断各类信息\n pat_zyou = re.compile(r'\"name\": \"紫游\"', re.S)\n res_zyou = re.findall(pat_zyou, self.s)\n if len(res_zyou) == 1:\n self.res_zyou = \"是\"\n else:\n self.res_zyou = \"否\"\n\n pat_chuchen = re.compile(r'\"name\": \"出尘\"', re.S)\n res_chuchen = re.findall(pat_chuchen, self.s)\n if len(res_chuchen) == 1:\n self.res_chuchen = \"是\"\n else:\n self.res_chuchen = \"否\"\n\n pat_yyun = re.compile(r'\"name\": \"月韵\"', re.S)\n res_yyun = re.findall(pat_yyun, self.s)\n if len(res_yyun) == 1:\n self.res_yyun = \"是\"\n else:\n self.res_yyun = \"否\"\n\n pat_lfeng = re.compile(r'\"name\": \"流风\"', re.S)\n res_lfeng = re.findall(pat_lfeng, self.s)\n if len(res_lfeng) == 1:\n self.res_lfeng = \"是\"\n else:\n self.res_lfeng = \"否\"\n\n pat_jhong = re.compile(r'\"name\": \"惊鸿\"', re.S)\n res_jhong = re.findall(pat_jhong, self.s)\n if len(res_jhong) == 1:\n self.res_jhong = \"是\"\n else:\n self.res_jhong = \"否\"\n\n pat_wuhua = re.compile(r'五花马', re.S)\n res_wuhua = re.findall(pat_wuhua, self.s)\n if len(res_wuhua) == 1:\n self.res_wuhua = \"有\"\n else:\n self.res_wuhua = \"无\"\n\n pat_feng = re.compile(r'风盈香', re.S)\n res_feng = re.findall(pat_feng, self.s)\n if len(res_feng) == 1:\n self.res_feng = \"有\"\n else:\n self.res_feng = \"无\"\n\n pat_chui = re.compile(r'垂玉', re.S)\n res_chui = re.findall(pat_chui, self.s)\n if len(res_chui) == 1:\n self.res_chui = \"有\"\n else:\n self.res_chui = \"无\"\n\n pat_man = re.compile(r'蔓萝纤', re.S)\n res_man = re.findall(pat_man, self.s)\n if len(res_man) == 1:\n self.res_man = \"有\"\n else:\n self.res_man = \"无\"\n\n pat_juan = re.compile(r'卷游尘', re.S)\n res_juan = re.findall(pat_juan, self.s)\n if len(res_juan) == 1:\n self.res_juan = \"有\"\n else:\n self.res_juan = \"无\"\n\n pat_tao = re.compile(r'桃花驹', re.S)\n res_tao = re.findall(pat_tao, self.s)\n if len(res_tao) == 1:\n self.res_tao = \"有\"\n else:\n self.res_tao = \"无\"\n\n pat_deng = re.compile(r'灯如昼', re.S)\n res_deng = re.findall(pat_deng, self.s)\n if len(res_deng) == 1:\n self.res_deng = \"有\"\n else:\n self.res_deng = \"无\"\n\n pat_qian = re.compile(r'流光·乾坤一掷', re.S)\n res_qian = re.findall(pat_qian, self.s)\n if len(res_qian) == 1:\n self.res_qian = \"有\"\n else:\n self.res_qian = \"无\"\n\n pat_han = re.compile(r'流光·寒彻', re.S)\n res_han = re.findall(pat_han, self.s)\n if len(res_han) == 1:\n self.res_han = \"有\"\n else:\n self.res_han = \"无\"\n\n pat_rui = re.compile(r'流光·瑞云', re.S)\n res_rui = re.findall(pat_rui, self.s)\n if len(res_rui) == 1:\n self.res_rui = \"有\"\n else:\n self.res_rui = \"无\"\n\n pat_wan = re.compile(r'流光·万钧', re.S)\n res_wan 
 pat_wan = re.compile(r'流光·万钧', re.S)\n res_wan = re.findall(pat_wan, self.s)\n if len(res_wan) == 1:\n self.res_wan = \"有\"\n else:\n self.res_wan = \"无\"\n\n pat_hua = re.compile(r'流光·花楹', re.S)\n res_hua = re.findall(pat_hua, self.s)\n if len(res_hua) == 1:\n self.res_hua = \"有\"\n else:\n self.res_hua = \"无\"\n\n pat_tian = re.compile(r'流光·天外', re.S)\n res_tian = re.findall(pat_tian, self.s)\n if len(res_tian) == 1:\n self.res_tian = \"有\"\n else:\n self.res_tian = \"无\"\n\n pat_chang = re.compile(r'流光·长生', re.S)\n res_chang = re.findall(pat_chang, self.s)\n if len(res_chang) == 1:\n self.res_chang = \"有\"\n else:\n self.res_chang = \"无\"\n\n pat_yhuo = re.compile(r'悠游·萤火', re.S)\n res_yhuo = re.findall(pat_yhuo, self.s)\n if len(res_yhuo) == 1:\n self.res_yhuo = \"有\"\n else:\n self.res_yhuo = \"无\"\n\n pat_bjing = re.compile(r'悠游·冰晶', re.S)\n res_bjing = re.findall(pat_bjing, self.s)\n if len(res_bjing) == 1:\n self.res_bjing = \"有\"\n else:\n self.res_bjing = \"无\"\n\n pat_hxie = re.compile(r'悠游·花谢', re.S)\n res_hxie = re.findall(pat_hxie, self.s)\n if len(res_hxie) == 1:\n self.res_hxie = \"有\"\n else:\n self.res_hxie = \"无\"\n\n def xieZi(self):\n # check whether any gold-quality shoe is present\n xieArr = [r'尘微履', r'尚复履', r'海霁履', r'须弥履', r'山倾履', r'春岚履', r'怀神履']\n remake = 0\n self.xiezi = \"无\"\n for item in xieArr:\n if remake == 0:\n xiezi = re.compile(item, re.S)\n res_xiezi = re.findall(xiezi, self.s)\n if len(res_xiezi) == 1:\n self.xiezi = \"有\"\n remake = 1\n\n\n def chuanw(self):\n # check for sect heirloom weapons\n wudang = re.compile(r'凌宸', re.S)\n res_wd = re.findall(wudang, self.s)\n if len(res_wd) == 1:\n self.res_chuanw = \"有\"\n else:\n self.res_chuanw = \"无\"\n\n def getUserInfo(self):\n self.count += 1\n userUrl = \"https://jianghu.cbg.163.com/cgi/api/get_equip_detail\"\n data = {\"serverid\": str(self.serverid), \"ordersn\": str(self.ordersn)}\n response = requests.post(userUrl, data=data, headers=self.headers)\n response.encoding = 'unicode-escape'\n self.s = response.text.encode(\n 'utf-8').decode('unicode_escape') # decode the escaped payload into readable text\n self.checkW()\n self.row += 1\n self.worksheet.write(self.row, 0, self.area_name) # one spreadsheet row per listing: worksheet.write(row, col, value)\n self.worksheet.write(\n self.row, 1, self.format_equip_name)\n self.worksheet.write(self.row, 2, self.server_name)\n self.worksheet.write(self.row, 3, self.level_desc)\n self.worksheet.write(\n self.row, 4, self.basic_attrs[0][1])\n self.worksheet.write(self.row, 5, self.price / 100)\n # secret manual / special skill\n self.worksheet.write(self.row, 6, self.basic_attrs[1][1]) # secret manual\n self.worksheet.write(self.row, 7, self.basic_attrs[2][1]) # special skill\n # status / platform\n self.worksheet.write(self.row, 8, self.fairShow)\n self.worksheet.write(self.row, 9, self.platform)\n\n self.worksheet.write(self.row, 27, self.res_feng)\n self.worksheet.write(self.row, 28, self.res_chui)\n self.worksheet.write(self.row, 29, self.res_man)\n self.worksheet.write(self.row, 30, self.res_juan)\n self.worksheet.write(self.row, 31, self.res_wuhua)\n\n self.worksheet.write(self.row, 32, self.res_tao)\n\n self.worksheet.write(self.row, 33, self.res_deng)\n\n self.worksheet.write(self.row, 10, self.res_chuanw)\n\n self.worksheet.write(self.row, 11, self.xiezi)\n\n self.worksheet.write(self.row, 12, self.res_qian)\n self.worksheet.write(self.row, 13, self.res_han)\n self.worksheet.write(self.row, 14, self.res_rui)\n self.worksheet.write(self.row, 15, self.res_wan)\n self.worksheet.write(self.row, 16, self.res_hua)\n self.worksheet.write(self.row, 17, self.res_tian)\n self.worksheet.write(self.row, 18, self.res_chang)\n
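 # columns 10-33 hold the item flags and must stay aligned with the header row written in run() below\n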
 self.worksheet.write(self.row, 19, self.res_yhuo)\n self.worksheet.write(self.row, 20, self.res_bjing)\n self.worksheet.write(self.row, 21, self.res_hxie)\n self.worksheet.write(self.row, 22, self.res_yyun)\n self.worksheet.write(self.row, 23, self.res_chuchen)\n self.worksheet.write(self.row, 24, self.res_lfeng)\n self.worksheet.write(self.row, 25, self.res_jhong)\n self.worksheet.write(self.row, 26, self.res_zyou)\n\n # life skills (writes currently disabled)\n # self.worksheet.write(self.row, 29, self.res_caicao[0]) # skill level\n # self.worksheet.write(self.row, 30, self.res_wakuang[0]) # skill level\n # self.worksheet.write(self.row, 31, self.res_famu[0]) # skill level\n # self.worksheet.write(self.row, 32, self.res_shenghuo[0]) # skill level\n # self.worksheet.write(self.row, 33, self.res_miyao[0]) # skill level\n # self.worksheet.write(self.row, 34, self.res_dazao[0]) # skill level\n item = {}\n resch = []\n\n def run(self):\n self.creatSheet()\n self.worksheet.write(0, 0, \"服务器\")\n self.worksheet.write(0, 1, \"门派\")\n self.worksheet.write(0, 2, \"区服\")\n self.worksheet.write(0, 3, \"等级\")\n self.worksheet.write(0, 4, \"修为\")\n self.worksheet.write(0, 5, \"价格\")\n\n self.worksheet.write(0, 6, \"金秘笈\")\n self.worksheet.write(0, 7, \"金紫色特技\")\n self.worksheet.write(0, 8, \"状态\")\n self.worksheet.write(0, 9, \"平台\")\n self.worksheet.write(0, 10, \"传武\")\n\n self.worksheet.write(0, 11, \"金鞋子\")\n self.worksheet.write(0, 12, \"流光·乾坤一掷\")\n self.worksheet.write(0, 13, \"流光·寒彻\")\n self.worksheet.write(0, 14, \"流光·瑞云\")\n self.worksheet.write(0, 15, \"流光·万钧\")\n self.worksheet.write(0, 16, \"流光·花楹\")\n self.worksheet.write(0, 17, \"流光·天外\")\n self.worksheet.write(0, 18, \"流光·长生\")\n self.worksheet.write(0, 19, \"悠游·萤火\")\n self.worksheet.write(0, 20, \"悠游·冰晶\")\n self.worksheet.write(0, 21, \"悠游·花谢\")\n self.worksheet.write(0, 22, \"月韵\")\n self.worksheet.write(0, 23, \"出尘\")\n self.worksheet.write(0, 24, \"流风\")\n self.worksheet.write(0, 25, \"惊鸿\")\n self.worksheet.write(0, 26, \"紫游\")\n self.worksheet.write(0, 27, \"风盈香\")\n self.worksheet.write(0, 28, \"垂玉\")\n self.worksheet.write(0, 29, \"蔓萝纤\")\n self.worksheet.write(0, 30, \"卷游尘\")\n self.worksheet.write(0, 31, \"五花马\")\n self.worksheet.write(0, 32, \"桃花驹\")\n\n self.worksheet.write(0, 33, \"灯如昼\")\n # self.worksheet.write(0, 29, \"采集\")\n # self.worksheet.write(0, 30, \"挖矿\")\n # self.worksheet.write(0, 31, \"伐木\")\n # self.worksheet.write(0, 32, \"生活装备\")\n # self.worksheet.write(0, 33, \"炼药\")\n # self.worksheet.write(0, 34, \"打造台\")\n while self.serStart < self.serLenth:\n self.sendUrl()\n self.thisPage += 1\n self.closeSheet()\n\n def creatSheet(self):\n self.workbook = xlsxwriter.Workbook(\n self.sheetName + '.xlsx') # create the Excel workbook\n self.worksheet = self.workbook.add_worksheet(\n u'sheet1') # add a worksheet; it defaults to Sheet1 when no name is given\n\n def closeSheet(self):\n self.workbook.close()\n\n\nclass myThread(threading.Thread):\n def __init__(self, serStart, serLenth, sheetName):\n threading.Thread.__init__(self)\n self.serStart = serStart\n self.serLenth = serLenth\n self.sheetName = sheetName\n\n def run(self):\n # each worker scans its own serverid range [serStart, serLenth) and writes a separate workbook, so the threads share no state\n cbghandle(self.serStart, self.serLenth, self.sheetName).run()\n\n\nif __name__ == \"__main__\":\n # cbghandle(1, 163).run()\n thread1 = myThread(1, 5, \"cbg1\")\n thread2 = myThread(5, 10, \"cbg2\")\n thread3 = myThread(10, 30, \"cbg3\")\n thread4 = myThread(30, 70, \"cbg4\")\n thread5 = myThread(70, 110, \"cbg5\")\n thread6 = myThread(110, 160, \"cbg6\")\n\n thread1.start()\n thread2.start()\n 
thread3.start()\n thread4.start()\n thread5.start()\n thread6.start()\n\n thread1.join()\n thread2.join()\n thread3.join()\n thread4.join()\n thread5.join()\n thread6.join()\n\n","sub_path":"cbg_index.py","file_name":"cbg_index.py","file_ext":"py","file_size_in_byte":18891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"166192565","text":"def sol(InputArr):\n count = 0\n endTime = 0\n\n for i in range(len(InputArr)):\n if endTime <= InputArr[i][0]:\n endTime = InputArr[i][1]\n count += 1\n\n\n return count\n\n\nN = int(input())\nInputArr = []\n\nfor i in range(N):\n a, b = list(map(int, input().split()))\n InputArr.append([a, b])\n\n\nInputArr.sort(key=lambda x: (x[1], x[0]))\n\nprint(sol(InputArr))","sub_path":"BaekJoon/Greedy/Q1931.py","file_name":"Q1931.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"165505447","text":"# Utility to read Futoshiki puzzles from text files, and display Futoshiki puzzles to a screen.\n\nimport Snapshot\nimport pygame\n\n\n\n# Define some colors\nblack = ( 0, 0, 0)\nwhite = ( 255, 255, 255)\nred = (255, 0, 0)\n\n# This sets the width and height of each grid location\nwidth=60\nheight=60\n \n# This sets the margin between each cell\nmargin=30\n\ndef loadPuzzle(puzzlefile):\n file = open(puzzlefile)\n content = file.readlines()\n newsnapshot = Snapshot.snapshot()\n rownumber = 0\n \n for rownumber in range(5): \n newrow = [int(x) for x in content[rownumber].split()] \n for columnnumber in range(5):\n newsnapshot.setCellVal(rownumber, columnnumber, newrow[columnnumber]) \n constraints = content[5:]\n for c in constraints:\n newconstraint = [int(x) for x in c.split()] \n newsnapshot.setConstraint(newconstraint)\n file.close()\n return newsnapshot\n \ndef displayPuzzle(snapshot, screen):\n # Set the screen background\n screen.fill(black)\n \n # Draw the grid squares\n color = white\n myfont = pygame.font.SysFont(\"Comic Sans MS\", 30)\n for row in range(5):\n for column in range(5): \n pygame.draw.rect(screen,color,[(margin+width)*column+margin,(margin+height)*row+margin,width,height])\n printVal = snapshot.getCellVal(row, column)\n if printVal == 0:\n label = myfont.render(\".\", 1, black)\n else:\n label = myfont.render(str(printVal), 1, black)\n screen.blit(label, ((margin+width)*column+margin+25,(margin+height)*row+margin+10))\n myfont = pygame.font.SysFont(\"Comic Sans MS\", 50)\n for c in snapshot.getConstraints():\n r1 = c[0][0]\n c1 = c[0][1]\n r2 = c[1][0]\n c2 = c[1][1]\n if (c1 < c2):\n label = myfont.render(\"<\", 1, red)\n screen.blit(label, ((margin+width)*(c1+1)+10,(margin+height)*r2+20))\n elif (c2 < c1):\n label = myfont.render(\">\", 1, red)\n screen.blit(label, ((margin+width)*(c2+1)+10,(margin+height)*r2+20))\n elif (r1 < r2):\n label = myfont.render(\"^\", 1, red)\n screen.blit(label, ((margin+width)*c1+margin+15,(margin+height)*(r1+1)-5))\n else:\n label = myfont.render(\"v\", 1, red)\n screen.blit(label, ((margin+width)*c1+margin+15,(margin+height)*(r2+1)-25))\n \n \n \n","sub_path":"Futoshiki/Futoshiki/src/Futoshiki_IO.py","file_name":"Futoshiki_IO.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"295037444","text":"print('\\n############################################ PROGRAM STARTED ############################################')\nfrom scipy.integrate import odeint\nimport os, 
sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\nfrom tam_sim import *\nimport time\n\n################################## CHOOSE DISCRETIZATION ###############################################################\nmax_time = 4 * 60 * 60 # hours to seconds\nresolution = 5000 # Number of outputs, TODO: more for nn, k estimation?\nn_features = 4 # X_C, X_A, Y, T_G, T_P, AM\n\n######################################### SETUP ########################################################################\ndiscrete_time = np.linspace(0, max_time, resolution)\ntabs = 40\n\nmoisture_gas_initial = moisture_gas_initial_bed\nmoisture_particle_initial_cryst = moisture_cryst_particle_initial\nprint('Old moisture cryst initial'.ljust(tabs), '{:.5f}'.format(moisture_particle_initial_cryst))\n# moisture_particle_initial_cryst = compute_GAB_equilibrium_moisture_cryst(relative_humidity_bed_initial)\n# print('New moisture cryst initial'.ljust(tabs), '{:.5f}'.format(moisture_particle_initial_cryst))\n# moisture_particle_initial_cryst = 0\n\nmoisture_particle_initial_am = moisture_am_particle_initial\n# moisture_particle_initial_am = 0\n\n# moisture_cryst_particle_saturated = compute_GAB_equilibrium_moisture_cryst(relative_humidity_gas_inlet)\n\ntemp_gas = temp_initial\ntemp_particle_initial = temp_initial\namorphous_material_initial = amorphous_material_initial\namorphous_material_initial = 1\nrelative_humidity = relative_humidity_gas_inlet\n\n########################################## COMPUTE #####################################################################\ninitial_system = moisture_particle_initial_cryst, moisture_particle_initial_am, temp_particle_initial, amorphous_material_initial\n # change_m_particle_cryst, change_m_particle_am, change_temp_particle, change_amorphous_material\ncomputed_system = np.zeros([resolution, n_features])\n\n\nprint('Initial moisture cryst powder:'.ljust(tabs), '{:.5f}'.format(moisture_cryst_particle_initial))\nprint('Initial moisture am powder:'.ljust(tabs), '{:.4f}'.format(moisture_am_particle_initial))\nprint(f'Saturated moisture cryst powder:'.ljust(tabs), '{:.5f}'.format(moisture_cryst_particle_saturated))\nprint(f'Saturated moisture am powder:'.ljust(tabs), '{:.4f}'.format(moisture_am_particle_saturated))\n\nprint('\\n *** STARTING COMPUTATION *** ')\nrun_time_start = time.time()\n\ncomputed_system = odeint(conditioning_column, initial_system, discrete_time, args=(temp_gas, relative_humidity))\n\nelapsed = time.time() - run_time_start\nprint(f' *** COMPUTATION COMPLETE IN {elapsed:.2f} SECONDS *** \\n')\n\n########################################## SPLIT #######################################################################\nmoisture_particle_cryst_vector = computed_system[:, 0]\nmoisture_particle_am_vector = computed_system[:, 1]\ntemp_particle_vector = computed_system[:, 2]\namorphous_material_vector = computed_system[:, 3]\n\ntotal_moisture_vector = moisture_particle_cryst_vector * (1 - amorphous_material_vector) + moisture_particle_am_vector * amorphous_material_vector\n\n# amorphous_material_vector = normalize_data(amorphous_material_vector)\ncryst_start_index = np.where(amorphous_material_vector != 1)[0][0]\n# index = cryst_start_index\nprint(cryst_start_index)\n\nglass_temp_vector = compute_glass_temp_mix( 1-moisture_particle_am_vector, glass_temp_lactose, glass_temp_water_1 )\ntemp_diff = temp_particle_vector - glass_temp_vector\n\ndiff_heat_flow_powder = 
(temp_particle_vector - temp_initial) * particle_heat_capacity\n\n############################################ PLOT ######################################################################\n# Convert to easier-to-read units\nseconds = max_time\nhours = seconds / 3600\nminutes = seconds / 60\ndiscrete_time /= 60\n\nfig, ax = plt.subplots(2, 3, figsize=(20, 10))\nfig.suptitle(f'Moisture, temperature & amorphous material over time. '\n f'Total time: {int(minutes)} minutes. RH: {relative_humidity}, T: {temp_initial-kelvin}', fontsize=16)\nax[0, 0].set_title('Moisture Particle Am')\nax[0, 1].set_title('Moisture Particle Cryst')\nax[0, 2].set_title('Moisture Particle Total')\nax[1, 0].set_title('Amount amorphous')\nax[1, 1].set_title('Temp Particle')\n\n# Set styles\nmoisture_color = 'navy'\ntemp_color = 'orangered'\ninitial_line = 'dashed'\ninitial_color = 'gray'\nsaturated_color = 'lightcoral'\namorph_color = 'green'\n\neps = 0.01\n\nax[0, 0].set_ylabel(f'M_am', rotation=0.5, size='large')\nax[0, 0].plot(discrete_time, moisture_particle_am_vector, c=moisture_color)\n# patch = mpatches.Patch(color=moisture_color, label=f'Y {step}')\n# ax[0, 0].set_ylim(moisture_particle_initial_am-eps, moisture_am_particle_saturated+eps)\nax[0, 0].hlines(moisture_particle_initial_am, 0, discrete_time[-1], colors=initial_color, linestyles=initial_line)\nax[0, 0].text(minutes * 4 / 5, moisture_gas_initial_bed, ('{:.4f}'.format(moisture_particle_initial_am)), ha='left', va='center')\nax[0, 0].hlines(moisture_am_particle_saturated, 0, discrete_time[-1], colors=saturated_color, linestyles=initial_line)\nax[0, 0].vlines(discrete_time[cryst_start_index], moisture_particle_initial_am-eps, moisture_am_particle_saturated+eps, colors=saturated_color, linestyles=initial_line)\n# ax[0, 0].text(1, moisture_gas_initial_in, ('{:.4f}'.format(moisture_gas_initial_in)), ha='left', va='center')\n\n# ax[0, 1].set_ylabel(f'M_cr', rotation=0, size='large')\n# ax[0, 1].plot(discrete_time, moisture_particle_cryst_vector, c=moisture_color)\n# # patch = mpatches.Patch(color=moisture_color, label=f'Y {step}')\n# ax[0, 1].set_ylim(moisture_particle_initial_cryst-eps*0.001, moisture_cryst_particle_saturated+eps*0.001)\n# ax[0, 1].hlines(moisture_particle_initial_cryst, 0, discrete_time[-1], colors=initial_color, linestyles=initial_line)\n# ax[0, 1].text(minutes * 4 / 5, moisture_gas_initial_bed, ('{:.4f}'.format(moisture_particle_initial_cryst)), ha='left', va='center')\n# ax[0, 1].hlines(moisture_cryst_particle_saturated, 0, discrete_time[-1], colors=saturated_color, linestyles=initial_line)\n# ax[0, 1].vlines(discrete_time[cryst_start_index], 0, 1, colors=saturated_color, linestyles=initial_line)\n\nax[0, 1].set_ylabel(f'Heat flow', rotation=0, size='large')\nax[0, 1].plot(discrete_time, diff_heat_flow_powder, c=temp_color)\n# ax[0, 1].set_ylim(moisture_particle_initial_cryst-eps*0.001, moisture_cryst_particle_saturated+eps*0.001)\n# ax[0, 1].hlines(moisture_particle_initial_cryst, 0, discrete_time[-1], colors=initial_color, linestyles=initial_line)\n\nax[0, 2].set_ylabel(f'M_tot', rotation=0, size='large')\nax[0, 2].plot(discrete_time, total_moisture_vector, c=moisture_color)\nax[0, 2].set_ylim(moisture_particle_initial_cryst-eps*0.001, moisture_am_particle_saturated+eps*0.001)\nax[0, 2].hlines(moisture_particle_initial_cryst, 0, discrete_time[-1], colors=initial_color, linestyles=initial_line)\nax[0, 2].hlines(moisture_cryst_particle_saturated, 0, discrete_time[-1], colors=saturated_color, linestyles=initial_line)\nax[0, 
2].hlines(moisture_am_particle_saturated, 0, discrete_time[-1], colors=saturated_color, linestyles=initial_line)\nax[0, 2].vlines(discrete_time[cryst_start_index], 0, 1, colors=saturated_color, linestyles=initial_line)\n\nax[1, 0].set_ylabel(f'Am g/g', rotation=0, size='large')\nax[1, 0].plot(discrete_time, amorphous_material_vector, c=amorph_color)\nax[1, 0].set_ylim(0-eps, 1+eps)\nax[1, 0].hlines(amorphous_material_initial, 0, discrete_time[-1], colors=initial_color, linestyles=initial_line)\nax[1, 0].vlines(discrete_time[cryst_start_index], -1, 1, colors=saturated_color, linestyles=initial_line)\n\nax[1, 1].set_ylabel(f'T', rotation=0, size='large')\nax[1, 1].plot(discrete_time, temp_particle_vector-kelvin, c=temp_color)\n# ax[1, 1].set_ylim(temp_initial-eps - kelvin, np.max(temp_particle_vector) + eps - kelvin)\nax[1, 1].hlines(temp_initial-kelvin, 0, discrete_time[-1], colors=initial_color, linestyles=initial_line)\nax[1, 1].vlines(discrete_time[cryst_start_index], 0, 30, colors=saturated_color, linestyles=initial_line)\n\nax[1, 2].set_ylabel(f'T - Tg', rotation=0, size='large')\nax[1, 2].plot(discrete_time, temp_diff, c=temp_color)\n# ax[1, 2].set_ylim(temp_initial-eps - kelvin, np.max(temp_particle_vector) + eps - kelvin)\nax[1, 2].hlines(temp_initial-kelvin, 0, discrete_time[-1], colors=initial_color, linestyles=initial_line)\nax[1, 2].vlines(discrete_time[cryst_start_index], 0, 30, colors=saturated_color, linestyles=initial_line)\n\n\nplt.show()\n\nprint('\\n############################################ PROGRAM ENDED ############################################')","sub_path":"ode_version_2/tam_main.py","file_name":"tam_main.py","file_ext":"py","file_size_in_byte":9005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"69981734","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n#If this code works, it was written by Seunghyun Lee(www.bluebaynetworks.co.kr).\n#If not, I don't know who wrote it\n\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\nimport tensorflow as tf\nimport input_korean\nimport numpy as np\nimport math\nimport sys, time\n\n\naccuracy = None\ntrain_step = None\nsaver = None\ny_conv = None\nx = None\ny_ = None\nsess = None\nkeep_prob = None\n\n# 텐서플로우 helper 함수\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef make_tf_model(FONT_COUNT, adam_learning_rate, fully_connected_layer_neuron):\n global accuracy, train_step, saver, y_conv, x, y_, sess, keep_prob\n\n x = tf.placeholder(tf.float32, shape=[None, 784])\n y_ = tf.placeholder(tf.float32, shape=[None, FONT_COUNT])\n\n W_conv1 = weight_variable([3, 3, 1, 32])\n b_conv1 = bias_variable([32])\n x_image = tf.reshape(x, [-1,28,28,1])\n\n # 2 step network\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n h_pool1 = max_pool_2x2(h_conv1)\n\n W_conv2 = weight_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = max_pool_2x2(h_conv2)\n\n W_fc1 = weight_variable([7 * 7 * 64, fully_connected_layer_neuron])\n b_fc1 = 
bias_variable([fully_connected_layer_neuron])\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n #h_fc1 = tf.matmul(h_pool2_flat, W_fc1) + b_fc1\n\n keep_prob = tf.placeholder(tf.float32)\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) # use the placeholder (fed as 0.8 for training, 1.0 for evaluation) rather than a hard-coded rate\n\n W_fc2 = weight_variable([fully_connected_layer_neuron, FONT_COUNT])\n b_fc2 = bias_variable([FONT_COUNT])\n\n y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n #y_conv=tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)\n\n \"\"\"\n Occasionally the accuracy drops to 0 during training:\n when y_conv reaches 0, log(0) is -infinity and multiplying by 0 produces NaN.\n Adding the tiny constant 1e-8 prevents this error.\n See https://github.com/tensorflow/tensorflow/issues/1997\n \"\"\"\n cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv + 1e-8))\n train_step = tf.train.AdamOptimizer(adam_learning_rate).minimize(cross_entropy)\n correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n saver = tf.train.Saver()\n sess = tf.InteractiveSession()\n sess.run(tf.global_variables_initializer())\n\n\ndef do_train(dataset, train_cnt, batch_cnt, checkpoint_dir):\n start_time = time.time()\n count = int(train_cnt / 10)\n\n for loop in range(0, 10):\n for i in range(count):\n image, label = dataset.basic.next_batch(batch_cnt)\n if i%100 == 0:\n train_accuracy = accuracy.eval(feed_dict={x:image, y_: label, keep_prob: 1.0})\n print(\"index:%d basic step %d, training accuracy %.4f\"%(loop, i, train_accuracy))\n train_step.run(feed_dict={x: image, y_: label, keep_prob: 0.8})\n\n for i in range(count):\n image, label = dataset.shift1.next_batch(batch_cnt)\n if i%100 == 0:\n train_accuracy = accuracy.eval(feed_dict={x:image, y_: label, keep_prob: 1.0})\n print(\"index:%d shift1 step %d, training accuracy %.4f\"%(loop, i, train_accuracy))\n train_step.run(feed_dict={x: image, y_: label, keep_prob: 0.8})\n\n for i in range(count):\n image, label = dataset.shift2.next_batch(batch_cnt)\n if i%100 == 0:\n train_accuracy = accuracy.eval(feed_dict={x:image, y_: label, keep_prob: 1.0})\n print(\"index:%d shift2 step %d, training accuracy %.4f\"%(loop, i, train_accuracy))\n train_step.run(feed_dict={x: image, y_: label, keep_prob: 0.8})\n\n for i in range(count):\n image, label = dataset.shift3.next_batch(batch_cnt)\n if i%100 == 0:\n train_accuracy = accuracy.eval(feed_dict={x:image, y_: label, keep_prob: 1.0})\n print(\"index:%d shift3 step %d, training accuracy %.4f\"%(loop, i, train_accuracy))\n train_step.run(feed_dict={x: image, y_: label, keep_prob: 0.8})\n\n for i in range(count):\n image, label = dataset.shift4.next_batch(batch_cnt)\n if i%100 == 0:\n train_accuracy = accuracy.eval(feed_dict={x:image, y_: label, keep_prob: 1.0})\n print(\"index:%d shift4 step %d, training accuracy %.4f\"%(loop, i, train_accuracy))\n train_step.run(feed_dict={x: image, y_: label, keep_prob: 0.8})\n\n for i in range(count):\n image, label = dataset.rotate1.next_batch(batch_cnt)\n if i%100 == 0:\n train_accuracy = accuracy.eval(feed_dict={x:image, y_: label, keep_prob: 1.0})\n print(\"index:%d rotate1 step %d, training accuracy %.4f\"%(loop, i, train_accuracy))\n train_step.run(feed_dict={x: image, y_: label, keep_prob: 0.8})\n\n for i in range(count):\n image, label = dataset.rotate2.next_batch(batch_cnt)\n if i%100 == 0:\n train_accuracy = accuracy.eval(feed_dict={x:image, y_: label, keep_prob: 1.0})\n print(\"index:%d rotate2 step %d, training accuracy %.4f\"%(loop, i, train_accuracy))\n
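 # keep_prob is 0.8 on every training step and 1.0 whenever accuracy is evaluated - the usual dropout convention\n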
 train_step.run(feed_dict={x: image, y_: label, keep_prob: 0.8})\n\n for i in range(count):\n image, label = dataset.rotate3.next_batch(batch_cnt)\n if i%100 == 0:\n train_accuracy = accuracy.eval(feed_dict={x:image, y_: label, keep_prob: 1.0})\n print(\"index:%d rotate3 step %d, training accuracy %.4f\"%(loop, i, train_accuracy))\n train_step.run(feed_dict={x: image, y_: label, keep_prob: 0.8})\n\n for i in range(count):\n image, label = dataset.rotate4.next_batch(batch_cnt)\n if i%100 == 0:\n train_accuracy = accuracy.eval(feed_dict={x:image, y_: label, keep_prob: 1.0})\n print(\"index:%d rotate4 step %d, training accuracy %.4f\"%(loop, i, train_accuracy))\n train_step.run(feed_dict={x: image, y_: label, keep_prob: 0.8})\n\n saver.save(sess, checkpoint_dir+'nanum.ckpt')\n print('******** Train END ***********')\n correct = 0.0\n loop = 0.0\n for i in range(100): # test image is 500\n batch = dataset.basic.next_batch(batch_cnt)\n acc = accuracy.eval(feed_dict={x:batch[0], y_: batch[1], keep_prob: 1})\n #print(\"step %d, accuracy %.4f\"%(i, acc))\n correct += acc\n loop += 1.0\n\n print(\"test accuracy %5.3f\" % (correct / loop))\n duration = time.time() - start_time\n print(\"Total Time %.2f\" % duration)\n\n\ndef check_point_restore(dir):\n ckpt = tf.train.get_checkpoint_state(dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n else:\n print('No checkpoint found')\n exit(1)\n","sub_path":"08 machine learning/1.0 Version code/tf_model.py","file_name":"tf_model.py","file_ext":"py","file_size_in_byte":6940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"647269864","text":"from toneai_bot.labeling import labeling_funcs\n\n\ndef analize_text(text: str) -> str:\n\n    lines = []\n\n    for func in labeling_funcs:\n        lines.append(f'`{func.__name__}`: *{func(text)}*')\n\n    response = '\\n'.join(lines)\n    return response\n","sub_path":"toneai_bot/text_analizer.py","file_name":"text_analizer.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"554933423","text":"def quicksort(data,left,right):\n\tpivot=data[int((left+right)/2)]\n\ti=left\n\tj=right\n\twhile i<=j:\n\t\twhile data[i]<pivot:\n\t\t\ti+=1\n\t\twhile data[j]>pivot:\n\t\t\tj-=1\n\t\tif i<=j:\n\t\t\ttemp=data[i]\n\t\t\tdata[i]=data[j]\n\t\t\tdata[j]=temp\n\t\t\ti+=1\n\t\t\tj-=1\n\t\t\t\n\tif left<j:\n\t\tquicksort(data,left,j)\n\tif i<right:\n\t\tquicksort(data,i,right)\n# ... (remainder of this record was lost in extraction; the fragment below is the tail of the next record, app/urls.py) ...\n (r'^media/(?P<path>.*)$', 'django.views.static.serve', {\r\n 'document_root': settings.MEDIA_ROOT,\r\n }),\r\n )\r\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"198945347","text":"#!/usr/bin/python2\nimport shlex\nfrom functools import partial\nfrom time import sleep\n\nimport click\n\nfrom core import delete_local_files, run_container, stop_container, build_base_container, run_in_container as _run_in_container\nfrom config import SSH_SERVER_IP, ROOT_PW\n\n@click.group()\ndef cli():\n    pass\n\n@cli.command()\n@click.argument('dir')\n@click.option('--container_name', default='ltsp', help='Name of runc container')\ndef clean(dir, container_name):\n    stop_container(container_name)\n    delete_local_files(dir)\n\n@cli.command()\n@click.argument('dir')\n@click.option('--container_name', default='ltsp', help='Name of runc container')\ndef install(dir, container_name):\n    \"\"\"\n    Pulls a new ubuntu container and does minimal setup\n    \"\"\"\n    container_name = container_name.encode('utf-8')\n    def 
run_in_container(cmd):\n print(_run_in_container(container_name, shlex.split(cmd)))\n build_base_container(dir)\n run_container(dir, container_name)\n # Update the apt-cache on the host (important for )\n run_in_container(\"apt-get update\")\n\n # TODO: I'm having trouble with apt when my host is on a different version\n # of ubuntu than the client. This is obviously pretty exciting, and I'm\n # not sure why this is a problem now but wasn't last year. More work is\n # required - although at this stage blowing away this ubuntu install is\n # not a bad choice.\n\n # Install the very minimum needed to get the client to build\n # We don't want to install the SSH server because we assume that the host\n # is already running SSH, and systemd will actually try to do useful things\n # wth the server if it's being installed, which will fail if the port is\n # unavailable, and give lots of prompting about overwriting the SSH config\n # which is bind-mounted from the host.\n run_in_container('apt-get install -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" -y ltsp-server')\n\n # Hack around broken timezone symlinking\n # https://bugs.launchpad.net/ltsp/+bug/1660392\n # TODO: Check if this is still needed\n run_in_container(\"rm -f /usr/share/ltsp/plugins/ltsp-build-client/Ubuntu/035-copy-timezone\")\n\n # Build the fat client (we use lubuntu because of the low resource use,\n # is easier to use than unity and will run on the crutech P4's)\n run_in_container(\"ltsp-build-client --fat-client-desktop lubuntu-desktop\")\n\n\n # Update the keys for the host which we're running under, not the container\n # This causes a duplicate in the ssh_known_hosts which is a little annoying\n run_in_container(\"ltsp-update-sshkeys {}\".format(SSH_SERVER_IP))\n # Fix the ssh known hosts so users don't get a DNS Spoofing warning\n run_in_container(\"sed 's/^[^# ][^ ]* /* /' -i /opt/ltsp/amd64/etc/ssh/ssh_known_hosts\")\n\n # Set up the fat client root key\n run_in_container('ltsp-chroot bash -c \"echo root:{} | chpasswd\"'.format(ROOT_PW))\n # Remove some of the bloatware\n run_in_container(\"ltsp-chroot apt-get remove xscreensaver abiword audacious blueman bluez gnumeric pidgin transmission guvcview sylpheed cups evolution* --auto-remove -y\")\n # Install elective requirements\n ## Install lmms\n run_in_container(\"ltsp-chroot apt-get install -y lmms\")\n ## Install dependancies for pybotwar\n run_in_container(\"ltsp-chroot apt-get install -y python3-pip python3-pyqt4 build-essential python-dev swig\")\n run_in_container(\"ltsp-chroot -r pip3 install -y box2d\")\n ## Install dependancies for NavUber\n run_in_container(\"ltsp-chroot -r pip3 install -y matplotlib networkx\")\n ## The rest of pybotwar is copied manually to peoples home directories at present.\n # Set up the updated wine ppa (needs to be http so we can proxy it)\n # We need to allow i386 packages because wine is the worst\n run_in_container(\"ltsp-chroot dpkg --add-architecture i386\")\n # Need to use http otherwise we can't proxy the packages\n run_in_container(\"ltsp-chroot apt-add-repository 'http://dl.winehq.org/wine-builds/ubuntu/'\")\n run_in_container(\"ltsp-chroot apt-get update\")\n run_in_container(\"ltsp-chroot apt-get install -y --install-recommends winehq-stable\")\n # wine_gecko and wine_mono (using pre-downloaded msi's because upstream is terrible)\n run_in_container(\"mkdir -p /opt/ltsp/amd64/usr/share/wine/gecko/\")\n run_in_container(\"mv /root/wine_gecko-2.47-x86_64.msi 
/opt/ltsp/amd64/usr/share/wine/gecko/\")\n run_in_container(\"mkdir -p /opt/ltsp/amd64/usr/share/wine/mono/\")\n run_in_container(\"mv /root/wine-mono-4.6.4.msi /opt/ltsp/amd64/usr/share/wine/mono/\")\n # TODO: Install everything we need for Game Strategy\n run_in_container(\"ltsp-chroot apt-get install -y bzflag\")\n # === Do other chroot setup here ===\n\n run_in_container(\"ltsp-update-image\")\n\n stop_container(container_name)\n\n\n\n\nif __name__ == '__main__':\n cli()\n","sub_path":"tech.py","file_name":"tech.py","file_ext":"py","file_size_in_byte":4866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"287898814","text":"import random\n\ndef ran_func():\n for i in range(10):\n print(random.uniform(0,1))\n\ndef swap(arr,i,j):\n temp=arr[i]\n arr[i]=arr[j]\n arr[j]=temp\n\ndef partition(arr,low,high):\n i=low-1\n pivot=arr[high]\n for j in range(low, high):\n if(arr[j]<=pivot):\n i=i+1\n swap(arr,i,j)\n swap(arr,high,i+1)\n return i+1\n\ndef quickSort(arr,low,high):\n if(low<high):\n p=partition(arr,low,high)\n quickSort(arr,low,p-1)\n quickSort(arr,p+1,high)\n# ... (remainder of this record was lost in extraction; the fragment below is the tail of the next record, a Code Jam 2016 solution) ...\ndef winningLastWord(n):\r\n result = [n[0]]\r\n for i in n[1:]:\r\n if i>=result[0]:\r\n result.insert(0, i)\r\n else:\r\n result.append(i)\r\n return ''.join(result)\r\n\r\ncase = int(input())\r\nword = [input() for i in range(case)]\r\nfor i, n in enumerate(word):\r\n print(\"Case #{}: {}\".format(i+1, winningLastWord(n)))\r\n","sub_path":"codes/BuildLinks1.10/test_input/CJ_16_1/16_1_1_lolweyne_q1.py","file_name":"16_1_1_lolweyne_q1.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"132901722","text":"import os\nimport sys\nfrom .models import *\nimport xlrd\nimport django\nimport json\nfrom django.shortcuts import get_object_or_404\nfrom django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist\nfrom django.db import IntegrityError\nfrom django.template.defaultfilters import slugify\nfrom unidecode import unidecode\nos.environ['DJANGO_SETTINGS_MODULE'] = 'untitled1.settings'\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\"))\ndjango.setup()\n\n\nclass UploadingProducts(object):\n foreign_key_field = [\"offer_tag\"]\n foreign_key_field_availability = [\"offer_availability\"]\n foreign_key_field_publish = [\"offer_publish\"]\n key_field = [\"offer_url\"]\n model = Offers\n\n def __init__(self, data):\n data = data\n self.uploaded_file = data.get(\"file\")\n self.format_file = data.get(\"format_file\")\n\n def getting_related_model(self, field_name):\n related_model = self.model._meta.get_field(field_name).rel.to\n return related_model\n\n def getting_headers(self):\n s = self.s\n headers = dict()\n for column in range(s.ncols):\n value = s.cell(0, column).value\n headers[column] = value\n return headers\n\n def parsing(self):\n uploaded_file = self.uploaded_file\n if self.format_file == 'xls' or self.format_file == 'xlsx':\n wb = xlrd.open_workbook(file_contents=uploaded_file.read())\n s = wb.sheet_by_index(0)\n self.s = s\n headers = self.getting_headers()\n product_bulk_list = list()\n sub_bulk_list = []\n sub_key_bulk_list = []\n sub_bulk_cat_list = []\n for row in range(1, s.nrows):\n row_dict = {}\n for column in range(s.ncols):\n value = s.cell(row, column).value\n field_name = headers[column]\n if field_name == 'id' and not value:\n continue\n if field_name == \"offer_subtags\":\n continue\n\n if field_name in self.foreign_key_field_availability:\n related_model = self.getting_related_model(field_name)\n instance, created = related_model.objects.get_or_create(availability_title=value)\n value = 
instance\n\n if field_name in self.foreign_key_field_publish:\n related_model = self.getting_related_model(field_name)\n instance, created = related_model.objects.get_or_create(publish_title=value)\n value = instance\n if field_name in self.foreign_key_field:\n related_model = self.getting_related_model(field_name)\n try:\n instance = related_model.objects.get(tag_title=value)\n except ObjectDoesNotExist:\n related_model.objects.create(tag_url=slugify(unidecode(value)), tag_title=value,\n tag_publish=True, tag_priority=1)\n instance = related_model.objects.get(tag_title=value)\n value = instance\n row_dict[field_name] = value\n # product_bulk_list.append(Offers(**row_dict))\n Offers.objects.update_or_create(**row_dict)\n key = row_dict[\"offer_url\"]\n sub_bulk_cat = row_dict[\"offer_tag\"]\n sub_bulk_cat_list.append(sub_bulk_cat)\n sub_bulk_list.append(key)\n # Offers.objects.bulk_create(product_bulk_list)\n for row in range(1, s.nrows):\n sub_dict = []\n sub_tag_list = []\n for column in range(s.ncols):\n value = s.cell(row, column).value\n field_name = headers[column]\n if field_name in \"offer_subtags\":\n for st in range(len(sub_bulk_cat_list)):\n a = [v for v in value.split(\", \")]\n sub_tag_list.append(a)\n for i in range(len(sub_tag_list[st])):\n try:\n instance = Subtags.objects.get(tag_title=a[i])\n except ObjectDoesNotExist:\n Subtags.objects.create(tag_url=slugify(unidecode(a[i])), tag_title=a[i],\n tag_parent_tag=sub_bulk_cat_list[st])\n instance = get_object_or_404(Subtags, tag_title=a[i])\n value_sub = instance\n sub_dict.append(value_sub)\n sub_key_bulk_list.append(sub_dict)\n ThroughModel = Offers.offer_subtags.through\n for i in range(len(sub_bulk_list)):\n for j in range(len(sub_key_bulk_list[i])):\n try:\n try:\n ThroughModel.objects.bulk_create([\n ThroughModel(offers_id=get_object_or_404(Offers, offer_url=sub_bulk_list[i]).id,\n subtags_id=get_object_or_404(Subtags, tag_title=sub_key_bulk_list[i][j]).id),\n ])\n except (MultipleObjectsReturned, IntegrityError):\n continue\n except IndexError:\n continue\n return True\n elif self.format_file == 'json':\n x = json.loads(uploaded_file.read())\n js = []\n self.err = []\n self.add = [\"Добавлены поля по стандарту\", ]\n ThroughModel = Offers.offer_subtags.through\n for i in range(len(x)):\n d = dict()\n try:\n d[\"offer_title\"] = x[i][\"offer_title\"]\n except KeyError:\n self.err.append(\"offer_title\")\n return False\n try:\n if x[i][\"offer_url\"] == \"\":\n print(\"WOAP\")\n d[\"offer_url\"] = slugify(unidecode(x[i][\"offer_title\"]))\n else:\n d[\"offer_url\"] = x[i][\"offer_url\"]\n except KeyError:\n d[\"offer_url\"] = slugify(unidecode(x[i][\"offer_title\"]))\n self.add.append(\"offer_url:{}\".format(slugify(unidecode(x[i][\"offer_title\"]))))\n return False\n try:\n d[\"offer_price\"] = x[i][\"offer_price\"].replace(\",\", \".\")\n except KeyError:\n self.err.append(\"offer_price\")\n return False\n try:\n d[\"offer_valuta\"] = x[i][\"offer_valuta\"]\n except KeyError:\n d[\"offer_valuta\"] = \"руб.\"\n self.add.append(\"offer_valuta: руб.\")\n try:\n d[\"offer_value\"] = x[i][\"offer_value\"]\n except KeyError:\n self.err.append(\"offer_value\")\n return False\n try:\n d[\"offer_minorder\"] = x[i][\"offer_minorder\"]\n except KeyError:\n d[\"offer_minorder\"] = \"1\"\n self.add.append(\"offer_minorder: 1\")\n try:\n d[\"offer_minorder_value\"] = x[i][\"offer_minorder_value\"]\n except KeyError:\n self.err.append(\"offer_minorder_value\")\n return False\n try:\n d[\"offer_pre_text\"] = x[i][\"offer_pre_text\"]\n 
except KeyError:\n d[\"offer_pre_text\"] = \"\"\n self.add.append(\"offer_pre_text: \"\"\")\n try:\n d[\"offer_text\"] = x[i][\"offer_text\"]\n except KeyError:\n d[\"offer_text\"] = \"\"\n self.add.append(\"offer_text: \"\"\")\n try:\n d[\"offer_image_url\"] = x[i][\"offer_image_url\"]\n except KeyError:\n d[\"offer_image_url\"] = x[i][\"image_link\"]\n try:\n d[\"offer_availability\"], created = Availability.objects.get_or_create(\n availability_title=x[i][\"offer_availability\"])\n except KeyError:\n d[\"offer_availability\"], created = Availability.objects.get_or_create(\n availability_title=\"Под заказ\")\n self.add.append(\"offer_availability: Под заказ\")\n try:\n if x[i][\"offer_publish\"] == \"\":\n d[\"offer_publish\"], created = Publish.objects.get_or_create(publish_title=\"Публикуемый\")\n else:\n d[\"offer_publish\"], created = Publish.objects.get_or_create(publish_title=x[i][\"offer_publish\"])\n except KeyError:\n d[\"offer_publish\"], created = Publish.objects.get_or_create(publish_title=\"Публикуемый\")\n self.add.append(\"offer_publish: Публикуемый\")\n try:\n try:\n d[\"offer_tag\"] = Tags.objects.get(tag_title=x[i][\"offer_tag\"])\n except ObjectDoesNotExist:\n Tags.objects.create(tag_url=slugify(unidecode(x[i][\"offer_tag\"])),\n tag_title=x[i][\"offer_tag\"],\n tag_publish=True, tag_priority=1)\n d[\"offer_tag\"] = get_object_or_404(Tags, tag_title=x[i][\"offer_tag\"])\n except KeyError:\n return False\n js.append(d)\n Offers.objects.update_or_create(**d)\n for k in range(len(x)):\n try:\n for j in range(len(x[k][\"offer_subtags\"].split(\", \"))):\n try:\n v = Subtags.objects.get(tag_title=x[k][\"offer_subtags\"].split(\", \")[j])\n print(v)\n except ObjectDoesNotExist:\n try:\n Subtags.objects.create(tag_url=slugify(unidecode(x[k][\"offer_subtags\"].split(\", \")[j])),\n tag_title=x[k][\"offer_subtags\"].split(\", \")[j],\n tag_parent_tag=Tags.objects.get(tag_title=x[k][\"offer_tag\"]))\n v = get_object_or_404(Subtags, tag_title=x[k][\"offer_subtags\"].split(\", \")[j])\n except IntegrityError:\n continue\n try:\n ThroughModel.objects.bulk_create([\n ThroughModel(offers_id=get_object_or_404(Offers, offer_url=slugify(\n unidecode(x[k][\"offer_title\"])) if x[k][\"offer_url\"] == \"\"\n else x[k][\"offer_url\"]).id,\n subtags_id=get_object_or_404(Subtags, tag_title__icontains=v).id),\n ])\n except (MultipleObjectsReturned, IntegrityError):\n continue\n except KeyError:\n continue\n return True\n","sub_path":"pages/import_export_views.py","file_name":"import_export_views.py","file_ext":"py","file_size_in_byte":11703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"517905272","text":"class Node(object):\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nlist1 = Node(0)\nlist1.next = Node(2)\nlist1.next.next = Node(4)\nlist1.next.next.next = Node(6)\nlist1.next.next.next.next = Node(8)\n\nlist2 = Node(1)\nlist2.next = Node(3)\nlist2.next.next = Node(5)\nlist2.next.next.next = Node(7)\n\n\ndef merge(list1, list2):\n if list1 is None:\n return list2\n if list2 is None:\n return list1\n\n if list1.data < list2.data:\n list1.next = merge(list1.next, list2)\n return list1\n else:\n list2.next = merge(list2.next, list1)\n return list2\n\n\nmerged = merge(list1, list2)\n\nwhile merged.next is not None:\n print(merged.data)\n merged = 
merged.next\n\n# the while condition above stops before the last node, so print it here as well\nprint(merged.data)\n","sub_path":"questions/merge_sorted_linked_lists.py","file_name":"merge_sorted_linked_lists.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"229456248","text":"# Text similarity computation\n# Compute pairwise text similarity with TF-IDF\n\nfrom gensim import corpora,models,similarities\n\nimport jieba\nfrom collections import defaultdict\n\nf=open(\"词频.txt\",\"w\")\nstopkey=[line.strip() for line in open('stopword.txt').readlines()]# load the stopword list\ndoc1=\"doc1.txt\"\ndoc2=\"doc2.txt\"\ndoc3=\"doc3.txt\"\ndoc4=\"doc4.txt\"\ndoc5=\"doc5.txt\"\ndoc6=\"doc6.txt\"\nd1=open(doc1).read()\nd2=open(doc2).read()\nd3=open(doc3).read()\nd4=open(doc4).read()\nd5=open(doc5).read()\nd6=open(doc6).read()\n# word segmentation\ndata1=jieba.cut(d1.strip())\ndata2=jieba.cut(d2.strip())\ndata3=jieba.cut(d3.strip())\ndata4=jieba.cut(d4.strip())\ndata5=jieba.cut(d5.strip())\ndata6=jieba.cut(d6.strip())\n\ndata11=\"\"\nfor item in data1:\n    if item not in stopkey:\n        data11+=item+\" \"\ndata21=\"\"\nfor item in data2:\n    if item not in stopkey:\n        data21+=item+\" \"\ndata31=\"\"\nfor item in data3:\n    if item not in stopkey:\n        data31+=item+\" \"\ndata41=\"\"\nfor item in data4:\n    if item not in stopkey:\n        data41+=item+\" \"\ndata51=\"\"\nfor item in data5:\n    if item not in stopkey:\n        data51+=item+\" \"\ndata61=\"\"\nfor item in data6:\n    if item not in stopkey:\n        data61+=item+\" \"\n\ndocuments=[data11,data21,data31,data41,data51,data61]\ntexts=[[word for word in document.split()]\n       for document in documents]\n# count the frequency of each term\nfrequency=defaultdict(int)\nfor text in texts:\n    for token in text:\n        frequency[token]+=1\n# save the term frequencies to the output file\nf.write(\"term freq\\n\")\nfor item in frequency.keys():\n    f.write(item+\" \"+str(frequency[item])+\"\\n\")\n# build the dictionary\ndictionary=corpora.Dictionary(texts)\n\nnew_vec1=dictionary.doc2bow(data11.split())# positions and counts of each term in the dictionary\nnew_vec2=dictionary.doc2bow(data21.split())\nnew_vec3=dictionary.doc2bow(data31.split())\nnew_vec4=dictionary.doc2bow(data41.split())\nnew_vec5=dictionary.doc2bow(data51.split())\nnew_vec6=dictionary.doc2bow(data61.split())\n\ncorpus=[dictionary.doc2bow(text) for text in texts]\ntfidf=models.TfidfModel(corpus)# number of documents, dictionary size + 1\nnew_tfidf = tfidf[corpus]\nfeatureNum=len(dictionary.token2id.keys())# number of terms in the dictionary\nindex=similarities.SparseMatrixSimilarity(new_tfidf,num_features=featureNum)\n\nnew_vec_tfidf1 = tfidf[new_vec1]\nnew_vec_tfidf2 = tfidf[new_vec2]\nnew_vec_tfidf3 = tfidf[new_vec3]\nnew_vec_tfidf4 = tfidf[new_vec4]\nnew_vec_tfidf5 = tfidf[new_vec5]\nnew_vec_tfidf6 = tfidf[new_vec6]\n#print(new_vec_tfidf)\nsim1=index[new_vec_tfidf1]\nsim2=index[new_vec_tfidf2]\nsim3=index[new_vec_tfidf3]\nsim4=index[new_vec_tfidf4]\nsim5=index[new_vec_tfidf5]\nsim6=index[new_vec_tfidf6]\nprint(\"Similarities for document 1:\")\nprint(sim1)\nprint(\"Similarities for document 2:\")\nprint(sim2)\nprint(\"Similarities for document 3:\")\nprint(sim3)\nprint(\"Similarities for document 4:\")\nprint(sim4)\nprint(\"Similarities for document 5:\")\nprint(sim5)\nprint(\"Similarities for document 6:\")\nprint(sim6)\n","sub_path":"sim.py","file_name":"sim.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"178259088","text":"import pandas as pd\nimport changebasis\n\n\nclass NormalizeData(changebasis.ChangeBasis):\n    def perform(self, data, parameters):\n        if not isinstance(data, pd.DataFrame):\n            return False\n        if len(parameters) >= 2:\n            max_value = parameters[1]\n            min_value = parameters[0]\n        else:\n            if len(parameters) == 1:\n                max_value = parameters[0]\n                min_value = [0] * len(data[0])\n            else:\n                min_value = [0] 
* len(data[0])\n max_value = [1] * len(data[0])\n if (len(max_value) != data.shape[1]):\n return False\n if (len(min_value) != data.shape[1]):\n return False\n min_in_data = data.min()\n dif_in_data = data.max() - min_in_data\n multiplier = max_value\n for i in range(len(max_value)):\n multiplier[i] -= min_value[i]\n\n for i in data.index:\n data.loc[i] = (data.loc[i] - min_in_data) / dif_in_data * multiplier + min_value\n # while not iterator.finished:\n # iterator[0] = (iterator[0] - min_in_data[iterator.multi_index[1]]) / \\\n # (dif_in_data[iterator.multi_index[1]]) * multiplier[iterator.multi_index[1]] + \\\n # min_value[iterator.multi_index[1]]\n # iterator.iternext()\n return True\n","sub_path":"normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"289349321","text":"\"\"\"The base Controller API\n\nProvides the BaseController class for subclassing, and other objects\nutilized by Controllers.\n\"\"\"\nimport logging, time, cgi\nfrom pylons import c, cache, config, g, request, response, session\nfrom pylons.controllers import WSGIController\nfrom pylons.controllers.util import abort, etag_cache, redirect_to\nfrom pylons.decorators import jsonify, validate, rest\nfrom pylons.i18n import _, ungettext, N_\nfrom pylons.templating import render\nfrom paste.deploy.converters import asbool\n\nfrom demisauce.lib.filter import FilterList\nimport demisauce.lib.helpers as h\nimport demisauce.lib.sanitize as libsanitize\nimport demisauce.model as model\nfrom demisauce.model import mapping, meta\nfrom demisauce.model.person import Person\nimport tempita\nfrom functools import wraps\nfrom decorator import decorator\n\nlog = logging.getLogger(__name__)\n\n# create scheduler\nfrom demisauce.lib import scheduler\nscheduler.start()\n\ndef send_emails(email_template,recipient_list,substitution_dict=None):\n \"\"\"\n Gets an email template from demisauce and sends\n to recipient list using scheduler which runs in the background\n allowing this current request to continue processing\n \"\"\"\n from demisauce.lib import mail\n from demisaucepy import pylons_helper, demisauce_ws_get\n import urllib\n \n resource_id = urllib.quote_plus(email_template)\n response = demisauce_ws_get('email',resource_id,format='xml',cache=False)\n if response.success:\n t = response.model\n from string import Template\n if hasattr(t,'template'):\n s = Template(t.template)\n template = s.substitute(substitution_dict)\n mail.send_mail_toeach((t.subject,\n template, '%s<%s>' % (t.from_name,t.from_email), recipient_list))\n log.debug('sent emails to %s' % recipient_list)\n else:\n log.error('Error retrieving that template 1')\n elif not emails.success:\n log.error('Error retrieving that template 2')\n return False\n\n\nbase_url = h.base_url\n\ndef get_current_user():\n \"\"\"get current user\"\"\"\n user = None\n if 'user' in session and type(session['user']) == Person:\n user = session['user']\n elif 'dsu' in request.cookies:\n user = Person.by_unique(request.cookies['userkey'].lower())\n elif 'userkey' in request.cookies:\n user = Person.by_unique(request.cookies['userkey'].lower())\n return user\n\ndef get_current_site():\n \"\"\"gets site for current request\"\"\"\n site = None\n \n if 'apikey' in request.params:\n site = model.site.Site.by_apikey(request.params['apikey'])\n else:\n user = get_current_user()\n if user:\n site = model.site.Site.get(-1,user.site_id)\n return site\n \n\ndef 
requires_role(role):\n def wrapper(func,*args,**kwargs):\n user = get_current_user()\n if not user or user.has_role(role) == False:\n session['return_url'] = request.path_info\n session.save()\n if user:\n if request.environ['pylons.routes_dict']['controller'] == 'dashboard':\n h.add_alert('Not Authorized')\n log.info('403, current user doesnt have role=%s redirect to public page' % (role))\n # TODO: switch to abort instead of redirect\n #abort(403, 'Not authorized')\n redirect_wsave(h.url_for(controller='home',action='index'))\n else:\n h.add_alert('Not Authorized')\n log.info('not authorized' )\n redirect_wsave(h.url_for(controller='dashboard',action='index'))\n else:\n h.add_alert('Must Sign In')\n log.info('not logged in or wrong role, about to redirect to signin' )\n redirect_wsave(h.url_for(controller='account',action='signin'))\n else:\n return func(*args, **kwargs)\n return decorator(wrapper)\n\ndef rendertf(filename,vars=[]):\n \"\"\"Render a Tempita File\"\"\"\n fp = open(config['buffet.template_options']['mako.directories'][0]+filename)\n tmpl = tempita.Template(fp.read())\n return tmpl.substitute(vars)\n\ndef sanitize(text):\n return libsanitize.Sanitize(text)\n\ndef redirect_wsave(*args, **kwargs):\n \"\"\"\n allows redirect to a destination, but first saves alerts and current\n request messages to something that will still exist on that next\n request\n \"\"\"\n h.messages_tosession()\n redirect_to(*args, **kwargs)\n\ndef print_timing(func):\n \"\"\"prints how long method took\"\"\"\n def wrapper(*arg):\n t2 = time.clock()\n res = func(*arg)\n t3 = time.clock()\n url = request.environ['PATH_INFO']\n method = request.environ['REQUEST_METHOD']\n log.info('%s %s took %0.3fms %s, %s' % (method,url, (t3-t2)*1000.0,t3,t2))\n return res\n \n return wrapper\n\n# http://pythonisito.blogspot.com/2008/07/restfulness-in-turbogears.html\nclass RestMethod(object):\n def __call__(self,**kwargs):\n return self.result(**kwargs)\n \n def __init__(self,**kwargs):\n methodname = request.method.lower()\n if hasattr(self,methodname):\n self.result = getattr(self, methodname)\n \n\nclass BaseController(WSGIController):\n requires_auth = False\n \n def redirect(self,url):\n \"\"\"docstring for redirect\"\"\"\n redirect_wsave(url)\n \n def start_session(self,user,remember_me=False):\n if user:\n session['user'] = user\n site = user.site\n c.user = user\n session.save()\n if remember_me == True:\n expire_seconds = 60*60*24*31\n response.set_cookie('userkey', user.user_uniqueid,path='/',\n expires=expire_seconds)\n response.set_cookie('email', user.email,path='/',\n expires=expire_seconds, secure=True)\n response.set_cookie('test', user.email,path='/',\n expires=expire_seconds)\n log.debug('in base controller setting user ')\n \n def __before__(self):\n \"\"\"\n request.cookies['userkey']\n session['current_user_person'] = user\n \"\"\"\n c.form_errors = c.form_errors or {}\n self.user = get_current_user()\n self.site = get_current_site()\n c.user = self.user\n c.site = self.site\n c.debug = False\n if 'debug' in config:\n c.debug = asbool(config['debug']) \n if c.user:\n c.site_id = c.user.site_id\n self.filters = FilterList(site_id=c.site_id)\n request.environ['filters'] = self.filters\n c.base_url = h.base_url()\n c.help_url = h.help_url()\n c.adminsite_slug = config['demisauce.appname']\n c.demisauce_url = config['demisauce.url']\n \n @print_timing\n def __call__(self, environ, start_response):\n \"\"\"Invoke the Controller\"\"\"\n # WSGIController.__call__ dispatches to the Controller method the\n 
# request is routed to. This routing information is available in\n # environ['pylons.routes_dict']\n try:\n return WSGIController.__call__(self, environ, start_response)\n finally:\n #log.debug('in base controller __call__ remove session' )\n if meta.DBSession:\n meta.DBSession.remove()\n \n\nclass SecureController(BaseController):\n requires_auth = True\n \n @requires_role('admin')\n def __before__(self):\n BaseController.__before__(self)\n \n\nclass NeedsadminController(BaseController):\n requires_auth = True\n \n @requires_role('sysadmin')\n def __before__(self):\n BaseController.__before__(self)\n \n\n\n# Include the '_' function in the public names\n__all__ = [__name for __name in locals().keys() if not __name.startswith('_') \\\n or __name == '_']\n","sub_path":"demisauce/trunk/demisauce/lib/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"145441338","text":"######### Import your libraries #######\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport os\n\n###### Set up variables\nlist_of_choices=['punch', 'body-slam', 'round-house kick to the face']\ngithublink = 'https://github.com/austinlasseter/chuck_norris_execution'\nimage1='chucknorris.jpg'\nheading1='Chuck Norris execution method'\n\n########### Initiate the app\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\nserver = app.server\napp.title='Chuck'\n\n####### Layout of the app ########\napp.layout = html.Div([\n html.H2(heading1),\n html.Img(src=app.get_asset_url(image1), style={'width': 'auto', 'height': '10%'}),\n dcc.Dropdown(id='your-input-here',\n options=[{'label': i, 'value': i} for i in list_of_choices],\n value='punch',\n style={'width': '500px'}),\n html.Br(),\n html.Div(id='your-output-here', children=''),\n html.Br(),\n html.A('Code on Github', href=githublink),\n\n])\n\n\n######### Interactive callbacks go here #########\n@app.callback(dash.dependencies.Output('your-output-here', 'children'),\n [dash.dependencies.Input('your-input-here', 'value')])\ndef display_value(whatever_you_chose):\n return f'Chuck Norris will now execute you with a {whatever_you_chose}.'\n\n\n######### Run the app #########\nif __name__ == '__main__':\n app.run_server(debug=True)\n","sub_path":"dash-examples/02-simple-callbacks/chucknorris.py","file_name":"chucknorris.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"541063413","text":"from linebot.models.send_messages import TextSendMessage\nfrom app.flexmodules import flexmessages\nfrom linebot.models import FlexSendMessage\nfrom app import line_bot_api, handler\nfrom app.dataSQL import connectDB\n\ndef query_menu(event):\n if '菜單查詢' in event.message.text:\n line_bot_api.reply_message(\n event.reply_token,\n FlexSendMessage(alt_text='query record: index',contents=flexmessages.flex_index())\n )\n return True\n else:\n return False\n\ndef query_menu_back(event):\n query = event.postback.data\n print(query)\n if '=' in query:\n print(query.split('=')[1])\n data = connectDB.queryItem(query.split('=')[1])\n menu_name = [i[2] for i in data]\n line_bot_api.reply_message(\n event.reply_token,\n FlexSendMessage(\n alt_text=f\"query record: column {query}\",\n contents= flexmessages.flex_menu_prize(query,menu_name)\n )\n )\n return True\n elif 
'菜單' in query:\n data = connectDB.showallMenu()\n menu_name = [i[1] for i in data]\n line_bot_api.reply_message(\n event.reply_token,\n FlexSendMessage(\n alt_text=f\"query record: column {query}\",\n contents= flexmessages.flex_menu(query,menu_name)\n )\n )\n return True\n else:\n return False","sub_path":"ch17/LineBot/app/flexmodules/flextalks.py","file_name":"flextalks.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"114771518","text":"from .server_agent import Agent\nfrom .config import ConfigLoader\nimport asyncio\nimport logging\nimport yaml\nimport argparse\n\nparser = argparse.ArgumentParser(\n description=\"\"\"Server agent which read data from mqtt broker and write into postgres database\"\"\",\n)\nparser.add_argument('-c', '--config', type=str, help='Configuration file location')\nargs = parser.parse_args()\n\n\nasync def run_server_agent(logger=None, config=None):\n server_agent = Agent()\n\n\nasync def main(conf_path=None):\n logging.basicConfig()\n logger = logging.getLogger(__name__)\n agent = Agent(logger=logger, config_path=conf_path)\n await agent.run()\n\n\nif args.config:\n asyncio.run(main(conf_path=args.config))\nelse:\n asyncio.run(main(conf_path=None))\n\n\n\n\n","sub_path":"agent/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"341359545","text":"\"\"\"An implementation of stack aggregator.\n\nGathers component data from the graph database and aggregate the data to be presented\nby stack-analyses endpoint\n\nOutput: TBD\n\n\"\"\"\nimport datetime\nimport requests\nimport copy\nfrom collections import defaultdict\nfrom utils import (select_latest_version, LICENSE_SCORING_URL_REST,\n execute_gremlin_dsl, GREMLIN_SERVER_URL_REST, persist_data_in_db,\n GREMLIN_QUERY_SIZE, format_date)\nimport logging\n\nlogger = logging.getLogger(__file__)\n\n\ndef get_recommended_version(ecosystem, name, version):\n \"\"\"Fetch the recommended version in case of CVEs.\"\"\"\n query = \"g.V().has('ecosystem', '{eco}').has('name', '{pkg}')\" \\\n \".out('has_version').not(out('has_cve')).values('version');\"\\\n .format(eco=ecosystem, pkg=name)\n payload = {'gremlin': query}\n result = execute_gremlin_dsl(url=GREMLIN_SERVER_URL_REST, payload=payload)\n if result:\n versions = result['result']['data']\n if len(versions) == 0:\n return None\n else:\n return None\n rec_version = version\n for ver in versions:\n rec_version = select_latest_version(\n ver,\n rec_version\n )\n if rec_version == version:\n return None\n return rec_version\n\n\ndef extract_component_details(component):\n \"\"\"Extract details from given component.\"\"\"\n date = format_date(component.get(\"package\", {}).get(\"gh_refreshed_on\", [\"N/A\"])[0])\n github_details = {\n \"dependent_projects\":\n component.get(\"package\", {}).get(\"libio_dependents_projects\", [-1])[0],\n \"dependent_repos\": component.get(\"package\", {}).get(\"libio_dependents_repos\", [-1])[0],\n \"total_releases\": component.get(\"package\", {}).get(\"libio_total_releases\", [-1])[0],\n \"latest_release_duration\":\n str(datetime.datetime.fromtimestamp(component.get(\"package\", {}).get(\n \"libio_latest_release\", [1496302486.0])[0])),\n \"first_release_date\": \"Apr 16, 2010\",\n \"issues\": {\n \"month\": {\n \"opened\": component.get(\"package\", {}).get(\"gh_issues_last_month_opened\", [-1])[0],\n 
\"closed\": component.get(\"package\", {}).get(\"gh_issues_last_month_closed\", [-1])[0]\n }, \"year\": {\n \"opened\": component.get(\"package\", {}).get(\"gh_issues_last_year_opened\", [-1])[0],\n \"closed\": component.get(\"package\", {}).get(\"gh_issues_last_year_closed\", [-1])[0]\n }},\n \"pull_requests\": {\n \"month\": {\n \"opened\": component.get(\"package\", {}).get(\"gh_prs_last_month_opened\", [-1])[0],\n \"closed\": component.get(\"package\", {}).get(\"gh_prs_last_month_closed\", [-1])[0]\n }, \"year\": {\n \"opened\": component.get(\"package\", {}).get(\"gh_prs_last_year_opened\", [-1])[0],\n \"closed\": component.get(\"package\", {}).get(\"gh_prs_last_year_closed\", [-1])[0]\n }},\n \"stargazers_count\": component.get(\"package\", {}).get(\"gh_stargazers\", [-1])[0],\n \"forks_count\": component.get(\"package\", {}).get(\"gh_forks\", [-1])[0],\n \"refreshed_on\": date,\n \"open_issues_count\": component.get(\"package\", {}).get(\"gh_open_issues_count\", [-1])[0],\n \"contributors\": component.get(\"package\", {}).get(\"gh_contributors_count\", [-1])[0],\n \"size\": \"N/A\"\n }\n used_by = component.get(\"package\", {}).get(\"libio_usedby\", [])\n used_by_list = []\n for epvs in used_by:\n slc = epvs.split(':')\n used_by_dict = {\n 'name': slc[0],\n 'stars': int(slc[1])\n }\n used_by_list.append(used_by_dict)\n github_details['used_by'] = used_by_list\n\n code_metrics = {\n \"code_lines\": component.get(\"version\", {}).get(\"cm_loc\", [-1])[0],\n \"average_cyclomatic_complexity\":\n component.get(\"version\", {}).get(\"cm_avg_cyclomatic_complexity\", [-1])[0],\n \"total_files\": component.get(\"version\", {}).get(\"cm_num_files\", [-1])[0]\n }\n\n cves = []\n recommended_latest_version = None\n name = component.get(\"version\", {}).get(\"pname\", [\"\"])[0]\n version = component.get(\"version\", {}).get(\"version\", [\"\"])[0]\n ecosystem = component.get(\"version\", {}).get(\"pecosystem\", [\"\"])[0]\n if len(component.get(\"cves\", [])) > 0:\n for cve in component.get(\"cves\", []):\n component_cve = {\n 'CVE': cve.get('cve_id')[0],\n 'CVSS': cve.get('cvss_v2', [''])[0]\n }\n cves.append(component_cve)\n recommended_latest_version = component.get(\"package\", {}).get(\"latest_non_cve_version\", \"\")\n if not recommended_latest_version:\n recommended_latest_version = get_recommended_version(ecosystem, name, version)\n\n licenses = component.get(\"version\", {}).get(\"declared_licenses\", [])\n\n latest_version = select_latest_version(\n version,\n component.get(\"package\", {}).get(\"libio_latest_version\", [\"\"])[0],\n component.get(\"package\", {}).get(\"latest_version\", [\"\"])[0],\n name\n )\n component_summary = {\n \"ecosystem\": ecosystem,\n \"name\": name,\n \"version\": version,\n \"licenses\": licenses,\n \"security\": cves,\n \"osio_user_count\": component.get(\"version\", {}).get(\"osio_usage_count\", 0),\n \"latest_version\": latest_version,\n \"recommended_latest_version\": recommended_latest_version,\n \"github\": github_details,\n \"code_metrics\": code_metrics\n }\n # Add transitive block for transitive deps\n if component.get('transitive', {}):\n if not cves:\n return None\n else:\n component_summary['transitive'] = component.get('transitive')\n return component_summary\n\n\ndef _extract_conflict_packages(license_service_output):\n \"\"\"Extract conflict licenses.\n\n This helper function extracts conflict licenses from the given output\n of license analysis REST service.\n\n It returns a list of pairs of packages whose licenses are in conflict.\n Note 
that this information is only available when each component license\n was identified ( i.e. no unknown and no component level conflict ) and\n there was a stack level license conflict.\n\n :param license_service_output: output of license analysis REST service\n :return: list of pairs of packages whose licenses are in conflict\n \"\"\"\n license_conflict_packages = []\n if not license_service_output:\n return license_conflict_packages\n\n conflict_packages = license_service_output.get('conflict_packages', [])\n for conflict_pair in conflict_packages:\n list_pkgs = list(conflict_pair.keys())\n assert len(list_pkgs) == 2\n d = {\n \"package1\": list_pkgs[0],\n \"license1\": conflict_pair[list_pkgs[0]],\n \"package2\": list_pkgs[1],\n \"license2\": conflict_pair[list_pkgs[1]]\n }\n license_conflict_packages.append(d)\n\n return license_conflict_packages\n\n\ndef _extract_unknown_licenses(license_service_output):\n \"\"\"Extract unknown licenses.\n\n This helper function extracts unknown licenses information from the given\n output of license analysis REST service.\n\n At the moment, there are two types of unknowns:\n\n a. really unknown licenses: those licenses, which are not understood by our system.\n b. component level conflicting licenses: if a component has multiple licenses\n associated then license analysis service tries to identify a representative\n license for this component. If some licenses are in conflict, then its\n representative license cannot be identified and this is another type of\n 'unknown' !\n\n This function returns both types of unknown licenses.\n\n :param license_service_output: output of license analysis REST service\n :return: list of packages with unknown licenses and/or conflicting licenses\n \"\"\"\n # TODO: reduce cyclomatic complexity\n really_unknown_licenses = []\n lic_conflict_licenses = []\n if not license_service_output:\n return really_unknown_licenses\n\n # TODO: refactoring\n if license_service_output.get('status', '') == 'Unknown':\n list_components = license_service_output.get('packages', [])\n for comp in list_components:\n license_analysis = comp.get('license_analysis', {})\n if license_analysis.get('status', '') == 'Unknown':\n pkg = comp.get('package', 'Unknown')\n comp_unknown_licenses = license_analysis.get('unknown_licenses', [])\n for lic in comp_unknown_licenses:\n really_unknown_licenses.append({\n 'package': pkg,\n 'license': lic\n })\n\n # TODO: refactoring\n if license_service_output.get('status', '') == 'ComponentLicenseConflict':\n list_components = license_service_output.get('packages', [])\n for comp in list_components:\n license_analysis = comp.get('license_analysis', {})\n if license_analysis.get('status', '') == 'Conflict':\n pkg = comp.get('package', 'Unknown')\n d = {\n \"package\": pkg\n }\n comp_conflict_licenses = license_analysis.get('conflict_licenses', [])\n list_conflicting_pairs = []\n for pair in comp_conflict_licenses:\n assert (len(pair) == 2)\n list_conflicting_pairs.append({\n 'license1': pair[0],\n 'license2': pair[1]\n })\n d['conflict_licenses'] = list_conflicting_pairs\n lic_conflict_licenses.append(d)\n\n output = {\n 'really_unknown': really_unknown_licenses,\n 'component_conflict': lic_conflict_licenses\n }\n return output\n\n\ndef _extract_license_outliers(license_service_output):\n \"\"\"Extract license outliers.\n\n This helper function extracts license outliers from the given output of\n license analysis REST service.\n\n :param license_service_output: output of license analysis REST service\n :return: 
list of license outlier packages\n \"\"\"\n outliers = []\n if not license_service_output:\n return outliers\n\n outlier_packages = license_service_output.get('outlier_packages', {})\n for pkg in outlier_packages.keys():\n outliers.append({\n 'package': pkg,\n 'license': outlier_packages.get(pkg, 'Unknown')\n })\n\n return outliers\n\n\ndef perform_license_analysis(license_score_list, dependencies):\n \"\"\"Pass given license_score_list to stack_license analysis and process response.\"\"\"\n license_url = LICENSE_SCORING_URL_REST + \"/api/v1/stack_license\"\n\n payload = {\n \"packages\": license_score_list\n }\n resp = {}\n flag_stack_license_exception = False\n # TODO: refactoring\n try:\n resp = execute_gremlin_dsl(url=license_url, payload=payload)\n # lic_response.raise_for_status() # raise exception for bad http-status codes\n if not resp:\n raise requests.exceptions.RequestException\n except requests.exceptions.RequestException:\n logger.error(\"Unexpected error happened while invoking license analysis!\")\n flag_stack_license_exception = True\n\n msg = None\n stack_license = []\n stack_license_status = None\n unknown_licenses = []\n license_conflict_packages = []\n license_outliers = []\n if not flag_stack_license_exception:\n list_components = resp.get('packages', [])\n for comp in list_components: # output from license analysis\n for dep in dependencies: # the known dependencies\n if dep.get('name', '') == comp.get('package', '') and \\\n dep.get('version', '') == comp.get('version', ''):\n dep['license_analysis'] = comp.get('license_analysis', {})\n\n msg = resp.get('message')\n _stack_license = resp.get('stack_license', None)\n if _stack_license is not None:\n stack_license = [_stack_license]\n stack_license_status = resp.get('status', None)\n unknown_licenses = _extract_unknown_licenses(resp)\n license_conflict_packages = _extract_conflict_packages(resp)\n license_outliers = _extract_license_outliers(resp)\n\n output = {\n \"reason\": msg,\n \"status\": stack_license_status,\n \"f8a_stack_licenses\": stack_license,\n \"unknown_licenses\": unknown_licenses,\n \"conflict_packages\": license_conflict_packages,\n \"outlier_packages\": license_outliers\n }\n return output, dependencies\n\n\ndef extract_user_stack_package_licenses(resolved, ecosystem):\n \"\"\"Extract user stack package licenses.\"\"\"\n epv_set = create_dependency_data_set(resolved, ecosystem)\n user_stack = get_dependency_data(epv_set)\n list_package_licenses = []\n if user_stack is not None:\n for component in user_stack.get('result', []):\n data = component.get(\"data\", None)\n if data:\n component_data = extract_component_details(data[0])\n license_scoring_input = {\n 'package': component_data['name'],\n 'version': component_data['version'],\n 'licenses': component_data['licenses']\n }\n list_package_licenses.append(license_scoring_input)\n\n return list_package_licenses\n\n\ndef aggregate_stack_data(stack, manifest_file, ecosystem, deps,\n manifest_file_path, persist, transitive_count):\n \"\"\"Aggregate stack data.\"\"\"\n dependencies = []\n licenses = []\n license_score_list = []\n for component in stack.get('result', []):\n data = component.get(\"data\", None)\n if data:\n component_data = extract_component_details(data[0])\n if component_data:\n # create license dict for license scoring\n license_scoring_input = {\n 'package': component_data['name'],\n 'version': component_data['version'],\n 'licenses': component_data['licenses']\n }\n dependencies.append(component_data)\n 
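# each component analyzed here also contributes one {package, version, licenses}\n                # dict (license_scoring_input above); the accumulated license_score_list is\n                # later posted to the license-analysis service in a single batch by\n                # perform_license_analysis(), which maps the per-package results back onto dependencies\n                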
licenses.extend(component_data['licenses'])\n license_score_list.append(license_scoring_input)\n\n stack_distinct_licenses = set(licenses)\n\n # Call License Scoring to Get Stack License\n if persist:\n license_analysis, dependencies = perform_license_analysis(license_score_list, dependencies)\n stack_license_conflict = len(license_analysis.get('f8a_stack_licenses', [])) == 0\n else:\n license_analysis = dict()\n stack_license_conflict = None\n\n all_dependencies = {(dependency['package'], dependency['version']) for dependency in deps}\n analyzed_dependencies = {(dependency['name'], dependency['version'])\n for dependency in dependencies}\n unknown_dependencies = list()\n for name, version in all_dependencies.difference(analyzed_dependencies):\n unknown_dependencies.append({'name': name, 'version': version})\n\n data = {\n \"manifest_name\": manifest_file,\n \"manifest_file_path\": manifest_file_path,\n \"user_stack_info\": {\n \"ecosystem\": ecosystem,\n \"analyzed_dependencies_count\": len(dependencies),\n \"analyzed_dependencies\": dependencies,\n \"transitive_count\": transitive_count,\n \"unknown_dependencies\": unknown_dependencies,\n \"unknown_dependencies_count\": len(unknown_dependencies),\n \"recommendation_ready\": True, # based on the percentage of dependencies analysed\n \"total_licenses\": len(stack_distinct_licenses),\n \"distinct_licenses\": list(stack_distinct_licenses),\n \"stack_license_conflict\": stack_license_conflict,\n \"dependencies\": deps,\n \"license_analysis\": license_analysis\n }\n }\n return data\n\n\ndef create_dependency_data_set(resolved, ecosystem):\n \"\"\"Create direct and transitive set to reduce calls to graph.\"\"\"\n unique_epv_dict = {\n \"direct\": defaultdict(set),\n \"transitive\": defaultdict(set)\n }\n\n for pv in resolved:\n if pv.get('package') and pv.get('version'):\n key = ecosystem + \"|#|\" + pv.get('package') + \"|#|\" + pv.get('version')\n unique_epv_dict['direct'][key] = set()\n for trans_pv in pv.get('deps', []):\n trans_key = ecosystem + \"|#|\" + trans_pv.get('package') + \"|#|\" + \\\n trans_pv.get('version')\n unique_epv_dict['transitive'][trans_key].add(key)\n\n return unique_epv_dict\n\n\ndef remove_duplicate_cve_data(epv_list):\n \"\"\"Club all CVEs for an EPV.\"\"\"\n graph_dict = {}\n result = []\n for data in epv_list['result']['data']:\n pv = data.get('version').get('pname')[0] + \":\" + \\\n data.get('version').get('version')[0]\n if pv not in graph_dict:\n graph_dict[pv] = {}\n graph_dict[pv].update(data)\n if 'cves' not in graph_dict[pv]:\n graph_dict[pv]['cves'] = list()\n if 'cve' in data:\n cve = graph_dict[pv].pop('cve')\n # Fixes Issue\n # https://github.com/fabric8-analytics/fabric8-analytics-vscode-extension/issues/328\n if cve not in graph_dict[pv]['cves']:\n graph_dict[pv]['cves'].append(cve)\n\n # create a uniform structure for direct and transitive\n for x, y in graph_dict.items():\n z = list()\n z.append(y)\n data = {'data': z}\n result.append(data)\n\n return result\n\n\ndef add_transitive_details(epv_list, epv_set):\n \"\"\"Add transitive dict which affects direct dependencies.\"\"\"\n direct = epv_set['direct']\n transitive = epv_set['transitive']\n result = []\n\n cve_epv_list = remove_duplicate_cve_data(epv_list)\n # Add transitive dict as necessary\n for data in cve_epv_list:\n epv = data['data'][0]\n epv_str = epv['version']['pecosystem'][0] + \"|#|\" + \\\n epv['version']['pname'][0] + \"|#|\" + \\\n epv['version']['version'][0]\n\n if epv_str in direct:\n result.append(copy.deepcopy(data))\n if 
epv_str in transitive:\n affected_deps = transitive[epv_str]\n trans_dict = {\n 'isTransitive': True,\n 'affected_direct_deps': []\n }\n for dep in affected_deps:\n eco, name, version = dep.split(\"|#|\")\n if name and version:\n trans_dict['affected_direct_deps'].append(\n {\n \"package\": name,\n \"version\": version\n }\n )\n epv['transitive'] = trans_dict\n result.append(copy.deepcopy(data))\n\n return result\n\n\ndef get_dependency_data(epv_set):\n \"\"\"Get dependency data from graph.\"\"\"\n epv_list = {\n \"result\": {\n \"data\": [],\n \"unknown_deps\": []\n }\n }\n dep_list = {}\n unknown_deps_list = []\n query = \"epv=[];\"\n batch_query = \"a = g.V().has('pecosystem', '{eco}').has('pname', '{name}').\" \\\n \"has('version', '{ver}').dedup(); a.clone().as('version').\" \\\n \"in('has_version').dedup().as('package').select('version').\" \\\n \"coalesce(out('has_cve').as('cve').\" \\\n \"select('package','version','cve').by(valueMap()),\" \\\n \"select('package','version').by(valueMap())).\" \\\n \"fill(epv);\"\n i = 1\n epvs = [x for x, y in epv_set['direct'].items()]\n for epv in epvs:\n eco, name, ver = epv.split('|#|')\n dep_list[name] = ver\n query += batch_query.format(eco=eco, name=name, ver=ver)\n if i >= GREMLIN_QUERY_SIZE:\n i = 1\n # call_gremlin in batch\n payload = {'gremlin': query}\n result = execute_gremlin_dsl(url=GREMLIN_SERVER_URL_REST, payload=payload)\n if result:\n epv_list['result']['data'] += result['result']['data']\n query = \"epv=[];\"\n i += 1\n\n if i > 1:\n payload = {'gremlin': query}\n result = execute_gremlin_dsl(url=GREMLIN_SERVER_URL_REST, payload=payload)\n if result:\n epv_list['result']['data'] += result['result']['data']\n\n query = \"epv=[];\"\n batch_query = \"g.V().has('pecosystem', '{eco}').has('pname', '{name}').\" \\\n \"has('version', '{ver}').dedup().as('version').\" \\\n \"out('has_cve').as('cve').\" \\\n \"select('version','cve').by(valueMap()).\" \\\n \"fill(epv);\"\n i = 1\n epvs = [x for x, y in epv_set['transitive'].items()]\n for epv in epvs:\n eco, name, ver = epv.split('|#|')\n dep_list[name] = ver\n query += batch_query.format(eco=eco, name=name, ver=ver)\n if i >= GREMLIN_QUERY_SIZE:\n i = 1\n # call_gremlin in batch\n payload = {'gremlin': query}\n result = execute_gremlin_dsl(url=GREMLIN_SERVER_URL_REST, payload=payload)\n if result:\n epv_list['result']['data'] += result['result']['data']\n query = \"epv=[];\"\n i += 1\n\n if i > 1:\n payload = {'gremlin': query}\n result = execute_gremlin_dsl(url=GREMLIN_SERVER_URL_REST, payload=payload)\n if result:\n epv_list['result']['data'] += result['result']['data']\n\n # Identification of unknown dependencies\n epv_data = epv_list['result']['data']\n for k, v in dep_list.items():\n known_flag = False\n for knowndep in epv_data:\n version_node = knowndep['version']\n if k == knowndep['version']['pname'][0] and v == knowndep['version']['version'][0] \\\n and (version_node.get('licenses') or version_node.get('declared_licenses')):\n known_flag = True\n break\n if not known_flag:\n unknown_deps_list.append({'name': k, 'version': v})\n\n result = add_transitive_details(epv_list, epv_set)\n return {'result': result, 'unknown_deps': unknown_deps_list}\n\n\nclass StackAggregator:\n \"\"\"Aggregate stack data from components.\"\"\"\n\n @staticmethod\n def execute(aggregated=None, persist=True):\n \"\"\"Task code.\"\"\"\n started_at = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n stack_data = []\n external_request_id = aggregated.get('external_request_id')\n # TODO 
multiple license file support\n current_stack_license = aggregated.get('current_stack_license', {}).get('1', {})\n\n for result in aggregated['result']:\n resolved = result['details'][0]['resolved']\n ecosystem = result['details'][0]['ecosystem']\n manifest = result['details'][0]['manifest_file']\n manifest_file_path = result['details'][0]['manifest_file_path']\n epv_set = create_dependency_data_set(resolved, ecosystem)\n transitive_count = len(epv_set.get('transitive', []))\n finished = get_dependency_data(epv_set)\n if finished is not None:\n output = aggregate_stack_data(finished, manifest, ecosystem.lower(), resolved,\n manifest_file_path, persist, transitive_count)\n if output and output.get('user_stack_info'):\n output['user_stack_info']['license_analysis'].update({\n \"current_stack_license\": current_stack_license\n })\n stack_data.append(output)\n ended_at = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n audit = {\n 'started_at': started_at,\n 'ended_at': ended_at,\n 'version': 'v1'\n }\n stack_data = {\n 'stack_data': stack_data,\n '_audit': audit,\n '_release': 'None:None:None'\n }\n if persist:\n logger.info(\"Aggregation process completed for {}.\"\n \" Writing to RDS.\".format(external_request_id))\n persiststatus = persist_data_in_db(external_request_id=external_request_id,\n task_result=stack_data, worker='stack_aggregator_v2',\n started_at=started_at, ended_at=ended_at)\n else:\n persiststatus = {'stack_aggregator': 'success',\n 'external_request_id': external_request_id,\n 'result': stack_data}\n return persiststatus\n","sub_path":"src/stack_aggregator.py","file_name":"stack_aggregator.py","file_ext":"py","file_size_in_byte":24822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"402821760","text":"#-----------------BEGIN HEADERS-----------------\nimport numpy as np\nimport matplotlib.pyplot as plt\n#-----------------END HEADERS-----------------\n\nONI = []\ndata_y_clean = []\ndata_x_clean = []\ndata_5=[]\ndata_m5=[]\n\nbook = open('sst.dat', 'r')\ndata_y_string = list(book)\nfor i in range(len(data_y_string)):\n ONI.append(float(data_y_string[i]))\n\nfor i in range(len(ONI)/4):\n data_y_clean.append(ONI[4*i+1])\n data_x_clean.append(ONI[4*i+2] + ONI[4*i+3]/4.0)\n data_5.append(0.5)\n data_m5.append(-0.5)\n\ndel ONI\n\nplt.title(\"Nino3.4 temperature\")\nplt.xlabel('Year')\nplt.ylabel('Temperature anomaly (K)')\nplt.plot(data_x_clean, data_y_clean)\nplt.plot(data_x_clean, data_5)\nplt.plot(data_x_clean, data_m5)\nplt.savefig('plot.png')\n","sub_path":"YYSY2/surface_T/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"206503876","text":"def oddTuples(aTup):\n '''\n Write a procedure called oddTuples, which takes a tuple as input,\n and returns a new tuple as output, where every other element of the input\n tuple is copied, starting with the first one. So if test is the tuple\n ('I', 'am', 'a', 'test', 'tuple'), then evaluating oddTuples on this\n input would return the tuple ('I', 'a', 'tuple').\n \n aTup: a tuple\n \n returns: tuple, every other element of aTup. 
\n '''\n # Your Code Here\n ansTup = ()\n index = 0\n #iter through aTup to find odds\n while index < len(aTup):\n ansTup += (aTup[index],) #add odds to new tuple\n index += 2\n return ansTup\n \n","sub_path":"oddTuple.py","file_name":"oddTuple.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"368828787","text":"# -*- coding: utf-8 -*-\n\"\"\"\nLower bound function with elaborate lb_corner like boundary called LB_ECorner\nPaper : Boundary-based lower bound functions for dynamic time warping and their indexing\n\"\"\"\n\nimport os\nimport numpy as np\nimport time\nimport csv\nfrom dtw import *\n# import matplotlib.pyplot as plt\n\ndef readInData(data_path):\n result = []\n with open(data_path) as f:\n for data in f.readlines():\n data = data.strip()\n x = data.split(',')\n x = [float(men) for men in x[0:]]\n result.append(np.array(x))\n return np.array(result)\n\n\ndef reduceDimension(s, n, step):\n rc = []\n for i in range(0, n, step):\n sumc = 0.0\n cnt = 0\n for x in s[i:i + step]:\n cnt += 1\n sumc += x\n if cnt != 0:\n rc.append(sumc / cnt)\n return rc # return reduced candidate\n\n\ndef reduceDimensionSet(set, step):\n # demension reduction n->N\n rs = []\n for s in set:\n n = len(s)\n rc = []\n for i in range(0, n, step):\n sumc = 0.0\n cnt = 0\n for x in s[i:i + step]:\n cnt += 1\n sumc += x\n if cnt != 0:\n rc.append(sumc / cnt)\n rs.append(rc)\n return rs # return reduced candidate\n\n\ndef dist_cal(a,b):\n return (a-b)*(a-b)\n\n\ndef distance_clb_elaborate(candidate,query,r,p):\n distance = float(\"inf\")\n n = len(candidate)\n if p>=1 and p<=r+1:\n for i in range(1,p+1):\n dist_temp = dist_cal(candidate[p-1],query[i-1])\n # print dist_temp\n if distance > dist_temp:\n distance = dist_temp\n for i in range(1,p+1):\n dist_temp = dist_cal(candidate[i-1], query[p-1])\n # print dist_temp\n if distance > dist_temp:\n distance = dist_temp\n if p>=r+2 and p<=np.ceil(n/2.0):\n for i in range(p-r,p+1):\n dist_temp = dist_cal(candidate[p-1],query[i-1])\n # print dist_temp\n if distance > dist_temp:\n distance = dist_temp\n for i in range(p-r,p+1):\n dist_temp = dist_cal(candidate[i-1],query[p-1])\n # print dist_temp\n if distance > dist_temp:\n distance = dist_temp\n if p>=np.ceil(n/2.0)+1 and p<= n-r-1:\n for i in range(p,p+r+1):\n dist_temp = dist_cal(candidate[p-1],query[i-1])\n # print dist_temp\n if distance > dist_temp:\n distance = dist_temp\n for i in range(p,p+r+1):\n dist_temp = dist_cal(candidate[i-1],query[p-1])\n # print dist_temp\n if distance > dist_temp:\n distance = dist_temp\n if p>=n-r and p<=n:\n for i in range(p,n+1):\n dist_temp = dist_cal(candidate[p-1],query[i-1])\n # print dist_temp\n if distance > dist_temp:\n distance = dist_temp\n for i in range(p,n+1):\n dist_temp = dist_cal(candidate[i-1],query[p-1])\n # print dist_temp\n if distance > dist_temp:\n distance = dist_temp\n # print distance\n return distance\n\n\ndef distance_clb(candidate,query,r,p):\n lb1 = np.inf\n lb2 = np.inf\n n = len(candidate)\n if p>=1 and p<=r+1:\n U = max(candidate[0:p])\n L = min(candidate[0:p])\n if query[p-1] > U:\n lb1 = (query[p-1] - U )**2\n if query[p-1] < L:\n lb1 = (query[p-1] - L )**2\n if query[p-1] >= L and query[p-1] <= U :\n lb1 = 0\n\n U = max(query[0:p])\n L = min(query[0:p])\n if candidate[p-1] > U:\n lb2 = (candidate[p-1] - U )**2\n if query[p-1] < L:\n lb2 = (candidate[p-1] - L )**2\n if candidate[p-1] >= L and candidate[p-1] <= U :\n lb2 = 0\n\n if p>=r+2 and 
p<=np.ceil(n/2.0):\n U = max(candidate[p-r-1 : p])\n L = min(candidate[p-r-1 : p])\n if query[p-1] > U:\n lb1 = (query[p] - U) ** 2\n if query[p-1] < L:\n lb1 = (query[p-1] - L) ** 2\n if query[p-1] >= L and query[p-1] <= U:\n lb1 = 0\n\n U = max(query[p-r-1 : p])\n L = min(query[p-r-1 : p])\n if candidate[p-1] > U:\n lb2 = (candidate[p-1] - U) ** 2\n if query[p-1] < L:\n lb2 = (candidate[p-1] - L) ** 2\n if candidate[p-1] >= L and candidate[p-1] <= U:\n lb2 = 0\n\n if p>=np.ceil(n/2.0)+1 and p<= n-r-1:\n U = max(candidate[p-1 : p+r])\n L = min(candidate[p-1 : p+r])\n if query[p-1] > U:\n lb1 = (query[p-1] - U) ** 2\n if query[p-1] < L:\n lb1 = (query[p-1] - L) ** 2\n if query[p-1] >= L and query[p-1] <= U:\n lb1 = 0\n\n U = max(query[p-1 : p+r])\n L = min(query[p-1 : p+r])\n if candidate[p-1] > U:\n lb2 = (candidate[p-1] - U) ** 2\n if query[p-1] < L:\n lb2 = (candidate[p-1] - L) ** 2\n if candidate[p-1] >= L and candidate[p-1] <= U:\n lb2 = 0\n\n if p>=n-r and p<=n:\n U = max(candidate[p-1 : n])\n L = min(candidate[p-1 : n])\n if query[p-1] > U:\n lb1 = (query[p-1] - U) ** 2\n if query[p-1] < L:\n lb1 = (query[p-1] - L) ** 2\n if query[p-1] >= L and query[p-1] <= U:\n lb1 = 0\n\n U = max(query[p-1 : n])\n L = min(query[p-1 : n])\n if candidate[p-1] > U:\n lb2 = (candidate[p-1] - U) ** 2\n if query[p-1] < L:\n lb2 = (candidate[p-1] - L) ** 2\n if candidate[p-1] >= L and candidate[p-1] <= U:\n lb2 = 0\n return min(lb1,lb2)\n\n\ndef lb_ecorner(query,candidate,lmd):\n r = int(lmd*len(query))\n lb_dtw = 0\n for p in range(1,len(candidate)+1):\n lb_dtw += distance_clb_elaborate(candidate,query,r,p)\n return np.sqrt(lb_dtw)\n\n\ndef lb_corner(query,candidate,lmd,n):\n N = len(query)\n k = float(n/N)\n r = int(lmd*len(query))\n lb_dtw = 0\n for p in range(1,len(candidate)+1):\n lb_dtw += distance_clb(candidate,query,r,p)\n return np.sqrt(k*lb_dtw)\n\n\ndef envelope(seq,lmd):\n n = len(seq)\n r = int(lmd*n)\n env = []\n for p in range(1,n+1):\n if p >= 1 and p <= r + 1:\n l = min(seq[0:p])\n u = max(seq[0:p])\n if p >= r + 2 and p <= np.ceil(n / 2.0):\n l = min(seq[p-r-1:p])\n u = max(seq[p-r-1:p])\n if p >= np.ceil(n / 2.0) + 1 and p <= n - r - 1:\n l = min(seq[p-1:p+r])\n u = max(seq[p-1:p+r])\n if p >= n - r and p <= n:\n l = min(seq[p-1:n])\n u = max(seq[p-1:n])\n env.append([l,u])\n return env\n\n\ndef dist(x, yl, yu):\n if x > yu:\n return (x - yu) ** 2\n elif x < yl:\n return (x - yl) ** 2\n else:\n return 0\n\n\ndef envdist(q,s,lmd,n):\n k = float(n)/len(q)\n env_q = envelope(q, lmd)\n env_s = envelope(s, lmd)\n d = 0\n for p in range(len(s)):\n d += min(dist(q[p],env_s[p][0],env_s[p][1]), dist(s[p],env_q[p][0],env_q[p][1]))\n return np.sqrt(k*d)\n\n\ndef rectdist(q,rect,n,lmd):\n N = len(q)\n k = int(float(n)/N)\n d = 0\n env_q = envelope(q,lmd)\n for p in range(N):\n if rect[p][0]>env_q[p][1]:\n d += (rect[p][0]-env_q[p][1])**2\n elif rect[p][1] e:\n pruning += 1\n\n # --- display ---\n # data = candidate # label == 31\n # x = np.linspace(0, 1, len(data[0]))\n # plt.figure()\n # for i in range(len(data)/10):\n # plt.plot(x, data[i])\n # plt.legend()\n #\n # lower = []\n # for xx in range(len(rect)):\n # lower.append(rect[xx][0])\n # upper = []\n # for xx in range(len(rect)):\n # upper.append(rect[xx][1])\n # plt.plot(x,lower,'*-')\n # plt.plot(x,upper,'*-')\n # plt.show()\n\n\n # if min(LB_ECorner) < best_so_far:\n # dtw = []\n # for x in candidate:\n # r = int(lmd*len(x))\n # dtw.append(DTWDistance(x, query, r))\n # true_dist = min(dtw)\n # if true_dist < best_so_far:\n # best_so_far = 
true_dist\n\n time_end = time.time()\n consumption = time_end - time_begin\n print('Dataset Name:', dataset)\n print('Average Distance: ', lb_sum / cnt)\n print(\"Time consumption: \", consumption)\n\n # -- save result ---\n\n with open(result_path, 'a') as csvfile:\n writer = csv.writer(csvfile)\n data = (dataset, str(lb_sum / cnt), consumption, str(lmd),pruning,cnt,str(float(pruning)/cnt),N)\n writer.writerow(data)\n\nprint('finished')\n","sub_path":"lb_corner/corner.py","file_name":"corner.py","file_ext":"py","file_size_in_byte":12101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"209765981","text":"try:\r\n import argparse\r\nexcept ImportError:\r\n print(\"Please check if module 'argparse' is installed\")\r\n quit()\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--levels', type=argparse.FileType('r'), required=True,\r\n help=\"File with phylostratigraphic levels described\")\r\nparser.add_argument('--bbh_merged', type=argparse.FileType('r'), required=True,\r\n help=\"Table with merged results of annotation with database (only best BLAST hits).\"\r\n \"The last column should contain information about species\")\r\nparser.add_argument('--phylostratr_merged', type=argparse.FileType('r'), required=True,\r\n help=\"Table with merged results of phylostratr analysis.\"\r\n \"The last column should contain information about species\")\r\nparser.add_argument('--db_tag', type=str, required=True,\r\n help=\"Tag for database used\")\r\nparser.add_argument('--output', type=str, required=True)\r\nargs = parser.parse_args()\r\n\r\n\r\ndef levels_parsing(levels, levels_dict):\r\n for line in levels:\r\n description = line.strip().split(\"\\t\")\r\n levels_dict[description[0][1:-1]] = description[1]\r\n\r\n\r\ndef phylostratr_parsing(phylostratr_merged, phylostratr_dict):\r\n header = phylostratr_merged.readline()\r\n for line in phylostratr_merged:\r\n description = line.strip().split(\"\\t\")\r\n protein_ID, phylostrata, species = description[0], description[-2][1:-1], description[-1]\r\n if species not in phylostratr_dict.keys():\r\n phylostratr_dict[species] = {}\r\n phylostratr_dict[species][protein_ID] = phylostrata\r\n\r\n\r\ndef bbh_parsing(bbh_merged, bbh_dict):\r\n header = bbh_merged.readline()\r\n for line in bbh_merged:\r\n description = line.strip().split(\"\\t\")\r\n protein_ID, species = description[0], description[-1]\r\n if species not in bbh_dict.keys():\r\n bbh_dict[species] = []\r\n bbh_dict[species].append(protein_ID)\r\n\r\n\r\ndef generalization(levels_dict, bbh_dict, phylostratr_dict, summary_dict):\r\n species_list = [key for key in bbh_dict.keys()]\r\n\r\n for phylostrata, level in levels_dict.items():\r\n summary_dict[level] = {species: [] for species in species_list}\r\n\r\n for species, protein_list in bbh_dict.items():\r\n for protein in protein_list:\r\n summary_dict[levels_dict[phylostratr_dict[species][protein]]][species].append(protein)\r\n\r\n\r\ndef output_writing(output, db_tag, summary_dict, bbh_dict):\r\n species_list = [key for key in bbh_dict.keys()]\r\n\r\n with open(\"{output}_proteins_with_bbh_with_{db}_in_phylostrates.tsv\".format(output=output,\r\n db=db_tag), 'a') as output_file:\r\n output_file.write(\"Levels\\t{species}\\n\".format(species=\"\\t\".join(species_list)))\r\n for level, species_values in summary_dict.items():\r\n values = [\"{len} ({percent}%)\".format(len=len(species_values[species]),\r\n percent=round((len(species_values[species])/len(bbh_dict[species]))*100, 
2))\r\n for species in species_list]\r\n output_file.write(\"{level}\\t{values}\\n\".format(level=level, values=\"\\t\".join(values)))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n levels_dict, phylostratr_dict, bbh_dict, summary_dict = {}, {}, {}, {}\r\n levels_parsing(args.levels, levels_dict)\r\n phylostratr_parsing(args.phylostratr_merged, phylostratr_dict)\r\n bbh_parsing(args.bbh_merged, bbh_dict)\r\n generalization(levels_dict, bbh_dict, phylostratr_dict, summary_dict)\r\n output_writing(args.output, args.db_tag, summary_dict, bbh_dict)\r\n","sub_path":"BBH_and_phylostrates_summary.py","file_name":"BBH_and_phylostrates_summary.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"400000762","text":"import turtle\n\nqazi_turtle = turtle.Turtle()\nqazi_turtle.speed(15)\n\n# Square\ndef square():\n qazi_turtle.forward(100)\n qazi_turtle.right(90)\n qazi_turtle.forward(100)\n qazi_turtle.right(90)\n qazi_turtle.forward(100)\n qazi_turtle.right(90)\n qazi_turtle.forward(100)\n\n# Circle\ndef circle():\n for i in range(360):\n qazi_turtle.forward(1)\n qazi_turtle.right(1)\n\n# Typical FOR loop\nfor i in range(5): # range(starting value = 0 default and not required, max value, step or increment value - not required default 1)\n print(i) # - it counts in this example -> 0,1,2,3,4 [5], we use range() to loop through a set of code a specified number # of times\n qazi_turtle.forward(10)\n circle()\n square()\n\n\nprint(\"\")\n\n\n# Looping through a string\nfor x in \"pomidory\":\n if(x == \"p\"):\n continue # continue - stops the current iteration and continues with the next\n print(x)\n if(x == \"r\"):\n break # break - stops the loop\n\n \nprint(\"\")\n\n\nfor a in range(2, 10, 2):\n print(a)\nelse: # else - specifies a block of code to be executed when the loop is finished\n print(\"Finaly finished range(2, 10, 2)\")\n\n\n\nfor val in range(1, 20, 5):\n pass # we can use pass in for loop to create empty loop\n","sub_path":"basics/for_loop.py","file_name":"for_loop.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"335740696","text":"class Graph:\n\n \"\"\"Represent a graph as a dictionary of vertices mapping labels to edges.\"\"\"\n def __init__(self):\n self.vertices = {}\n\n def add_vertex(self, vertex_id):\n \"\"\"\n Add a vertex to the graph.\n \"\"\"\n if vertex_id not in self.vertices:\n self.vertices[vertex_id] = set()\n\n def add_edge(self, v1, v2):\n \"\"\"\n Add a directed edge to the graph.\n \"\"\"\n if v1 not in self.vertices:\n self.add_vertex(v1)\n\n if v2 not in self.vertices:\n self.add_vertex(v2)\n\n self.vertices[v1].add(v2)\n\n def get_neighbors(self, vertex_id):\n \"\"\"\n Get all neighbors (edges) of a vertex.\n \"\"\"\n return self.vertices[vertex_id]\n\n\nclass AncestorTree():\n\n \"\"\"\n Represents a tree of ancestors from a child.\n Also keeps track of maximum depth to make finding oldest ancestor easy.\n \"\"\"\n def __init__(self, node, depth = 0, parent_trees = None):\n self.node = node\n self.depth = depth\n if parent_trees is None:\n self.parent_trees = []\n else:\n self.parent_trees = parent_trees\n\n def add_node(self, parent_node, child_node):\n \"\"\"\n Try adding a new parent to the tree.\n Returns pair of booleans;\n * if the parent was added at all\n * if the depth increased\n \"\"\"\n\n # Get current max depth for later comparison\n if self.parent_trees != 
[]:\n max_depth = max({ t.depth for t in self.parent_trees })\n else:\n max_depth = 0\n\n # If we're at the child node, try adding parent.\n if child_node == self.node:\n # If there are no current parents, add the new one and increase depth.\n if self.parent_trees == []:\n self.parent_trees.append(AncestorTree(parent_node))\n self.depth += 1\n return True, True\n\n # If the parent isn't present, add it.\n if parent_node not in [ t.node for t in self.parent_trees ]:\n self.parent_trees.append(AncestorTree(parent_node))\n return True, False\n\n # If there are no more parents to look at, fail at adding.\n if self.parent_trees == []:\n return False, False\n\n # Recurse to parent nodes, trying to add the new node.\n for t in self.parent_trees:\n added_Q, depth_inc = t.add_node(parent_node, child_node)\n if added_Q:\n # If a subtree of maximum depth increases, the whole depth does.\n if depth_inc and t.depth > max_depth:\n self.depth += 1\n return added_Q, depth_inc\n\n # If everything fails, then adding is failed.\n return False, False\n\n def deepest_nodes(self):\n \"\"\" Return a set of deepest nodes \"\"\"\n if self.parent_trees == []:\n return {self.node}\n\n max_depth = max({ t.depth for t in self.parent_trees })\n\n return { n for t in self.parent_trees\n for n in t.deepest_nodes()\n if t.depth == max_depth }\n\n\ndef earliest_ancestor(ancestors, starting_node):\n a_graph = Graph()\n for p, c in ancestors:\n a_graph.add_edge(c, p)\n\n if a_graph.get_neighbors(starting_node) == set():\n return -1\n\n a_tree = AncestorTree(starting_node)\n stack = [starting_node]\n\n while stack != []:\n node = stack.pop()\n new_nodes = list(a_graph.get_neighbors(node))\n stack += new_nodes\n for n in new_nodes:\n a_tree.add_node(n, node)\n\n # Make sure lowest number ancestor is returned first.\n return min(a_tree.deepest_nodes())\n","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"25884397","text":"\"\"\"\nhttps://openpyxl.readthedocs.io/en/stable/formula.html\n参考了半天没有找到解析出公式值的办法,直接打开 Excel 可以看到数字是对的。\n\"\"\"\nimport openpyxl\n\nxlsx = '../resource/excel/writeFormula.xlsx'\n\nwb = openpyxl.Workbook()\nsheet = wb['Sheet']\n\nsheet['A1'] = 200\nsheet['A2'] = 300\nsheet['A3'] = '=SUM(A1:A2)'\n\nwb.save(xlsx)\n\nprint('Generate Success')\n\nwbFormulas = openpyxl.load_workbook(xlsx)\nsheet = wbFormulas.active\nprint(sheet['A3'].value)\n\n\nwbDataOnly = openpyxl.load_workbook(xlsx, data_only=True)\nsheet1 = wbDataOnly.active\nprint(sheet1['A3'].value)\n\n\"\"\"\nGenerate Success\n=SUM(A1:A2)\nNone\n\"\"\"\n","sub_path":"c12/p234_test129.py","file_name":"p234_test129.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"163579219","text":"#!/usr/bin/env python\nimport re\nimport sys\nimport gtk\nimport appindicator\n\nimport os\nfrom subprocess import check_output, CalledProcessError, STDOUT\n\nos.environ[\"LC_ALL\"] = \"C\"\ntry:\n interval = int(check_output([\"git\", \"config\", \"indicator.interval\"]))\nexcept Exception:\n interval = 60 # seconds\n\ndef scan_git(fetch):\n report = []\n for filename in os.listdir(\".\"):\n if os.path.isdir(filename):\n output = ''\n try:\n if fetch:\n check_output([\"git\", \"fetch\"], cwd=filename, stderr=STDOUT)\n output = check_output([\"git\", \"status\", \"-b\", \"--porcelain\"], cwd=filename, 
stderr=STDOUT).split(\"\\n\")[:-1]\n                if len(output) > 1:\n                    report.append(\"%s has uncommitted files\" % filename)\n                if re.search(r\"\\[ahead \\d+\\]\", output[0]):\n                    report.append(\"%s needs to push\" % filename)\n                elif re.search(r\"\\[behind \\d+\\]\", output[0]):\n                    report.append(\"%s needs to pull\" % filename)\n                elif re.search(r\"\\[ahead \\d+, behind \\d+\\]\", output[0]):\n                    report.append(\"%s needs to pull&push\" % filename)\n            except CalledProcessError:\n                report.append(\"Error checking %s\" % filename)\n    return report\n\nclass GitMonitor(object):\n    def __init__(self):\n        self.ind = appindicator.Indicator(\"git-indicator\",\n                                          \"active\",\n                                          appindicator.CATEGORY_APPLICATION_STATUS)\n        self.ind.set_status(appindicator.STATUS_ACTIVE)\n        self.ind.set_icon_theme_path(os.path.abspath(os.path.dirname(__file__)))\n        self.ind.set_attention_icon(\"attention\")\n\n        self.menu_setup()\n        self.ind.set_menu(self.menu)\n\n        self.fetch = True\n\n    def menu_setup(self):\n        self.menu = gtk.Menu()\n\n        self.status_item = gtk.MenuItem(\"No action required\")\n        self.status_item.set_sensitive(False)\n        self.menu.append(self.status_item)\n\n        self.pause_item = gtk.MenuItem(\"Pause fetching\")\n        self.pause_item.connect(\"activate\", self.toggle_fetching)\n        self.pause_item.show()\n        self.menu.append(self.pause_item)\n\n        self.refresh_item = gtk.MenuItem(\"Refresh\")\n        self.refresh_item.connect(\"activate\", self.check_git)\n        self.refresh_item.show()\n        self.menu.append(self.refresh_item)\n\n        self.quit_item = gtk.MenuItem(\"Quit\")\n        self.quit_item.connect(\"activate\", self.quit)\n        self.quit_item.show()\n        self.menu.append(self.quit_item)\n\n    def main(self):\n        gtk.timeout_add(50, self.check_git_first)\n        gtk.timeout_add(interval * 1000, self.check_git)\n        gtk.main()\n\n    def quit(self, widget):\n        sys.exit(0)\n\n    def check_git_first(self, widget=None):\n        self.check_git()\n        return False\n\n    def check_git(self, widget=None):\n        report = scan_git(self.fetch)\n        if report:\n            self.status_item.set_label(\"\\n\".join(report))\n            self.status_item.show()\n            self.ind.set_status(appindicator.STATUS_ATTENTION)\n        else:\n            self.status_item.hide()\n            self.ind.set_status(appindicator.STATUS_ACTIVE)\n        return True\n\n    def toggle_fetching(self, widget):\n        self.fetch = not self.fetch\n        if self.fetch:\n            self.pause_item.set_label(\"Pause fetching\")\n        else:\n            self.pause_item.set_label(\"Resume fetching\")\n\nif __name__ == \"__main__\":\n    indicator = GitMonitor()\n    indicator.main()\n","sub_path":"git-indicator.py","file_name":"git-indicator.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"328124830","text":"import mysql.connector\nimport server\n\nclass WikiPage:\n    def __init__ (self,id=0):\n        if(not type(id)==int):\n            id=int(id)\n        query = \"SELECT id,title,page_content,last_modified,last_author FROM page where id=%d\" % id\n        result_set = Database.getResult(query,True)\n        self.id=id\n        self.title = None\n        self.content = None\n        self.lastModified = None\n        self.lastAuthor = None\n        if not result_set is None:\n            # populate the attributes from the fetched row\n            self.title = result_set[1]\n            self.content = result_set[2]\n            self.lastModified = result_set[3]\n            self.lastAuthor = result_set[4]\n        return\n    # def save(self):\n    #     entry = WikiPage.Database.\n    #     if:\n    #\n    #\n    #     else:\n    #\n    #\n    #     # if self.id > 0:\n    #     #     return self.update()\n    #     # else:\n    #     #     return self.insert()\n    def insert(self):\n        query = (\"insert into page (title,page_content,last_modified,last_author) values ('%s','%s','%s','%s')\" % (Database.escape(self.title), Database.escape(self.content), self.lastModified, self.lastAuthor))\n        
self.id=Database.doQuery(query)\n        return self.id\n    def update(self):\n        query = \"update page set title='%s', page_content='%s', last_modified='%s', last_author='%s' where id=%d\" % (Database.escape(self.title), Database.escape(self.content), self.lastModified, self.lastAuthor, self.id)\n        Database.doQuery(query)\n        return self.id\n    def delete(self):\n        query = (\"update page set deleted=1 where id=%d\" % self.id)\n        Database.doQuery(query)\n        return True\n    def __str__(self):\n        return self.title\n    @staticmethod\n    def getObjects():\n        query = \"SELECT id FROM page\"\n        result_set = Database.getResult(query)\n        pages = []\n        for item in result_set:\n            id = int(item[0])\n            pages.append(WikiPage(id))\n        return pages\n\n\n\n\nclass Database(object):\n    @staticmethod\n    def getConnection():\n        return mysql.connector.connect(user=server.dbUser,password=server.dbPass,host=server.dbHost,database=server.dbName)\n    @staticmethod\n    def escape(value):\n        return value.replace(\"'\",\"''\")\n    @staticmethod\n    def getResult(query,getOne=False):\n        conn = Database.getConnection()\n        cur = conn.cursor()\n        cur.execute(query)\n        if getOne:\n            result_set = cur.fetchone()\n        else:\n            result_set = cur.fetchall()\n        cur.close()\n        conn.close()\n        return result_set\n    @staticmethod\n    def doQuery(query):\n        conn = Database.getConnection()\n        cur = conn.cursor()\n        cur.execute(query)\n        conn.commit()\n        lastId = cur.lastrowid\n        cur.close()\n        conn.close()\n        return lastId\n","sub_path":"wiki/wikipageclass.py","file_name":"wikipageclass.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"384753842","text":"class ComputerPlayer():\n    def random_move(self, low, high, rules, board):\n        import random\n        move = random.randint(low, high)\n        return rules.possible_moves(board)[move]\n    \n    def get_next_move(self, board, rules):\n        max_depth = 7\n        def _minimax(current_depth, minimax_vals_for_depth):\n            if current_depth >= max_depth: return 0\n            if rules.is_game_over(board):\n                if rules.get_winner(board) == None:\n                    return 0\n                return -1\n\n            for space in rules.possible_moves(board):\n                board.fill_space(space, rules.active_team(board))\n                minimax_vals_for_depth[space] = -1 * _minimax(current_depth+1, {})\n                board.erase_space(space)\n\n            best_move = max(minimax_vals_for_depth, key=minimax_vals_for_depth.get)\n            if current_depth == 0:\n                return best_move\n            return minimax_vals_for_depth[best_move]\n        \n        if board.num_full_spaces() == 0: return self.random_move(0, len(rules.possible_moves(board))-1, rules, board)\n        return _minimax(0, {})","sub_path":"src/computer_player.py","file_name":"computer_player.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"365123494","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 26 15:50:00 2016\r\n\r\n@author: Tedo\r\n\"\"\"\r\n\r\n\r\nimport os\r\npath = \"D:\\\\TEST_DW\\\\Procedures\"\r\ndir = os.listdir(path)\r\nfor filename in dir:\r\n    if filename.startswith(\"Procedure\"):\r\n        os.rename(os.path.join(path, filename), os.path.join(path, filename[10:]))\r\n","sub_path":"templates/file_renamer.py","file_name":"file_renamer.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"119586970","text":"#!/home/sambarluc/installed/anaconda/bin\n\"\"\"\nAnalysis of LIS132 Mount Josephine data\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as ppl\nfrom NIOZhst.Chain import Chain\n\nppl.close('all')\n\nimport ConfigDetail\nreload(ConfigDetail)\nfrom ConfigDetail import 
*\n\n#Load calibration data\nCalib1 = CalibPath1+CalibName11\nCalib2 = CalibPath2+CalibName21\nCalib3 = CalibPath3+CalibName31\nCalib4 = CalibPath4+CalibName41\n\nC = Chain()\nC.append_therms('',Files3, Z3)\n\nC.import_calib(CalibChains=[Calib1,Calib2,Calib3,Calib4], Ex=Exclude3)\n# Save autoscale data\nC.autoscale(Method='polyval', Degree=3, Threshold=0.4, \\\n Plot=False, Range=(Start,End), Convention='yearday', \\\n Subsets=chunks)\n\nC.time_ascale(Plot='../figures/time_ascale_M3', stex=True, aveRange=1.5,\n Ex=Exclude3)\n\nC.quick_save(OutName3)\n\n#Plot data\nif 1:\n print ('Plot data')\n time, Temper = C.to_array(Range=(Start,End), Convention='yearday', \\\n Fill='missing', Skip=-ndt, Ex=Exclude2)\n\n F = ppl.figure()\n F.subplots_adjust(right=0.99, top=0.99)\n AX = F.add_subplot(111)\n Tmin = np.nanmin(Temper)\n Tmax = np.nanmax(Temper)\n levs = Tmin + (Tmax-Tmin)*np.linspace(0, 1, 80)\n dt = np.mean(np.diff(time)); dz = np.mean(np.diff(Z3))\n lims = (time[0]-0.5*dt,time[-1]+0.5*dt,Z3[0]-0.5*dz,Z3[-1]+0.5*dz)\n c = AX.imshow(Temper, aspect='auto', vmin=Tmin, vmax=Tmax,\\\n extent=lims)\n AX.set_xlabel('Time (yearday)')\n AX.set_ylabel('Depth (m)')\n #AX.invert_yaxis()\n clb = F.colorbar(c, format='%.2f')\n clb.set_label('Temp. ($^\\circ C$)')\n F.show()\n F.savefig(FigDir+'Day_{}_{}_detail_M3.png'.format(Start,End))\n\n# Load CTD data\nCasts = []; Lats = []; Lons = []; Stations = []\nfor file in CTDName:\n Casts.append(np.loadtxt(CTDPath+file, dtype=np.float64, \\\n usecols=(0,2,4), skiprows=1))\n nCast = file[14:16]\n ConfFile = CTDPath+file.replace('asc','hdr')\n f = open(ConfFile, 'r')\n lines = f.read().splitlines()\n LatLine = [line for line in lines if 'Latitude' in line]\n Lat = LatLine[0].lstrip('* NMEA Latitude = ').rstrip('N')\n Lat = Lat.split(); Lat = float(Lat[0]) + float(Lat[1])/60.0\n LonLine = [line for line in lines if 'Longitude' in line]\n Lon = LonLine[0].lstrip('* NMEA Longitude = ').rstrip('W')\n Lon = Lon.split(); Lon = -(float(Lon[0]) + float(Lon[1])/60.0)\n Lats.append(Lat); Lons.append(Lon)\n ind = file.find('_')\n Stations.append(file[ind+1:ind+4]+'\\n'+file[ind+5:ind+8])\n\nTemp = []; Depth = []\nimport gsw\nfrom scipy.stats import binned_statistic as binstat\nfor Cast,Lat,Lon,Stat in zip(Casts,Lats,Lons,Stations):\n # remove nans and outliers\n pgood = (~np.isnan(Cast[:,0])) & (Cast[:,0]>0)\n tgood = (~np.isnan(Cast[:,1])) & (Cast[:,1]>0)\n sgood = (~np.isnan(Cast[:,2])) & (Cast[:,2]>0)\n\n good = sgood & tgood & pgood\n\n p = Cast[:,0][good]\n t = Cast[:,1][good]\n\n # Compute depth\n z = gsw.depth_from_z(gsw.z_from_p(p, Lat))\n\n # bin data\n bins = np.arange(2750,3500,2)\n pbin = binstat(z, p, bins=bins)[0]\n zbin = binstat(z, z, bins=bins)[0]\n tbin = binstat(z, t, bins=bins)[0]\n\n p = pbin[~np.isnan(pbin) & (pbin!=0)]\n z = zbin[~np.isnan(zbin) & (zbin!=0)]\n t = tbin[~np.isnan(tbin) & (tbin!=0)]\n\n # If the cast does not have points in the depth range we are interested in\n # do not add it\n if p.size>0:\n Temp.append(t)\n Depth.append(z)\n\n# plot comparison with ctd\n# data just from the last day\nTlast = Temper[:,np.nonzero((time>290.32)&(time<291.32))[0]]\nTctd = np.mean(Tlast, axis=1)\nsdT = np.std(Tlast, axis=1)\n\nF = ppl.figure()\nF.subplots_adjust(right=0.99, top=0.99)\nAX = F.add_subplot(111)\nAX.plot(Tctd, Z3, 'b-', lw=2)\nAX.plot(Tctd+sdT, Z3, 'b-', lw=1)\nAX.plot(Tctd-sdT, Z3, 'b-', lw=1)\nfor t,d in zip(Temp,Depth):\n AX.plot(t, -d, 'r.', lw=1)\nAX.set_xlabel('Temp. 
($^\circ C$)')\nAX.set_ylabel('Depth ($m$)')\nF.show()\nF.savefig(FigDir+'CTD_comparison_detail_M3.png')\n","sub_path":"LIS132/ProcessData/ProcessDataDetail3.py","file_name":"ProcessDataDetail3.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"587017559","text":"#write a function to check if a number is reverse of the number.\n#i.e. palindrome. E.g. 12321 is a palindrome.\ndef palindrome(a):\n    rev_num = 0\n    while a>0:\n        digit = a % 10 # take the last digit\n        rev_num = rev_num * 10 + digit # append it to the reversed value\n        a = a // 10 # drop the last digit\n    return rev_num\nnum = int(input(\"enter a number \\t\"))\ncheck_num = palindrome(num)\nif check_num == num:\n    print(\"the number is palindrome\")\nelse:\n    print(\"the number is not palindrome\")","sub_path":"function2.py","file_name":"function2.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"96495758","text":"from fabric.contrib.project import rsync_project\nfrom fabric.api import env, local, task\n\nenv.user = 'poletaev'\nenv.hosts = ['zenwalker.me']\n\nEXCLUDE_FILES = {\n    'drafts',\n    'cv-ru.txt',\n    'cv-en.txt',\n    'wrap.py',\n}\n\n\n@task\ndef deploy():\n    local('make')\n    rsync_project(local_dir='build/', remote_dir='www/zenwalker.ru',\n                  exclude=EXCLUDE_FILES, delete=True)\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"376817000","text":"#coding=utf-8\r\n#Version:python3.6.0\r\n#Tools:Pycharm 2017.3.2\r\n# Author:LIKUNHONG\r\n__date__ = '2018/12/29 15:34'\r\n__author__ = 'likunkun'\r\n\r\n# server side\r\nimport random\r\nimport socket\r\nimport time\r\n# define the socket variable\r\nserver = socket.socket()\r\n# bind the port to listen on\r\nserver.bind(('localhost', 6969))\r\n# listen\r\nserver.listen(5) # at most 5 simultaneous connections\r\nwhile True:\r\n    conn, addr = server.accept()\r\n    count = 0\r\n    while True:\r\n        temperature = random.randint(-40,40)\r\n        conn.send(bytes(str(temperature).encode()))\r\n        count += 1\r\n        time.sleep(1)\r\n\r\nserver.close()\r\n","sub_path":"curriculum_design/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"265401629","text":"import argparse\nimport torch\nimport os\nimport numpy as np\nimport pandas as pd\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GCNConv\n\nfrom datasets import get_citation_dataset\nfrom train_eval import run\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', type=str)\nparser.add_argument('--gpu-no', type=int, default=0)\nparser.add_argument('--random_splits', type=bool, default=False)\nparser.add_argument('--runs', type=int, default=20)\nparser.add_argument('--epochs', type=int, default=1000)\nparser.add_argument('--lr', type=float, default=0.01)\nparser.add_argument('--weight_decay', type=float, default=0.0005)\nparser.add_argument('--early_stopping', type=int, default=200)\nparser.add_argument('--hidden', type=int, default=64)\nparser.add_argument('--dropout', type=float, default=0.5)\nparser.add_argument('--alpha', type=float, default=0.1)\nparser.add_argument('--recache', action=\"store_true\", help=\"clean up the old adj data\", default=True)\nparser.add_argument('--normalize-features', action=\"store_true\", default=True)\nparser.add_argument('--adj-type', type=str, default='or')\n\nargs = 
parser.parse_args()\n\nclass Net(torch.nn.Module):\n def __init__(self, dataset, cached=True):\n super(Net, self).__init__()\n self.conv1 = GCNConv(dataset.num_features, args.hidden)\n self.conv2 = GCNConv(args.hidden, dataset.num_classes)\n\n def reset_parameters(self):\n self.conv1.reset_parameters()\n self.conv2.reset_parameters()\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x = F.relu(self.conv1(x, edge_index))\n x = F.dropout(x, p=args.dropout, training=self.training)\n x = self.conv2(x, edge_index)\n \n return F.log_softmax(x, dim=1)\n\ndef run_gcn(dataset,gpu_no):\n dataset = get_citation_dataset(dataset, args.alpha, args.recache, args.normalize_features, args.adj_type)\n print(\"Num of edges \",dataset[0].num_edges)\n val_loss, test_acc, test_std, time = run(dataset, gpu_no, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay,\n args.early_stopping)\n return val_loss, test_acc, test_std, time\n\nif __name__ == '__main__':\n if args.dataset is not None:\n dataset_name = [args.dataset]\n else:\n dataset_name = ['cora_ml','citeseer']\n outputs = ['val_loss', 'test_acc', 'test_std', 'time'] \n result = pd.DataFrame(np.arange(len(outputs)*len(dataset_name), dtype=np.float32).reshape(\n (len(dataset_name), len(outputs))), index=dataset_name, columns=outputs)\n for dataset in dataset_name:\n val_loss, test_acc, test_std, time = run_gcn(dataset,args.gpu_no)\n result.loc[dataset]['val_loss'] = val_loss\n result.loc[dataset]['test_acc'] = test_acc\n result.loc[dataset]['test_std'] = test_std\n result.loc[dataset]['time'] = time","sub_path":"code/gcn.py","file_name":"gcn.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"51826283","text":"\"\"\"\nCommon Functions to haproxy_health\nfaciliates querying haproxy stats socket\n\"\"\"\nimport socket\nfrom cgi import escape\nfrom cStringIO import StringIO\nfrom time import time\n\n\nclass HAProxyStats(object):\n \"\"\"\n Used for communicating with HAProxy through its local UNIX socket interface.\n \"\"\"\n HAPROXY_LISTENER = '/var/run/haproxy.sock'\n INT_FIELDS = ['slim', 'scur', 'type', 'act', 'bck']\n\n def __init__(self, socket_name=HAPROXY_LISTENER):\n \"\"\"\n constructor for HAProxyStats\n @param self.socket_name - path to haproxy socket to use\n @param self.raw_stats - raw string of stats output from haproxy socket\n @param self.stats - list of dictionaries of parsed stats\n \"\"\"\n self.socket_name = socket_name\n self.raw_stats = None\n self.stats = []\n self.frontends = []\n self.backends = []\n self.update_stats()\n\n def execute(self, command, extra=\"\", timeout=5):\n \"\"\"\n Executes a HAProxy command by sending a message to a HAProxy's local\n UNIX socket and waiting up to 'timeout' milliseconds for the response.\n \"\"\"\n if extra:\n command = command + ' ' + extra\n\n buff = StringIO()\n end = time() + timeout\n\n client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n\n try:\n client.connect(self.socket_name)\n client.send(command + '\\n')\n\n while time() <= end:\n data = client.recv(4096)\n if data:\n buff.write(data)\n else:\n return buff.getvalue()\n except Exception:\n raise\n finally:\n client.close()\n\n def get_stats(self):\n \"\"\"\n return list of stats from haproxy socket\n will attempt to update stats list if empty\n \"\"\"\n if len(self.stats) == 0:\n self.update_stats()\n return self.stats\n\n def update_stats(self):\n \"\"\"\n query HAProxy socket and update 
self.stats,\n self.frontends, and self.backends lists\n \"\"\"\n # query all statistics from haproxy socket\n query = '''show stat -1 -1 -1'''\n try:\n # filter empty lines and split on line breaks\n self.raw_stats = filter(None, self.execute(query).split('\\n'))\n # unable to query stats\n except socket.error:\n raise\n\n # reset list of stats\n self.stats = []\n for line in range(len(self.raw_stats)):\n # first line is headers\n if line == 0:\n # drop '' entries as split() adds an empty string to the end\n headers = filter(None, self.raw_stats[line].split(','))\n # remove haproxy comment + whitespace from first line\n headers[0] = headers[0].strip('#').strip()\n else:\n # drop last entry as split() adds an empty string to the end\n stat_row = self.raw_stats[line].split(',')[0:-1]\n stat_dict = dict(zip(headers, stat_row))\n\n # transform all integer fields\n for key in stat_dict.keys():\n if key in self.INT_FIELDS:\n if stat_dict[key] == '':\n stat_dict[key] = 0\n else:\n stat_dict[key] = int(stat_dict[key])\n self.stats.append(stat_dict)\n # update frontends list\n self.update_fe()\n # update backends list\n self.update_be()\n\n def update_fe(self):\n \"\"\"\n updates self.frontends list\n \"\"\"\n self.frontends = []\n for stat in self.get_stats():\n if stat['svname'] == 'FRONTEND':\n if stat['pxname'] != 'stats':\n self.frontends.append(stat['pxname'])\n\n def update_be(self):\n \"\"\"\n updates self.backends list\n \"\"\"\n self.backends = []\n for stat in self.get_stats():\n if stat['svname'] == 'BACKEND':\n if stat['pxname'] != 'stats':\n self.backends.append(stat['pxname'])\n","sub_path":"haproxy/HAProxyStats.py","file_name":"HAProxyStats.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"445902322","text":"import json\nimport pandas as pd\nimport time\nfrom pprint import pprint\nimport glob\nfrom PIL import Image\nimport pytest\n\nfrom conftest import ghost\nfrom data.app_data import htaccess\nfrom test_EFL.test_EFL import filters\n\ndev = 'efl-dev.smashedmedia.guru'\nprod = 'https://endlessfrontierlabs.com/'\nnew_mentors = [\n 'David Sica',\n 'Peter Pfeiffer',\n 'Oliver Mitchell',\n 'Nasir Memon',\n 'Brian Hirsch',\n 'Nikhil Gupta',\n 'Brenton Fargnoli',\n 'Nicole McKnight'\n]\n\n# def test_mentors_grid_Mentors_inner_Pages(app,mdb):\n# app.open(dev)\n# app.efl.menu_go_to('Mentors')\n# grid = app.efl.work_items_grid()\n# mentors1 = [app.efl.mentor_info(mentor) for mentor in grid]\n# print(json.dumps(mentors1, indent=4))\n\n # edb = mdb.db('efl')\n # collection = edb.collection['mentors']\n # collection.insert_many(mentors1)\n # #\n # mentors = ([u for u in collection.find()])\n # pprint(json.loads())\n # df2 = pd.DataFrame(mentors, columns=['name','info'])\n # print(df2)\n\ndef test_Mentors_inner_PopUPs(app):\n app.open(htaccess +dev)\n app.efl.menu_go_to('Mentors')\n # footer = app.driver.find_element_by_css_selector('#footer-outer')\n app.fullpage_screenshot('mentors.png',scroll_delay=3)\n grid = app.efl.work_items_grid()\n grid.reverse()\n time.sleep(3)\n for mentor in grid:\n app.scroll(mentor)\n time.sleep(1)\n mentor.click()\n opened = app.efl.mentor_pop_up_info()\n app.driver.get_screenshot_as_file('{}.png'.format(opened['name']))\n print(\"Catch \"+ opened['name'])\n time.sleep(2)\n app.driver.find_element_by_css_selector('.mfp-close').click()\n # with open('{}.png'.format(opened['name']), 'rb') as file:\n # img = Image.open(file)\n # img.show()\n\n# def 
test_new_added_Mentors(ghost):\n#     ghost.open(prod)\n#     ghost.efl.menu_go_to('Mentors')\n#     ghost.fullpage_screenshot('mentors.png', scroll_delay=3)\n#     grid = ghost.efl.work_items_grid()\n#     grid.reverse()\n#     for mentor in grid:\n#         ghost.scroll(mentor)\n#         men = ghost.efl.mentor_info(mentor)\n#         if men['name'] in [new_mentors]:\n#             print(men['name'])\n    # if men['name'] in [new_mentors]:\n    #     mentor.click()\n    #     opened = app.efl.mentor_pop_up_info()\n    #     app.driver.get_screenshot_as_file('{}.png'.format(opened['name']))\n# def test_opened_images():\n#     images = glob.glob(\"test_EFL/*.png\")\n#     for image in images:\n#         with open(image, 'rb') as file:\n#             img = Image.open(file)\n#             img.show()\n\n\n\n# @pytest.mark.parametrize('filter', filters)\n# def test_filters_mentor_grid(app, filter):\n#     app.open(dev + '/mentors/')\n#     app.driver.find_element_by_css_selector('[data-filter=\"{}\"]'.format(filter)).click()\n#     time.sleep(2)\n#     grid = app.efl.work_items_grid()\n#     for mentor in grid:\n#         time.sleep(1)\n#         team = app.efl.mentor_info(mentor)\n#         pprint(team)\n#     app.driver.find_element_by_link_text('All').click()","sub_path":"test_EFL/test_Mentors.py","file_name":"test_Mentors.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"240113040","text":"import tensorflow as tf\nimport numpy as np\nfrom tensorflow.keras import preprocessing\nfrom tensorflow.keras.models import Model, load_model\n\nclass IntentModel:\n    def __init__(self, model_name, preprocess):\n        self.labels= {0:\"인사\",1:\"욕설\",2:\"주문\",3:\"예약\",4:\"기타\"} # intent classification dictionary\n        self.model = load_model(model_name)\n\n        self.p = preprocess\n\n    def predict_class(self, query):\n        pos = self.p.pos(query)\n        keywords= self.p.get_keywords(pos, without_tag = True)\n        sequences = [self.p.get_wordidx_sequence(keywords)]\n\n        from GlobalParams import MAX_SEQ_LEN\n\n        padded_seqs = preprocessing.sequence.pad_sequences(sequences, maxlen = MAX_SEQ_LEN, padding = 'post')\n        predict = self.model.predict(padded_seqs)\n        predict_class = tf.math.argmax(predict, axis = 1)\n        return predict_class.numpy()[0]","sub_path":"data_visualization/deeplearning/chatbot/test/IntentModel.py","file_name":"IntentModel.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"219139058","text":"#Name: Izabella Wieckowska\n#Date: 03 Oct 2017\n#This program makes a 5 number loop.\n\nimport turtle\nhelpme = turtle.Turtle()\n\nfor i in range(5):\n    number = int(input(\"Please enter a number\"))\n    helpme.forward(number)\n    helpme.left(90)\n    \n\n","sub_path":"5numloop.py","file_name":"5numloop.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"152153409","text":"from rest_framework import status\nfrom rest_framework.decorators import api_view\n\nfrom calendar_prt.models import *\nfrom .serializers import *\nfrom django.http import Http404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\n\n@api_view(['GET'])\ndef monthPullList(request):\n    pull = MonthPull.objects.all()\n    pull = MonthPullSerializer(pull, many=True)\n    return Response(pull.data)\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef monthRequestDetails(request, id):\n    try:\n        cur_request = MonthPull.objects.get(id=id)\n    except MonthPull.DoesNotExist:\n        return Response(status=status.HTTP_404_NOT_FOUND)\n\n    if request.method 
== 'GET':\n serializer = MonthPullSerializer(cur_request)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = MonthPullSerializer(cur_request, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n raise Http404\n\n\n","sub_path":"calendar_prt/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"190251975","text":"textHash = {}\r\ntextList = []\r\nignore_list = [';', '.', ',', '\\\"']\r\n# Make sure string doesn't contain non characters (without use of regex)\r\ndef stringCheck(input_string):\r\n check = input_string.lower()\r\n for item in ignore_list:\r\n if item in check:\r\n check = check.replace(item, '')\r\n else:\r\n pass\r\n return check\r\n# Return index of tuple\r\ndef tReturn(a):\r\n return a[1]\r\ndef tReturnVal(a):\r\n return a[0]\r\n# Convert dict to a list of tuples and sort\r\n# This only works because python SORT method is stable\r\ndef sortHash(_dict):\r\n # Initialize temp variables\r\n tempList = []\r\n tempDict = _dict\r\n # Create list of tuples from dictionary using (key, value)\r\n for item in tempDict:\r\n tempList.append((item, tempDict[item]))\r\n # Sort first using key (sorts everything alphabetically)\r\n tempList.sort(key=tReturnVal)\r\n # Sort second using value (sorts everything from greatest to least value)\r\n tempList.sort(key=tReturn, reverse=True)\r\n # Return list\r\n return tempList\r\n\r\n# Write text file to list\r\nwith open('input.txt') as f:\r\n for line in f:\r\n for word in line.split(\" \"):\r\n textList.append(stringCheck(word))\r\n\r\n# Remove non-ascii quotes from beginning and ending of string\r\ntextList[0] = textList[0][3:]\r\ntextList[len(textList)-1] = textList[len(textList)-1][:-5]\r\n\r\n# Find occurence of each word\r\nfor item in textList:\r\n if item in textHash:\r\n textHash[item] = textHash[item] + 1\r\n else:\r\n textHash[item] = 1\r\n\r\n# Output result to text file\r\nwith open('output.txt', 'w') as f:\r\n for item in sortHash(textHash):\r\n f.write(tReturnVal(item) + \" \" + str(tReturn(item)) + '\\n')\r\n","sub_path":"camblyHash.py","file_name":"camblyHash.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"423098607","text":"# 2.0 changes\n# added option to create Trace.db file directly\n# changed name to \"Trace.db creator\"\n\n\nimport tkinter as ttk\nimport datetime\nimport os.path\nimport sqlite3\n\nclass MyApplication:\n def __init__(self, root):\n # creates and configures the main window\n self.root = root\n root.title(\"Trace.db creator 2.0\")\n root.minsize(width=200, height=100)\n self.create_GUI()\n\n\n def create_GUI(self):\n # initialize app variables and add the GUI elements\n # initialize the app variables\n self.index = ttk.IntVar()\n self.index.set(1)\n self.origin = ttk.StringVar()\n self.origin.set(\"46.778672, 23.602098\")\n self.destination = ttk.StringVar()\n self.destination.set(\"46.754476, 23.550568\")\n self.time_span = ttk.IntVar()\n self.time_span.set(7)\n\n # add labels, entry fields and buttons here\n ttk.Label(root, text=\"Index\").grid(row=2, column=2)\n ttk.Entry(root, textvariable=self.index).grid(row=2, column=4)\n ttk.Label(root, text=\"Origin\").grid(row=4, column=2)\n ttk.Entry(root, textvariable=self.origin).grid(row=4, column=4)\n ttk.Label(root, text=\"Destination\").grid(row=6, 
column=2)\n ttk.Entry(root, textvariable=self.destination).grid(row=6, column=4)\n ttk.Label(root, text=\"How many days?\").grid(row=8, column=2)\n ttk.Entry(root, textvariable=self.time_span).grid(row=8, column=4)\n\n ttk.Button(root, text=\"TXT with SQL\", command=self.write_text_file).grid(row=10, column=2)\n ttk.Button(root, text=\"TRACE.DB\", command=self.write_db_file).grid(row=10, column=4)\n \n\n def generate_sql_commands(self):\n # get entry field values, generate commands and return a list of sql commands\n sql_strings = []\n num = self.index.get()\n orig = self.origin.get().split(',')\n dest = self.destination.get().split(',')\n days = self.time_span.get()\n start_time = datetime.datetime.now() - datetime.timedelta(days=days)\n\n for i in range (0,days):\n # print(start_time)\n temp_time = start_time\n for i in range (0,2):\n # print(temp_time)\n for i in range(0, 4):\n if num % 4 == 1:\n rtype = ''\n sql = 'INSERT INTO item VALUES(\\'{0}\\',\\'{{\\\"lon\\\":{1},\\\"lat\\\":{2}}}\\',\\'{3}\\',\\'{4}\\')'\\\n .format(num, orig[1].lstrip(), orig[0], rtype, temp_time)\n elif num % 4 == 2:\n rtype = 'START'\n sql = 'INSERT INTO item VALUES(\\'{0}\\',\\'{{\\\"lon\\\":{1},\\\"lat\\\":{2}}}\\',\\'{3}\\',\\'{4}\\')'\\\n .format(num, orig[1].lstrip(), orig[0], rtype, temp_time)\n elif num % 4 == 3:\n rtype = 'STOP'\n sql = 'INSERT INTO item VALUES(\\'{0}\\',\\'{{\\\"lon\\\":{1},\\\"lat\\\":{2}}}\\',\\'{3}\\',\\'{4}\\')'\\\n .format(num, dest[1].lstrip(), dest[0], rtype, temp_time)\n elif num % 4 == 0:\n rtype = ''\n sql = 'INSERT INTO item VALUES(\\'{0}\\',\\'{{\\\"lon\\\":{1},\\\"lat\\\":{2}}}\\',\\'{3}\\',\\'{4}\\')'\\\n .format(num, dest[1].lstrip(), dest[0], rtype, temp_time)\n sql_strings.append(sql)\n temp_time = temp_time + datetime.timedelta(hours=1)\n num += 1\n start_time = start_time + datetime.timedelta(days=1)\n\n print(\"Success: SQL commands were generated\")\n return(sql_strings)\n\n\n def create_trace_db(self):\n # Creates the empty database with the correct structure (tables, fields, field types)\n try:\n with sqlite3.connect(\"trace.db\") as connection:\n c = connection.cursor()\n c.execute(\"PRAGMA page_size = 4096\") # page size needs to be set before auto_vacuum; leave it here \n c.execute(\"PRAGMA auto_vacuum = FULL\")\n c.execute(\"PRAGMA user_version = 1\")\n c.execute(\"CREATE TABLE android_metadata(locale TEXT)\")\n c.execute(\"INSERT INTO android_metadata VALUES ('en_US')\")\n c.execute(\"CREATE TABLE item (id INTEGER PRIMARY KEY AUTOINCREMENT, location varchar NOT NULL, \\\n tag varchar, time TIMESTAMP DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f', 'NOW')))\")\n print(\"Success: Empty trace.db created\")\n except sqlite3.OperationalError:\n print(sqlite3.OperationalError)\n\n\n def write_text_file(self):\n # input list of SQL commands and adds them to the TXT file\n with open(\"sql.txt\",'a') as fn:\n for s in self.generate_sql_commands():\n fn.writelines(\"{0};\\n\".format(s))\n\n\n def write_db_file(self):\n # input list of SQL commands and adds the items in the trace.db file\n self.create_trace_db()\n try:\n with sqlite3.connect(\"trace.db\") as connection:\n c = connection.cursor()\n for s in self.generate_sql_commands():\n c.execute(s)\n print(\"Success: SQL commands executed\")\n except sqlite3.OperationalError:\n print(\"Failure: File already exists\")\n\n\n\nif __name__ == '__main__':\n root = ttk.Tk() # create the top level window\n MyApplication(root) # create a MyApplication object and pass the top level window as argument\n root.mainloop() # launch the 
top level window","sub_path":"Trace.db creator 2.0.py","file_name":"Trace.db creator 2.0.py","file_ext":"py","file_size_in_byte":5540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"191104740","text":"import ast\nimport subprocess\n\nfrom ObjectDetector.detector.DetectionEvent import DetectionEvent\nfrom ObjectDetector.Utilities.DatabaseHandler import DetectionDatabaseHandler\n\n\nclass DetectionReviewerWindowModel:\n\n    @staticmethod\n    def open_file_in_explorer(file_path):\n        subprocess.check_call(['nautilus', '--', file_path])\n\n    @staticmethod\n    def get_detections_from_database():\n        handler = DetectionDatabaseHandler()\n\n        keys, detections = handler.select_all_detections()\n        array_of_detection_events = []\n        for x in detections:\n            array_of_detection_events.append(\n                DetectionEvent(ast.literal_eval(x[keys[1]]), x[keys[2]], x[keys[3]],\n                               x[keys[4]], x[keys[5]],\n                               x[keys[6]], x[keys[0]]))\n        handler.close_connection()\n        return array_of_detection_events\n\n    @staticmethod\n    def delete_detection(id):\n        handler = DetectionDatabaseHandler()\n        handler.delete_detection(id)\n","sub_path":"ObjectDetector/UserInterface/Model/DetectionReviewerWindowModel.py","file_name":"DetectionReviewerWindowModel.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"229412506","text":"from collections import OrderedDict\n\nimport numpy as np\nimport torch\n\nimport rlkit.torch.pytorch_util as ptu\nfrom rlkit.misc.eval_util import create_stats_ordered_dict\nfrom rlkit.state_distance.tdm import TemporalDifferenceModel\nfrom rlkit.torch.ddpg.ddpg import DDPG\n\n\nclass TdmDdpg(TemporalDifferenceModel, DDPG):\n    def __init__(\n            self,\n            env,\n            qf,\n            exploration_policy,\n            ddpg_kwargs,\n            tdm_kwargs,\n            base_kwargs,\n            policy=None,\n            replay_buffer=None,\n    ):\n        DDPG.__init__(\n            self,\n            env=env,\n            qf=qf,\n            policy=policy,\n            exploration_policy=exploration_policy,\n            replay_buffer=replay_buffer,\n            **ddpg_kwargs,\n            **base_kwargs\n        )\n        super().__init__(**tdm_kwargs)\n        # Not supporting these in this implementation\n        assert self.qf_weight_decay == 0\n        assert self.residual_gradient_weight == 0\n\n    def _do_training(self):\n        batch = self.get_batch()\n        rewards = batch['rewards']\n        terminals = batch['terminals']\n        obs = batch['observations']\n        actions = batch['actions']\n        next_obs = batch['next_observations']\n        goals = batch['goals']\n        num_steps_left = batch['num_steps_left']\n\n        \"\"\"\n        Policy operations.\n        \"\"\"\n        policy_actions, pre_tanh_value = self.policy(\n            obs, goals, num_steps_left, return_preactivations=True,\n        )\n        pre_activation_policy_loss = (\n            (pre_tanh_value**2).sum(dim=1).mean()\n        )\n        q_output = self.qf(\n            observations=obs,\n            actions=policy_actions,\n            num_steps_left=num_steps_left,\n            goals=goals,\n        )\n        raw_policy_loss = - q_output.mean()\n        policy_loss = (\n            raw_policy_loss +\n            pre_activation_policy_loss * self.policy_pre_activation_weight\n        )\n\n        \"\"\"\n        Critic operations.\n        \"\"\"\n        next_actions = self.target_policy(\n            observations=next_obs,\n            goals=goals,\n            num_steps_left=num_steps_left-1,\n        )\n        # speed up computation by not backpropping these gradients\n        # (detach is not in-place, so rebind the name)\n        next_actions = next_actions.detach()\n        target_q_values = self.target_qf(\n            observations=next_obs,\n            actions=next_actions,\n            goals=goals,\n            num_steps_left=num_steps_left-1,\n        )\n        q_target = self.reward_scale * rewards + (1. 
- terminals) * self.discount * target_q_values\n q_target = q_target.detach()\n if self.reward_type == 'indicator':\n q_target = torch.clamp(\n q_target,\n -self.reward_scale/(1-self.discount),\n 0\n )\n q_pred = self.qf(\n observations=obs,\n actions=actions,\n goals=goals,\n num_steps_left=num_steps_left,\n )\n if self.reward_type == 'distance' and self.tdm_normalizer:\n q_pred = self.tdm_normalizer.distance_normalizer.normalize_scale(\n q_pred\n )\n q_target = self.tdm_normalizer.distance_normalizer.normalize_scale(\n q_target\n )\n bellman_errors = (q_pred - q_target) ** 2\n qf_loss = self.qf_criterion(q_pred, q_target)\n\n \"\"\"\n Update Networks\n \"\"\"\n self.policy_optimizer.zero_grad()\n policy_loss.backward()\n self.policy_optimizer.step()\n\n self.qf_optimizer.zero_grad()\n qf_loss.backward()\n self.qf_optimizer.step()\n\n self._update_target_networks()\n\n if self.need_to_update_eval_statistics:\n self.need_to_update_eval_statistics = False\n self.eval_statistics['QF Loss'] = np.mean(ptu.get_numpy(qf_loss))\n self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(\n policy_loss\n ))\n self.eval_statistics['Raw Policy Loss'] = np.mean(ptu.get_numpy(\n raw_policy_loss\n ))\n self.eval_statistics['Preactivation Policy Loss'] = (\n self.eval_statistics['Policy Loss'] -\n self.eval_statistics['Raw Policy Loss']\n )\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q Predictions',\n ptu.get_numpy(q_pred),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Q Targets',\n ptu.get_numpy(q_target),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Bellman Errors',\n ptu.get_numpy(bellman_errors),\n ))\n self.eval_statistics.update(create_stats_ordered_dict(\n 'Policy Action',\n ptu.get_numpy(policy_actions),\n ))\n\n def evaluate(self, epoch):\n DDPG.evaluate(self, epoch)\n\n def pretrain(self):\n super().pretrain()\n if self.qf.tdm_normalizer is not None:\n self.target_qf.tdm_normalizer.copy_stats(\n self.qf.tdm_normalizer\n )\n self.target_policy.tdm_normalizer.copy_stats(\n self.qf.tdm_normalizer\n )\n","sub_path":"rlkit/state_distance/tdm_ddpg.py","file_name":"tdm_ddpg.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"455422608","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n    path('', views.home, name='home'),\n    path('toggle_bot', views.toggle_bot, name='toggle_bot'),\n    path('collect',views.collect,name='collect'),\n    path('set_expo',views.set_expo,name='set_exp'),\n    path('farming',views.farming,name='farming')\n\n]\n","sub_path":"ogamu/mybot/bot/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"226416613","text":"# --Brute-force search--\n# pick M of the chicken shops in the city => use combinations\n# find which M to pick so that the city's chicken distance is minimized\nfrom itertools import combinations\n\nn, m = (map(int, input().split()))\nchicken, house = [], []\n\nfor r in range(n): # read the city layout over n lines\n    data = list(map(int, input().split()))\n    for c in range(n):\n        if data[c] == 1:\n            house.append((r, c))\n        elif data[c] == 2:\n            chicken.append((r, c))\n\n# all combinations choosing m of the chicken shops\ncandidates = list(combinations(chicken, m))\n\n\ndef get_sum(candidate): # computes the city chicken distance (= sum of chicken distances)\n    result = 0\n    for hx, hy in house:\n        # find the nearest chicken shop\n        tmp = 1e9\n        for cx, cy in candidate:\n            tmp = min(tmp, abs(hx-cx)+abs(hy-cy))\n        result += tmp\n    return result\n\n\n# find the minimum city chicken distance\nresult = 1e9\nfor candidate in candidates:\n    result = min(result, get_sum(candidate))\n\nprint(result)\n","sub_path":"04implementation/BaekJoon/15686.py","file_name":"15686.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"287465353","text":"import discord\nfrom decouple import config\nfrom commands import *\nfrom commands.scripts import *\n\nloader.db_load() # loads database\nloader.moderator_load() # loads moderator\nloader.poll_load() # loads poll\nloader.client_load() # loads client\n\ndb = loader.db_loaded()\nmoderator = loader.moderator_loaded()\npoll = loader.poll_loaded()\nclient = loader.client_loaded()\n\n\n@client.event\nasync def on_ready():\n    print(\"\\nWe have logged in as {0.user}\".format(client))\n\n    # sets roBOT's status to 'Listening to _help'\n    activity = discord.Activity(type=discord.ActivityType.listening, name=\"_help\")\n    await client.change_presence(status=discord.Status.online, activity=activity)\n\n\n@client.event\nasync def on_message(message):\n    if message.author == client.user:\n        return\n\n    if message.content.startswith(\"_\"):\n        # if the command message is a single word message, takes the whole message\n        if commands.get(message.content[: message.content.find(\" \")]) == None:\n            command = commands.get(message.content)\n        # if the command message is a multi word message, takes only the 1st word\n        else:\n            command = commands.get(message.content[: message.content.find(\" \")])\n\n        exec(str(await eval(command + \"(discord, message)\")))\n        await db.score_up(message, client) # levels up the author of the message\n\n    elif message.content.startswith(\"$\"):\n        if message.author.guild_permissions.administrator:\n            if commands.get(message.content[: message.content.find(\" \")]) == None:\n                command = commands.get(message.content)\n            else:\n                command = commands.get(message.content[: message.content.find(\" \")])\n\n            exec(str(await eval(command + \"(message)\")))\n            await db.score_up(message, client)\n\n        else: # message author doesn't have admin rights\n            await message.channel.send(\n                \"<@\" + str(message.author.id) + \"> Do you have admin rights?\"\n            )\n\n    # check if the server is configured for moderation\n    if await db.check_server_moderation(message.guild.id) == 1:\n        # checks the words of the message to 
moderate\n await moderator.check(message)\n\n\ncommands = {\n \"_hi\": \"hi_contrib.hi\",\n \"_contribute\": \"hi_contrib.contrib\",\n \"_covrep\": \"api_commands.covrep\",\n \"_f\": \"api_commands.f\",\n \"_movie\": \"api_commands.movie\",\n \"_song\": \"api_commands.song\",\n \"_search\": \"wiki_search.search\",\n \"_wiki\": \"wiki_search.wikipedia\",\n \"_math\": \"api_commands.math\",\n \"_mean\": \"api_commands.mean\",\n \"_wea\": \"api_commands.wea\",\n \"_inspire\": \"api_commands.inspire\",\n \"_poll\": \"poll._create_poll\",\n \"_rolldice\": \"games.roll_a_dice\",\n \"_tosscoin\": \"games.toss_coin\",\n \"_help\": \"help.help\",\n \"_confess\": \"confess_rank.confess\",\n \"_rank\": \"confess_rank.rank\",\n \"_joke\": \"api_commands.joke\",\n \"_wearesoftwareengineers\": \"api_commands.programming_joke\",\n \"_meme\": \"api_commands.meme\",\n \"_trivia\": \"api_commands.trivia\",\n \"_ptrivia\": \"api_commands.ptrivia\",\n \"$clean\": \"admin_commands.clean\",\n \"$moderation\": \"admin_commands.moderation\",\n \"$configure\": \"admin_commands.configure\",\n \"$deconfigure\": \"admin_commands.deconfigure\",\n \"$leave\": \"admin_commands.leave\",\n \"$mute\": \"admin_commands.mute\",\n \"$unmute\": \"admin_commands.unmute\",\n \"$kick\": \"admin_commands.kick\",\n \"$configconfess\": \"admin_commands.configconfess\",\n \"$deconfigconfess\": \"admin_commands.deconfigconfess\",\n}\n\n\nDISCORD_TOKEN = config(\"TOKEN\")\nclient.run(DISCORD_TOKEN)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"242201278","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"listing/\", views.single_listing_view, name=\"single_listing_view\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n\n path(\"profile\", views.profile_view, name=\"profile\"),\n path(\"profile/edit\", views.profile_edit, name=\"profile_edit\"),\n path(\"profile/change/email\", views.change_email, name=\"change_email\"),\n path(\"profile/change/password\", views.change_password, name=\"change_password\"),\n path(\"profile/view/mylistings\", views.listings_view, name=\"listings_view\"),\n path(\"profile/view/mylistings/delete\", views.listing_delete, name=\"listing_delete\"),\n path(\"profile/view/mylistings/deactivate\", views.listing_deactivate, name=\"listing_deactivate\"),\n path(\"profile/view/mylistings/activate\", views.listing_activate, name=\"listing_activate\"),\n path(\"profile/view/mylistings/end\", views.listing_end, name=\"listing_end\"),\n\n path(\"watchlist/add\", views.add_to_watchlist, name=\"add_to_watchlist\"),\n path(\"watchlist/remove\", views.remove_from_watchlist, name=\"remove_from_watchlist\"),\n path(\"watchlist/browse\", views.watchlist_view, name=\"watchlist_view\"),\n\n path(\"add\", views.add_listing, name=\"add_listing\"),\n\n path(\"category\", views.categories_view, name=\"categories_view\"),\n path(\"category/browse\", views.browse_listings_category, name=\"browse_listings_category\"),\n\n path(\"comment/reply\", views.add_reply, name=\"add_reply\"),\n path(\"comment/add\", views.add_comment, name=\"add_comment\"),\n path(\"bid/add\", views.add_bid, 
name=\"add_bid\")\n]","sub_path":"auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"163731776","text":"import logging\nfrom typing import TYPE_CHECKING, Type\n\nfrom django.conf import settings\nfrom django.http import JsonResponse\nfrom rest_framework import status\nfrom rest_framework.views import APIView\n\nfrom conf.authentication import HawkOnlyAuthentication\nfrom mail import icms_serializers\nfrom mail.enums import ChiefSystemEnum, LicenceActionEnum, LicenceTypeEnum, ReceptionStatusEnum\nfrom mail.models import LicenceData, LicenceIdMapping, LicencePayload, Mail\nfrom mail.serializers import LiteLicenceDataSerializer, MailSerializer\nfrom mail.tasks import send_licence_data_to_hmrc\n\nif TYPE_CHECKING:\n from rest_framework.serializers import Serializer # noqa\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass LicenceDataIngestView(APIView):\n authentication_classes = (HawkOnlyAuthentication,)\n\n def post(self, request):\n try:\n data = request.data[\"licence\"]\n except KeyError:\n errors = [{\"licence\": \"This field is required.\"}]\n logger.error(\n \"Failed to create licence data for %s due to %s\",\n request.data,\n errors,\n )\n return JsonResponse(status=status.HTTP_400_BAD_REQUEST, data={\"errors\": errors})\n\n serializer_cls = self.get_serializer_cls(data[\"type\"])\n serializer = serializer_cls(data=data)\n\n if not serializer.is_valid():\n errors = [{\"licence\": serializer.errors}]\n logger.error(\n \"Failed to create licence data for %s due to %s\",\n data,\n errors,\n )\n return JsonResponse(status=status.HTTP_400_BAD_REQUEST, data={\"errors\": errors})\n\n if data[\"action\"] == LicenceActionEnum.UPDATE:\n data[\"old_reference\"] = LicenceIdMapping.objects.get(lite_id=data[\"old_id\"]).reference\n else:\n data.pop(\"old_id\", None)\n\n licence, created = LicencePayload.objects.get_or_create(\n lite_id=data[\"id\"],\n reference=data[\"reference\"],\n action=data[\"action\"],\n old_lite_id=data.get(\"old_id\"),\n old_reference=data.get(\"old_reference\"),\n skip_process=False,\n defaults=dict(\n lite_id=data[\"id\"],\n reference=data[\"reference\"],\n data=data,\n old_lite_id=data.get(\"old_id\"),\n old_reference=data.get(\"old_reference\"),\n ),\n )\n\n logger.info(\"Created LicencePayload [%s, %s, %s]\", licence.lite_id, licence.reference, licence.action)\n\n return JsonResponse(\n status=status.HTTP_201_CREATED if created else status.HTTP_200_OK,\n data={\"licence\": licence.data},\n )\n\n def get_serializer_cls(self, app_type: str) -> Type[\"Serializer\"]:\n if settings.CHIEF_SOURCE_SYSTEM == ChiefSystemEnum.ICMS:\n serializers = {\n LicenceTypeEnum.IMPORT_OIL: icms_serializers.FirearmOilLicenceDataSerializer,\n LicenceTypeEnum.IMPORT_DFL: icms_serializers.FirearmDflLicenceDataSerializer,\n LicenceTypeEnum.IMPORT_SIL: icms_serializers.FirearmSilLicenceDataSerializer,\n LicenceTypeEnum.IMPORT_SAN: icms_serializers.SanctionLicenceDataSerializer,\n }\n\n return serializers[app_type]\n\n return LiteLicenceDataSerializer\n\n\nclass SendLicenceUpdatesToHmrc(APIView):\n authentication_classes = (HawkOnlyAuthentication,)\n\n def get(self, _):\n \"\"\"Force the task of sending licence data to HMRC (I assume for testing?)\"\"\"\n\n success = send_licence_data_to_hmrc.now()\n if success:\n return JsonResponse({}, status=status.HTTP_200_OK)\n else:\n return JsonResponse({}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\nclass 
SetAllToReplySent(APIView):\n    \"\"\"Updates status of all emails to REPLY_SENT\"\"\"\n\n    authentication_classes = (HawkOnlyAuthentication,)\n\n    def get(self, _):\n        Mail.objects.all().update(status=ReceptionStatusEnum.REPLY_SENT)\n        return JsonResponse({}, status=status.HTTP_200_OK)\n\n\nclass Licence(APIView):\n    authentication_classes = (HawkOnlyAuthentication,)\n\n    def get(self, request):\n        \"\"\"Fetch existing licence\"\"\"\n        license_ref = request.GET.get(\"id\", \"\")\n\n        matching_licences = LicenceData.objects.filter(licence_ids__contains=license_ref)\n        matching_licences_count = matching_licences.count()\n\n        if matching_licences_count > 1:\n            logger.warning(\"Too many matches for licence '%s'\", license_ref)\n            return JsonResponse({}, status=status.HTTP_400_BAD_REQUEST)\n\n        elif matching_licences_count == 0:\n            logger.warning(\"No matches for licence '%s'\", license_ref)\n            return JsonResponse({}, status=status.HTTP_404_NOT_FOUND)\n\n        # Return single matching licence\n        mail = matching_licences.first().mail\n        serializer = MailSerializer(mail)\n\n        return JsonResponse(serializer.data)\n","sub_path":"mail/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"531905359","text":"from app import app, db\nfrom flask import render_template, redirect, url_for, flash\nfrom flask_login import login_user, logout_user, current_user, login_required\nfrom app.forms import UserInfoForm, PostForm, LoginForm\nfrom app.models import User, Post, Products\n\n\n@app.route('/')\ndef index():\n    title = 'Coding Temple Flask'\n    posts = Post.query.all()\n    return render_template('index.html', title=title, posts=posts)\n\n\n@app.route('/products')\ndef products():\n    \n    product_1 = {\n        'name': \"T-shirt\",\n        'price': 9.99,\n        'description': \"This is a blue shirt\"\n    }\n    product_2 = {\n        'name': \"Pants\",\n        'price': 19.99,\n        'description': \"These are yellow pants\"\n    }\n    my_products = [product_1, product_2]\n    return render_template('products.html', products=my_products)\n\n\n\n@app.route('/signup', methods=[\"GET\", 'POST'])\ndef signup():\n    signup_form = UserInfoForm()\n    if signup_form.validate_on_submit():\n        # Grab Data from form\n        username = signup_form.username.data\n        email = signup_form.email.data\n        password = signup_form.password.data\n\n        # Check if the username from the form already exists in the User table\n        existing_user = User.query.filter_by(username=username).all()\n        # If there is a user with that username message them asking them to try again\n        if existing_user:\n            # Flash a warning message\n            flash(f'The username {username} is already registered. 
Please try again.', 'danger')\n            # Redirect back to the signup page\n            return redirect(url_for('signup'))\n\n        # Create a new user instance\n        new_user = User(username, email, password)\n        # Add that user to the database\n        db.session.add(new_user)\n        db.session.commit()\n        # Flash a success message thanking them for signing up\n        flash(f'Thank you {username}, you have successfully registered!', 'success')\n\n        # Redirecting to the home page\n        return redirect(url_for('index'))\n    \n    return render_template('signup.html', form=signup_form)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    form = LoginForm()\n    if form.validate_on_submit():\n        # Grab data from form\n        username = form.username.data\n        password = form.password.data\n\n        # Query our User table for a user with username\n        user = User.query.filter_by(username=username).first()\n\n        # Check if the user is None or if password is incorrect\n        if user is None or not user.check_password(password):\n            flash('Your username or password is incorrect', 'danger')\n            return redirect(url_for('login'))\n        \n        login_user(user)\n\n        flash(f'Welcome {user.username}. You have successfully logged in.', 'success')\n\n        return redirect(url_for('index'))\n    \n\n    return render_template('login.html', login_form=form)\n\n\n@app.route('/logout')\ndef logout():\n    logout_user()\n    return redirect(url_for('index'))\n\n\n@app.route('/createpost', methods=['GET', 'POST'])\n@login_required\ndef createpost():\n    form = PostForm()\n    if form.validate_on_submit():\n        print('Hello')\n        title = form.title.data\n        content = form.content.data\n        new_post = Post(title, content, current_user.id)\n        db.session.add(new_post)\n        db.session.commit()\n\n        flash(f'The post {title} has been created.', 'primary')\n        return redirect(url_for('index'))\n    \n    return render_template('createpost.html', form=form)\n\n\n@app.route('/my-account')\n@login_required\ndef my_account():\n    return render_template('my_account.html')\n\n\n@app.route('/my-posts')\n@login_required\ndef my_posts():\n    posts = current_user.posts\n    return render_template('my_posts.html', posts=posts)\n\n@app.route('/my_cart')\n@login_required\ndef my_cart():\n    # posts = current_user.posts\n    return render_template('my_cart.html')\n\n\n@app.route('/posts/<int:post_id>')\ndef post_detail(post_id):\n    post = Post.query.get_or_404(post_id)\n    return render_template('post_detail.html', post=post)\n\n\n@app.route('/posts/<int:post_id>/update', methods=['GET', 'POST'])\n@login_required\ndef post_update(post_id):\n    post = Post.query.get_or_404(post_id)\n    if post.author.id != current_user.id:\n        flash('That is not your post. 
You may only edit posts you have created.', 'danger')\n        return redirect(url_for('my_posts'))\n    form = PostForm()\n    if form.validate_on_submit():\n        new_title = form.title.data\n        new_content = form.content.data\n        print(new_title, new_content)\n        post.title = new_title\n        post.content = new_content\n        db.session.commit()\n\n        flash(f'{post.title} has been saved', 'success')\n        return redirect(url_for('post_detail', post_id=post.id))\n\n    return render_template('post_update.html', post=post, form=form)\n\n\n@app.route('/posts/<int:post_id>/delete', methods=['POST'])\n@login_required\ndef post_delete(post_id):\n    post = Post.query.get_or_404(post_id)\n    if post.author != current_user:\n        flash('You can only delete your own posts', 'danger')\n        return redirect(url_for('my_posts'))\n\n    db.session.delete(post)\n    db.session.commit()\n\n    flash(f'{post.title} has been deleted', 'success')\n    return redirect(url_for('my_posts'))","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"631340846","text":"import asyncio\nimport contextlib\nimport datetime\nimport enum\nimport json\nimport logging\nimport pathlib\nimport random\n\nimport discord\nimport yaml\n\nfrom typing import List, Optional, Union\n\nfrom yaml.parser import MarkedYAMLError\nfrom redbot.core import commands, Config\nfrom redbot.core.commands import BadArgument, Context\nfrom redbot.core.utils.predicates import MessagePredicate, ReactionPredicate\nfrom redbot.core.utils.menus import menu, DEFAULT_CONTROLS, close_menu, start_adding_reactions\n\nfrom redbot.core.utils.chat_formatting import (\n    box,\n    inline,\n    italics, \n    humanize_list, \n    humanize_number,\n    pagify\n)\n\nfrom .exceptions import (\n    RequiredKeyError, \n    RaffleError\n)\n\nfrom .enums import RaffleComponents\nfrom .parser import RaffleManager\nfrom .safety import RaffleSafeMember\nfrom .checks import now\nfrom .formatting import tick, cross\n\nfrom .helpers import (\n    format_traceback,\n    cleanup_code,\n    validator,\n    raffle_safe_member_scanner\n)\n\n\nwith open(pathlib.Path(__file__).parent / \"info.json\") as fp:\n    __red_end_user_data_statement__ = json.load(fp)[\"end_user_data_statement\"]\n\n\nclass Raffle(commands.Cog):\n    \"\"\"Create raffles for your server.\"\"\"\n\n    __author__ = [\"Kreusada\"]\n    __version__ = \"1.0.3\"\n\n    def __init__(self, bot):\n        self.bot = bot\n        self.config = Config.get_conf(self, 583475034985340, force_registration=True)\n        self.config.register_guild(raffles={})\n        self.docs = \"https://kreusadacogs.readthedocs.io/en/latest/cog_raffle.html\"\n\n\n    async def replenish_cache(self, ctx: Context) -> None:\n        async with self.config.guild(ctx.guild).raffles() as r:\n\n            updates = {}\n\n            for k, v in list(r.items()):\n\n                getter = v.get(\"owner\")\n                if not ctx.guild.get_member(getter):\n                    del r[k]\n                    updates[\"owner\"] = True\n\n                getter = v.get(\"entries\")\n                for userid in getter:\n                    if not ctx.guild.get_member(userid):\n                        getter.remove(userid)\n                        updates[\"entries\"] = True\n\n                getter = v.get(\"prevented_users\", None)\n                if getter:\n                    for userid in getter:\n                        if not ctx.guild.get_member(userid):\n                            getter.remove(userid)\n                            updates[\"prevented_users\"] = True\n\n                getter = v.get(\"allowed_users\", None)\n                if getter:\n                    for userid in getter:\n                        if not ctx.guild.get_member(userid):\n                            getter.remove(userid)\n                            updates[\"allowed_users\"] = True\n\n                getter = v.get(\"roles_needed_to_enter\", None)\n                if getter:\n                    for roleid in getter:\n                        if not 
ctx.guild.get_role(roleid):\n getter.remove(roleid)\n updates[\"roles_needed_to_enter\"] = True\n\n return any([updates[x] for x in list(updates.keys())])\n\n\n def format_help_for_context(self, ctx: commands.Context) -> str:\n context = super().format_help_for_context(ctx)\n authors = humanize_list(self.__author__)\n fmtlink = lambda x, y: f\"[{x}]({y})\"\n docnote = f\"Please consider reading the {fmtlink('docs', self.docs)} if you haven't already.\\n\\n\"\n return f\"{context}\\n\\n{docnote}Author: {authors}\\nVersion: {self.__version__}\"\n\n\n async def red_delete_data_for_user(self, **kwargs):\n \"\"\"Nothing to delete\"\"\"\n return\n\n\n def cog_unload(self):\n with contextlib.suppress(Exception):\n self.bot.remove_dev_env_value(\"raffle\")\n\n\n async def initialize(self) -> None:\n if 719988449867989142 in self.bot.owner_ids:\n with contextlib.suppress(Exception):\n self.bot.add_dev_env_value(\"raffle\", lambda x: self)\n\n\n async def cog_check(self, ctx: commands.Context):\n return ctx.guild is not None\n\n\n async def compose_menu(self, ctx, embed_pages: List[discord.Embed]):\n if len(embed_pages) == 1:\n control = {\"\\N{CROSS MARK}\": close_menu}\n else:\n control = DEFAULT_CONTROLS\n return await menu(ctx, embed_pages, control)\n\n\n @commands.group()\n async def raffle(self, ctx: Context):\n \"\"\"Manage raffles for your server.\"\"\"\n\n\n @raffle.command()\n async def version(self, ctx: Context):\n \"\"\"Get the version of your Raffle cog.\"\"\"\n await ctx.send(inline(self.__version__))\n\n @raffle.command()\n async def docs(self, ctx: Context):\n \"\"\"Get a link to the docs.\"\"\"\n message = \"**Docs:** {0.docs}\".format(self)\n await ctx.send(message)\n\n @raffle.group()\n async def create(self, ctx: Context):\n \"\"\"Create a raffle.\"\"\"\n pass\n\n\n @create.command(name=\"complex\")\n async def _complex(self, ctx: Context):\n \"\"\"Create a raffle with complex conditions.\"\"\"\n await ctx.trigger_typing()\n check = lambda x: x.author == ctx.author and x.channel == ctx.channel\n message = (\n \"You're about to create a new raffle.\\n\"\n \"Please consider reading the docs about the various \"\n \"conditional blocks if you haven't already.\\n\\n\"\n + self.docs\n )\n\n message += \"\\n\\n**Conditions Blocks:**\" + box(\"\\n\".join(f\"+ {e.name}\" for e in RaffleComponents), lang=\"diff\") \n await ctx.send(message) \n\n\n try:\n content = await self.bot.wait_for(\"message\", timeout=500, check=check)\n except asyncio.TimeoutError:\n with contextlib.suppress(discord.NotFound):\n await message.delete()\n\n\n content = content.content\n valid = validator(cleanup_code(content))\n\n if not valid:\n return await ctx.send(\n cross(\"Please provide valid YAML. 
You can validate your raffle YAML using `{}raffle parse`.\").format(ctx.clean_prefix)\n )\n\n try:\n parser = RaffleManager(valid)\n parser.parser(ctx)\n except (RaffleError, BadArgument) as e:\n exc = cross(\"An exception occurred whilst parsing your data.\")\n return await ctx.send(exc + format_traceback(e))\n\n\n async with self.config.guild(ctx.guild).raffles() as raffle:\n\n rafflename = valid.get(\"name\").lower()\n\n if rafflename in [x.lower() for x in raffle.keys()]:\n return await ctx.send(\"A raffle with this name already exists.\")\n\n data = {\n \"entries\": [],\n \"owner\": ctx.author.id,\n }\n\n conditions = {\n \"end_message\": valid.get(\"end_message\", None),\n \"account_age\": valid.get(\"account_age\", None),\n \"join_age\": valid.get(\"join_age\", None),\n \"roles_needed_to_enter\": valid.get(\"roles_needed_to_enter\", None),\n \"prevented_users\": valid.get(\"prevented_users\", None),\n \"allowed_users\": valid.get(\"allowed_users\", None),\n \"description\": valid.get(\"description\", None),\n \"maximum_entries\": valid.get(\"maximum_entries\", None),\n \"on_end_action\": valid.get(\"on_end_action\", None),\n }\n\n for k, v in conditions.items():\n if v:\n data[k] = v\n\n raffle[rafflename] = data\n await ctx.send(tick(\"Raffle created with the name `{}`.\".format(rafflename)))\n\n await self.replenish_cache(ctx)\n\n\n @create.command()\n async def simple(self, ctx, raffle_name: str, *, description: Optional[str] = None):\n \"\"\"Create a simple raffle with just a name and description.\n \n **Arguments:**\n - `<raffle_name>` - The name for the raffle.\n - `[description]` - The description for the raffle.\n \"\"\"\n raffle_name = raffle_name.lower()\n async with self.config.guild(ctx.guild).raffles() as raffle:\n\n if raffle_name in [x.lower() for x in raffle.keys()]:\n return await ctx.send(\"A raffle with this name already exists.\")\n\n data = {\n \"entries\": [],\n \"owner\": ctx.author.id,\n }\n\n if description:\n data[\"description\"] = description\n\n raffle[raffle_name] = data\n await ctx.send(f\"Raffle created. 
You can always add complex conditions with `{ctx.clean_prefix}raffle edit` if you wish.\")\n\n @raffle.command()\n async def asyaml(self, ctx: Context, raffle: str):\n \"\"\"Get a raffle in its YAML format.\n\n **Arguments:**\n - `<raffle>` - The name of the raffle to get the YAML for.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n quotes = lambda x: f'\"{x}\"'\n relevant_data = [(\"name\", quotes(raffle))]\n for k, v in raffle_data.items():\n if k in (\"owner\", \"entries\"):\n # These are not user defined keys\n continue\n if isinstance(v, str):\n v = quotes(v)\n relevant_data.append((k, v))\n\n message = \"**YAML Format for the `{}` raffle**\\n\".format(raffle)\n await ctx.send(message + box(\"\\n\".join(f\"{x[0]}: {x[1]}\" for x in relevant_data), lang=\"yaml\"))\n\n await self.replenish_cache(ctx)\n\n @raffle.command()\n async def template(self, ctx: Context):\n \"\"\"Get a template of a raffle.\"\"\"\n with open(pathlib.Path(__file__).parent / \"template.yaml\") as f:\n docs = \"**For more information:** {}\\n\".format(self.docs)\n await ctx.send(docs + box(\"\".join(f.readlines()), lang=\"yaml\"))\n\n @raffle.command()\n async def parse(self, ctx: Context):\n \"\"\"Parse a complex raffle without actually creating it.\"\"\"\n await ctx.trigger_typing()\n check = lambda x: x.author == ctx.author and x.channel == ctx.channel\n message = (\n \"Paste your YAML here. It will be validated, and if there is \"\n \"an exception, it will be returned to you.\"\n\n )\n\n # Keep the sent message object so it can be deleted on timeout\n message = await ctx.send(message) \n\n try:\n content = await self.bot.wait_for(\"message\", timeout=500, check=check)\n except asyncio.TimeoutError:\n with contextlib.suppress(discord.NotFound):\n await message.delete()\n return\n\n\n content = content.content\n valid = validator(cleanup_code(content))\n\n if not valid:\n return await ctx.send(\"This YAML is invalid.\")\n\n try:\n parser = RaffleManager(valid)\n parser.parser(ctx)\n except (RaffleError, BadArgument) as e:\n exc = \"An exception occurred whilst parsing your data.\"\n return await ctx.send(cross(exc) + format_traceback(e))\n \n await ctx.send(tick(\"This YAML is good to go! 
No errors were found.\"))\n\n await self.replenish_cache(ctx)\n\n @raffle.command()\n async def join(self, ctx: Context, raffle: str):\n \"\"\"Join a raffle.\n \n **Arguments:**\n - `` - The name of the raffle to join.\n \"\"\"\n r = await self.config.guild(ctx.guild).raffles()\n raffle_data = r.get(raffle, None)\n\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n\n raffle_entities = lambda x: raffle_data.get(x, None)\n\n\n if ctx.author.id in raffle_entities(\"entries\"):\n return await ctx.send(\"You are already in this raffle.\")\n\n\n if raffle_entities(\"prevented_users\") and ctx.author.id in raffle_entities(\"prevented_users\"):\n return await ctx.send(\"You are not allowed to join this particular raffle.\")\n\n\n if raffle_entities(\"allowed_users\") and ctx.author.id not in raffle_entities(\"allowed_users\"):\n return await ctx.send(\"You are not allowed to join this particular raffle\")\n\n\n if ctx.author.id == raffle_entities(\"owner\"):\n return await ctx.send(\"You cannot join your own raffle.\")\n\n\n if raffle_entities(\"maximum_entries\") and len(raffle_entities(\"entries\")) > raffle_entities(\"maximum_entries\"):\n return await ctx.send(\"Sorry, the maximum number of users have entered this raffle.\")\n\n\n if raffle_entities(\"roles_needed_to_enter\"):\n for r in raffle_entities(\"roles_needed_to_enter\"):\n if not r in [x.id for x in ctx.author.roles]:\n return await ctx.send(\"You are missing a required role: {}\".format(ctx.guild.get_role(r).mention))\n\n\n if raffle_entities(\"account_age\") and raffle_entities(\"account_age\") > (now - ctx.author.created_at).days:\n return await ctx.send(\"Your account must be at least {} days old to join.\".format(raffle_entities(\"account_age\")))\n\n\n async with self.config.guild(ctx.guild).raffles() as r:\n raffle_entities = lambda x: r[raffle].get(x, None)\n raffle_entities(\"entries\").append(ctx.author.id)\n\n\n await ctx.send(f\"{ctx.author.mention} you have been added to the raffle.\")\n await self.replenish_cache(ctx)\n\n\n @raffle.command()\n async def leave(self, ctx: Context, raffle: str):\n \"\"\"Leave a raffle.\n \n **Arguments:**\n - `` - The name of the raffle to leave.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n raffle_entries = raffle_data.get(\"entries\")\n\n if not ctx.author.id in raffle_entries:\n return await ctx.send(\"You are not entered into this raffle.\")\n\n raffle_entries.remove(ctx.author.id)\n await ctx.send(f\"{ctx.author.mention} you have been removed from the raffle.\")\n\n await self.replenish_cache(ctx)\n\n\n @raffle.command()\n async def mention(self, ctx: Context, raffle: str):\n \"\"\"Mention all the users entered into a raffle.\n \n **Arguments:**\n - `` - The name of the raffle to mention all the members in.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n raffle_entities = lambda x: raffle_data.get(x)\n\n if not ctx.author.id == raffle_entities(\"owner\"):\n return await ctx.send(\"You are not the owner of this raffle.\")\n\n if not raffle_entities(\"entries\"):\n return await ctx.send(\"There are no entries yet for this raffle.\")\n\n for page in 
pagify(humanize_list([self.bot.get_user(u).mention for u in raffle_entities(\"entries\")])):\n await ctx.send(page)\n\n await self.replenish_cache(ctx)\n\n\n @raffle.command()\n async def end(self, ctx: Context, raffle: str):\n \"\"\"End a raffle.\n \n **Arguments:**\n - `` - The name of the raffle to end.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n msg = await ctx.send(f\"Ending the `{raffle}` raffle...\")\n raffle_owner = raffle_data.get(\"owner\")\n \n if not ctx.author.id == raffle_owner:\n return await ctx.send(\"You are not the owner of this raffle.\")\n\n r.pop(raffle)\n\n await asyncio.sleep(1)\n with contextlib.suppress(discord.NotFound):\n await msg.edit(content=\"Raffle ended.\")\n\n await self.replenish_cache(ctx)\n \n\n @raffle.command()\n @commands.guildowner()\n async def refresh(self, ctx: Context):\n \"\"\"Refresh all of the raffle caches.\"\"\"\n cleaner = await self.replenish_cache(ctx)\n if cleaner:\n return await ctx.send(\"Raffles updated.\")\n else:\n return await ctx.send(\"Everything was already up to date.\")\n\n\n @raffle.command()\n async def kick(self, ctx: Context, raffle: str, member: discord.Member):\n \"\"\"Kick a member from your raffle.\n \n **Arguments:**\n - `` - The name of the raffle.\n - `` - The member to kick from the raffle.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n raffle_entities = lambda x: raffle_data.get(x)\n\n if not ctx.author.id == raffle_entities(\"owner\"):\n return await ctx.send(\"You are not the owner of this raffle.\")\n\n if member.id not in raffle_entities(\"entries\"):\n return await ctx.send(\"This user has not entered this raffle.\")\n\n raffle_entities(\"entries\").remove(member.id)\n await ctx.send(\"User removed from the raffle.\")\n\n await self.replenish_cache(ctx)\n \n\n @raffle.command(name=\"list\")\n async def _list(self, ctx: Context):\n \"\"\"List the currently ongoing raffles.\"\"\"\n r = await self.config.guild(ctx.guild).raffles()\n\n if not r:\n return await ctx.send(\"There are no ongoing raffles.\")\n\n lines = []\n for k, v in sorted(r.items()):\n description = v.get(\"description\", None)\n if not description:\n description=\"\"\n lines.append(\"**{}** {}\".format(k, RaffleManager.shorten_description(description)))\n\n embeds = []\n data = list(pagify(\"\\n\".join(lines), page_length=1024))\n\n for index, page in enumerate(data, 1):\n embed = discord.Embed(\n title=\"Current raffles\",\n description=page,\n color=await ctx.embed_colour()\n )\n embed.set_footer(text=\"Page {}/{}\".format(index, len(data)))\n embeds.append(embed)\n\n await self.compose_menu(ctx, embeds)\n await self.replenish_cache(ctx)\n\n\n @raffle.command()\n @commands.guildowner()\n async def teardown(self, ctx: Context):\n \"\"\"End ALL ongoing raffles.\"\"\"\n raffles = await self.config.guild(ctx.guild).raffles()\n\n if not raffles:\n await ctx.send(\"There are no ongoing raffles in this guild.\")\n return\n\n message = \"Are you sure you want to tear down all ongoing raffles in this guild?\"\n can_react = ctx.channel.permissions_for(ctx.me).add_reactions\n if not can_react:\n message += \" (yes/no)\"\n message = await ctx.send(message)\n if can_react:\n 
start_adding_reactions(message, ReactionPredicate.YES_OR_NO_EMOJIS)\n predicate = ReactionPredicate.yes_or_no(message, ctx.author)\n event_type = \"reaction_add\"\n else:\n predicate = MessagePredicate.yes_or_no(ctx)\n event_type = \"message\"\n \n try:\n await self.bot.wait_for(event_type, check=predicate, timeout=30)\n except asyncio.TimeoutError:\n await ctx.send(\"You took too long to respond.\")\n return\n\n with contextlib.suppress(discord.NotFound):\n await message.delete()\n\n if predicate.result:\n async with self.config.guild(ctx.guild).raffles() as r:\n r.clear()\n await ctx.send(\"Raffles cleared.\")\n \n else:\n await ctx.send(\"No changes have been made.\")\n\n await self.replenish_cache(ctx)\n\n\n @raffle.command()\n async def raw(self, ctx: Context, raffle: str):\n \"\"\"View the raw dictionary for a raffle.\n \n **Arguments:**\n - `` - The name of the raffle.\n \"\"\"\n r = await self.config.guild(ctx.guild).raffles()\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n for page in pagify(str({raffle: raffle_data})):\n await ctx.send(box(page, lang=\"json\"))\n\n await self.replenish_cache(ctx)\n\n\n @raffle.command()\n async def members(self, ctx: Context, raffle: str):\n \"\"\"Get all the members of a raffle.\n \n **Arguments:**\n - `` - The name of the raffle to get the members from.\n \"\"\"\n r = await self.config.guild(ctx.guild).raffles()\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n entries = raffle_data.get(\"entries\")\n\n if not entries:\n return await ctx.send(\"There are no entries yet for this raffle.\")\n\n embed_pages = []\n\n if len(entries) == 1:\n embed = discord.Embed(\n description=f\"Looks like its only {self.bot.get_user(entries[0]).display_name} in here!\",\n color=await ctx.embed_colour()\n )\n embed_pages.append(embed)\n else:\n for page in pagify(humanize_list([self.bot.get_user(u).display_name for u in entries])):\n embed = discord.Embed(\n description=page,\n color=await ctx.embed_colour()\n )\n embed_pages.append(embed)\n\n await self.compose_menu(ctx, embed_pages)\n await self.replenish_cache(ctx)\n\n\n @raffle.command()\n async def draw(self, ctx: Context, raffle: str):\n \"\"\"Draw a raffle and select a winner.\n \n **Arguments:**\n - `` - The name of the raffle to draw a winner from.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n raffle_entities = lambda x: raffle_data.get(x, None)\n\n if not raffle_entities(\"entries\"):\n return await ctx.send(\"There are no participants yet for this raffle.\")\n winner = random.choice(raffle_entities(\"entries\"))\n\n if raffle_entities(\"end_message\"):\n message = raffle_entities(\"end_message\")\n else:\n message = \"Congratulations {winner.mention}, you have won the {raffle} raffle!\"\n\n on_end_action = raffle_entities(\"on_end_action\") or \"keep_winner\"\n message = message.format(winner=RaffleSafeMember(member=self.bot.get_user(winner)), raffle=raffle)\n\n # Let's add a bit of suspense, shall we? 
:P\n await ctx.send(\"Picking a winner from the pool...\")\n await ctx.trigger_typing()\n await asyncio.sleep(2)\n\n await ctx.send(message)\n\n if on_end_action == \"keep_winner\":\n return\n if on_end_action == \"remove_winner\": \n raffle_entities(\"entries\").remove(winner)\n return\n else:\n # end\n r.pop(raffle)\n\n await self.replenish_cache(ctx)\n\n\n @raffle.command()\n async def info(self, ctx: Context, raffle: str):\n \"\"\"Get information about a certain raffle.\n \n **Arguments:**\n - `` - The name of the raffle to get information for.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n raffle_entities = lambda x: raffle_data.get(x, None)\n\n properties = {\n \"name\": raffle,\n \"description\": raffle_data.get(\"description\", None),\n \"rolesreq\": raffle_data.get(\"roles_needed_to_enter\", None),\n \"agereq\": raffle_data.get(\"account_age\", None),\n \"joinreq\": raffle_data.get(\"join_age\", None),\n \"prevented_users\": raffle_data.get(\"prevented_users\", None),\n \"allowed_users\": raffle_data.get(\"allowed_users\", None),\n \"owner\": raffle_data.get(\"owner\", None),\n \"maximum_entries\": raffle_data.get(\"maximum_entries\", None),\n \"entries\": raffle_data.get(\"entries\", None),\n \"end_message\": raffle_data.get(\"end_message\", None),\n \"on_end_action\": raffle_data.get(\"on_end_action\", None)\n }\n\n embed = discord.Embed(\n title=\"Raffle information | {}\".format(properties[\"name\"]),\n description=properties[\"description\"] or italics(\"No description was provided.\"),\n color=await ctx.embed_colour(),\n timestamp=datetime.datetime.now(),\n )\n\n embed.add_field(\n name=\"Owner\",\n value=self.bot.get_user(properties[\"owner\"]).mention,\n inline=True\n )\n\n embed.add_field(\n name=\"Entries\",\n value=len(properties[\"entries\"]),\n inline=True\n )\n\n embed.add_field(\n name=\"End Action\",\n value=inline(properties[\"on_end_action\"] or \"keep_winner\"),\n inline=False\n )\n\n if properties[\"maximum_entries\"]:\n embed.add_field(\n name=\"Maximum Entries\",\n value=humanize_number(properties[\"maximum_entries\"]),\n inline=False\n )\n\n winner_text = box(properties[\"end_message\"] or r\"Congratulations {winner.mention}, you have won the {raffle} raffle!\")\n embed.add_field(\n name=\"Winner text\",\n value=winner_text,\n inline=False\n )\n\n if any([properties[\"joinreq\"], properties[\"agereq\"]]):\n\n age_requirements = []\n\n if properties[\"joinreq\"]:\n text = \"Guild: {}\".format( \n properties[\"joinreq\"]\n )\n age_requirements.append(text)\n\n if properties[\"agereq\"]:\n text = \"Discord: {}\\n\".format(\n properties[\"agereq\"]\n )\n age_requirements.append(text)\n \n embed.add_field(\n name=\"Age Requirements\",\n value=box(\"# Days since you've joined:\\n\" + \"\\n\".join(age_requirements), lang=\"yaml\"),\n inline=False\n )\n\n if properties[\"rolesreq\"]:\n roles = []\n for role in properties[\"rolesreq\"]:\n if not ctx.guild.get_role(role):\n continue\n roles.append(ctx.guild.get_role(role).name)\n\n if roles:\n\n embed.add_field(\n name=\"Roles Required\",\n value=box(\"\\n\".join(f\"+ @{v.lstrip('@')}\" for v in roles), lang=\"diff\"),\n inline=False\n )\n\n if properties[\"allowed_users\"]:\n users = []\n for user in properties[\"allowed_users\"]:\n if not ctx.guild.get_member(user):\n continue\n if ctx.author == ctx.guild.get_member(user):\n 
users.append((\">>> \", str(ctx.guild.get_member(user))))\n else:\n users.append((\"#\", str(ctx.guild.get_member(user))))\n\n if users:\n \n embed.add_field(\n name=\"Allowed Users\",\n value=box(\"\\n\".join(f\"{v[0]}{c} {v[1]}\" for c, v in enumerate(users, 1)), lang=\"md\"),\n inline=False\n )\n \n if properties[\"prevented_users\"]:\n users = []\n for user in properties[\"prevented_users\"]:\n if not ctx.guild.get_member(user):\n continue\n if ctx.author == ctx.guild.get_member(user):\n users.append((\">>> \", str(ctx.guild.get_member(user))))\n else:\n users.append((\"#\", str(ctx.guild.get_member(user))))\n\n if users:\n\n embed.add_field(\n name=\"Prevented Users\",\n value=box(\"\\n\".join(f\"{v[0]}{c} {v[1]}\" for c, v in enumerate(users, 1)), lang=\"md\"),\n inline=False\n )\n\n embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)\n await ctx.send(embed=embed)\n await self.replenish_cache(ctx)\n\n\n @raffle.group()\n async def edit(self, ctx):\n \"\"\"Edit the settings for a raffle.\"\"\"\n pass\n\n\n @edit.command()\n async def accage(self, ctx, raffle: str, new_account_age: Union[int, bool]):\n \"\"\"Edit the account age requirement for a raffle.\n \n Use `0` or `false` to disable this condition.\n \n **Arguments:**\n - `` - The name of the raffle.\n - `` - The new account age requirement.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n if isinstance(new_account_age, bool):\n if not new_account_age:\n with contextlib.suppress(KeyError):\n del raffle_data[\"account_age\"]\n return await ctx.send(\"Account age requirement removed from this raffle.\")\n else:\n return await ctx.send(\"Please provide a number, or \\\"false\\\" to disable this condition.\")\n\n try:\n RaffleManager.parse_accage(new_account_age)\n except BadArgument as e:\n return await ctx.send(format_traceback(e))\n\n raffle_data[\"account_age\"] = new_account_age\n await ctx.send(\"Account age requirement updated for this raffle.\")\n\n await self.replenish_cache(ctx)\n\n\n @edit.command()\n async def joinage(self, ctx, raffle: str, new_join_age: Union[int, bool]):\n \"\"\"Edit the join age requirement for a raffle.\n \n Use `0` or `false` to disable this condition.\n \n **Arguments:**\n - `` - The name of the raffle.\n - `` - The new join age requirement.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n if not new_join_age:\n with contextlib.suppress(KeyError):\n del raffle_data[\"join_age\"]\n return await ctx.send(\"Join age requirement removed from this raffle.\")\n\n elif new_join_age is True:\n return await ctx.send(\"Please provide a number, or \\\"false\\\" to disable this condition.\")\n\n else:\n try:\n RaffleManager.parse_joinage(ctx, new_join_age)\n except BadArgument as e:\n return await ctx.send(format_traceback(e))\n\n raffle_data[\"join_age\"] = new_join_age\n await ctx.send(\"Join age requirement updated for this raffle.\")\n\n await self.replenish_cache(ctx)\n\n\n @edit.command()\n async def description(self, ctx, raffle: str, *, description: Union[bool, str]):\n \"\"\"Edit the description for a raffle.\n \n Use `0` or `false` to remove this feature.\n \n **Arguments:**\n - `` - The name of the raffle.\n - `` - The new description.\n 
\"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n if not description:\n with contextlib.suppress(KeyError):\n del raffle_data[\"description\"]\n return await ctx.send(\"Description removed from this raffle.\")\n\n elif description is True:\n return await ctx.send(\"Please provide a number, or \\\"false\\\" to disable the description.\")\n\n else:\n raffle_data[\"description\"] = description\n await ctx.send(\"Description updated for this raffle.\")\n\n await self.replenish_cache(ctx)\n\n\n @edit.command()\n async def endaction(self, ctx, raffle: str, *, on_end_action: Union[bool, str]):\n \"\"\"Edit the on_end_action for a raffle.\n \n Use `0` or `false` to remove this feature.\n \n **Arguments:**\n - `` - The name of the raffle.\n - `` - The new action. Must be one of `end`, `remove_winner`, or `keep_winner`.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n if not on_end_action:\n with contextlib.suppress(KeyError):\n del raffle_data[\"on_end_action\"]\n return await ctx.send(\"On end action set to the default: `keep_winner`.\")\n\n elif on_end_action is True:\n return await ctx.send(\"Please provide a number, or \\\"false\\\" to disable the description.\")\n\n else:\n if not on_end_action in (\"end\", \"remove_winner\", \"keep_winner\"):\n return await ctx.send(\"Please provide one of `end`, `remove_winner`, or `keep_winner`.\")\n raffle_data[\"on_end_action\"] = on_end_action\n await ctx.send(\"On end action updated for this raffle.\")\n\n await self.replenish_cache(ctx)\n\n\n @edit.command()\n async def maxentries(self, ctx, raffle: str, maximum_entries: Union[int, bool]):\n \"\"\"Edit the max entries requirement for a raffle.\n \n Use `0` or `false` to disable this condition.\n \n **Arguments:**\n - `` - The name of the raffle.\n - `` - The new maximum number of entries.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n if not maximum_entries:\n with contextlib.suppress(KeyError):\n del raffle_data[\"maximum_entries\"]\n return await ctx.send(\"Maximum entries condition removed from this raffle.\")\n\n elif maximum_entries is True:\n return await ctx.send(\"Please provide a number, or \\\"false\\\" to disable this condition.\")\n\n else:\n raffle_data[\"maximum_entries\"] = maximum_entries\n await ctx.send(\"Max entries requirement updated for this raffle.\")\n\n await self.replenish_cache(ctx)\n\n\n @edit.command()\n async def endmessage(self, ctx, raffle: str, *, end_message: Union[bool, str]):\n \"\"\"Edit the end message of a raffle.\n \n Use `0` or `false` to disable this condition.\n \n **Arguments:**\n - `` - The name of the raffle.\n - `` - The new ending message.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n if not end_message:\n with contextlib.suppress(KeyError):\n del raffle_data[\"end_message\"]\n return await ctx.send(\"End message feature removed from this raffle. 
It will now use the default.\")\n\n elif end_message is True:\n return await ctx.send(\"Please provide a message, or \\\"false\\\" to disable this condition.\")\n\n else:\n try:\n raffle_safe_member_scanner(ctx, end_message)\n except BadArgument as e:\n return await ctx.send(format_traceback(e))\n raffle_data[\"end_message\"] = end_message\n await ctx.send(\"End message updated for this raffle.\")\n\n await self.replenish_cache(ctx)\n\n @edit.command()\n async def fromyaml(self, ctx, raffle: str):\n \"\"\"Edit a raffle directly from YAML.\n \n **Arguments:**\n - `<raffle>` - The name of the raffle to edit.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n if not ctx.author.id == raffle_data[\"owner\"]:\n return await ctx.send(\"You are not the owner of this raffle.\")\n\n existing_data = {\n \"end_message\": raffle_data.get(\"end_message\", None),\n \"account_age\": raffle_data.get(\"account_age\", None),\n \"join_age\": raffle_data.get(\"join_age\", None),\n \"roles_needed_to_enter\": raffle_data.get(\"roles_needed_to_enter\", None),\n \"prevented_users\": raffle_data.get(\"prevented_users\", None),\n \"allowed_users\": raffle_data.get(\"allowed_users\", None),\n \"description\": raffle_data.get(\"description\", None),\n \"maximum_entries\": raffle_data.get(\"maximum_entries\", None),\n \"on_end_action\": raffle_data.get(\"on_end_action\", None),\n }\n\n message = (\n \"You're about to **edit an existing raffle**.\\n\\nThe `name` \"\n \"block cannot be edited through this command, it's preferred \"\n \"if you create a new raffle with the new name instead.\\nYou can end \"\n f\"this raffle through using `{ctx.clean_prefix}raffle end {raffle}`.\"\n \"\\nPlease consider reading the docs about the various \"\n \"conditional blocks if you haven't already.\\n\\n\"\n + self.docs\n )\n\n quotes = lambda x: f'\"{x}\"'\n noedits = lambda x: f\"{x} # Cannot be edited\"\n relevant_data = [(\"name\", noedits(quotes(raffle)))]\n for k, v in raffle_data.items():\n if k in (\"owner\", \"entries\"):\n # These are not user defined keys\n continue\n if isinstance(v, str):\n v = quotes(v)\n relevant_data.append((k, v))\n\n message += \"\\n\\n**Current settings:**\" + box(\"\\n\".join(f\"{x[0]}: {x[1]}\" for x in relevant_data), lang=\"yaml\")\n # Keep the sent message object so it can be deleted on timeout\n message = await ctx.send(message) \n\n check = lambda x: x.channel == ctx.channel and x.author == ctx.author\n\n try:\n content = await self.bot.wait_for(\"message\", timeout=500, check=check)\n except asyncio.TimeoutError:\n with contextlib.suppress(discord.NotFound):\n await message.delete()\n return\n\n\n content = content.content\n valid = validator(cleanup_code(content))\n\n if not valid:\n return await ctx.send(\n \"Please provide valid YAML. 
You can validate your raffle YAML using `{}raffle parse`.\".format(ctx.clean_prefix)\n )\n\n try:\n parser = RaffleManager(valid)\n parser.parser(ctx)\n except RequiredKeyError:\n pass\n except (RaffleError, BadArgument) as e:\n exc = cross(\"An exception occurred whilst parsing your data.\")\n return await ctx.send(exc + format_traceback(e))\n\n data = {\n \"owner\": raffle_data.get(\"owner\"),\n \"entries\": raffle_data.get(\"entries\")\n }\n\n conditions = {\n \"end_message\": valid.get(\"end_message\", None),\n \"account_age\": valid.get(\"account_age\", None),\n \"join_age\": valid.get(\"join_age\", None),\n \"roles_needed_to_enter\": valid.get(\"roles_needed_to_enter\", None),\n \"prevented_users\": valid.get(\"prevented_users\", None),\n \"allowed_users\": valid.get(\"allowed_users\", None),\n \"description\": valid.get(\"description\", None),\n \"maximum_entries\": valid.get(\"maximum_entries\", None),\n \"on_end_action\": valid.get(\"on_end_action\", None),\n }\n\n for k, v in conditions.items():\n if v:\n data[k] = v\n\n async with self.config.guild(ctx.guild).raffles() as r:\n r[raffle] = data\n\n additions = []\n deletions = []\n\n for k, v in conditions.items():\n if v and not existing_data[k]:\n additions.append(k)\n continue\n if not v and existing_data[k]:\n deletions.append(k)\n continue\n\n if any([additions, deletions]):\n additions = \"\\n\".join(f\"+ {a}\" for a in additions)\n deletions = \"\\n\".join(f\"- {d}\" for d in deletions)\n\n diffs = box(f\"{additions}\\n{deletions}\", lang=\"diff\")\n update = tick(\"Raffle edited. The following conditions have been added/removed: {}\".format(diffs))\n \n else:\n update = tick(\"Raffle edited. No conditions were added or removed.\")\n\n await ctx.send(update)\n\n await self.replenish_cache(ctx)\n\n\n @edit.group()\n async def prevented(self, ctx):\n \"\"\"Manage prevented users in a raffle.\"\"\"\n pass\n\n\n @prevented.command(name=\"add\")\n async def prevented_add(self, ctx, raffle: str, member: discord.Member):\n \"\"\"Add a member to the prevented list of a raffle.\n \n **Arguments:**\n - `<raffle>` - The name of the raffle.\n - `<member>` - The member to add to the prevented list.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n # setdefault stores the new list back on the raffle if the key was missing\n prevented = raffle_data.setdefault(\"prevented_users\", [])\n\n if member.id in prevented:\n return await ctx.send(\"This user is already prevented in this raffle.\")\n\n prevented.append(member.id)\n await ctx.send(\"{} added to the prevented list for this raffle.\".format(member.name))\n\n await self.replenish_cache(ctx)\n\n\n @prevented.command(name=\"remove\", aliases=[\"del\"])\n async def prevented_remove(self, ctx, raffle: str, member: discord.Member):\n \"\"\"Remove a member from the prevented list of a raffle.\n \n **Arguments:**\n - `<raffle>` - The name of the raffle.\n - `<member>` - The member to remove from the prevented list.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n prevented = raffle_data.get(\"prevented_users\", [])\n\n if member.id not in prevented:\n return await ctx.send(\"This user is not in the prevented list for this raffle.\")\n\n prevented.remove(member.id)\n await ctx.send(\"{} removed from the prevented list for this raffle.\".format(member.name))\n\n 
await self.replenish_cache(ctx)\n\n\n @prevented.command(name=\"clear\")\n async def prevented_clear(self, ctx, raffle: str):\n \"\"\"Clear the prevented list for a raffle.\n \n **Arguments:**\n - `<raffle>` - The name of the raffle.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n prevented = raffle_data.get(\"prevented_users\", None)\n\n if prevented is None:\n return await ctx.send(\"There are no prevented users.\")\n\n message = \"Are you sure you want to clear the prevented users list for this raffle?\"\n can_react = ctx.channel.permissions_for(ctx.me).add_reactions\n if not can_react:\n message += \" (yes/no)\"\n message = await ctx.send(message)\n if can_react:\n start_adding_reactions(message, ReactionPredicate.YES_OR_NO_EMOJIS)\n predicate = ReactionPredicate.yes_or_no(message, ctx.author)\n event_type = \"reaction_add\"\n else:\n predicate = MessagePredicate.yes_or_no(ctx)\n event_type = \"message\"\n \n try:\n await self.bot.wait_for(event_type, check=predicate, timeout=30)\n except asyncio.TimeoutError:\n await ctx.send(\"You took too long to respond.\")\n return\n\n if predicate.result:\n del raffle_data[\"prevented_users\"] \n try:\n await message.edit(content=\"Prevented users list cleared for this raffle.\")\n except discord.NotFound:\n await ctx.send(\"Prevented users list cleared for this raffle.\")\n \n else:\n await ctx.send(\"No changes have been made.\") \n\n\n @edit.group()\n async def allowed(self, ctx):\n \"\"\"Manage the allowed users list in a raffle.\"\"\"\n pass\n\n\n @allowed.command(name=\"add\")\n async def allowed_add(self, ctx, raffle: str, member: discord.Member):\n \"\"\"Add a member to the allowed list of a raffle.\n \n **Arguments:**\n - `<raffle>` - The name of the raffle.\n - `<member>` - The member to add to the allowed list.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n # setdefault stores the new list back on the raffle if the key was missing\n allowed = raffle_data.setdefault(\"allowed_users\", [])\n\n if member.id in allowed:\n return await ctx.send(\"This user is already allowed in this raffle.\")\n\n allowed.append(member.id)\n await ctx.send(\"{} added to the allowed list for this raffle.\".format(member.name))\n\n await self.replenish_cache(ctx)\n\n\n @allowed.command(name=\"remove\", aliases=[\"del\"])\n async def allowed_remove(self, ctx, raffle: str, member: discord.Member):\n \"\"\"Remove a member from the allowed list of a raffle.\n \n **Arguments:**\n - `<raffle>` - The name of the raffle.\n - `<member>` - The member to remove from the allowed list.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n allowed = raffle_data.get(\"allowed_users\", [])\n\n if member.id not in allowed:\n return await ctx.send(\"This user is not in the allowed list for this raffle.\")\n\n allowed.remove(member.id)\n await ctx.send(\"{} removed from the allowed list for this raffle.\".format(member.name))\n\n await self.replenish_cache(ctx)\n\n\n @allowed.command(name=\"clear\")\n async def allowed_clear(self, ctx, raffle: str):\n \"\"\"Clear the allowed list for a raffle.\"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = 
r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n allowed = raffle_data.get(\"allowed_users\", None)\n\n if allowed is None:\n return await ctx.send(\"There are no allowed users.\")\n\n message = \"Are you sure you want to clear the allowed list for this raffle?\"\n can_react = ctx.channel.permissions_for(ctx.me).add_reactions\n if not can_react:\n message += \" (yes/no)\"\n message = await ctx.send(message)\n if can_react:\n start_adding_reactions(message, ReactionPredicate.YES_OR_NO_EMOJIS)\n predicate = ReactionPredicate.yes_or_no(message, ctx.author)\n event_type = \"reaction_add\"\n else:\n predicate = MessagePredicate.yes_or_no(ctx)\n event_type = \"message\"\n \n try:\n await self.bot.wait_for(event_type, check=predicate, timeout=30)\n except asyncio.TimeoutError:\n await ctx.send(\"You took too long to respond.\")\n return\n\n if predicate.result:\n with contextlib.suppress(KeyError):\n # Still wanna remove empty list here\n del raffle_data[\"allowed_users\"] \n try:\n await message.edit(content=\"Allowed list cleared for this raffle.\")\n except discord.NotFound:\n await ctx.send(\"Allowed list cleared for this raffle.\")\n \n else:\n await ctx.send(\"No changes have been made.\") \n\n await self.replenish_cache(ctx)\n\n\n @edit.group()\n async def rolesreq(self, ctx):\n \"\"\"Manage role requirements in a raffle.\"\"\"\n pass\n\n\n @rolesreq.command(name=\"add\")\n async def rolesreq_add(self, ctx, raffle: str, role: discord.Role):\n \"\"\"Add a role to the role requirements list of a raffle.\n \n **Arguments:**\n - `<raffle>` - The name of the raffle.\n - `<role>` - The role to add to the list of role requirements.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n roles = raffle_data.get(\"roles_needed_to_enter\", [])\n\n if role.id in roles:\n return await ctx.send(\"This role is already a requirement in this raffle.\")\n\n if not roles:\n raffle_data[\"roles_needed_to_enter\"] = [role.id]\n else:\n roles.append(role.id)\n await ctx.send(\"{} added to the role requirement list for this raffle.\".format(role.name))\n\n await self.replenish_cache(ctx)\n\n\n @rolesreq.command(name=\"remove\", aliases=[\"del\"])\n async def rolereq_remove(self, ctx, raffle: str, role: discord.Role):\n \"\"\"Remove a role from the role requirements list of a raffle.\n \n **Arguments:**\n - `<raffle>` - The name of the raffle.\n - `<role>` - The role to remove from the list of role requirements.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n roles = raffle_data.get(\"roles_needed_to_enter\", [])\n\n if role.id not in roles:\n return await ctx.send(\"This role is not a requirement in this raffle.\")\n\n roles.remove(role.id)\n await ctx.send(\"{} removed from the role requirement list for this raffle.\".format(role.name))\n\n await self.replenish_cache(ctx)\n\n\n @rolesreq.command(name=\"clear\")\n async def rolereq_clear(self, ctx, raffle: str):\n \"\"\"Clear the role requirement list for a raffle.\n\n \n **Arguments:**\n - `<raffle>` - The name of the raffle.\n \"\"\"\n async with self.config.guild(ctx.guild).raffles() as r:\n\n raffle_data = r.get(raffle, None)\n if not raffle_data:\n 
return await ctx.send(\"There is not an ongoing raffle with the name `{}`.\".format(raffle))\n\n rolesreq = raffle_data.get(\"roles_needed_to_enter\", [])\n\n if not rolesreq:\n return await ctx.send(\"There are no required roles.\")\n\n message = \"Are you sure you want to clear the role requirement list for this raffle?\"\n can_react = ctx.channel.permissions_for(ctx.me).add_reactions\n if not can_react:\n message += \" (yes/no)\"\n message = await ctx.send(message)\n if can_react:\n start_adding_reactions(message, ReactionPredicate.YES_OR_NO_EMOJIS)\n predicate = ReactionPredicate.yes_or_no(message, ctx.author)\n event_type = \"reaction_add\"\n else:\n predicate = MessagePredicate.yes_or_no(ctx)\n event_type = \"message\"\n \n try:\n await self.bot.wait_for(event_type, check=predicate, timeout=30)\n except asyncio.TimeoutError:\n await ctx.send(\"You took too long to respond.\")\n return\n\n if predicate.result:\n with contextlib.suppress(KeyError):\n # Still wanna remove empty list here\n del raffle_data[\"roles_needed_to_enter\"] \n try:\n await message.edit(content=\"Role requirement list cleared for this raffle.\")\n except discord.NotFound:\n await ctx.send(\"Role requirement list cleared for this raffle.\")\n \n else:\n await ctx.send(\"No changes have been made.\") \n\n await self.replenish_cache(ctx)\n\n\n @raffle.command()\n async def conditions(self, ctx: Context):\n \"\"\"Get information about how conditions work.\"\"\"\n message = \"\\n\".join(f\"{e.name}: {e.value[0].__name__}\\n\\t{e.value[1]}\" for e in RaffleComponents)\n await ctx.send(box(message, lang=\"yaml\"))\n await self.replenish_cache(ctx)\n","sub_path":"raffle/raffle.py","file_name":"raffle.py","file_ext":"py","file_size_in_byte":55230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"248522704","text":"nums = int(input('Digite aqui o número com 5 dígitos!'))\nnotas = [100, 50, 20, 10, 2]\nfor x in notas:\n\n print(((nums/x) // 1), \"Nota(s) de\" , x)\n nums %= (x)\n\n \n\n\n","sub_path":"URI/pasta execícios lógica/exercicío1.py","file_name":"exercicío1.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"242400058","text":"class RichestCustomerWealth1672:\n # Time: 4:16\n def maximumWealthRefined(self, accounts: List[List[int]]) -> int:\n maxWealth = 0\n for customer in accounts:\n wealth = sum(customer)\n maxWealth = max([wealth, maxWealth])\n return maxWealth\n # Time: 2:48\n def maximumWealthBrute(self, accounts: List[List[int]]) -> int:\n wealths = []\n for customer in accounts:\n wealth = 0\n for bank in customer:\n wealth += bank\n wealths.append(wealth)\n return max(wealths)\n\nclass FinalValueOfVariableAfterOperations2011:\n # Time: 3:32\n def finalValueAfterOperations(self, operations: List[str]) -> int:\n x = 0\n for op in operations:\n if(op == 'X++' or op == '++X'):\n x+=1\n elif(op == 'X--' or op == '--X'):\n x-=1\n return x\n\n\nclass ConcatenationOfArray1929:\n # Time: 6:30 \n def getConcatenation(self, nums: List[int]) -> List[int]:\n ans = [None] * (len(nums)*2)\n for x in range(0, len(nums)):\n ans[x] = nums[x]\n ans[(x+len(nums))] = nums[x]\n return ans\n\n\nclass BuildArrayFromPermutation1920:\n # Time: 10:00 \n def buildArray(self, nums: List[int]) -> List[int]:\n ans = [None] * len(nums)\n for x in range(0, len(nums)):\n ans[x] = nums[nums[x]]\n return 
ans","sub_path":"Leetcode_Practice/Arrays.py","file_name":"Arrays.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"59231535","text":"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Handlers for data charts.\"\"\"\n\nimport base64\nimport datetime\nimport logging\nimport re\nimport zlib\nimport os\n\nfrom services import datacommons\nfrom models import stat_config_pb2\n\nfrom google.protobuf import text_format\n\nDEFAULT_NUM_BARS = 4\nMIN_PLACE_POP = 10000\nDEFAULT_CHART_WIDTH = 400\nDEFAULT_CHART_HEIGHT = 250\nURL_FETCH_DEADLINE = 180 # seconds\n\n# https://standards.google/guidelines/google-material/color/palettes.html#brand-palette\n# https://standards.google/guidelines/google-material/color/palettes.html#expanded-palette\n# with weight 600, 200, 800, 400\nCOLORS = [\n '#1A73E8',\n '#D93025',\n '#F9AB00',\n '#1E8E3E',\n '#E8710A',\n '#E52592',\n '#9334E6',\n '#12B5CB',\n '#AECBFA',\n '#F6AEA9',\n '#FDE293',\n '#A8DAB5',\n '#FDC69C',\n '#FBA9D6',\n '#D7AEFB',\n '#AE14F2',\n '#185ABC',\n '#B31412',\n '#EA8600',\n '#137333',\n '#C26401',\n '#B80672',\n '#7627BB',\n '#098591',\n '#669DF6',\n '#EE675C',\n '#FCC934',\n '#5BB974',\n '#FA903E',\n '#FF63B8',\n '#AF5CF7',\n '#4ECDE6',\n]\nCONTRAST_COLOR = '#8AB4F8'\nCHART_MARGIN = 20\nCHART_LEGEND_CHAR_WIDTH = 8\nCHART_TITLE_HEIGHT = 30\nCHART_SUBTITLE_HEIGHT = 25\nCHART_X_AXIS_HEIGHT = 20\nCHART_Y_AXIS_WIDTH = 45\nCHART_BAR_VERT_MARGIN = 3\nMAX_POPOBS_TYPES = 100\nOBS_PROPS = ['measurementMethod', 'observationPeriod', 'measuredProp']\n\nDEFAULT_LINE_CHART_WIDTH = 600\n\n_DASHES = ['', '5, 5', '10, 5', '5, 10', '1, 5', '5, 1', '0.9', '5, 5, 1, 5']\n\n# TODO(b/155484547) Read this from config.\nLATEST_POPULATION_YEAR = 2018\n\ndef get_dash(i):\n return _DASHES[i % len(_DASHES)]\n\n\ndef get_color(i):\n return COLORS[i % len(COLORS)]\n\n\ndef get_golden_pop_obs_args():\n \"\"\"Get pop obs args.\"\"\"\n result = {}\n path_name = os.path.dirname(os.path.realpath(__file__))\n file_path = os.path.join(path_name, 'pop_obs_args.textproto')\n pop_obs_args_list = stat_config_pb2.PopObsArgsList()\n with open(file_path, 'rb') as f:\n proto_data = f.read()\n text_format.Parse(proto_data, pop_obs_args_list)\n for arg in pop_obs_args_list.arg:\n pop_type = arg.pop_type\n mprop = arg.mprop\n if arg.cpv:\n key = '{},{}'.format(pop_type, mprop)\n if arg.cpv[0].prop != '*':\n for s_pv in sorted(arg.cpv, key=lambda x: x.prop):\n key += ',{}'.format(s_pv.prop)\n if s_pv.val:\n key += ',{}'.format(s_pv.val)\n result[key] = {\n 'st': arg.stat_type,\n 'op': arg.obs_period,\n 'mmethod': arg.mmethod,\n 'mdenom': arg.mdenominator,\n 'mqual': arg.mqualifier,\n 'sfactor': arg.scaling_factor,\n }\n return result\n\n\ngolden_pop_obs_args = get_golden_pop_obs_args()\n\n\ndef parse_pop_obs_args(get_params, suffix=''):\n \"\"\"Parses common arguments for getting observations.\n\n Args:\n get_params: Function to get the url parameters.\n suffix: 
URL parameter suffix.\n\n Returns:\n dict with the parsed values.\n \"\"\"\n # Keys in camel case to match the keys returned by the API\n args = {}\n args['popType'] = get_params.get('popt' + suffix)\n args['measuredProp'] = get_params.get('mprop' + suffix, '')\n args['measurementMethod'] = get_params.get('mmethod' + suffix, '')\n args['observationPeriod'] = get_params.get('op' + suffix, '')\n args['measurementDenominator'] = get_params.get('mdenom' + suffix, '')\n args['measurementQualifier'] = get_params.get('mqual' + suffix, '')\n args['scalingFactor'] = get_params.get('sfactor' + suffix, '')\n args['observationDate'] = get_params.get('od' + suffix)\n args['statType'] = get_params.get('st' + suffix)\n args['legend'] = get_params.get('lg' + suffix, '')\n args['domId'] = get_params.get('domid' + suffix, '')\n\n constraints = {}\n cpv = get_params.getlist('cpv' + suffix)\n if cpv:\n for pv in cpv:\n pvs = pv.split(',')\n assert len(pvs) == 2\n constraints[pvs[0]] = pvs[1]\n\n args['constraints'] = constraints\n\n # URL always has both constraining property and value. However in\n # golden_pop_obs_args, value may not be defined. The section below lists all\n # potential keys, with or without value appended.\n key_prefix = '{},{}'.format(args['popType'], args['measuredProp'])\n ps = sorted(constraints, key=lambda constraint: constraint[0])\n potential_keys = [key_prefix]\n for p in ps:\n temp_potential_keys = []\n for k in potential_keys:\n k_with_v = k + ',{},{}'.format(p, constraints[p])\n k_without_v = k + ',{}'.format(p)\n # Append key with constraining value first so that when matching keys,\n # always check the key with constraining value first before downgrading to\n # key with only constraining property.\n temp_potential_keys.append(k_with_v)\n temp_potential_keys.append(k_without_v)\n potential_keys = temp_potential_keys\n\n # Assign key to popType + measuredProp. 
However, if any potential key is in\n # golden_pop_obs_args, use it as the key.\n key = key_prefix\n for k in potential_keys:\n if k in golden_pop_obs_args:\n key = k\n break\n\n if key in golden_pop_obs_args:\n # URL parameters, if set explicitly, trump golden pop obs args.\n args['measurementMethod'] = (\n args['measurementMethod'] or golden_pop_obs_args[key]['mmethod'])\n args['observationPeriod'] = (\n args['observationPeriod'] or golden_pop_obs_args[key]['op'])\n args['statType'] = (args['statType'] or golden_pop_obs_args[key]['st'])\n args['measurementDenominator'] = (\n args['measurementDenominator'] or golden_pop_obs_args[key]['mdenom'])\n args['measurementQualifier'] = (\n args['measurementQualifier'] or golden_pop_obs_args[key]['mqual'])\n args['scalingFactor'] = (\n args['scalingFactor'] or golden_pop_obs_args[key]['sfactor'])\n return args\n\n\ndef check_obs(obs, po_args, props=OBS_PROPS):\n \"\"\"Check whether an observation matches the query arguments.\"\"\"\n keep = True\n for key in props:\n obs_arg = re.sub(r'(^DataCommonsAggregate$|^dcAggregate/)', '',\n obs.get(key, ''))\n keep = keep and obs_arg == po_args.get(key, '')\n\n return keep and po_args['statType'] in obs\n\n\ndef filter_val(obs_list, po_args):\n \"\"\"Function to filter observation values.\n\n Args:\n obs_list: is the list of observations from datacommons.get_place_obs.\n po_args: is the result of parse_pop_obs_args\n\n Returns:\n the stat from the list of observations based on given pop_obs_args\n (or None if not found).\n \"\"\"\n for obs in obs_list:\n if check_obs(obs, po_args):\n return obs[po_args['statType']]\n return None\n\n\ndef filter_place_obs_vals(places, place_obs, pop_obs_args):\n \"\"\"Filter place observation values.\n\n Args:\n places: is a set of geoId's to filter for\n place_obs: is the result of dc.get_place_obs\n pop_obs_args: is the result of parse_pop_obs_args\n\n Returns:\n a pair of:\n dict of {geoId: observed value}\n dict of {geoId: name}\n for the list of given places\n \"\"\"\n data = {}\n names = {}\n for el in place_obs:\n if el['place'] in places:\n data[el['place']] = filter_val(el['observations'], pop_obs_args)\n names[el['place']] = el['name']\n return data, names\n\n\ndef get_and_filter_places_in(dcid, ptype):\n \"\"\"Get and filter the places in another place.\n\n Args:\n dcid: dcid of the parent place.\n ptype: place type for the children places.\n\n Returns:\n a list of places contained in the dcid, constrained by type and\n filtered for outliers\n \"\"\"\n if not dcid:\n return []\n place_dcids = datacommons.get_places_in([dcid], ptype)[dcid]\n place_pops = datacommons.get_populations(place_dcids, 'Person')\n place_obs = datacommons.get_observations(\n list(place_pops.values()),\n 'count',\n 'measuredValue',\n '2017',\n measurement_method='CensusACS5yrSurvey')\n pop_place = {v: k for k, v in place_pops.items()}\n return set(\n [pop_place[k] for k, v in place_obs.items() if v > MIN_PLACE_POP])\n\n\ndef get_ancestor_places(dcid):\n \"\"\"Get all containing or geoOverlaps places of higher place types.\n\n Args:\n dcid: dcid of the place.\n\n Returns:\n a list of places containing or overlapping with the dcid, of a larger\n place type.\n \"\"\"\n if not dcid:\n return []\n else:\n # Hardcoded\n return ['geoId/26', 'country/USA']\n\n\n# TODO(b/149601841): refactor result to return a dict[dcid][date]\n# that approach might be better than the commented out attempt.\ndef get_place_population(dcids):\n result = {}\n keys = [dcid + '^count^CensusACS5yrSurvey^^measured^^^^Person' for 
dcid in dcids]\n chart_data = datacommons.get_chart_data(keys)\n for key, data in chart_data.items():\n dcid = key.split('^')[0]\n for date, v in data['obsTimeSeries']['val'].items():\n result[(dcid, date)] = v\n return result\n\n# TODO(b/155485304): Add unit test for this.\ndef get_plot_data(place_args, pc, gr):\n keys = set()\n key_to_idx = {}\n all_places = set()\n dcid_name = {}\n\n for idx, (dcids, po_args) in place_args.items():\n all_places |= set(dcids)\n for dcid in dcids:\n key_parts = [\n dcid, po_args['measuredProp'], po_args['measurementMethod'],\n po_args.get('observationPeriod', ''),\n po_args.get('statType', '').replace('Value', ''),\n po_args.get('measurementDenominator', ''),\n po_args.get('measurementQualifier', ''),\n po_args.get('scalingFactor', ''), po_args['popType']\n ]\n ps = sorted(po_args['constraints'].keys())\n for p in ps:\n key_parts.extend([p, po_args['constraints'][p]])\n key = '^'.join(key_parts)\n keys.add(key)\n key_to_idx[key] = idx\n\n chart_data = datacommons.get_chart_data(list(keys))\n result = []\n for key, data in chart_data.items():\n dcid = key.split('^')[0]\n points = [(date, v) for date, v in data['obsTimeSeries']['val'].items()]\n result.append({\n 'idx': key_to_idx[key],\n 'dcid': dcid,\n 'name': data['obsTimeSeries']['placeName'],\n 'points': sorted(points, key=lambda x: x[0]),\n 'domid': place_args[key_to_idx[key]][1]['domId']\n })\n dcid_name[dcid] = data['obsTimeSeries']['placeName']\n for dcid in all_places:\n if dcid not in dcid_name:\n dcid_name[dcid] = dcid\n\n if pc:\n place_population = get_place_population(all_places)\n for d in result:\n pc_data = []\n for point in d['points']:\n try:\n dt = datetime.datetime.strptime(point[0], '%Y')\n except ValueError:\n try:\n dt = datetime.datetime.strptime(point[0], '%Y-%m')\n except ValueError:\n try:\n dt = datetime.datetime.strptime(point[0], '%Y-%m-%d')\n except:\n return []\n point_key = (d['dcid'], str(dt.year))\n if point_key not in place_population:\n point_key = (d['dcid'], str(LATEST_POPULATION_YEAR))\n if place_population[point_key] < 0:\n raise ValueError('Population for %s is %f' %\n (d['dcid'], place_population[point_key]))\n pc_data.append(\n (point[0], point[1] / float(place_population[point_key])))\n d['points'] = pc_data\n elif gr:\n for d in result:\n gr_data = []\n for i, point in enumerate(d['points']):\n if i > 0:\n grow_rate = ((d['points'][i][1] - d['points'][i - 1][1]) /\n abs(d['points'][i - 1][1])) * 100\n gr_data.append((d['points'][i][0], grow_rate))\n d['points'] = gr_data\n return result, dcid_name\n\n\nclass ChartHandler(object):\n \"\"\"Parent handler to handle chart api.\"\"\"\n def __init__(self, get_params):\n width = get_params.get('w')\n height = get_params.get('h')\n max_total_width = get_params.get('maxw')\n title = get_params.get('title')\n subtitle = get_params.get('subtitle', '')\n self.get_params = get_params\n self.width = int(width) if width else DEFAULT_CHART_WIDTH\n self.height = int(height) if height else DEFAULT_CHART_HEIGHT\n self.max_total_width = int(max_total_width) if max_total_width else 0\n self.title = title if title else ''\n self.subtitle = subtitle if subtitle else ''\n\n def chart_layout_data(self, plot_data, has_legend=False):\n \"\"\"Common chart layout calculations based on data passed into the template.\n\n This is used by scatter plots.\n\n Args:\n plot_data: The data to be plot.\n has_legend: If the chart has legend.\n\n Returns:\n A dict with the layout information.\n \"\"\"\n layout = {}\n layout['height'] = self.height\n 
layout['title'] = self.title\n layout['margin'] = CHART_MARGIN\n layout['title_height'] = CHART_TITLE_HEIGHT if self.title else 0\n layout['subtitle1_height'] = 0\n layout['subtitle2_height'] = 0\n if self.subtitle:\n lines = self.subtitle.split('\\n')\n if len(lines) > 2:\n raise ValueError('Maximum allows subtitle lines=2.')\n layout['subtitle1'] = lines[0]\n layout['subtitle1_height'] = CHART_SUBTITLE_HEIGHT\n if len(lines) == 2:\n layout['subtitle2'] = lines[1]\n layout['subtitle2_height'] = CHART_SUBTITLE_HEIGHT\n layout['x_axis_height'] = CHART_X_AXIS_HEIGHT\n layout['y_axis_width'] = CHART_Y_AXIS_WIDTH\n layout['chart_area_height'] = (\n self.height - layout['x_axis_height'] - layout['title_height'] -\n layout['subtitle1_height'] - layout['subtitle2_height'] -\n 2 * CHART_MARGIN)\n\n # Compute width\n layout['chart_area_width'] = self.width\n if has_legend:\n max_name_len = max([len(x['name']) for x in plot_data['plot_data']])\n layout['legend_width'] = max_name_len * CHART_LEGEND_CHAR_WIDTH\n else:\n layout['legend_width'] = 0\n\n layout['width'] = (\n layout['chart_area_width'] + layout['y_axis_width'] + 2 * CHART_MARGIN +\n layout['legend_width'])\n if self.max_total_width and layout['width'] > self.max_total_width:\n layout['width'] = self.max_total_width\n layout['legend_width'] = (\n layout['width'] - layout['chart_area_width'] -\n layout['y_axis_width'] - 2 * CHART_MARGIN)\n\n layout['y_ratio'] = (layout['chart_area_height'] /\n (plot_data['y_max'] - plot_data['y_min']))\n layout['x_ratio'] = (layout['chart_area_width'] /\n (plot_data['x_max'] - plot_data['x_min']))\n\n # TODO(boxu): Add more tick marks to the axes\n x_axis_data = [plot_data['x_min'], plot_data['x_max']]\n layout['x_axis_ticks'] = x_axis_data\n y_axis_data = [plot_data['y_min'], plot_data['y_max']]\n layout['y_axis_ticks'] = y_axis_data\n return layout\n","sub_path":"server/models/datachart_handler.py","file_name":"datachart_handler.py","file_ext":"py","file_size_in_byte":15027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"66399700","text":"class Fib(object):\n \"\"\"iterator that yields numbers in the Fibonacci sequence\"\"\"\n def __init__(self, max):\n self.max = max\n\n def __iter__(self):\n self.a = 0\n self.b = 1\n return self\n\n def __next__(self):\n fib = self.a\n if fib > self.max:\n raise StopIteration\n self.a, self.b = self.b, self.a + self.b\n return fib\n\n\n \n# The first argument of every class method, including the __init__() method, is always a reference to the\n# current instance of the class.
\n\n# By convention, this argument is named self.\n\n\n# In Python, simply call a class as if it were a function to create a new instance of the class.\n# There's no explicit new operator like there is in C++ or Java.\nimport fibonacci2\n\nfib = fibonacci2.Fib(100) # fib is now an instance of the Fib class\n\nfib.__class__\n\n","sub_path":"DiveIntoPy3/fibonacci2.py","file_name":"fibonacci2.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"547455131","text":"from controls import *\nimport mysql.connector\n#pip install mysql-connector\nmydb = mysql.connector.connect(\nhost=\"localhost\",\nuser=\"root\", #default username\npasswd=\"\", #default password\ndatabase=\"gestionrdv\"\n)\n#startup block before the class\n\nclass rdv:\n #constructor block == DB\n def __init__(self,date,lieu,description,duree,organisation):\n self.date=date\n self.lieu=lieu\n self.description=description \n self.duree=duree\n self.organisation=organisation\n\n\n def save(self):\n mycursor = mydb.cursor()\n sql = \"INSERT INTO `rdv`( `date`, `lieu`, `description`, `duree`, `organisateur`) VALUES (%s, %s,%s,%s,%s)\"\n val = (self.date,self.lieu,self.description,self.duree,self.organisation)\n mycursor.execute(sql, val)\n mydb.commit()\n print(mycursor.rowcount, \"record inserted.\")\n\nrd=rdv(\"25/11/2021\",\"gabes\",\"aucun\",30,\"personel\")\nrd.save()\n","sub_path":"qt/tpqt/rdv.py","file_name":"rdv.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"241701085","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# NOTE: not currently used\n\nimport os\nfrom datetime import datetime\n\nfrom influxdb import InfluxDBClient\n\nINFLUXDB_ADDR = \"192.168.0.10\"\nINFLUXDB_PORT = 8086\nINFLUXDB_DB = \"sensor\"\n\nINFLUXDB_QUERY1 = \"\"\" SELECT mean(\"touchpad\") FROM \"sensor.esp32\"\nWHERE (\"hostname\" = \\'ESP32-raindrop\\') AND time >= now() - 1h GROUP\nBY time(5m) fill(previous) ORDER by time desc LIMIT 10 \"\"\"\n\nINFLUXDB_QUERY2 = \"\"\" SELECT sum(\"rain\") FROM \"sensor.esp32\" WHERE\n(\"hostname\" = \\'ESP32-rain\\') AND time >= now() - 2d GROUP BY\ntime(12h) fill(0) ORDER by time desc LIMIT 10 \"\"\"\n\nWET_THRESHOLD1 = 370\nWET_THRESHOLD2 = 0.5\n\n\ndef is_soil_wet_1():\n try:\n client = InfluxDBClient(host=INFLUXDB_ADDR, port=INFLUXDB_PORT, database=INFLUXDB_DB)\n result = client.query(INFLUXDB_QUERY1)\n\n points = list(filter(lambda x: x is not None, map(lambda x: x[\"mean\"], result.get_points())))\n\n with open(\n os.path.join(os.path.dirname(os.path.abspath(__file__)), \"soilwet.log\"),\n mode=\"a\",\n ) as f:\n print(\"{} wet1 {}\".format(datetime.now(), list(points)), file=f)\n\n val = points[0]\n if val is None:\n return False\n return val < WET_THRESHOLD1\n except:\n pass\n\n return False\n\n\ndef is_soil_wet_2():\n try:\n client = InfluxDBClient(host=INFLUXDB_ADDR, port=INFLUXDB_PORT, database=INFLUXDB_DB)\n result = client.query(INFLUXDB_QUERY2)\n\n points = list(filter(lambda x: x is not None, map(lambda x: x[\"sum\"], result.get_points())))\n\n with open(\n os.path.join(os.path.dirname(os.path.abspath(__file__)), \"soilwet.log\"),\n mode=\"a\",\n ) as f:\n print(\"{} wet2 {}\".format(datetime.now(), list(points)), file=f)\n\n val = points[0]\n if val is None:\n return False\n return val > WET_THRESHOLD2\n except:\n pass\n\n return False\n\n\ndef is_soil_wet():\n return is_soil_wet_1() or
is_soil_wet_2()\n\n\nif __name__ == \"__main__\":\n print(is_soil_wet())\n","sub_path":"flask/lib/weather_sensor.py","file_name":"weather_sensor.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"633634866","text":"from flask_restful import Resource, reqparse\nfrom flask_jwt import JWT, jwt_required\nfrom models.item import ItemModel\n\nclass Item(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument(\n \"price\",\n type=float,\n required=True,\n help=\"This field can not be left blank\"\n )\n parser.add_argument(\n \"store_id\",\n type=int,\n required=True,\n help=\"This field can not be left blank\"\n )\n\n\n @jwt_required()\n def get(self, name):\n item = ItemModel.find_by_name(name)\n if item is not None:\n return item.json(), 200\n else:\n return {\"message\": \"Item has not been found\"}, 404\n\n def post(self, name):\n item = ItemModel.find_by_name(name)\n if item is not None:\n return \"The item {} already exists\".format(name), 400\n else:\n data = Item.parser.parse_args()\n item = ItemModel(name=name, price=data['price'], store_id=data['store_id'])\n try:\n item.save_item_to_db()\n return item.json(), 201\n except:\n return {\"message\": \"An error occured\"}, 500\n\n\n @jwt_required()\n def delete(self, name):\n item = ItemModel.find_by_name(name)\n if item is not None:\n item.delete_item_from_db()\n return {\"message\": \"item deleted\"}\n else:\n return {\"message\": \"item does not exist\"}, 404\n\n\n def put(self, name):\n data = Item.parser.parse_args()\n item = ItemModel.find_by_name(name)\n if item is None:\n item = ItemModel(name, price=data['price'], store_id=data['store_id'])\n else:\n item.price = data['price']\n item.store_id = data['store_id']\n item.save_item_to_db()\n return item.json()\n\n\nclass ItemList(Resource):\n @jwt_required()\n def get(self):\n return {'items': [item.json() for item in ItemModel.query.all()]}","sub_path":"resources/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"602104633","text":"'''\nIn a row of seats, 1 represents a person sitting in that seat, and 0 represents that the seat is empty. \nThere is at least one empty seat, and at least one person sitting.\nAlex wants to sit in the seat such that the distance between him and the closest person to him is maximized. 
\nReturn that maximum distance to closest person.\n\nExample 1:\nInput: [1,0,0,0,1,0,1]\nOutput: 2\nExplanation: \nIf Alex sits in the second open seat (seats[2]), then the closest person has distance 2.\nIf Alex sits in any other open seat, the closest person has distance 1.\nThus, the maximum distance to the closest person is 2.\nExample 2:\nInput: [1,0,0,0]\nOutput: 3\nExplanation: \nIf Alex sits in the last seat, the closest person is 3 seats away.\nThis is the maximum distance possible, so the answer is 3.\nNote:\n1.1 <= seats.length <= 20000\n2.seats contains only 0s or 1s, at least one 0, and at least one 1.\n'''\n\n__date__ = '2018-7-18'\n\n# way 1\nclass Solution_1(object):\n def maxDistToClosest(self, seats):\n \"\"\"\n :type seats: List[int]\n :rtype: int\n \"\"\"\n # When people sit on both sides, find the longest run of empty seats; the distance is (run + 1) // 2\n count, res = 0, 0\n for seat in seats:\n if seat == 0:\n count += 1\n else:\n res = max(res, count)\n count = 0\n res = (res + 1) // 2\n # If the first seat is empty, the max distance is the number of empty seats scanned from the left until the first person\n count = 0\n if seats[0] == 0:\n for seat in seats:\n if seat == 0:\n count += 1\n else:\n break\n res = max(res, count)\n # If the last seat is empty, the max distance is the number of empty seats scanned from the right until the first person\n count = 0\n if seats[-1] == 0:\n for seat in seats[::-1]:\n if seat == 0:\n count += 1\n else:\n break\n res = max(res, count)\n return res\n\n# way 2\nclass Solution_2(object):\n def maxDistToClosest(self, seats):\n \"\"\"\n :type seats: List[int]\n :rtype: int\n \"\"\"\n total = 0\n # The list li records the length of each run of empty seats; an occupied seat resets the counter to zero\n li = []\n for i in seats:\n if i == 0:\n total += 1\n elif i == 1:\n li.append(total)\n total = 0\n li.append(total)\n # The first and last elements of li (edge runs) need no adjustment\n for i in range(1, len(li)-1):\n li[i] = li[i] - li[i] // 2\n return max(li)","sub_path":"849. Maximize Distance to Closest Person.py","file_name":"849. Maximize Distance to Closest Person.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"220704068","text":"from lxml import etree\nimport requests\nimport json\n\nurl=\"http://eip.megmeet.com:8008/sys/task/sys_task_main/sysTaskIndex.do?\"\nmaxpage=5\nheaders={\n\n \"Cookie\": \"j_lang=zh-CN; JSESSIONID=A03ACE72F5C69DE72FB4A7AB78C8C420\",\n \"User-Agent\":\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\"\n}\n\ndef get_onepage(page):\n data = {\n \"method\": \"list\",\n \"q.mydoc\": \"all\",\n \"q.j_path\": \"/listAll\",\n \"q.s_raq\": \"0.6779489634474305\",\n \"pageno\": page,\n \"rowsize\": \"30\",\n \"orderby\": \"docCreateTime\",\n \"ordertype\": \"down\",\n \"s_ajax\": \"true\"\n }\n text=requests.get(url,headers=headers,params=data).text\n #print(type(text),text)\n jsons=json.loads(text)\n results=jsons.get('datas')\n for result in results:\n html=result[1]['value']\n html=etree.HTML(html)\n title=html.xpath('//span/text()')\n print(title)\n #print(html)\n\nif __name__ == '__main__':\n for page in range(1,maxpage+1):\n get_onepage(page)\n print(\"第{0}页加载完成!\".format(page))\n","sub_path":"EIP/EIP系统所有任务.py","file_name":"EIP系统所有任务.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"178699361","text":"#Python program to implement Vigenere Cipher\n#For explanation refer to GeeksforGeeks\n\n\n# Dictionary to lookup the index of alphabets\ndict1 = {'A': 0, 'B': 1, 'C': 2, 'D': 3, 'E': 4,\n 'F': 5, 'G': 6, 'H': 7, 'I': 8, 'J': 9,\n 'K': 10, 'L': 11, 'M': 12, 'N': 13, 'O': 14,\n 'P': 15, 'Q': 16, 'R': 17, 'S': 18, 'T':
19,\n 'U': 20, 'V': 21, 'W': 22, 'X': 23, 'Y': 24, 'Z': 25}\n\ndict2={0:'A',1:'B',2:'C',3:'D',4:'E',\n 5:'F',6:'G',7:'H',8:'I',9:'J',\n 10:'K',11:'L',12:'M',13:'N',14:'O',\n 15:'P',16:'Q',17:'R',18:'S',19:'T',\n 20:'U',21:'V',22:'W',23:'X',24:'Y',25:'Z'}\n\n\n#This function generates the key in\n#a cyclic manner until its length is\n#equal to the length of the original text\ndef generate_key(message,key):\n\tx=len(message)\n\ti=0\n\twhile True:\n\t\tif x==i:\n\t\t\ti=0\n\t\tif len(key)==len(message):\n\t\t\tbreak\n\t\tkey+=key[i]\n\t\ti+=1\n\treturn key\n\n#This function returns the encrypted text\n#generated with the help of the key\ndef cipherText(message,key_new):\n\tcipher_text=''\n\ti=0\n\tfor letter in message:\n\t\tif letter==' ':\n\t\t\tcipher_text+=' '\n\t\telse:\n\t\t\tx=(dict1[letter]+dict1[key_new[i]])%26\n\t\t\ti+=1\n\t\t\tcipher_text+=dict2[x]\n\treturn cipher_text\n\n#This function decrypts the encrypted text\n#and returns the original text\ndef originalText(cipher_text,key_new):\n\tor_txt=''\n\ti=0\n\tfor letter in cipher_text:\n\t\tif letter==' ':\n\t\t\tor_txt+=' '\n\t\telse:\n\t\t\tx=(dict1[letter]-dict1[key_new[i]]+26)%26\n\t\t\ti+=1\n\t\t\tor_txt+=dict2[x]\n\treturn or_txt\n\ndef main():\n\tmessage='THE GERMAN ATTACK'\n\tkey='SECRET'\n\tkey_new=generate_key(message,key)\n\tcipher_text=cipherText(message,key_new)\n\toriginal_text=originalText(cipher_text,key_new)\n\tprint(\"Encrypted Text = \"+cipher_text)\n\tprint(\"Original Text = \"+original_text)\n\n#Executes the main function\nif __name__ == '__main__':\n\tmain()","sub_path":"vigenere_cipher.py","file_name":"vigenere_cipher.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"316576315","text":"# -*- coding:utf-8 -*-\n\"\"\"\nDescription:\n Contract class in neo.Wallets\n Base class of all contracts\nUsage:\n from neo.Wallets.Contract import Contract\n\"\"\"\nfrom io import BytesIO,BufferedReader,BufferedWriter\nfrom neo.VM.OpCode import *\nfrom neo.VM.ScriptBuilder import ScriptBuilder\nfrom neo.Cryptography.Crypto import *\nfrom neo.IO.Mixins import SerializableMixin\nfrom neo.Wallets.ContractParameterType import ContractParameterType\nfrom neo.Cryptography.Helper import *\nfrom autologging import logged\n\n@logged\nclass Contract(SerializableMixin):\n \"\"\"docstring for Contract\"\"\"\n\n RedeemScript=None\n ParameterList = None\n PubKeyHash = None\n ScriptHash = None\n\n def __init__(self, redeem_script, param_list, pubkey_hash, script_hash):\n super(Contract, self).__init__()\n\n self.RedeemScript = redeem_script\n self.ParameterList = param_list\n self.PubKeyHash = pubkey_hash\n self.ScriptHash = script_hash\n\n\n @staticmethod\n def Create(publicKeyHash, parameterList, redeemScript):\n\n return Contract(redeemScript, parameterList, publicKeyHash, Contract.RedeemToScripthash(redeemScript))\n\n\n\n @staticmethod\n def CreateMultiSigContract(publickKeyHash, m, publicKeys):\n# raise NotImplementedError()\n pass\n\n @staticmethod\n def CreateMultiSigRedeemScript(m, publicKeys):\n # raise NotImplementedError()\n\n if m < 2 or m > len(publicKeys) or len(publicKeys) > 1024:\n raise Exception('Invalid keys')\n\n sb = ScriptBuilder()\n sb.push(m)\n\n\n pkeys = [point for point in publicKeys]\n pkeys.sort()\n keys = [p.encode_point().decode() for p in pkeys]\n\n #for now we don't\n for key in keys:\n sb.push(key)\n\n sb.push(len(publicKeys))\n sb.add(CHECKMULTISIG)\n\n toarray = sb.ToArray()\n tastr =
toarray.decode('utf8')\n return toarray\n\n @staticmethod\n def CreateSignatureContract(publicKey):\n result = Contract.RedeemToScripthash(Contract.PubkeyToRedeem(publicKey))\n return Contract.Create(result, [ContractParameterType.Signature], Contract.CreateSignatureRedeemScript(publicKey))\n\n @staticmethod\n def CreateSignatureRedeemScript(publicKey):\n sb = ScriptBuilder()\n sb.push(publicKey)\n sb.add(CHECKSIG)\n return sb.ToArray()\n\n def Equals(self, other):\n if id(self) == id(other):\n return True\n if not isinstance(other, Contract):\n return False\n return self.ScriptHash == other.ScriptHash\n\n def GetAddress(self):\n # TODO\n raise NotImplementedError()\n\n def GetHashCode(self):\n if self.ScriptHash == None:\n self.ScriptHash = Contract.RedeemToScripthash(self.RedeemScript)\n return self.ScriptHash\n\n def ToScriptHash(self):\n return Crypto.Hash160(self.ScriptHash)\n\n def IsStandard(self):\n if len(self.RedeemScript) / 2 != 35:\n return False\n array = self.RedeemScript[:]\n if array[:2] != '21' or array[-2:] != 'ac':\n return False\n return True\n\n def Serialize(self, writer):\n writer.WriteBytes(self.ScriptHash)\n writer.WriteBytes(self.PubKeyHash)\n writer.WriteVarBytes(self.ParameterList) # TODO need check\n writer.WriteVarBytes(self.RedeemScript)\n\n def Deserialize(self, reader):\n raise NotImplementedError()\n\n @staticmethod\n def PubkeyToRedeem(pubkey):\n return binascii.unhexlify('21'+ pubkey) + from_int_to_byte(int('ac',16))\n\n @staticmethod\n def RedeemToScripthash(redeem):\n return binascii.hexlify(bin_hash160(redeem))\n","sub_path":"neo/Wallets/Contract.py","file_name":"Contract.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"573128134","text":"import subprocess\nfrom threading import Thread\n\nfrom lemon_pi.car.display_providers import (\n TemperatureProvider,\n LapProvider,\n FuelProvider\n)\nfrom lemon_pi.car.event_defs import (\n LeaveTrackEvent,\n RadioSyncEvent,\n DriverMessageEvent,\n DriverMessageAddendumEvent,\n RaceFlagStatusEvent,\n LapInfoEvent,\n RadioReceiveEvent,\n RefuelEvent,\n ExitApplicationEvent, RacePositionEvent\n)\nfrom lemon_pi.shared.events import EventHandler\nfrom lemon_pi.shared.generated.messages_pb2 import (\n RaceStatus,\n DriverMessage,\n Ping,\n RacePosition,\n RaceFlagStatus,\n SetFuelLevel,\n ToPitMessage, RemoteReboot)\n\nfrom python_settings import settings\n\nimport logging\n\nfrom lemon_pi.shared.radio import Radio\n\nlogger = logging.getLogger(__name__)\n\n\n# an adapter class that gets radio events and then sends them onto the radio.\n# This is a thread, because it listens for messages coming in from the radio, and we do\n# not want to use the radio control thread for processing the messages. 
We need the radio\n# control thread to be back controlling the radio\n\nclass RadioInterface(Thread, EventHandler):\n\n def __init__(self, radio:Radio,\n temp_provider:TemperatureProvider,\n lap_provider:LapProvider,\n fuel_provider:FuelProvider):\n Thread.__init__(self)\n self.radio = radio\n self.temp_provider = temp_provider\n self.lap_provider = lap_provider\n self.gps_provider = None\n self.fuel_provider = fuel_provider\n RadioSyncEvent.register_handler(self)\n LeaveTrackEvent.register_handler(self)\n\n def register_lap_provider(self, lap_provider):\n self.lap_provider = lap_provider\n\n def register_gps_provider(self, gps):\n self.gps_provider = gps\n\n def handle_event(self, event, **kwargs):\n if event == RadioSyncEvent:\n msg = ToPitMessage()\n msg.telemetry.coolant_temp = self.temp_provider.get_temp_f()\n msg.telemetry.last_lap_time = self.lap_provider.get_last_lap_time()\n msg.telemetry.lap_count = self.lap_provider.get_lap_count()\n msg.telemetry.last_lap_fuel_usage = self.fuel_provider.get_fuel_used_last_lap_ml()\n msg.telemetry.fuel_remaining_percent = self.fuel_provider.get_fuel_percent_remaining()\n # we send the event asynchronously, because the radio can take multiple seconds\n # to transmit, so there is no guarantee that this message will be sent exactly now\n self.radio.send_async(msg)\n if event == LeaveTrackEvent:\n msg = ToPitMessage()\n msg.pitting.timestamp = 1\n self.radio.send_async(msg)\n\n def run(self):\n while True:\n try:\n msg = self.radio.receive_queue.get()\n self.process_incoming(msg)\n self.radio.receive_queue.task_done()\n except Exception:\n logger.exception(\"got an exception in radio_interface\")\n\n def process_incoming(self, msg):\n RadioReceiveEvent.emit()\n if type(msg) == RaceStatus:\n logger.info(\"got race status message...{}\".format(msg))\n RaceFlagStatusEvent.emit(flag=RaceFlagStatus.Name(msg.flag_status))\n if msg.flag_status == RaceFlagStatus.RED:\n DriverMessageEvent.emit(text=\"Race Red Flagged\", duration_secs=10)\n if msg.flag_status == RaceFlagStatus.BLACK:\n DriverMessageEvent.emit(text=\"Race Black Flagged\", duration_secs=10)\n if msg.flag_status == RaceFlagStatus.YELLOW:\n DriverMessageEvent.emit(text=\"Course Yellow\", duration_secs=10)\n elif type(msg) == DriverMessage:\n logger.info(\"got race driver message...{}\".format(msg))\n # for a multi-car team we only want to show the message to the car it\n # was intended for\n if msg.car_number == settings.CAR_NUMBER:\n DriverMessageEvent.emit(text=msg.text, duration_secs=30)\n elif type(msg) == Ping:\n logger.info(\"got ping message...{}\".format(msg))\n elif type(msg) == RacePosition:\n logger.info(\"got race position message...{}\".format(msg))\n # is this about us directly?\n if msg.car_number == settings.CAR_NUMBER:\n RacePositionEvent.emit(pos=msg.position,\n pos_in_class=msg.position_in_class,\n car_ahead=msg.car_ahead.car_number,\n gap=msg.car_ahead.gap_text)\n position_text = self.format_position(msg)\n if msg.car_ahead.car_number:\n text = \"{} ▲ #{} by {}\".format(position_text, msg.car_ahead.car_number, msg.car_ahead.gap_text)\n DriverMessageEvent.emit(text=text, duration_secs=120)\n else:\n # we're in the lead, there's no-one ahead\n text = \"P1\"\n DriverMessageEvent.emit(text=text, duration_secs=120)\n LapInfoEvent.emit(lap_count=msg.lap_count, ts=msg.timestamp)\n else:\n # this might be the following car behind us ... 
it might also be for a different car in our team\n if msg.car_ahead and msg.car_ahead.car_number == settings.CAR_NUMBER:\n text = \" ▼ #{} by {}\".format(msg.car_number, msg.car_ahead.gap_text)\n DriverMessageAddendumEvent.emit(text=text)\n # now that this message also contains the race flag status we can emit it\n # unlike the similar message above this does not mean that the status has changed\n # it's more for corrective purposes, so the display doesn't get stuck in a bad\n # state if a flag message is missed\n RaceFlagStatusEvent.emit(flag=RaceFlagStatus.Name(msg.flag_status))\n elif type(msg) == SetFuelLevel:\n logger.info(\"got fuel level adjustment...{}\".format(msg))\n # for a multi-car team we only want to show the message to the car it\n # was intended for\n if msg.car_number != settings.CAR_NUMBER:\n logger.info(\"it's not for me, ignoring\")\n return\n if msg.percent_full == 0:\n RefuelEvent.emit(percent_full=100)\n else:\n RefuelEvent.emit(percent_full=msg.percent_full)\n elif type(msg) == RemoteReboot:\n # for a multi-car team we only want to show the message to the car it\n # was intended for\n if msg.car_number != settings.CAR_NUMBER:\n logger.info(\"it's not for me, ignoring\")\n return\n logger.info(\"got remote reboot going down\".format(msg))\n ExitApplicationEvent.emit()\n logger.info(\"told system to shut down ... now rebooting lemon-pi\")\n subprocess.run(['sudo', 'reboot', 'now'])\n logger.info(\"goodbye, cruel world...\")\n else:\n logger.info(\"got unexpected message : {}\".format(type(msg)))\n\n def format_position(self, msg: RacePosition):\n if msg.position_in_class > 0 and msg.position_in_class != msg.position:\n return \"P{} ({})\".format(msg.position, msg.position_in_class)\n return \"P{}\".format(msg.position)\n\n\n\n\n\n\n","sub_path":"lemon_pi/car/radio_interface.py","file_name":"radio_interface.py","file_ext":"py","file_size_in_byte":7380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"541192080","text":"from datetime import datetime\n\nfrom django import forms\nfrom django.forms import ImageField, ModelForm, Textarea\nfrom django.forms.models import ModelForm\nfrom django.forms.widgets import DateInput, NumberInput, Select, TextInput\nfrom django.utils.translation import gettext_lazy as _\n\nfrom donations.models import *\nfrom donations.models import Feedback\nfrom donations.widgets import AdvancedFileInput\n\n\ndef validate_past_date(value):\n if value < datetime.now().date():\n raise forms.ValidationError(\n 'วันที่เลือกต้องเป็นวันหลังวันปัจจุบัน'\n )\n\nclass DonationForm(ModelForm):\n class Meta:\n model = Donation\n fields = ['name', 'dtype', 'condition', 'desc', 'quantity']\n\n labels = {\n 'name': _('ชื่อสิ่งของบริจาค'),\n 'dtype': _('ประเภท'),\n 'condition': _('สภาพ'),\n 'desc': _('คำอธิบายเกี่ยวกับสิ่งของบริจาค'),\n 'quantity': _('จำนวน'),\n }\n widgets = {\n 'name': TextInput(attrs={'class':'form-control'}),\n 'dtype': Select(attrs={'class':'form-control'}),\n 'condition': Select(attrs={'class':'form-control'}),\n 'quantity': NumberInput(attrs={'class':'form-control'}),\n 'desc': Textarea(attrs={'cols': 40, 'rows': 5 ,'class':'form-control'}),\n }\n\nclass CreateProjectForm(ModelForm):\n expire_date = forms.DateField(label='วันสิ้น���ุดการรับ', widget=forms.DateTimeInput(attrs={'class':'form-control', 'type':'date'}),\n validators=[validate_past_date]\n )\n requiretype = forms.ModelMultipleChoiceField(label='สิ่งที่เปิดรับ',\n widget = 
forms.CheckboxSelectMultiple(attrs={'class':'form-check-control'}),\n queryset = RequireType.objects.all()\n )\n\n class Meta:\n model = Project\n exclude = ('recipient','location','status','album')\n labels = {\n 'name': _('ชื่อโครงการ'),\n 'desc': _('รายละเอียดโครงการ'),\n 'requiretype': _('การบริจาคที่รองรับ'),\n 'propose': _('จุดประสงค์โครงการ'),\n 'helping_people': _('จำนวนคนที่จะได้รับการช่วยเหลือ'),\n 'address': _('ที่อยู่ของผู้จะได้รับบริจาค'),\n }\n widgets = {\n 'name': TextInput(attrs={'class':'form-control'}),\n 'propose': TextInput(attrs={'class':'form-control'}),\n 'helping_people': NumberInput(attrs={'class':'form-control'}),\n 'address': Textarea(attrs={'cols': 40, 'rows': 5 , 'class':'form-control'}),\n 'desc': Textarea(attrs={'cols': 40, 'rows': 17 , 'class':'form-control'}),\n }\n\nclass FeedbackForm(ModelForm):\n class Meta:\n model = Feedback\n exclude = ['album', 'sender', 'sent_date', 'donation', 'location']\n labels = {\n 'header': _('หัวเรื่อง'),\n 'detail': _('เนื้อความ')\n }\n widgets = {\n 'header': TextInput(attrs={'class':'form-control'}),\n 'detail': Textarea(attrs={'cols': 40, 'rows': 5 ,'class':'form-control'}),\n }\n","sub_path":"IODT/donations/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"542109267","text":"import Constants as Cn\n\n\nclass Frames(object):\n \"\"\" Creates an array of integers. Represents a second of video frames.\n Attributes:\n Integer drops: Number of video frames dropped\n Integer errors: Number of decode errors\n Boolean complete: True when a second's worth of video frames has been appended\n Integer last_int: Last valid video frame appended\n Integer frames_per_second = Number of frames in one second of video\n Integer[] second = For each frame, indicates if frame was dropped, had decode error, or was valid\n \"\"\"\n\n def __init__(self, fps=30):\n self.drops = 0\n self.errors = 0\n self.complete = False\n self.last_int = -1\n self.frames_per_second = fps\n self.second = [Cn.DROP] * self.frames_per_second\n\n def append(self, frame):\n \"\"\" Appends a frame to the seconds array \"\"\"\n try:\n int_frame = int(frame)\n if self.last_int > int_frame:\n self.complete = True\n self._find_dropped_frames()\n else:\n self.second[int_frame] = int_frame\n self.last_int = int_frame\n except ValueError:\n try:\n self.last_int += 1\n self.second[self.last_int] = Cn.ERROR\n self.errors += 1\n except IndexError:\n self.complete = True\n self._find_dropped_frames()\n except IndexError:\n raise IndexError('Max value for frame is {}, received {}'.format(self.frames_per_second, frame))\n\n def _find_dropped_frames(self):\n \"\"\" Returns number of dropped frames in seconds array \"\"\"\n self.drops = self.second.count(Cn.DROP)\n","sub_path":"Sorter/Frames.py","file_name":"Frames.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"341516634","text":"import os\nimport pickle\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom taggers.svm_tagger import SVMTagger\nfrom config import SVMConfig\nfrom corpora.corpus import Utterance\nfrom typing import List\nimport json\n\nfrom .trainer import Trainer\nfrom pathlib import Path\nimport logging\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(\"ISO_DA\")\n\n\nclass SVMTrainer(Trainer):\n def __init__(self, config: SVMConfig):\n Trainer.__init__(self, 
config, config.taxonomy)\n for c in config.corpora_list:\n try:\n self.corpora.append(c[0](c[1], config.taxonomy))\n except Exception as e:\n logger.warning(f\"Corpus {c[0]} not loaded. {e}\")\n\n @staticmethod\n def train_pipeline(config: SVMConfig, dataset: List[Utterance]):\n if all(len(u.tags) == 1 for u in dataset) and all(u.tags[0] == dataset[0].tags[0] for u in dataset):\n logger.warning(f\"The only tag available for this classifier is {dataset[0].tags[0]}.\"\n \"The classifier will still be trained, but it won't recognise any other labels.\"\n \"Please provide additional data to obtain a working classifier. You can check README.md \"\n \"for information on how to obtain more data\")\n for _ in range(0, 3):\n dataset.append(Utterance(text=\"<>\", context=[], tags=[\"<>\"], speaker_id=0))\n features = SVMTagger.build_features(dataset, config)\n train_pipeline = Pipeline([\n # Use FeatureUnion to combine the features from wordcount and labels\n ('union', FeatureUnion(\n transformer_list=[('feature_' + str(i), pipeline) for i, pipeline in enumerate(features[1])]\n )),\n # Use a SVC classifier on the combined features\n ('classifier', config.classifier)\n ])\n if len(dataset) == 0:\n logger.error(f\"Not enough data to train the classifier! Please check README.md for \"\n f\"more information on how to obtain more data\")\n return\n train_pipeline.fit(features[0], [u.tags for u in dataset])\n for _ in range(1, 4):\n del(dataset[-1])\n return train_pipeline\n\n def dump_model(self, pipelines: dict):\n # Create directory\n path = Path(os.path.dirname(self.config.out_folder))\n print(f\"creating {self.config.out_folder}\")\n path.mkdir(parents=True, exist_ok=True)\n\n # Save the config file\n with open(f\"{self.config.out_folder}/config.json\", \"w\") as f:\n json.dump(self.config.to_dict(), f, indent=4)\n\n # Save the pipelines\n for pipeline in pipelines.keys():\n pickle.dump(pipelines[pipeline], open(f\"{self.config.out_folder}/{pipeline}\", 'wb'))\n return\n\n def train(self, dump=True):\n logger.info(f\"Training Dialogue Act Tagger for {self.config.taxonomy} taxonomy, using the following corpora:\"\n f\"{[c.name for c in self.corpora]}\")\n dataset = []\n pipelines = {}\n\n for corpus in self.corpora:\n dataset = dataset + corpus.utterances\n if \"dimension\" in self.config.taxonomy.value.__annotations__.keys():\n # Train dimension tagger\n logger.info(\"Training dimension pipeline\")\n dimension_dataset = SVMTagger.stringify_tags(dataset, \"dimension\")\n pipelines['dimension'] = self.train_pipeline(self.config, dimension_dataset)\n\n # Train a comm-function classifier for each dimension\n dimension_labels = [\n [tag for tag in utt.tags]\n for utt in dimension_dataset\n ]\n dimension_values = list(set([label for tagset in dimension_labels for label in tagset]))\n for dimension_value in dimension_values:\n logger.info(f\"Training communication function pipeline for dimension {dimension_value}\")\n comm_dataset = SVMTagger.stringify_tags(dataset, \"comm_function\",\n filter_attr=\"dimension\", filter_value=dimension_value)\n pipelines[f'comm_{dimension_value}'] = self.train_pipeline(self.config, comm_dataset)\n else:\n logger.info(\"Training unified communication function pipeline\")\n comm_dataset = SVMTagger.stringify_tags(dataset, \"comm_function\")\n pipelines['comm_all'] = self.train_pipeline(self.config, comm_dataset)\n self.config.pipeline_files = list(pipelines.keys())\n if dump:\n self.dump_model(pipelines)\n return 
SVMTagger(self.config)\n\n","sub_path":"trainers/svm_trainer.py","file_name":"svm_trainer.py","file_ext":"py","file_size_in_byte":4693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"598443947","text":"\"\"\"\r\n@author: J.W.Spaak\r\nExample of how to compute the ND and FD for a given differential equation setting\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\ntry:\r\n from numerical_NFD import NFD_model\r\nexcept ImportError:\r\n # in case this code is used in a submodule, import from the submodule\r\n from nfd_definitions.numerical_NFD import NFD_model\r\n\r\n# create the differential equation system\r\nn_spec = 2 # number of species in the system\r\nnp.random.seed(6) # set random seed for reproducibility\r\n\r\n# Lotka-Volterra model\r\nA = np.random.uniform(0,1,(n_spec,n_spec)) # interaction matrix\r\nnp.fill_diagonal(A,np.random.uniform(1,2,n_spec)) # to ensure coexistence\r\nmu = np.random.uniform(1,2,n_spec) # intrinsic growth rate\r\ndef test_f(N):\r\n return mu - np.dot(A,N)\r\n\r\n# compute relevant parameters with software\r\npars = NFD_model(test_f, n_spec)\r\nND, NO, FD, c = pars[\"ND\"], pars[\"NO\"], pars[\"FD\"], pars[\"c\"]\r\n# manually check results for the two species case\r\n# see appendix for proof of correctness\r\nNO_check = np.sqrt(np.array([A[0,1]*A[1,0]/A[1,1]/A[0,0],\r\n A[0,1]*A[1,0]/A[1,1]/A[0,0]]))\r\nND_check = 1-NO\r\nFD_check = 1- mu[::-1]/mu*np.sqrt(np.array([A[0,1]*A[0,0]/A[1,0]/A[1,1],\r\n A[1,0]*A[1,1]/A[0,1]/A[0,0]]))\r\nc_check = np.sqrt(np.array([A[0,1]*A[1,1]/A[1,0]/A[0,0],\r\n A[1,0]*A[0,0]/A[0,1]/A[1,1]]))\r\n\r\n# precision of output\r\nprec = 4\r\nprint(\"Results of two species case:\\n\")\r\nprint(\"\\t Software\\t\\t Manual check\\t\\t Rel. Difference\\n\")\r\nprint(\"ND:\\t\", np.round(ND,prec), \"\\t\", np.round(ND_check,prec),\r\n \"\\t\", np.abs(ND-ND_check)/ND_check)\r\nprint(\"NO:\\t\", np.round(NO,prec), \"\\t\", np.round(NO_check,prec),\r\n \"\\t\", np.abs(NO-NO_check)/NO_check)\r\nprint(\"FD:\\t\", np.round(FD,prec), \"\\t\", np.round(FD_check,prec),\r\n \"\\t\", np.abs(FD-FD_check)/FD_check)\r\nprint(\"c:\\t\", np.round(c[[0,1],[1,0]],prec), \"\\t\", np.round(c_check,prec),\r\n \"\\t\", np.abs(c[[0,1],[1,0]]-c_check)/c_check)\r\n\r\n\r\n###############################################################################\r\n# Switching to multispecies case\r\n# create the differential equation system\r\nn_spec = 10 # number of species in the system\r\n\r\n# Lotka-Volterra model\r\nA = np.random.uniform(0,1,(n_spec,n_spec)) # interaction matrix\r\n\r\n# to ensure coexistence increase diagonal values\r\nnp.fill_diagonal(A,np.random.uniform(n_spec,n_spec+1,n_spec)) \r\nmu = np.random.uniform(1,2,n_spec) # intrinsic growth rate\r\ndef test_f(N, mu, A):\r\n return mu - np.dot(A,N)\r\n\r\n# how to pass additional arguments to NFD_model\r\npars = NFD_model(test_f, n_spec, args = (mu, A))\r\nND_m, NO_m, FD_m, c_m = pars[\"ND\"], pars[\"NO\"], pars[\"FD\"], pars[\"c\"]\r\n\r\nNO_check_m = np.empty(n_spec)\r\nFD_check_m = np.empty(n_spec)\r\nfor i in range(n_spec):\r\n denominator = 0\r\n numerator = 0\r\n for j in range(n_spec):\r\n if i==j:\r\n continue\r\n numerator += pars[\"N_star\"][i,j]*A[i,j]\r\n denominator += pars[\"N_star\"][i,j]*np.sqrt(A[i,j]/A[j,i]*A[i,i]*A[j,j])\r\n NO_check_m[i] = numerator/denominator\r\n FD_check_m[i] = 1-denominator/mu[i]\r\n \r\n# printing layout is optimized for 6 species\r\ndef print_function(var, var_check, name):\r\n rel_diff =
np.round(np.abs(var-var_check)/var_check,prec)\r\n var = np.round(var,prec)\r\n var_check = np.round(var_check,prec)\r\n print(name+\":\\t\", var[:2],\"\\t\", var_check[:2], \"\\t\", rel_diff[:2])\r\n for i in range(1,len(var)//2):\r\n ind = [2*i,2*i+1]\r\n print(\"\\t\", var[ind],\"\\t\", var_check[ind], \"\\t\", rel_diff[ind])\r\n if len(var)%2==1:\r\n print(\"\\t\", var[-1],\"\\t\\t\", var_check[-1], \"\\t\\t\", rel_diff[-1])\r\n print()\r\n \r\n\r\nprint(\"\\n\\nResults of multi species case:\\n\")\r\nprint(\"\\t Software\\t\\t Manual check\\t\\t Rel. Difference\\n\")\r\nprint_function(NO_m, NO_check_m, \"NO\")\r\nprint_function(FD_m, FD_check_m, \"FD\")\r\n","sub_path":"Example,compute NFD.py","file_name":"Example,compute NFD.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"636825303","text":"\nfrom lxml import etree\nfrom cStringIO import StringIO\nfrom urllib import urlopen\nfrom gzip import GzipFile\nimport os\nimport re\nimport sys\n\nfrom exceptions import *\nfrom urlsetelement import *\n\nclass UrlSet(object):\n \"\"\"\n UrlSet urlset structure\n\n Lazy loading of an urlset from a sitemap.\n \"\"\"\n\n @staticmethod\n def from_url(url, **kwargs):\n \"\"\" Create an urlset from an url \"\"\"\n u = urlopen(url)\n if u.headers.has_key(\"content-type\") and u.headers[\"content-type\"].lower() == \"application/x-gzip\":\n u = GzipFile(fileobj=StringIO(u.read()))\n return UrlSet(u, url, **kwargs)\n\n @staticmethod\n def from_file(file, **kwargs):\n \"\"\" Create an urlset from a file \"\"\"\n return UrlSet(open(file), file, **kwargs)\n\n @staticmethod\n def from_str(str, **kwargs):\n \"\"\" Create an urlset from a string \"\"\"\n return UrlSet(StringIO(str), 'string', **kwargs)\n\n @staticmethod\n def empty_container():\n \"\"\" Create an empty urlset container. Use this for constructing a sitemap \"\"\"\n return UrlSet()\n\n source = property(lambda self:self._source)\n\n def __init__(self,handle=None, source='handle', validate=True):\n \"\"\" Create an urlset from any kind of file-like object \"\"\"\n self._source = source\n self._handle = handle\n self._validate = validate\n self._elements = []\n\n def append(self, urlsetelement):\n if self._handle:\n raise Exception(\"You can append only to a container. \" + \\\n \" This urlset is binded to a handle\")\n self._elements.append(urlsetelement)\n\n def get_urls(self):\n if not self._handle:\n return self.get_urls_from_elements()\n else:\n return self.get_urls_from_handle()\n\n def get_urls_from_elements(self):\n return self._elements\n\n def get_urls_from_handle(self):\n \"\"\" Parse the xml file and generate the elements \"\"\"\n if self._validate:\n schema = etree.XMLSchema(file=open(self.get_schema_path()))\n else:\n schema = None\n context = etree.iterparse(self._handle, events=('end',), schema=schema)\n\n element_data = {}\n for action, elem in context:\n tag = self._remove_ns(elem.tag)\n if tag == 'url' and element_data:\n try:\n e = UrlSetElement(**element_data)\n yield e\n except ValueError:\n element_data = {}\n continue\n elif tag in ['loc', 'lastmod', 'changefreq', 'priority']:\n element_data[tag] = elem.text\n del context\n del schema\n\n def _remove_ns(self, str):\n return re.sub('{[^}]*}', '', str)\n\n def get_schema_path(self):\n base = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(base, 'schemas', 'sitemap.xsd')\n\n def pprint(out=sys.stdout):\n \"\"\" Pretty print an urlset as xml.
Ready to be put online.\"\"\"\n # todo: implement this if you need it\n pass\n\n def __iter__(self):\n return iter(self.get_urls())\n\n","sub_path":"sitemap/urlset.py","file_name":"urlset.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"544750580","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: D:\\Projects\\usersapi\\midaxusersutils\\migrations\\versions\\7c38e4dbbeb3_.py\n# Compiled at: 2018-10-30 15:06:39\n# Size of source mod 2**32: 1585 bytes\n\"\"\"empty message\n\nRevision ID: 7c38e4dbbeb3\nRevises: 43a3450ebe27\nCreate Date: 2018-10-30 21:01:14.592773\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa, midaxusers.migration_types\nrevision = '7c38e4dbbeb3'\ndown_revision = '43a3450ebe27'\nbranch_labels = None\ndepends_on = None\n\ndef upgrade():\n with op.batch_alter_table('USER_ATTRIBUTES') as (batch_op):\n batch_op.drop_constraint('USER_ATTRIBUTES_user_uuid_fkey',\n type_='foreignkey')\n op.create_foreign_key('USER_ATTRIBUTES_user_uuid_fkey',\n 'USER_ATTRIBUTES', 'USERS', [\n 'user_uuid'],\n ['uuid'], ondelete='CASCADE', onupdate='CASCADE')\n with op.batch_alter_table('USER_LOGINS') as (batch_op):\n batch_op.drop_constraint('USER_LOGINS_user_uuid_fkey',\n type_='foreignkey')\n op.create_foreign_key('USER_LOGINS_user_uuid_fkey',\n 'USER_LOGINS', 'USERS', [\n 'user_uuid'],\n ['uuid'], ondelete='CASCADE', onupdate='CASCADE')\n\n\ndef downgrade():\n with op.batch_alter_table('USER_ATTRIBUTES') as (batch_op):\n batch_op.drop_constraint('USER_ATTRIBUTES_user_uuid_fkey',\n type_='foreignkey')\n op.create_foreign_key('USER_ATTRIBUTES_user_uuid_fkey', 'USER_ATTRIBUTES', 'USERS', [\n 'user_uuid'], ['uuid'])\n with op.batch_alter_table('USER_LOGINS') as (batch_op):\n batch_op.drop_constraint('USER_LOGINS_user_uuid_fkey',\n type_='foreignkey')\n op.create_foreign_key('USER_LOGINS_user_uuid_fkey', 'USER_LOGINS', 'USERS', [\n 'user_uuid'], ['uuid'])","sub_path":"pycfiles/midaxusers-2.0.38-py3-none-any/7c38e4dbbeb3_.cpython-36.py","file_name":"7c38e4dbbeb3_.cpython-36.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"316005027","text":"import numpy as np\nfrom scipy.interpolate import RectBivariateSpline\n\ndef LucasKanadeBasis(It, It1, rect, bases, p0 = np.zeros(2)):\n\t# Input: \n\t#\tIt: template image\n\t#\tIt1: Current image\n\t#\trect: Current position of the car\n\t#\t(top left, bot right coordinates)\n\t#\tbases: [n, m, k] where nxm is the size of the template.\n\t# Output:\n\t#\tp: movement vector [dp_x, dp_y]\n\n # Put your implementation here\n threshold = 0.05\n x1, y1, x2, y2 = rect[0], rect[1], rect[2], rect[3]\n Iy, Ix = np.gradient(It1)\n dp = 1\n #calculate appearance bases\n num_bases = bases.shape[2]\n #calculate BB.T\n orthobases = bases.reshape(-1,num_bases)\n bases_sum = 0\n for i in range(num_bases):\n bases_sum += orthobases[:,i] @ orthobases[:,i].T\n \n \n \n while np.square(dp).sum() > threshold:\n \n \n #warp image\n px, py = p0[0], p0[1]\n x1_w, y1_w, x2_w, y2_w = x1+px, y1+py, x2+px, y2+py\n \n x = np.arange(0, It.shape[0], 1)\n y = np.arange(0, It.shape[1], 1)\n \n c = np.linspace(x1, x2, 55)\n r = np.linspace(y1, y2, 47)\n cc, rr = np.meshgrid(c, r)\n \n cw = np.linspace(x1_w, x2_w, 55)\n rw = np.linspace(y1_w, y2_w, 47)\n ccw, rrw = 
np.meshgrid(cw, rw)\n \n spline = RectBivariateSpline(x, y, It)\n T = spline.ev(rr, cc)\n \n spline1 = RectBivariateSpline(x, y, It1)\n warpImg = spline1.ev(rrw, ccw)\n \n #compute error image\n err = T - warpImg\n errImg = err.reshape(-1,1) \n errImg = (1 - bases_sum) * errImg\n #compute gradient\n spline_gx = RectBivariateSpline(x, y, Ix)\n Ix_w = spline_gx.ev(rrw, ccw)\n\n spline_gy = RectBivariateSpline(x, y, Iy)\n Iy_w = spline_gy.ev(rrw, ccw)\n #I is (n,2)\n I = np.vstack((Ix_w.ravel(),Iy_w.ravel())).T\n \n #evaluate jacobian (2,2)\n jac = np.array([[1,0],[0,1]])\n \n #compute Hessian\n delta = I @ jac \n delta = (1 - bases_sum) * delta\n #H is (2,2)\n H = delta.T @ delta\n \n \n \n #compute dp\n #dp is (2,2)@(2,n)@(n,1) = (2,1)\n dp = np.linalg.inv(H) @ (delta.T) @ errImg\n \n #update parameters\n p0[0] += dp[0,0]\n p0[1] += dp[1,0]\n \n p = p0\n return p\n \n","sub_path":"src/LucasKanadeBasis.py","file_name":"LucasKanadeBasis.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"86683207","text":"from game.actions import Action\n\nclass MoveBall(Action):\n def run(self):\n x, y = self.item.center\n dx, dy = self.item.delta\n width, height = self.game.size\n radius = self.item.radius\n th = self.game.border * 2\n\n if(dx == -1 and x - radius < th): dx = 1\n if(dx == 1 and x + radius > width - th): dx = -1\n\n if(dy == -1 and y -radius < th): dy = 1\n if(dy == 1 and y + radius > height - th): dy = -1\n\n self.item.delta = (dx, dy)\n self.item.center = (x + self.game.grid * dx, y + self.game.grid * dy)\n\nclass MoveUp(Action):\n def run(self):\n x, y = self.item.corner()\n th = self.game.border * 2 + self.game.grid\n if(y >= th): \n self.item.y -= self.game.grid\n\nclass MoveDown(Action):\n def run(self):\n width, height = self.game.size\n x, y = self.item.corner()\n th = self.game.border * 2 + self.game.grid\n if(y + self.item.size <= height - th): \n self.item.y += self.game.grid\n","sub_path":"training/c32_pygame/e11_resizable/pingpong/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"439617169","text":"from base64 import b64encode\nfrom http.cookiejar import CookieJar\nfrom datetime import datetime, timezone, timedelta\nimport unittest\nimport unittest.mock\nfrom functools import partial\nfrom webtest import TestApp\nfrom zstandard import ZstdCompressor # type: ignore\nfrom penguin_judge.api import app as _app, _kdf\nfrom penguin_judge.models import (\n User, Environment, Contest, Problem, TestCase, Submission, JudgeResult,\n Token, JudgeStatus, configure, transaction)\nfrom .
import TEST_DB_URL\n\napp = TestApp(_app, cookiejar=CookieJar())\n\n\nclass TestAPI(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n configure(**{'sqlalchemy.url': TEST_DB_URL}, drop_all=True)\n\n def setUp(self):\n from penguin_judge.main import _configure_app\n app.reset()\n _configure_app({})\n tables = (\n JudgeResult, Submission, TestCase, Problem, Contest, Environment,\n Token, User)\n admin_token = bytes([i for i in range(32)])\n salt = b'penguin'\n passwd = _kdf('penguinpenguin', salt)\n with transaction() as s:\n for t in tables:\n s.query(t).delete(synchronize_session=False)\n admin_user = User(\n login_id='admin', name='Administrator', salt=salt, admin=True,\n password=passwd)\n s.add(admin_user)\n s.flush()\n s.add(Token(\n token=admin_token, user_id=admin_user.id,\n expires=datetime.now(tz=timezone.utc) + timedelta(hours=1)))\n self.admin_id = admin_user.id\n self.admin_token = b64encode(admin_token).decode('ascii')\n self.admin_headers = {'X-Auth-Token': self.admin_token}\n\n def test_create_user(self):\n def _invalid(body, setup_token=True, status=400):\n headers = self.admin_headers if setup_token else {}\n app.post_json('/users', body, headers=headers,\n status=status if setup_token else 401)\n _invalid({})\n _invalid({'login_id': 'abc', 'name': 'penguin'})\n _invalid({'login_id': 'abc', 'password': 'penguinpenguin'})\n _invalid({'name': 'abc', 'password': 'penguinpenguin'})\n _invalid({'login_id': 'pe', 'name': 'ぺんぎん',\n 'password': 'penguinpenguin'})\n _invalid({'login_id': 'penguin', 'name': 'ぺんぎん',\n 'password': 'pen'})\n _invalid({'login_id': 'penguin', 'name': '',\n 'password': 'penguinpenguin'})\n resp = app.post_json('/users', {\n 'login_id': 'penguin', 'name': 'ぺんぎん', 'password': 'penguinpenguin'\n }, status=201, headers=self.admin_headers).json\n self.assertEqual(len(list(resp.keys())), 5)\n self.assertIsInstance(resp['id'], int)\n self.assertEqual(resp['login_id'], 'penguin')\n self.assertEqual(resp['name'], 'ぺんぎん')\n self.assertEqual(resp['admin'], False)\n self.assertIn('created', resp)\n _invalid({'login_id': 'penguin', 'name': 'same',\n 'password': 'hogehoge'}, status=409)\n\n def test_auth(self):\n def _invalid(body, status=400):\n app.post_json('/auth', body, status=status)\n\n _notfound = partial(_invalid, status=404)\n uid, pw = 'penguin', 'password'\n app.post_json(\n '/users', {'login_id': uid, 'name': 'ABC', 'password': pw},\n headers=self.admin_headers)\n _invalid({})\n _invalid({'login_id': uid})\n _invalid({'password': pw})\n _invalid({'login_id': 'a', 'password': pw})\n _invalid({'login_id': uid, 'password': 'a'})\n _notfound({'login_id': uid, 'password': 'wrong password'})\n _notfound({'login_id': 'invalid', 'password': pw})\n resp = app.post_json('/auth', {'login_id': uid, 'password': pw}).json\n self.assertIsInstance(resp['token'], str)\n self.assertIsInstance(resp['expires_in'], int)\n\n def test_get_current_user(self):\n uid, pw, name = 'penguin', 'password', 'ABC'\n u = app.post_json(\n '/users', {'login_id': uid, 'name': name, 'password': pw},\n headers=self.admin_headers).json\n token = app.post_json(\n '/auth', {'login_id': uid, 'password': pw}).json['token']\n self.assertEqual(u, app.get('/user').json)\n app.reset()\n app.authorization = ('Bearer', token)\n self.assertEqual(u, app.get('/user').json)\n app.authorization = None\n self.assertEqual(u, app.get(\n '/user', headers={'X-Auth-Token': token}).json)\n\n app.get('/user', status=401)\n app.get('/user', headers={\n 'X-Auth-Token': b64encode(b'invalid 
token').decode('ascii')\n }, status=401)\n app.get('/user', headers={'X-Auth-Token': b'Z'}, status=401)\n\n with transaction() as s:\n s.query(Token).filter(Token.user_id == u['id']).update({\n 'expires': datetime.now(tz=timezone.utc)})\n app.get('/user', headers={'X-Auth-Token': token}, status=401)\n\n def test_get_user(self):\n app.get('/users/invalid_user', status=400)\n app.get('/users/9999999', status=404)\n u = app.get('/users/{}'.format(self.admin_id)).json\n self.assertEqual(u['name'], 'Administrator')\n self.assertTrue(u['admin'])\n\n def test_list_environments(self):\n envs = app.get('/environments').json\n self.assertEqual(envs, [])\n\n env = dict(name='Python 3.7', test_image_name='docker-image',\n published=True)\n with transaction() as s:\n s.add(Environment(**env))\n envs = app.get('/environments').json\n self.assertEqual(len(envs), 1)\n self.assertIsInstance(envs[0]['id'], int)\n self.assertEqual(envs[0]['name'], env['name'])\n\n def test_create_list_modify_contest(self):\n def _post(body, status=None):\n return app.post_json('/contests', body, headers=self.admin_headers,\n status=status)\n\n def _invalid_post(body, status=400):\n _post(body, status=status)\n\n def _patch(id, body, status=None):\n return app.patch_json('/contests/{}'.format(id), body,\n headers=self.admin_headers, status=status)\n\n def _invalid_patch(id, body, status=400):\n _patch(id, body, status=status)\n\n start_time = datetime.now(tz=timezone.utc)\n end_time = start_time + timedelta(hours=1)\n c = {\n 'id': 'abc000',\n 'title': 'ABC000',\n 'description': '# ABC000\\n\\nほげほげ\\n',\n 'start_time': start_time.isoformat(),\n 'end_time': end_time.isoformat(),\n }\n _invalid_post({})\n _invalid_post(dict(id='a', title='A', description='',\n start_time=start_time.isoformat(),\n end_time=start_time.isoformat()))\n\n c2 = _post(c).json\n c['published'] = False\n c['penalty'] = 300.0\n self.assertEqual(c, c2)\n\n _invalid_patch(c['id'], dict(end_time=start_time.isoformat()))\n\n patch = {\n 'title': 'Hoge',\n 'end_time': (end_time + timedelta(hours=1)).isoformat(),\n 'published': True,\n }\n _invalid_patch('invalid', patch, status=404)\n c3 = dict(c)\n c3.update(patch)\n c4 = _patch(c['id'], patch).json\n self.assertEqual(c3, c4)\n\n self.assertEqual(app.get('/contests/{}'.format(c['id'])).json, c4)\n app.get('/contests/invalid', status=404)\n\n c4.pop('description')\n c4.pop('penalty')\n contests = app.get('/contests').json\n self.assertEqual(len(contests), 1)\n self.assertEqual(contests[0], c4)\n\n def test_problem(self):\n def _post(contest_id, body, status=None):\n return app.post_json(\n '/contests/{}/problems'.format(contest_id), body,\n headers=self.admin_headers, status=status)\n\n def _invalid_post(contest_id, body, status=400):\n _post(contest_id, body, status=status)\n\n def _patch(contest_id, id, body, status=None):\n return app.patch_json(\n '/contests/{}/problems/{}'.format(contest_id, id), body,\n headers=self.admin_headers, status=status)\n\n def _invalid_patch(contest_id, id, body, status=400):\n _patch(contest_id, id, body, status=status)\n\n start_time = datetime.now(tz=timezone.utc)\n contest_id = app.post_json('/contests', {\n 'id': 'abc000',\n 'title': 'ABC000',\n 'description': '# ABC000\\n\\nほげほげ\\n',\n 'start_time': start_time.isoformat(),\n 'end_time': (start_time + timedelta(hours=1)).isoformat(),\n 'published': True\n }, headers=self.admin_headers).json['id']\n\n p0 = dict(\n id='A', title='A Problem', description='# A\\n', time_limit=2,\n score=100)\n _invalid_post('invalid', p0, 
status=404)\n _invalid_post(contest_id, {})\n _post(contest_id, p0)\n _invalid_post(contest_id, p0, status=409)\n\n p1 = dict(\n id='B', title='B Problem', description='# B\\n', time_limit=1,\n memory_limit=1, score=200)\n _post(contest_id, p1)\n\n ret = app.get('/contests/{}/problems'.format(contest_id)).json\n if ret[0]['id'] != 'A':\n ret = [ret[1], ret[0]]\n p0['memory_limit'] = 256\n p0['contest_id'] = p1['contest_id'] = contest_id\n self.assertEqual([p0, p1], ret)\n\n _invalid_patch(contest_id, 'invalid-id', {}, status=404)\n ret = _patch(contest_id, p0['id'], {'title': 'AAAA'}).json\n p0['title'] = 'AAAA'\n self.assertEqual(ret, p0)\n\n app.delete('/contests/{}/problems/{}'.format(contest_id, p1['id']),\n headers=self.admin_headers)\n self.assertEqual([p0], app.get(\n '/contests/{}/problems'.format(contest_id)).json)\n\n app.get('/contests/invalid/problems/invalid', status=404)\n app.get('/contests/{}/problems/invalid'.format(contest_id), status=404)\n self.assertEqual(p0, app.get(\n '/contests/{}/problems/{}'.format(contest_id, p0['id'])).json)\n\n ret = app.get('/contests/{}'.format(contest_id)).json\n self.assertEqual([p0], ret['problems'])\n\n with transaction() as s:\n s.query(User).update({'admin': False})\n s.query(Contest).update({'start_time': (\n datetime.now(tz=timezone.utc) + timedelta(hours=1))})\n self.assertNotIn(\n 'problems', app.get('/contests/{}'.format(contest_id)).json)\n app.get('/contests/{}/problems'.format(contest_id), status=403)\n app.get('/contests/{}/problems/A'.format(contest_id), status=404)\n\n with transaction() as s:\n s.query(Contest).update({'published': False})\n app.get('/contests/{}'.format(contest_id), status=404)\n app.get('/contests/{}/problems'.format(contest_id), status=404)\n app.get('/contests/{}/problems/A'.format(contest_id), status=404)\n\n @unittest.mock.patch('pika.BlockingConnection')\n @unittest.mock.patch('penguin_judge.api.get_mq_conn_params')\n def test_submission(self, mock_conn, mock_get_params):\n # TODO(kazuki): API経由に書き換える\n env = dict(name='Python 3.7', test_image_name='docker-image')\n with transaction() as s:\n env = Environment(**env)\n s.add(env)\n s.flush()\n env = env.to_dict()\n\n start_time = datetime.now(tz=timezone.utc)\n contest_id = app.post_json('/contests', {\n 'id': 'abc000',\n 'title': 'ABC000',\n 'description': '# ABC000\\n\\nほげほげ\\n',\n 'start_time': start_time.isoformat(),\n 'end_time': (start_time + timedelta(hours=1)).isoformat(),\n 'published': True,\n }, headers=self.admin_headers).json['id']\n prefix = '/contests/{}'.format(contest_id)\n app.post_json(\n '{}/problems'.format(prefix), dict(\n id='A', title='A Problem', description='# A', time_limit=2,\n score=100\n ), headers=self.admin_headers)\n\n # TODO(kazuki): API経由に書き換える\n ctx = ZstdCompressor()\n with transaction() as s:\n s.add(TestCase(\n contest_id=contest_id,\n problem_id='A',\n id='1',\n input=ctx.compress(b'1'),\n output=ctx.compress(b'2')))\n\n app.get('{}/submissions'.format(prefix), status=403)\n self.assertEqual([], app.get(\n '{}/submissions'.format(prefix), headers=self.admin_headers).json)\n app.get('/contests/invalid/submissions', status=404)\n\n code = 'print(\"Hello World\")'\n resp = app.post_json('{}/submissions'.format(prefix), {\n 'problem_id': 'A',\n 'environment_id': env['id'],\n 'code': code,\n }, headers=self.admin_headers).json\n self.assertEqual([resp], app.get(\n '{}/submissions'.format(prefix), headers=self.admin_headers).json)\n app.get('{}/submissions/{}'.format(prefix, resp['id']), status=404)\n resp2 = 
app.get('{}/submissions/{}'.format(prefix, resp['id']),\n headers=self.admin_headers).json\n self.assertEqual(resp2.pop('code'), code)\n resp['tests'] = []\n self.assertEqual(resp, resp2)\n\n app.post_json('{}/submissions'.format(prefix), {\n 'problem_id': 'invalid',\n 'environment_id': env['id'],\n 'code': code,\n }, headers=self.admin_headers, status=400)\n app.post_json('{}/submissions'.format(prefix), {\n 'problem_id': 'A',\n 'environment_id': 99999,\n 'code': code,\n }, headers=self.admin_headers, status=400)\n app.get('{}/submissions/99999'.format(prefix), status=404)\n\n contest_id2 = app.post_json('/contests', {\n 'id': 'abc001',\n 'title': 'ABC001',\n 'description': '# ABC001',\n 'start_time': start_time.isoformat(),\n 'end_time': (start_time + timedelta(hours=1)).isoformat(),\n }, headers=self.admin_headers).json['id']\n app.get(\n '/contests/{}/submissions/{}'.format(contest_id2, resp['id']),\n status=404)\n\n with transaction() as s:\n s.query(Contest).update({'end_time': start_time})\n app.get('{}/submissions'.format(prefix))\n\n def test_contests_pagination(self):\n test_data = []\n base_time = datetime.now(tz=timezone.utc)\n for i in range(100):\n start_time = base_time + timedelta(minutes=i * 10)\n end_time = start_time + timedelta(hours=1)\n c = {\n 'id': 'id-{}'.format(i),\n 'title': 'Test Contest {}'.format(i),\n 'description': '**Pagination** Test {}'.format(i),\n 'start_time': start_time.isoformat(),\n 'end_time': end_time.isoformat(),\n 'published': True,\n }\n test_data.append(app.post_json(\n '/contests', c, headers=self.admin_headers).json)\n\n test_data.reverse()\n\n resp = app.get('/contests')\n self.assertEqual(len(resp.json), 20)\n self.assertEqual(int(resp.headers['X-Page']), 1)\n self.assertEqual(int(resp.headers['X-Per-Page']), 20)\n self.assertEqual(int(resp.headers['X-Total']), 100)\n self.assertEqual(int(resp.headers['X-Total-Pages']), 5)\n\n resp = app.get('/contests?page=2&per_page=31')\n self.assertEqual(len(resp.json), 31)\n self.assertEqual(\n [x['id'] for x in resp.json],\n [x['id'] for x in test_data[31:62]])\n self.assertEqual(int(resp.headers['X-Page']), 2)\n self.assertEqual(int(resp.headers['X-Per-Page']), 31)\n self.assertEqual(int(resp.headers['X-Total']), 100)\n self.assertEqual(int(resp.headers['X-Total-Pages']), 4)\n\n def test_submissions_pagination(self):\n test_data = []\n start_time = datetime.now(tz=timezone.utc)\n end_time = start_time + timedelta(hours=1)\n app.post_json('/contests', {\n 'id': 'id0',\n 'title': 'Test Contest',\n 'description': '**Pagination** Test',\n 'start_time': start_time.isoformat(),\n 'end_time': end_time.isoformat(),\n 'published': True,\n }, headers=self.admin_headers)\n app.post_json('/contests/id0/problems', {\n 'id': 'A', 'title': 'Problem', 'description': '# A',\n 'time_limit': 2, 'score': 100\n }, headers=self.admin_headers)\n\n test_data = []\n with transaction() as s:\n env = Environment(name='Python 3.7', test_image_name='image')\n s.add(env)\n s.flush()\n for i in range(100):\n submission = Submission(\n contest_id='id0', problem_id='A', user_id=self.admin_id,\n code=b'dummy', code_bytes=1, environment_id=env.id)\n s.add(submission)\n s.flush()\n test_data.append(submission.to_dict())\n\n resp = app.get('/contests/id0/submissions', headers=self.admin_headers)\n self.assertEqual(len(resp.json), 20)\n self.assertEqual(int(resp.headers['X-Page']), 1)\n self.assertEqual(int(resp.headers['X-Per-Page']), 20)\n self.assertEqual(int(resp.headers['X-Total']), 100)\n 
self.assertEqual(int(resp.headers['X-Total-Pages']), 5)\n\n resp = app.get(\n '/contests/id0/submissions?page=2&per_page=31',\n headers=self.admin_headers)\n self.assertEqual(len(resp.json), 31)\n self.assertEqual(\n [x['id'] for x in resp.json],\n [x['id'] for x in test_data[31:62]])\n self.assertEqual(int(resp.headers['X-Page']), 2)\n self.assertEqual(int(resp.headers['X-Per-Page']), 31)\n self.assertEqual(int(resp.headers['X-Total']), 100)\n self.assertEqual(int(resp.headers['X-Total-Pages']), 4)\n\n def test_ranking(self):\n salt = b'penguin'\n passwd = _kdf('penguinpenguin', salt)\n\n app.get('/contests/abc000/rankings', status=404)\n\n with transaction() as s:\n env = Environment(\n name='Python3 (3.8.0)',\n test_image_name='penguin_judge_python:3.8')\n s.add(env)\n s.add(Contest(\n id='abc000',\n title='ABC000',\n description='# Title\\nMarkdown Test\\n\\n* Item0\\n* Item1\\n',\n published=True,\n start_time=datetime.now(tz=timezone.utc),\n end_time=datetime.now(\n tz=timezone.utc) + timedelta(days=365)))\n s.flush()\n env_id = env.id\n problem_ids = ['A', 'B', 'C', 'D', 'E']\n for i, id in enumerate(problem_ids):\n s.add(Problem(\n contest_id='abc000', id=id, title='Problem {}'.format(id),\n description='', time_limit=1, memory_limit=1024,\n score=(i + 1) * 100))\n user_mapping = {}\n for i in range(10):\n u = User(\n login_id='user{}'.format(i), name='User{}'.format(i),\n salt=salt, password=passwd)\n s.add(u)\n s.flush()\n user_mapping['user{}'.format(i)] = u.id\n\n # Users with no submissions are now also included in the ranking\n # self.assertEqual([], app.get('/contests/abc000/rankings').json)\n\n with transaction() as s:\n problem_kwargs = [dict(\n contest_id='abc000', problem_id=id, code=b'', code_bytes=0,\n environment_id=env_id) for id in problem_ids]\n start = datetime.now(tz=timezone.utc)\n d = timedelta(seconds=1)\n users = {\n 'user0': [\n (1, 0, 1), (1, 0, 2), (1, 0, 4), (1, 0, 8), (1, 0, 16)],\n 'user1': [\n (1, 1, 4), (1, 2, 8), (1, 1, 16), (1, 2, 32), (1, 1, 64)],\n 'user2': [\n (1, 1, 2), (0, 2, 0), (0, 1, 0), (0, 0, 0), (0, 0, 0)],\n 'user3': [\n (0, 1, 0), (0, 2, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0)],\n }\n for u, v in users.items():\n for i, (n_ac, n_wa, t) in enumerate(v):\n for j in range(n_wa):\n if t > 0:\n tmp = t - 1\n else:\n tmp = i + j + 1\n s.add(Submission(\n user_id=user_mapping[u],\n status=JudgeStatus.WrongAnswer,\n created=start + d * tmp, **problem_kwargs[i]))\n if n_ac > 0:\n s.add(Submission(\n user_id=user_mapping[u],\n status=JudgeStatus.Accepted,\n created=start + d * t, **problem_kwargs[i]))\n\n ret = app.get('/contests/abc000/rankings').json\n # Users with no submissions are now also included in the ranking\n # self.assertEqual(4, len(ret))\n for i in range(4):\n self.assertEqual(ret[i]['user_id'],\n user_mapping['user{}'.format(i)])\n self.assertEqual(ret[i]['user_name'], 'User{}'.format(i))\n self.assertEqual(ret[0]['ranking'], 1)\n self.assertEqual(ret[0]['score'], 1500)\n self.assertEqual(ret[0]['penalties'], 0)\n self.assertEqual(ret[0]['time'], ret[0]['adjusted_time'])\n self.assertEqual(ret[1]['ranking'], 2)\n self.assertEqual(ret[1]['score'], 1500)\n self.assertEqual(ret[1]['penalties'], 7)\n self.assertEqual(ret[1]['time'] + 7 * 300, ret[1]['adjusted_time'])\n self.assertEqual(ret[2]['ranking'], 3)\n self.assertEqual(ret[2]['score'], 100)\n self.assertEqual(ret[2]['penalties'], 1)\n self.assertEqual(ret[2]['time'] + 300, ret[2]['adjusted_time'])\n self.assertEqual(ret[3]['ranking'], 4)\n self.assertEqual(ret[3]['score'], 0)\n self.assertEqual(ret[3]['penalties'], 0)\n 
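# Scoring-rule sketch these checks assume (our reading of the test data,\n # not a documented contract): adjusted_time = time + 300 * penalties, so\n # each wrong submission made before the accepted one costs a flat 300 s.\n 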
self.assertEqual(ret[3]['time'], 0)\n self.assertEqual(ret[3]['problems'], {\n 'A': {'penalties': 1, 'pending': False},\n 'B': {'penalties': 2, 'pending': False},\n 'C': {'penalties': 1, 'pending': False},\n 'D': {'penalties': 1, 'pending': False},\n 'E': {'penalties': 1, 'pending': False},\n })\n","sub_path":"backend/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":22818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"464452127","text":"\"\"\"\r\nVTK utilities.\r\n\r\nAuthor:\r\n Phil David, Army Research Laboratory, December 2017.\r\n \r\nNotes:\r\n Display (screen) coordinates have origin (0,0) at the lower left corner of the\r\n screen.\r\n \r\n Viewport coordinates go from -1 to 1 with (-1,-1) at the lower left corner of\r\n the image.\r\n \r\n Z-buffer values range from 0 to 1.\r\n\"\"\"\r\n\r\nimport vtk\r\nimport vtk.util.colors\r\nfrom vtk.util.numpy_support import vtk_to_numpy\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n \r\n\r\ndef close_renwin(iren):\r\n \"\"\"\r\n After calling this function, please execute the following:\r\n del renwin, iren\r\n \"\"\"\r\n renwin = iren.GetRenderWindow()\r\n renwin.Finalize()\r\n iren.TerminateApp() \r\n \r\n \r\ndef getdepthmap(renwin, camera):\r\n \"\"\"\r\n Get a depth map of the currently rendered scene.\r\n \r\n Usage:\r\n depth = getdepthmap(renwin, camera)\r\n \r\n Notes:\r\n Objects that are texture-mapped with PNG images seem to be transparent in \r\n the Z-buffer, and therefore are not given a depth value.\r\n \"\"\"\r\n assert camera is not None\r\n assert renwin is not None\r\n \r\n xsize, ysize = renwin.GetSize()\r\n numpts = xsize*ysize\r\n\r\n zbuf = zbuffer2numpy(renwin) # zbuf[0,0] is at upper left corner of screen.\r\n \r\n # Get the viewport coordinates (-1 to 1) of every pixel in the window.\r\n xc1 = np.linspace(-1, 1, xsize, endpoint=True)\r\n yc1 = np.linspace(1, -1, ysize, endpoint=True)\r\n xc2, yc2 = np.meshgrid(xc1, yc1, indexing='xy')\r\n viewpts = np.zeros((ysize, xsize, 4))\r\n viewpts[:,:,0] = xc2\r\n viewpts[:,:,1] = yc2\r\n viewpts[:,:,2] = zbuf\r\n viewpts[:,:,3] = 1.0\r\n viewpts = np.reshape(viewpts.T, (4,numpts), order='f') # New shape: 4 x Numpts\r\n \r\n # The transformation matrix 'hmat1' will convert homogeneous world coordinates\r\n # to viewport coordinates. 'Aspect' is the width/height for the viewport,\r\n # and the 'nearz' and 'farz' are the Z-buffer values that map to the near\r\n # and far clipping planes. 
The viewport coordinates of a point located\r\n # inside the frustum are in the range ([-1,+1],[-1,+1],[nearz,farz]).\r\n hmat1 = camera.GetCompositeProjectionTransformMatrix(xsize/ysize, 0, 1)\r\n \r\n # The inverse transformation converts viewport coordinates to world coordinates.\r\n hmat1.Invert()\r\n hmat2 = np.zeros((4,4))\r\n for r in range(4):\r\n for c in range(4):\r\n hmat2[r,c] = hmat1.GetElement(r,c) \r\n hwp = np.matmul(hmat2, viewpts) # Homogeneous world points (4xNumpts).\r\n \r\n # Convert world coordinates to camera coordinates.\r\n z = hwp[2,:]/hwp[3,:]\r\n y = hwp[1,:]/hwp[3,:]\r\n wp = hwp[0:3,:]/hwp[3,:] # Nonhomogeneous world points (3xNumpts)\r\n \r\n # Get distance of each point from camera center.\r\n cpos = np.array(camera.GetPosition())\r\n cp = wp - cpos[:,None] # Points in camera coordinates (with rotation)\r\n depth = np.linalg.norm(cp, axis=0) # Distance from camera center.\r\n depth = depth.reshape((ysize, xsize)) # Form back into original image shape.\r\n \r\n return depth\r\n\r\n\r\ndef matprint(m, nrows, ncols):\r\n \"\"\"\r\n Print a VTK matrix.\r\n \"\"\"\r\n for r in range(nrows):\r\n for c in range(ncols):\r\n print('{:7.3f} '.format(m.GetElement(r, c)), end='')\r\n if r < nrows-1:\r\n print('')\r\n \r\n\r\ndef filter2numpy(filter):\r\n \"\"\"\r\n Convert output of a VTK filter to a Numpy array. \r\n \r\n Usage:\r\n im = filter2numpy(filter)\r\n \r\n Arguments:\r\n filter: The VTK filter to convert.\r\n \r\n Returns:\r\n The Numpy array containing the filter output.\r\n \"\"\"\r\n \r\n filter.Update() \r\n im1 = filter.GetOutput()\r\n cols, rows, depth = im1.GetDimensions()\r\n assert depth == 1, 'depth of image != 1: {}'.format(depth)\r\n data = im1.GetPointData().GetScalars()\r\n im2 = vtk_to_numpy(data)\r\n if depth == 1:\r\n im2 = im2.reshape(rows, cols)\r\n else:\r\n im2 = im2.reshape(rows, cols, depth) \r\n return im2\r\n\r\n\r\ndef renwin2numpy(renwin):\r\n \"\"\"\r\n Convert the current VTK rendered scene to a Numpy array. \r\n \r\n Usage:\r\n im = renwin2numpy(renwin)\r\n \r\n Arguments:\r\n renwin: The VTK render window to get the image of.\r\n \r\n Returns:\r\n im: The Numpy array containing the rendered image.\r\n \"\"\"\r\n \r\n w2if = vtk.vtkWindowToImageFilter()\r\n w2if.SetInput(renwin)\r\n \r\n w2if.Update()\r\n im = w2if.GetOutput()\r\n cols, rows, depth = im.GetDimensions()\r\n \r\n if cols == 0 or rows == 0 or depth == 0:\r\n raise Exception('Unable to get VTK rendered image. Make sure window is active.')\r\n \r\n assert depth == 1, 'depth of image != 1: {}'.format(depth)\r\n \r\n data = im.GetPointData().GetScalars()\r\n im2 = vtk_to_numpy(data)\r\n # im2 = im2.reshape(rows, cols, -1) \r\n datatype = data.GetDataType()\r\n if datatype == 3:\r\n depth = 3\r\n if depth == 1:\r\n im2 = im2.reshape(rows, cols)\r\n else:\r\n im2 = im2.reshape(rows, cols, depth) \r\n im2 = np.flipud(im2) # Index (0,0) maps to upper left corner of window.\r\n return im2\r\n\r\n\r\ndef zbuffer2numpy(renwin):\r\n \"\"\"\r\n Convert the z-buffer of the render window to a Numpy array.\r\n \r\n Usage:\r\n im = zbuffer2numpy(renwin)\r\n \r\n Arguments:\r\n renwin: the render window to convert the z-buffer of.\r\n \r\n Returns: \r\n im: the z-buffer as a Numpy array. Index (0,0) (e.g., im[0,0])\r\n corresponds to the upper left corner of the window.\r\n \r\n Description:\r\n A z-buffer value of 1.0 (maybe any value > 0.999999?) 
indicates that the\r\n screen point has not been rendered into.\r\n \"\"\"\r\n\r\n filter = vtk.vtkWindowToImageFilter()\r\n filter.SetInput(renwin)\r\n filter.SetScale(1) # SetMagnification(1) # Resolution of output relative to input resolution.\r\n filter.SetInputBufferTypeToZBuffer()\r\n \r\n # scale = vtk.vtkImageShiftScale()\r\n # scale.SetInput(filter.GetOutput())\r\n # scale.SetOutputScalarTypeToDouble()\r\n # scale.SetShift(0)\r\n # scale.SetScale(-255)\r\n \r\n #im = filter2numpy(scale)\r\n im = filter2numpy(filter)\r\n im = np.flipud(im) # Make index (0,0) access z value for upper left corner of window.\r\n return im\r\n \r\n \r\ndef set_keypress_callback(renderer, interactor, callbackfun):\r\n \"\"\"\r\n This function assigns a callback function to process renderer keyboard events.\r\n \r\n Usage:\r\n set_keypress_callback(renderer, interactor, callbackfun)\r\n \r\n Example:\r\n def my_keypress_callback(renderer, interactor, key):\r\n key = key.lower() \r\n print(\"Pressed key {}\".format(key))\r\n ...\r\n renderer.ResetCameraClippingRange()\r\n renderer.GetRenderWindow().Render()\r\n return\r\n set_keypress_callback(renderer, interactor, my_keypress_callback)\r\n interactor.Initialize()\r\n renwindow.Render() \r\n interactor.Start()\r\n \"\"\"\r\n \r\n class MyInteractorStyle(vtk.vtkInteractorStyleTrackballCamera):\r\n \r\n def keyPressEvent(self, obj, event):\r\n key = self.interactor.GetKeySym()\r\n self.callback(self.renderer, self.interactor, key)\r\n return \r\n \r\n def __init__(self, renderer, interactor, callbackfun):\r\n self.renderer = renderer\r\n self.interactor = interactor\r\n self.callback = callbackfun\r\n interactor.AddObserver(\"KeyPressEvent\", self.keyPressEvent)\r\n\r\n interactor.SetInteractorStyle(MyInteractorStyle(renderer, interactor, callbackfun))\r\n interactor.GetInteractorStyle().EnabledOn()\r\n \r\n \r\ndef make_ellipsoid(renderer, param, color=None, texture=None, lightcoef=(1,1,1), \r\n res=30):\r\n \"\"\"\r\n Make an upright ellipsoid.\r\n\r\n Arguments:\r\n renderer: The renderer that the obect's actor is to be added to.\r\n param: Parameters of the ellipsoid: [xctr, yctr, zctr, xyhalfwidth, zhalfwidth] \r\n texture: (vtkTexture, r_scale, s_scale)\r\n color: the color (R,G,B) of the object. Used if texture is not used.\r\n lightcoef: the object's reflectance coefficients: (ambient, diffuse, specular).\r\n res: resolution of facets on ellipsoid.\r\n \r\n Notes:\r\n The VTK texture object may be created from an image file as follows:\r\n reader = vtk.vtkPNGReader()\r\n reader.SetFileName(pngfilename) \r\n texture = vtk.vtkTexture()\r\n texture.SetInputConnection(reader.GetOutputPort()) \r\n \"\"\" \r\n \r\n if color is None and texture is None:\r\n raise Exception('Must provide either color or texture for ellipsoid')\r\n \r\n ellipsoid = vtk.vtkParametricEllipsoid()\r\n ellipsoid.SetXRadius(param[3])\r\n ellipsoid.SetYRadius(param[3])\r\n ellipsoid.SetZRadius(param[4])\r\n source = vtk.vtkParametricFunctionSource()\r\n source.SetParametricFunction(ellipsoid)\r\n source.SetUResolution(res)\r\n source.SetVResolution(res) \r\n mapper = vtk.vtkPolyDataMapper()\r\n actor = vtk.vtkActor()\r\n\r\n if texture is None:\r\n # Ellipsoid is single color. 
\r\n mapper.SetInputConnection(source.GetOutputPort())\r\n mapper.SetScalarRange(-0.5, 0.5)\r\n actor.SetMapper(mapper)\r\n actor.GetProperty().EdgeVisibilityOff()\r\n # actor.GetProperty().SetEdgeColor(.2, .2, .5)\r\n actor.GetProperty().SetColor(vtk.util.colors.green)\r\n actor.GetProperty().SetAmbientColor(color)\r\n actor.GetProperty().SetDiffuseColor(color)\r\n actor.GetProperty().SetSpecularColor(color)\r\n else:\r\n # Texture map ellipsoid. \r\n source.GenerateTextureCoordinatesOn()\r\n xform = vtk.vtkTransformTextureCoords()\r\n xform.SetInputConnection(source.GetOutputPort())\r\n xform.SetScale(texture[1], texture[2], 1) \r\n xform.SetFlipT(1)\r\n xform.SetFlipS(0)\r\n mapper.SetInputConnection(xform.GetOutputPort())\r\n mapper.SetScalarRange(-.5, 0.5)\r\n actor.SetMapper(mapper)\r\n actor.SetTexture(texture[0])\r\n \r\n actor.SetPosition(param[0:3])\r\n actor.GetProperty().SetAmbient(lightcoef[0]) # Ambient (nondirectional) lighting coefficient\r\n actor.GetProperty().SetDiffuse(lightcoef[1]) # Diffuse (direct) lighting coefficient\r\n actor.GetProperty().SetSpecular(lightcoef[2]) # Specular (highlight) lighting coefficient \r\n \r\n renderer.AddActor(actor)\r\n return actor\r\n \r\n\r\ndef make_cuboid(renderer, param, color=None, texture=None, lightcoef=(1,1,1), alr_variation=0.3):\r\n \"\"\"\r\n Make an upright cuboid.\r\n\r\n Arguments:\r\n renderer: The renderer that the obect's actor is to be added to.\r\n param: Parameters of the cuboid: [xctr, yctr, zctr, xhalfwidth, yhalfwidth, zhalfwidth]\r\n texture: (vtkTexture, r_scale, s_scale)\r\n color: the color (R,G,B) of the object. Used if texture is not used.\r\n lightcoef: the object's reflectance coefficients: (ambient, diffuse, specular).\r\n \r\n Notes:\r\n The VTK texture object may be created from an image file as follows:\r\n reader = vtk.vtkPNGReader()\r\n reader.SetFileName(pngfilename) \r\n texture = vtk.vtkTexture()\r\n texture.SetInputConnection(reader.GetOutputPort())\r\n \"\"\"\r\n \r\n if color is None and texture is None:\r\n raise Exception('Must provide either color or texture for cuboid')\r\n \r\n cube = vtk.vtkCubeSource() \r\n cube.SetXLength(2*param[3]) \r\n cube.SetYLength(2*param[5]) \r\n cube.SetZLength(2*param[4]) \r\n cube.SetCenter(0, 0, 0) \r\n mapper = vtk.vtkPolyDataMapper()\r\n actor = vtk.vtkActor() \r\n \r\n if texture is None:\r\n # Cuboid is single color.\r\n mapper.SetInputConnection(cube.GetOutputPort())\r\n actor.SetMapper(mapper)\r\n actor.GetProperty().EdgeVisibilityOff()\r\n # actor.GetProperty().SetEdgeColor(.2, .2, .5)\r\n actor.GetProperty().SetColor(color)\r\n actor.GetProperty().SetAmbientColor(color)\r\n actor.GetProperty().SetDiffuseColor(color)\r\n actor.GetProperty().SetSpecularColor(color) \r\n else:\r\n # Texture map cuboid.\r\n if True:\r\n xform = vtk.vtkTransformTextureCoords()\r\n xform.SetInputConnection(cube.GetOutputPort())\r\n xform.SetScale(texture[1], texture[2], 1) \r\n # xform.SetOrigin(0,0,0) # point about which texture map is flipped (e.g., rotated)\r\n xform.SetFlipT(1)\r\n xform.SetFlipS(0)\r\n mapper = vtk.vtkDataSetMapper()\r\n mapper.SetInputConnection(xform.GetOutputPort())\r\n else:\r\n mapper.SetInputConnection(cube.GetOutputPort())\r\n actor.SetMapper(mapper)\r\n actor.SetTexture(texture[0]) \r\n \r\n actor.GetProperty().SetAmbient(lightcoef[0]) # Ambient (nondirectional) lighting coefficient\r\n actor.GetProperty().SetDiffuse(lightcoef[1]) # Diffuse (direct) lighting coefficient\r\n actor.GetProperty().SetSpecular(lightcoef[2]) # Specular (highlight) 
lighting coefficient \r\n \r\n # alr = 0.6 + alr_variation*(np.random.rand(1)[0] - 0.5)\r\n # actor.GetProperty().SetAmbient(alr)\r\n actor.SetPosition(param[0:3]) # Center of cubeoid.\r\n actor.RotateX(-90.0)\r\n actor.RotateY(0.0)\r\n renderer.AddActor(actor) \r\n return actor\r\n\r\n\r\ndef make_sphere(renderer, param, color=None, texture=None, lightcoef=(1,1,1), \r\n res=50, alr_variation=0.3):\r\n \"\"\"\r\n Make a sphere.\r\n \r\n Arguments:\r\n renderer: The renderer that the obect's actor is to be added to.\r\n param: Parameters of the sphere: [xctr, yctr, zctr, radius]\r\n texture: (vtkTexture, r_scale, s_scale)\r\n color: the color (R,G,B) of the object. Used if texture is not used.\r\n lightcoef: the object's reflectance coefficients: (ambient, diffuse, specular).\r\n Default is (1, 1, 1).\r\n \r\n Notes:\r\n The VTK texture object may be created from an image file as follows:\r\n reader = vtk.vtkPNGReader()\r\n reader.SetFileName(pngfilename) \r\n texture = vtk.vtkTexture()\r\n texture.SetInputConnection(reader.GetOutputPort())\r\n \"\"\"\r\n if color is None and texture is None:\r\n raise Exception('Must provide either color or texture for sphere')\r\n \r\n # Generate sphere polydata\r\n sphere = vtk.vtkSphereSource()\r\n sphere.SetThetaResolution(res)\r\n sphere.SetPhiResolution(res)\r\n sphere.SetCenter(param[0], param[1], param[2])\r\n sphere.SetRadius(param[3])\r\n actor = vtk.vtkActor()\r\n \r\n if texture is None:\r\n # Sphere is single color.\r\n mapper = vtk.vtkPolyDataMapper()\r\n mapper.SetInputConnection(sphere.GetOutputPort()) \r\n actor.SetMapper(mapper) \r\n actor.GetProperty().SetColor(color) # Set one color for ambient, diffuse, and specular\r\n actor.GetProperty().SetAmbientColor(color)\r\n actor.GetProperty().SetDiffuseColor(color)\r\n actor.GetProperty().SetSpecularColor(color)\r\n else:\r\n # Texture map sphere.\r\n maptosphere = vtk.vtkTextureMapToSphere()\r\n maptosphere.SetInputConnection(sphere.GetOutputPort())\r\n maptosphere.PreventSeamOn() \r\n xform = vtk.vtkTransformTextureCoords()\r\n xform.SetInputConnection(maptosphere.GetOutputPort())\r\n xform.SetScale(texture[1], texture[2], 1) \r\n xform.SetFlipT(1)\r\n xform.SetFlipS(0)\r\n mapper = vtk.vtkDataSetMapper()\r\n mapper.SetInputConnection(xform.GetOutputPort()) \r\n actor.SetMapper(mapper)\r\n actor.SetTexture(texture[0])\r\n \r\n actor.GetProperty().SetAmbient(lightcoef[0]) # Ambient (nondirectional) lighting coefficient\r\n actor.GetProperty().SetDiffuse(lightcoef[1]) # Diffuse (direct) lighting coefficient\r\n actor.GetProperty().SetSpecular(lightcoef[2]) # Specular (highlight) lighting coefficient \r\n \r\n renderer.AddActor(actor)\r\n return actor\r\n \r\n \r\n\r\ndef make_cylinder(renderer, param, color=None, texture=None, lightcoef=(1,1,1), \r\n res=20, camera=None):\r\n \"\"\"\r\n Make an upright cylinder.\r\n\r\n Arguments:\r\n renderer: The renderer that the obect's actor is to be added to.\r\n param: Parameters of the cylinder: [xctr, yctr, zctr, radius, halfheight] \r\n texture: (vtkTexture, r_scale, s_scale)\r\n color: the color (R,G,B) of the object. Used if texture is not used.\r\n lightcoef: the object's reflectance coefficients: (ambient, diffuse, specular).\r\n camera: the current camera. 
If provided, the cylinder will rotate to follow \r\n the camera.\r\n \r\n Notes:\r\n The VTK texture object may be created from an image file as follows:\r\n reader = vtk.vtkPNGReader()\r\n reader.SetFileName(pngfilename) \r\n texture = vtk.vtkTexture()\r\n texture.SetInputConnection(reader.GetOutputPort())\r\n \"\"\"\r\n if color is None and texture is None:\r\n raise Exception('Must provide either color or texture for cylinder') \r\n \r\n cylinder = vtk.vtkCylinderSource()\r\n cylinder.SetResolution(res)\r\n cylinder.SetHeight(2*param[4])\r\n cylinder.SetRadius(param[3])\r\n \r\n if texture is None:\r\n # Cylinder is a single color.\r\n mapper = vtk.vtkPolyDataMapper()\r\n mapper.SetInputConnection(cylinder.GetOutputPort())\r\n actor = vtk.vtkActor()\r\n actor.SetMapper(mapper) \r\n # actor.GetProperty().SetColor(color) # Set one color for ambient, diffuse, and specular\r\n actor.GetProperty().SetAmbientColor(color)\r\n actor.GetProperty().SetDiffuseColor(color)\r\n actor.GetProperty().SetSpecularColor(color)\r\n else:\r\n # Texture map cylinder.\r\n maptocylinder = vtk.vtkTextureMapToCylinder()\r\n maptocylinder.SetInputConnection(cylinder.GetOutputPort())\r\n maptocylinder.PreventSeamOn()\r\n # mapper = vtk.vtkPolyDataMapper()\r\n # mapper.SetInputConnection(maptocylinder.GetOutputPort())\r\n xform = vtk.vtkTransformTextureCoords()\r\n xform.SetInputConnection(maptocylinder.GetOutputPort())\r\n xform.SetScale(texture[1], texture[2], 1) \r\n xform.SetFlipT(1)\r\n xform.SetFlipS(0)\r\n mapper = vtk.vtkDataSetMapper()\r\n mapper.SetInputConnection(xform.GetOutputPort()) \r\n if camera is None:\r\n actor = vtk.vtkActor()\r\n actor.SetMapper(mapper)\r\n actor.SetTexture(texture[0]) \r\n else:\r\n # Make the cylinder follow the camera.\r\n actor = vtk.vtkFollower()\r\n actor.SetMapper(mapper)\r\n actor.SetCamera(camera) \r\n actor.SetTexture(texture[0]) \r\n \r\n actor.SetPosition(param[0:3]) \r\n actor.RotateX(-90.0)\r\n actor.RotateY(360*np.random.rand()) # cylinder has random \"front side\"\r\n \r\n actor.GetProperty().SetAmbient(lightcoef[0]) # Ambient (nondirectional) lighting coefficient\r\n actor.GetProperty().SetDiffuse(lightcoef[1]) # Diffuse (direct) lighting coefficient\r\n actor.GetProperty().SetSpecular(lightcoef[2]) # Specular (highlight) lighting coefficient \r\n \r\n renderer.AddActor(actor)\r\n return actor\r\n \r\n \r\ndef make_vrect(renderer, param, color=None, texture=None, lightcoef=(1,1,1)):\r\n \"\"\"\r\n Make a vertical rectangle, one whose edges are parallel to the XY plane and\r\n Z axis.\r\n \r\n Arguments:\r\n renderer: The renderer that the object's actor is to be added to.\r\n \r\n param: Parameters of the rectangle: [xstart, ystart, zstart, xend, yend,\r\n zend]\r\n \r\n texture: (texture, hscale, vscale, topcolor, repetition, aspectwdh)\r\n \r\n color: the color (R,G,B) of the object. Used if texture is not used.\r\n \r\n lightcoef: the object's reflectance coefficients: (ambient, diffuse,\r\n specular).\r\n \r\n \"\"\"\r\n if color is None and texture is None:\r\n raise Exception('Must provide either color or texture for vert. rect')\r\n \r\n # Define the rectangle vertices [x, y, z]. 
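\r\n # Corner-ordering sketch (our reading of 'pts' below): the bottom edge runs\r\n # from (xs,ys,zs) to (xe,ye,zs), then the top edge returns from (xe,ye,ze) to\r\n # (xs,ys,ze), so the four corners trace the rectangle's perimeter exactly once.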
\r\n xs = param[0]; ys = param[1]; zs = param[2];\r\n xe = param[3]; ye = param[4]; ze = param[5]\r\n pts = [[xs, ys, zs], [xe, ye, zs], [xe, ye, ze], [xs, ys, ze]]\r\n poly_height = abs(zs - ze)\r\n poly_width = np.sqrt((xs - xe)**2 + (ys - ye)**2)\r\n points = vtk.vtkPoints()\r\n for pt in pts:\r\n points.InsertNextPoint(*pt)\r\n\r\n # Create the polygon\r\n numpts = len(pts)\r\n polygon = vtk.vtkPolygon()\r\n polygon.GetPointIds().SetNumberOfIds(numpts)\r\n for k in range(numpts):\r\n polygon.GetPointIds().SetId(k,k)\r\n\r\n # Add the polygon to a list of polygons\r\n polygons = vtk.vtkCellArray()\r\n polygons.InsertNextCell(polygon)\r\n\r\n # Create a PolyData\r\n polydata = vtk.vtkPolyData()\r\n polydata.SetPoints(points)\r\n polydata.SetPolys(polygons)\r\n\r\n # Create a mapper and actor\r\n mapper = vtk.vtkPolyDataMapper()\r\n mapper.SetInputData(polydata)\r\n actor = vtk.vtkActor()\r\n actor.SetMapper(mapper)\r\n\r\n if texture: # Texture map the polygon.\r\n # Assign texture image to polygon.\r\n actor.SetTexture(texture[0])\r\n\r\n # Assign texture coordinates.\r\n textureCoordinates = vtk.vtkFloatArray()\r\n textureCoordinates.SetNumberOfComponents(2);\r\n textureCoordinates.SetName(\"TextureCoordinates\")\r\n xrep = poly_width/texture[1]\r\n yrep = poly_height/texture[2]\r\n textureCoordinates.InsertNextTuple([0, 0])\r\n textureCoordinates.InsertNextTuple([xrep, 0])\r\n textureCoordinates.InsertNextTuple([xrep, yrep])\r\n textureCoordinates.InsertNextTuple([0, yrep])\r\n polydata.GetPointData().SetTCoords(textureCoordinates)\r\n else: # Polygon is single color.\r\n actor.GetProperty().EdgeVisibilityOff()\r\n # actor.GetProperty().SetEdgeColor(.2, .2, .5)\r\n actor.GetProperty().SetColor(color)\r\n actor.GetProperty().SetAmbientColor(color)\r\n actor.GetProperty().SetDiffuseColor(color)\r\n actor.GetProperty().SetSpecularColor(color) \r\n\r\n renderer.AddActor(actor)\r\n return actor\r\n\r\n\r\ndef make_text(renderer, text=None, camera=None, pos=(0,0,0), textscale=0.2):\r\n \"\"\"\r\n Make text that follows the camera.\r\n \r\n Arguments:\r\n renderer: The renderer that the obect's actor is to be added to.\r\n text: The text (a string) to insert.\r\n pos: The position (X,Y,Z) to place the text.\r\n camera: The camera to follow. Uses the active camera if none is given.\r\n textscale: The scale factor of the text.\r\n \"\"\" \r\n \r\n if camera is None:\r\n camera = renderer.GetActiveCamera()\r\n vtext = vtk.vtkVectorText()\r\n vtext.SetText(text)\r\n mapper = vtk.vtkPolyDataMapper()\r\n mapper.SetInputConnection(vtext.GetOutputPort())\r\n actor = vtk.vtkFollower()\r\n actor.SetMapper(mapper)\r\n actor.SetScale(textscale, textscale, textscale)\r\n actor.AddPosition(pos)\r\n # textprop = actor.GetProperty()\r\n renderer.AddActor(actor)\r\n actor.SetCamera(camera) \r\n return actor\r\n\r\n\r\ndef make_axes(renderer, camera=None, pos=(0,0,0), axscale=5, linewidth=5, labelscale=0.5):\r\n \"\"\"\r\n Make a labeled axes.\r\n \r\n Arguments:\r\n renderer: The renderer that the obect's actor is to be added to.\r\n pos: The position, (X,Y,Z), to place the axes.\r\n camera: The camera that axes labels will follow. 
Uses the active camera if \r\n none is given.\r\n axscale: The size of the axes.\r\n linewidth: The thickness of the axes lines.\r\n labelscale: The scale factor of the axes labels.\r\n \"\"\"\r\n if camera is None:\r\n camera = renderer.GetActiveCamera() \r\n axes = vtk.vtkAxes()\r\n axes.SetSymmetric(False)\r\n axesMapper = vtk.vtkPolyDataMapper()\r\n axesMapper.SetInputConnection(axes.GetOutputPort())\r\n actor = vtk.vtkActor()\r\n actor.SetMapper(axesMapper) \r\n actor.SetScale(axscale, axscale, axscale)\r\n actor.SetPosition(pos)\r\n axprop = actor.GetProperty()\r\n axprop.SetLineWidth(linewidth)\r\n renderer.AddActor(actor)\r\n\r\n # Label the coordinate axes.\r\n axpos = np.array([axscale+2*labelscale, 0, 0]) # labelscale])\r\n pos = np.array(pos)\r\n for k in range(0,3):\r\n vtext = vtk.vtkVectorText()\r\n vtext.SetText(\"XYZ\"[k])\r\n textMapper = vtk.vtkPolyDataMapper()\r\n textMapper.SetInputConnection(vtext.GetOutputPort())\r\n textActor = vtk.vtkFollower()\r\n textActor.SetMapper(textMapper)\r\n textActor.SetScale(labelscale, labelscale, labelscale)\r\n textActor.AddPosition(pos + np.roll(axpos, k))\r\n textprop = textActor.GetProperty()\r\n renderer.AddActor(textActor)\r\n textActor.SetCamera(camera) \r\n \r\n return actor\r\n \r\n \r\n \r\ndef label_points(renderer, pts3d, labels, camera=None, markers=True, markersize=0.1,\r\n markercolor=(0,0,0), labelscale=0.5):\r\n \"\"\"\r\n Label some points in the work model.\r\n\r\n Usage:\r\n label_points(renderer, pts3d, labels, camera=None, markers=True, \r\n markersize=0.1, markercolor=(0,0,0), labelscale=0.5)\r\n \r\n Arguments:\r\n renderer: the VTK renderer.\r\n pts3d: a list of 3D points.\r\n labels: a list of text labels, one for each point in 'pts3d'.\r\n camera: the camera that the labels should follow. Default: current active\r\n camera.\r\n labelscale: scale factor for labels. Default: 0.5.\r\n markers: True or False. Draw a marker (sphere) at each point? Default: True.\r\n markersize: size of the marker. Default: 0.1.\r\n markercolor: color of all markers (an 3-tuple). 
Default: black.\r\n\r\n Notes:\r\n The apparent colors of markers are affected by the lighting, so they\r\n don't always appear as the requested color.\r\n \"\"\"\r\n\r\n if len(pts3d) != len(labels):\r\n raise Exception('Error: arguments \"pts3d\" and \"labels\" must be the same length')\r\n\r\n if camera is None:\r\n camera = renderer.GetActiveCamera()\r\n \r\n for k in range(0, len(pts3d)):\r\n atext = vtk.vtkVectorText()\r\n atext.SetText(labels[k])\r\n textMapper = vtk.vtkPolyDataMapper()\r\n textMapper.SetInputConnection(atext.GetOutputPort())\r\n textActor = vtk.vtkFollower()\r\n textActor.SetMapper(textMapper)\r\n textActor.SetScale(labelscale, labelscale, labelscale)\r\n pos = np.array(pts3d[k])\r\n textActor.AddPosition(pos)\r\n # textprop = textActor.GetProperty()\r\n renderer.AddActor(textActor)\r\n textActor.SetCamera(camera)\r\n\r\n if markers:\r\n sphere = vtk.vtkSphereSource()\r\n sphere.SetCenter(pos)\r\n sphere.SetRadius(markersize/2)\r\n mapper = vtk.vtkPolyDataMapper()\r\n mapper.SetInputConnection(sphere.GetOutputPort())\r\n actor = vtk.vtkActor()\r\n actor.SetMapper(mapper)\r\n actor.GetProperty().SetColor(markercolor)\r\n renderer.AddActor(actor) \r\n\r\n\r\ndef update_renderers(camera, renderers, pos=None, viewdir=None, vfov=None):\r\n \"\"\"\r\n Update the camera and rendered scenes for the given camera parameters.\r\n \r\n Arguments:\r\n camera: The vtkCamera().\r\n renderers: A list of vtkRenderer() objects.\r\n pos: The new camera position, (X,Y,Z). Default is None.\r\n viewdir: The new camera viewing direction, (dX,dY,dZ). Default is None.\r\n The VTK focal point (FP) does not move with the camera's position;\r\n it enables a camera to look at a finite fixed point from different\r\n points of view. Viewdir does move with (it is independent of) camera\r\n position; it is a viewing *direction* not a *fixed point*: VIEWDIR =\r\n FP - POS.\r\n vfov: The new camera vertical viewing angle (vertical FOV, in degrees).\r\n \"\"\"\r\n if pos is not None:\r\n camera.SetPosition(pos)\r\n if viewdir is not None:\r\n if pos is None:\r\n pos = camera.GetPosition()\r\n camera.SetFocalPoint(pos[0]+viewdir[0], pos[1]+viewdir[1], pos[2]+viewdir[2]) \r\n if vfov is not None:\r\n camera.SetViewAngle(vfov)\r\n for ren in renderers if type(renderers) is list else [renderers]:\r\n ren.ResetCameraClippingRange()\r\n ren.GetRenderWindow().Render()\r\n \r\n \r\ndef get_camera_pose(camera):\r\n \"\"\"\r\n Get the position, orientation, and field of view of the renderer's camera.\r\n \r\n Usage:\r\n pos, viewdir, vfov = get_camera_pose(camera)\r\n \r\n Arguments:\r\n camera: A vtkCamera object.\r\n \r\n Returns:\r\n pos: The camera position (center of projection), a tuple (X,Y,Z).\r\n viewdir: The camera viewing direction, a tuple (dX,dY,dZ).\r\n vfov: The camera's vertical field-of-view, in degrees, a float.\r\n \r\n Description:\r\n The camera orientation is returned in 'viewdir.' This is a vector pointing\r\n along the camera's optical axis. 
This \"viewing direction\" of the camera \r\n changes when the camera pans or tilts, but not when the camera translates.\r\n \"\"\"\r\n pos = camera.GetPosition() # (X,Y,Z) camera absolute position\r\n fp = camera.GetFocalPoint() # 3D focal point, changes with camera translation\r\n vfov = camera.GetViewAngle() # vertical field-of-view (degrees)\r\n viewdir = tuple(np.array(fp) - np.array(pos))\r\n return pos, viewdir, vfov","sub_path":"pyutils/vtkutils.py","file_name":"vtkutils.py","file_ext":"py","file_size_in_byte":29812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"65370157","text":"\n\n#calss header\nclass _FOLLY():\n\tdef __init__(self,): \n\t\tself.name = \"FOLLY\"\n\t\tself.definitions = [u'the fact of being stupid, or a stupid action, idea, etc.: ', u'a building in the form of a small castle, temple, etc., that has been built as a decoration in a large garden or park: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_folly.py","file_name":"_folly.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"523979015","text":"##Faça um programa que leia um valor N inteiro e positivo\n##Calcule e mostre o valor de E, conforme a fórmula: E = 1+11!+12!+13!+14!+...+1N!\n\nN = int(input('Informe o valor de N: '))\nE = 1\nfor fat in range(1,N+1): # fat = 1! fat = 2! fat = 3! fat = 4!\n fatorial = 1\n contador = 1\n while contador <= fat: # 1 * 1 = 1 1 * 2 = 2 2 * 3 = 6 6 * 4 = 24\n fatorial = fatorial * contador\n contador += 1 # contador = contador + 1\n divisao = 1 / fatorial\n E = E + divisao\nprint('E = ',E)\n","sub_path":"Estrutura de Repetição - Ex23.py","file_name":"Estrutura de Repetição - Ex23.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"615921247","text":"# Based on code from https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0\n\nimport gym\nimport numpy as np\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nimport util.eval\n\nclass DeepNet(nn.Module):\n def __init__(self, n_input, lr):\n super(DeepNet, self).__init__()\n self.n_input = n_input\n self.lr = lr\n self.fc_h1 = nn.Linear(n_input, 16, bias=False)\n torch.nn.init.uniform_(self.fc_h1.weight, 0., 0.1)\n self.fc_out = nn.Linear(16, 4, bias=False)\n torch.nn.init.uniform_(self.fc_out.weight, 0., 0.01)\n self.optimizer = optim.SGD(self.parameters(), lr=lr)\n\n def forward(self, x):\n x = F.relu(self.fc_h1(x))\n x = self.fc_out(x)\n return x\n\ndef int_to_onehot(x, dim):\n x_onehot = torch.zeros([1, dim])\n x_onehot[0,x] = 1.\n return x_onehot\n\n\nenv = gym.make('FrozenLake-v0')\n# env = gym.make('FrozenLake8x8-v0')\nt0 = time.time()\n\n# Set learning parameters\nlr = .1\ngamma = .95\neps_schedule = lambda x: 500./(float(x)+2000.)\nmax_steps_per_episode = 99\nnum_episodes = 10000\nreturns_list = []\nnet = DeepNet(env.observation_space.n, lr)\nfor episode in range(num_episodes):\n # Prepared to save the episode history.\n # Rewards and terminals initialized to None because they are only available starting from the second step\n 
history = {'preds': [], 'rewards': [None], 'terminals': [None], 'acts': []}\n cumulative_reward = 0\n terminal = False\n obs = env.reset()\n # Collect the experiences over one episode\n for step in range(max_steps_per_episode):\n # Process the observation and run the network\n obs_onehot = int_to_onehot(obs, env.observation_space.n)\n pred = net(obs_onehot)\n\n # Update the history\n history['preds'].append(pred)\n if terminal == False:\n # Select an action and take a step\n if np.random.rand() < eps_schedule(episode):\n act = env.action_space.sample()\n else:\n act = np.argmax(pred.detach().numpy())\n obs, reward, terminal, _ = env.step(act)\n # Update the history\n history['rewards'].append(reward)\n history['terminals'].append(terminal)\n history['acts'].append(act)\n cumulative_reward += reward\n else:\n # We do not need the last action, so put None there\n history['acts'].append(None)\n break\n\n # Train Q-learning on the collected transitions\n net.optimizer.zero_grad()\n num_steps = len(history['preds'])\n # Process the episode, going from the end to the beginning\n for step in range(num_steps-1,-1,-1):\n pred, act, reward, terminal = history['preds'][step], history['acts'][step], \\\n history['rewards'][step], history['terminals'][step]\n # For the terminal states target should be 0\n if step == num_steps-1:\n if terminal:\n target = 0.\n else:\n break\n else:\n reward_next, pred_next = history['rewards'][step+1], history['preds'][step+1]\n target = reward_next + gamma*torch.max(pred_next.detach(), dim=1)[0]\n\n # Train Q-function\n predicted = pred[:,act]\n loss = (predicted - target).pow(2).sum()\n # Accumulate the gradient\n loss.backward()\n\n # Apply the update\n net.optimizer.step()\n\n if time.time() - t0 > 1:\n num_rwrds = min(100,len(returns_list)-1)\n print('Episode', episode, 'Smoothed average return', sum(returns_list[-num_rwrds:])/num_rwrds)\n t0 = time.time()\n returns_list.append(cumulative_reward)\n\nprint(\"Smoothed training reward\", np.mean(np.reshape(np.array(returns_list), [-1,250]), axis=1))\n\nprint('Evaluating the learned policy')\ndef policy(obs):\n obs_onehot = int_to_onehot(obs, env.observation_space.n)\n pred = net(obs_onehot)\n return np.argmax(pred.detach().numpy())\n\navg_test_return = util.eval.eval_agent(policy, env, num_episodes=10000, max_steps_per_episode=100)\nprint(\"Avg eval return: \", avg_test_return)\n","sub_path":"3b-Sensorimotor-Control-and-Deep-Reinforcement-Learning/RL_tutorial/02c_q_learning_deep_net_batch_processing_solution.py","file_name":"02c_q_learning_deep_net_batch_processing_solution.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"562222160","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom collections import OrderedDict\nfrom torch.nn import init\nimport math\n\nimport numpy as np\n\nmid_channel_scale = []\nfor i in range(31):\n mid_channel_scale += [(10 + i * 3)/100]\n\noverall_channel_scale = []\nfor i in range(31):\n overall_channel_scale += [(10 + i * 3)/100]\n\nstage_out_channel = [44] + [22] + [33] * 2 + [44] * 3 + [88] * 4 + [132] * 3 + [224] * 3 + [448]\n\nclass conv2d_3x3(nn.Module):\n def __init__(self, base_inp, base_oup, stride):\n super(conv2d_3x3, self).__init__()\n\n self.stride = stride\n assert stride in [1, 2]\n\n self.max_overall_scale = overall_channel_scale[-1]\n self.base_inp = base_inp\n self.base_oup = base_oup\n\n 
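# Our reading of this block (not documented upstream): fc11/fc12 form a\n # tiny hypernetwork that maps the normalized width scale to a full 3x3\n # conv weight tensor, which forward() then slices down to the currently\n # sampled output width.\n 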
self.max_oup_channel = int(self.max_overall_scale * self.base_oup)\n self.fc11 = nn.Linear(1, 32)\n self.fc12 = nn.Linear(32, self.max_oup_channel * self.base_inp * 3 * 3)\n\n self.first_bn = nn.ModuleList()\n for oup_scale in overall_channel_scale:\n oup = int(self.base_oup * oup_scale)\n self.first_bn.append(nn.BatchNorm2d(oup, affine=False))\n\n def forward(self, x, oup_scale_id):\n\n oup_scale = overall_channel_scale[oup_scale_id]\n oup = int(self.base_oup * oup_scale)\n scale_tensor = torch.FloatTensor([oup_scale/self.max_overall_scale]).to(x.device)\n\n fc11_out = F.relu(self.fc11(scale_tensor))\n conv1_weight = self.fc12(fc11_out).view(self.max_oup_channel, self.base_inp, 3, 3)\n\n out = F.conv2d(x, conv1_weight[:oup, :, :, :], bias=None, stride=self.stride, padding=1)\n out = self.first_bn[oup_scale_id](out)\n out = F.relu6(out)\n\n return out\n\nclass conv2d_1x1(nn.Module):\n def __init__(self, base_inp, base_oup, stride):\n super(conv2d_1x1, self).__init__()\n\n self.stride = stride\n assert stride in [1, 2]\n\n self.max_overall_scale = overall_channel_scale[-1]\n self.base_inp = base_inp\n self.base_oup = base_oup\n\n self.max_inp_channel = int(self.max_overall_scale * self.base_inp)\n self.fc11 = nn.Linear(1, 32)\n self.fc12 = nn.Linear(32, self.base_oup * self.max_inp_channel * 1 * 1)\n #self.conv1_weight = nn.Parameter(torch.randn(base_oup, self.max_inp_channel, 1, 1))\n\n self.first_bn = nn.ModuleList()\n for inp_scale in overall_channel_scale:\n inp = int(self.base_inp * inp_scale)\n self.first_bn.append(nn.BatchNorm2d(base_oup, affine=False))\n\n def forward(self, x, inp_scale_id):\n\n inp_scale = overall_channel_scale[inp_scale_id]\n\n inp = int(self.base_inp * inp_scale)\n\n scale_tensor = torch.FloatTensor([inp_scale/self.max_overall_scale]).to(x.device)\n\n fc11_out = F.relu(self.fc11(scale_tensor))\n conv1_weight = self.fc12(fc11_out).view(self.base_oup, self.max_inp_channel, 1, 1)\n\n out = F.conv2d(x, conv1_weight[:, :inp, :, :], bias=None, stride=self.stride, padding=0)\n out = self.first_bn[inp_scale_id](out)\n out = F.relu6(out)\n\n return out\n\nclass bottleneck(nn.Module):\n def __init__(self, base_inp, base_oup, stride, expand_ratio=6):\n super(bottleneck, self).__init__()\n\n self.max_overall_scale = overall_channel_scale[-1]\n\n max_inp = base_inp\n max_oup = base_oup\n max_mid = max_inp * expand_ratio\n\n self.max_inp = base_inp\n self.max_oup = base_oup\n self.max_mid = max_mid\n self.stride = stride\n\n self.fc11 = nn.Linear(3, 64)\n self.fc12 = nn.Linear(64, max_mid * max_inp * 1 * 1)\n\n self.fc21 = nn.Linear(3, 64)\n self.fc22 = nn.Linear(64, max_mid * 1 * 3 * 3)\n\n self.fc31 = nn.Linear(3, 64)\n self.fc32 = nn.Linear(64, max_oup * max_mid * 1 * 1)\n\n\n self.bn1 = nn.ModuleList()\n for mid_scale in mid_channel_scale:\n mid = int(self.max_mid * mid_scale)\n self.bn1.append(nn.BatchNorm2d(mid, affine=False))\n\n self.bn2 = nn.ModuleList()\n for mid_scale in mid_channel_scale:\n mid = int(self.max_mid * mid_scale)\n self.bn2.append(nn.BatchNorm2d(mid, affine=False))\n\n self.bn3 = nn.ModuleList()\n for oup_scale in overall_channel_scale:\n oup = int(max_oup * oup_scale)\n self.bn3.append(nn.BatchNorm2d(oup, affine=False))\n\n\n def forward(self, x, mid_scale_id, inp_scale_id, oup_scale_id):\n\n mid_scale = mid_channel_scale[mid_scale_id]\n inp_scale = overall_channel_scale[inp_scale_id]\n oup_scale = overall_channel_scale[oup_scale_id]\n\n mid = int(self.max_mid * mid_scale)\n inp = int(self.max_inp * inp_scale)\n oup = int(self.max_oup * oup_scale)\n\n 
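# Worked example of the slicing below (hypothetical numbers): with\n # self.max_mid = 264 and mid_scale = 0.55, mid = int(264 * 0.55) = 145,\n # so only the first 145 generated filters take part in this pass; the\n # rest of the emitted weight tensor is simply ignored.\n 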
scale_ratio_tensor = torch.FloatTensor([mid_scale, inp_scale, oup_scale]).to(x.device)\n\n fc11_out = F.relu(self.fc11(scale_ratio_tensor))\n conv1_weight = self.fc12(fc11_out).view(self.max_mid, self.max_inp, 1, 1)\n\n fc21_out = F.relu(self.fc21(scale_ratio_tensor))\n conv2_weight = self.fc22(fc21_out).view(self.max_mid, 1, 3, 3)\n\n fc31_out = F.relu(self.fc31(scale_ratio_tensor))\n conv3_weight = self.fc32(fc31_out).view(self.max_oup, self.max_mid, 1, 1)\n\n out = F.conv2d(x, conv1_weight[:mid, :inp, :, :], bias=None, stride=1, padding=0, groups=1)\n out = self.bn1[mid_scale_id](out)\n out = F.relu6(out)\n\n out = F.conv2d(out, conv2_weight[:mid, :, :, :], bias=None, stride=self.stride, padding=1, groups=mid)\n out = self.bn2[mid_scale_id](out)\n out = F.relu6(out)\n\n out = F.conv2d(out, conv3_weight[:oup, :mid, :, :], bias=None, stride=1, padding=0, groups=1)\n out = self.bn3[oup_scale_id](out)\n\n if self.max_inp == self.max_oup:\n return (out + x)\n\n else:\n return out\n\n\n\nclass MobileNetV2(nn.Module):\n def __init__(self, input_size=224, num_classes=1000):\n super(MobileNetV2, self).__init__()\n\n self.feature = nn.ModuleList()\n\n for i in range(19):\n if i == 0:\n self.feature.append(conv2d_3x3(3, stage_out_channel[i], 2))\n elif i == 1:\n self.feature.append(bottleneck(stage_out_channel[i-1], stage_out_channel[i], 1, expand_ratio=1))\n elif i == 18:\n self.feature.append(conv2d_1x1(stage_out_channel[i-1], 1280, 1))\n else:\n if stage_out_channel[i-1]!=stage_out_channel[i] and stage_out_channel[i]!=132 and stage_out_channel[i]!=448:\n self.feature.append(bottleneck(stage_out_channel[i-1], stage_out_channel[i], 2))\n else:\n self.feature.append(bottleneck(stage_out_channel[i-1], stage_out_channel[i], 1))\n\n\n #self.feature.append(bottleneck(32, 22, 1, expand_ratio=1))\n\n #self.feature.append(bottleneck(22, 33, 2))\n #self.feature.append(bottleneck(33, 33, 1))\n\n #self.feature.append(bottleneck(33, 44, 2))\n #for i in range(2):\n # self.feature.append(bottleneck(44, 44, 1))\n\n #self.feature.append(bottleneck(44, 88, 2))\n #for i in range(3):\n # self.feature.append(bottleneck(88, 88, 1))\n\n #self.feature.append(bottleneck(88, 132, 1))\n #for i in range(2):\n # self.feature.append(bottleneck(132, 132, 1))\n\n #self.feature.append(bottleneck(132, 224, 2))\n #for i in range(2):\n # self.feature.append(bottleneck(224, 224, 1))\n\n #self.feature.append(bottleneck(224, 448, 1))\n\n #self.conv5 = conv2d_1x1(int(1.4*stage_out_channel[16]), 1280, 1)\n self.pool1 = nn.AvgPool2d(7)\n self.fc = nn.Linear(1280, 1000)\n\n def forward(self, x, mid_scale_ids, stage_oup_scale_ids):\n\n\n for i, block in enumerate(self.feature):\n if i == 0 :\n x = block(x, stage_oup_scale_ids[i])\n elif i == 18 :\n x = block(x, stage_oup_scale_ids[i-1])\n else :\n x = block(x, mid_scale_ids[i], stage_oup_scale_ids[i-1], stage_oup_scale_ids[i])\n\n #print(x.shape, flush=True)\n x = self.pool1(x)\n x = x.view(-1, 1280)\n x = self.fc(x)\n\n return x\n\nif __name__ == \"__main__\":\n model = MobileNetV2()\n print(model)\n","sub_path":"mobilenetv2/training/mobilenet_v2.py","file_name":"mobilenet_v2.py","file_ext":"py","file_size_in_byte":8348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"14840368","text":"import numpy as np\nimport time\nimport matplotlib.pyplot as plt\n\nfrom tensorforce.agents import PPOAgent\nfrom tensorforce.execution import Runner\nfrom tensorforce.contrib.openai_gym import OpenAIGym\n\nenv = OpenAIGym('MountainCar-v0', 
visualize=False)\n\nnetwork_spec = [\n dict(type='dense', size=16, activation='relu'),\n dict(type='dense', size=16, activation='relu'),\n dict(type='dense', size=16, activation='relu')\n]\n\nagent = PPOAgent(\n states_spec=env.states,\n actions_spec=env.actions,\n network_spec=network_spec,\n batch_size=1024,\n # Agent\n # preprocessing=None,\n # exploration=None,\n # reward_preprocessing=None,\n # BatchAgent\n keep_last_timestep=True,\n # PPOAgent\n step_optimizer=dict(\n type='adam',\n learning_rate=1e-3\n ),\n optimization_steps=10,\n # Model\n scope='ppo',\n discount=0.99,\n # DistributionModel\n distributions_spec=None,\n entropy_regularization=0.01,\n # PGModel\n baseline_mode=None,\n baseline=None,\n baseline_optimizer=None,\n gae_lambda=None,\n # PGLRModel\n likelihood_ratio_clipping=0.2,\n summary_spec=None,\n distributed_spec=None\n)\n\nreward_list = []\n\nargs_episodes = 1000\nargs_episode_max_steps = 200\nepisode = 0\nagent.reset()\nwhile True:\n agent.reset()\n state = env.reset()\n episode += 1\n episode_step = 0\n episode_reward = 0\n while True:\n action = agent.act(state)\n state, terminal, reward = env.execute(action)\n reward = np.abs(state[1]) - 0.05\n episode_reward += reward\n episode_step += 1\n if args_episode_max_steps is not None and episode_step >= args_episode_max_steps:\n terminal = True\n agent.observe(terminal, reward)\n\n if terminal:\n break\n print('episode {0} steps {1} reward {2}'.format(episode, episode_step, episode_reward))\n reward_list.append(episode_reward)\n if episode >= args_episodes:\n break\n # if len(reward_list) > 100 and np.mean(reward_list[-100:]) > 199:\n # print('good enough!!!')\n # break\n\nplt.plot(reward_list)\n","sub_path":"studzie/tensorforce_test/mountain_car_v0.py","file_name":"mountain_car_v0.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"184013816","text":"from django.test import TestCase\n\nfrom .models import Ledger\n\n\nclass LedgerTestCase(TestCase):\n def setUp(self):\n pass\n\n def test_parse_as_withdrawal(self):\n date = '02/09/2015'\n descr = 'NN NNNN 0010 NNN# 9999999 99999 NNNNNNN NNNN'\n pattern = 'NNNNNN0010NNN#{NBR}NNNNNNNNNNN'\n amount = '$1,400.00'\n balance = '$1,138.52'\n text = f'{date}\\t{descr}\\tREF\\t{amount}\\t\\t{balance}\\tNNNNNNNNNNN NNNNNNN'\n ledger = Ledger(original_text=text)\n ledger.parse()\n self.assertEqual(ledger.date, '2015-02-09')\n self.assertEqual(ledger.description, descr)\n self.assertEqual(ledger.pattern, pattern)\n self.assertEqual(ledger.amount, -1400)\n self.assertEqual(ledger.balance, 1138.52)\n # Assert.AreEqual(@'NNNNNNN0010NNN#[0-9]+NNNNNNNNNNN', ledger.RegexMap);\n\n def test_parse_as_deposit(self):\n date = '02/09/2015'\n descr = 'NN NNNN 0010 NNN# 9999999 99999 NNNNNNN NNNN'\n pattern = 'NNNNNN0010NNN#{NBR}NNNNNNNNNNN'\n amount = '$1,400.00'\n balance = '$1,138.52'\n text = f'{date}\\t{descr}\\tREF\\t\\t{amount}\\t{balance}\\tNNNNNNNNNNN NNNNNNN'\n ledger = Ledger(original_text=text)\n ledger.parse()\n self.assertEqual(ledger.date, '2015-02-09')\n self.assertEqual(ledger.description, descr)\n self.assertEqual(ledger.pattern, pattern)\n self.assertEqual(ledger.amount, 1400)\n self.assertEqual(ledger.balance, 1138.52)\n # Assert.AreEqual(@'NNNNNNN0010NNN#[0-9]+NNNNNNNNNNN', ledger.RegexMap);\n\n def test_parse_actual(self):\n text = '02/29/2020\\tCONF # 12 REF # 34 SQ *ROOTS RAW JUICE LAKE MARY FL 02/29/20\\t0000709613\\t$ 10.16\\t\\t$ 828.92\\tInquire'\n ledger = 
Ledger(original_text=text)\n ledger.parse()\n self.assertEqual(ledger.date, '2020-02-29')\n self.assertEqual(ledger.description,\n 'CONF # 12 REF # 34 SQ *ROOTS RAW JUICE LAKE MARY FL 02/29/20')\n self.assertEqual(ledger.pattern,\n '{CONF}{REF}SQROOTSRAWJUICELAKEMARYFL{DATE}')\n self.assertEqual(ledger.amount, -10.16)\n self.assertEqual(ledger.balance, 828.92)\n\n# [TestMethod]\n# public void Ledger_Should_Parse_001()\n# {\n# var ledger = new Ledger\n# {\n# OriginalText = '02/09/2015\tNNNNNNNN'N N99999 NNNNNNN NN 02/08/15 NNN 9999\tREF \t$9.82\t\t$2,538.52\tNNNNNNNNNNN NNNNNNN'\n# };\n\n# Assert.AreEqual('10D9DAF6', ledger.Id);\n# Assert.AreEqual(new DateTime(2015, 2, 9), ledger.Date);\n# Assert.AreEqual('NNNNNNNN'N N99999 NNNNNNN NN 02/08/15 NNN 9999', ledger.Description);\n# Assert.AreEqual(-9.82, ledger.Amount);\n# Assert.AreEqual(2538.52, ledger.Balance);\n# Assert.AreEqual(@'NNNNNNNNNN99999NNNNNNNNN\\(MDY\\)NNN9999', ledger.RegexMap);\n# }\n\n# [TestMethod]\n# public void Ledger_Should_Parse_002()\n# {\n# var ledger = new Ledger\n# {\n# OriginalText = '02/09/2015\tNNNNNNNN : NNNNNNNN NN: 99999999NN: NNNNNNNN NNN NNNNN 99999999\t\tREF \t$1,406.00\t$2,548.34\tNNNNNNNNNNN NNNNNNN'\n# };\n\n# Assert.AreEqual('999BEDC8', ledger.Id);\n# Assert.AreEqual(new DateTime(2015, 2, 9), ledger.Date);\n# Assert.AreEqual('NNNNNNNN : NNNNNNNN NN: 99999999NN: NNNNNNNN NNN NNNNN 99999999', ledger.Description);\n# Assert.AreEqual(1406, ledger.Amount);\n# Assert.AreEqual(2548.34, ledger.Balance);\n# Assert.AreEqual(@'NNNNNNNNNNNNNNNNNN99999999NNNNNNNNNNNNNNNNNN99999999', ledger.RegexMap);\n# }\n\n# [TestMethod]\n# public void Ledger_Should_Parse_003()\n# {\n# var ledger = new Ledger\n# {\n# OriginalText = '02/09/2015\tNNNNNNN NNNNNNNN #9999 NNNNNNN NN 02/08/15 NNN 9999\t$17.81\t\tREF \t$1,142.34\tNNNNNNNNNNN NNNNNNN'\n# };\n\n# Assert.AreEqual('98AEE42E', ledger.Id);\n# Assert.AreEqual(new DateTime(2015, 2, 9), ledger.Date);\n# Assert.AreEqual('NNNNNNN NNNNNNNN #9999 NNNNNNN NN 02/08/15 NNN 9999', ledger.Description);\n# Assert.AreEqual(-17.81, ledger.Amount);\n# Assert.AreEqual(1142.34, ledger.Balance);\n# Assert.AreEqual(@'NNNNNNNNNNNNNNN#[0-9]+NNNNNNNNN\\(MDY\\)NNN9999', ledger.RegexMap);\n# }\n\n# [TestMethod]\n# public void Ledger_Should_Parse_004()\n# {\n# var ledger = new Ledger\n# {\n# OriginalText = '02/07/2015\t9999-NNNN NNN NNN N-9999, 9999 NN NNNNNNN NN NNN 9999\t$68.16\t\tREF \t$1,218.82\tNNNNNNNNNNN NNNNNNN'\n# };\n\n# Assert.AreEqual('X6A9645D', ledger.Id);\n# Assert.AreEqual(new DateTime(2015, 2, 7), ledger.Date);\n# Assert.AreEqual('9999-NNNN NNN NNN N-9999, 9999 NN NNNNNNN NN NNN 9999', ledger.Description);\n# Assert.AreEqual(-68.16, ledger.Amount);\n# Assert.AreEqual(1218.82, ledger.Balance);\n# Assert.AreEqual(@'9999NNNNNNNNNNN99999999NNNNNNNNNNNNNN9999', ledger.RegexMap);\n# }\n\n# #endregion\n\n# #region Collection Tests\n\n# [TestMethod]\n# public void LedgerCollection_Should_FindMissingBudget()\n# {\n# // assign\n# var account = Builder.CreateNew().Build();\n\n# var ledger = CreateEmptyLedger();\n\n# // act\n# account.Ledgers.Add(ledger);\n\n# // assert\n# Assert.AreEqual(1, account.Ledgers.MissingBudget().Count());\n# }\n\n# [TestMethod]\n# public void LedgerCollection_Should_FindLedger()\n# {\n# // assign\n# var account = Builder.CreateNew().Build();\n\n# var ledger = CreateEmptyLedger();\n\n# // act\n# account.Ledgers.Add(ledger);\n\n# // assert\n# var x = account.Ledgers.Find(ledger.Id.ToLower(), ledger.Date);\n\n# Assert.AreSame(ledger, x);\n# }\n\n# [TestMethod]\n# public void 
LedgerCollection_Should_FindMap()\n# {\n# // assign\n# var account = CreateAccount();\n\n# var ledger = CreateEmptyLedger();\n\n# var map = new Map { RegexPattern = ledger.RegexMap, BudgetId = account.Categories.First().Budgets.First().Id };\n\n# account.Maps.Add(map);\n\n# // act\n# account.Ledgers.Import(ledger.OriginalText);\n\n# // assert\n# Assert.AreEqual(map.BudgetId, account.Ledgers.First().BudgetId);\n# }\n\n# private Account CreateAccount()\n# {\n# var account = Builder.CreateNew().Build();\n\n# var category = Builder.CreateNew().Build();\n\n# var budget = new Budget { CategoryId = category.Id, Id = 'X' };\n\n# var po = new PrivateObject(account);\n\n# po.SetField('_categories', null);\n# po.SetProperty('AllCategories', new List { category });\n\n# po.SetField('_budgets', null);\n# po.SetProperty('AllBudgets', new List { budget });\n\n# return account;\n# }\n\n# [TestMethod]\n# public void LedgerCollection_Should_AssignAccountId()\n# {\n# // assign\n# var account = Builder.CreateNew().Build();\n\n# var ledger = CreateEmptyLedger();\n\n# // act\n# account.Ledgers.Add(ledger);\n\n# // assert\n# Assert.AreEqual(account.Id, ledger.AccountId);\n# }\n\n# [TestMethod]\n# public void LedgerCollection_Should_LookupUsingId()\n# {\n# // assign\n# var account = Builder.CreateNew().Build();\n\n# var l1 = CreateEmptyLedger();\n\n# // act\n# account.Ledgers.Add(l1);\n\n# // assert\n# Assert.AreEqual(l1, account.Ledgers.First());\n# }\n\n# [TestMethod]\n# public void LedgerCollection_Should_ParseInputAndUpdateBalances()\n# {\n# // assign\n# var account = Builder.CreateNew().Build();\n\n# var input = @'02/09/2015\tNN NNNNN 0010 NNN# 9999999 99999 NNNNNNN NNNN\t$1,400.00\t\t$1,138.52\tNNNNNNNNNNN NNNNNNN\n# 02/09/2015\tNNNNNNNN'N N99999 NNNNNNN NN 02/08/15 NNN 9999\t$9.82\t\t$2,538.52\tNNNNNNNNNNN NNNNNNN\n# 02/09/2015\tNNNNNNNN : NNNNNNNN NN: 99999999NN: NNNNNNNN NNN NNNNN 99999999\t\t$1,406.00\t$2,548.34\tNNNNNNNNNNN NNNNNNN\n# 02/09/2015\tNNNNNNN NNNNNNNN #2189 NNNNNNN NN 02/08/15 NNN 5231\t$17.81\t\t$1,142.34\tNNNNNNNNNNN NNNNNNN\n# 02/08/2015\tNNNNNNNN'N N99999 NNNNNNN NN 02/07/15 NNN 9999\t$13.80\t\t$1,160.15\tNNNNNNNNNNN NNNNNNN\n# 02/08/2015\tNNNN'N NNN NNNN #3 NNNNNNN NN 02/07/15 NNN 7542\t$10.00\t\t$1,173.95\tNNNNNNNNNNN NNNNNNN\n# 02/08/2015\tNNNNNN 10250 NNNNN NNNN N NNNNNNN NN NNN 5411\t$17.47\t\t$1,183.95\tNNNNNNNNNNN NNNNNNN\n# 02/07/2015\tNNNNNN 10250 NNNNN NNNN N NNNNNNN NN NNN 5411\t$17.40\t\t$1,201.42\tNNNNNNNNNNN NNNNNNN\n# 02/07/2015\t10726-NNNN NNN NNN N-260, 4200 NN NNNNNNN NN NNN 5691\t$68.16\t\t$1,218.82\tNNNNNNNNNNN NNNNNNN\n# 02/07/2015\tNNN - N-NNNN N/N 407-690-5000 NN 02/07/15 NNN 4784\t$55.00\t\t$1,286.98\tNNNNNNNNNNN NNNNNNN';\n\n# // act\n# account.Ledgers.Import(input);\n\n# // assert\n# Assert.AreEqual(10, account.Ledgers.Count);\n\n# Assert.AreEqual(2, account.Balances.Count);\n\n# Assert.IsTrue(account.Balances.Contains(new DateTime(2015, 2, 1)));\n\n# Assert.IsTrue(account.Balances.Contains(new DateTime(2015, 2, 6)));\n# }\n\n# [TestMethod]\n# public void LedgerCollection_Should_ParseEmptyInput()\n# {\n# // assign\n# var account = Builder.CreateNew().Build();\n\n# var input = @'';\n\n# // act\n# account.Ledgers.Import(input);\n\n# // assert\n# Assert.AreEqual(0, account.Ledgers.Count);\n# }\n\n# [TestMethod]\n# public void LedgerCollection_Should_ParseDuplicateInput()\n# {\n# // assign\n# var account = Builder.CreateNew().Build();\n\n# var input = @'02/09/2015\tNN NNNNN 0010 NNN# 9999999 99999 NNNNNNN NNNN\t$1,400.00\t\t$1,138.52\tNNNNNNNNNNN NNNNNNN\n# 
02/09/2015\tNN NNNNN 0010 NNN# 9999999 99999 NNNNNNN NNNN\t$1,400.00\t\t$1,138.52\tNNNNNNNNNNN NNNNNNN';\n\n# // act\n# account.Ledgers.Import(input);\n\n# // assert\n# Assert.AreEqual(1, account.Ledgers.Count);\n# }\n\n# #endregion\n# }\n# }\n","sub_path":"nerdbudget/ledger/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":10770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"467841304","text":"import collections\n\nfrom .. import *\nfrom . import *\n\nfrom .train_misc import *\nfrom .train_standard import *\nfrom .train_hsic_prune import *\n\nfrom ..math.admm import *\nfrom ..utils.masks import *\nfrom ..utils.path import *\nfrom ..utils.io import *\n\n\nimport torch\nimport time\n\ndef training_standard(config_dict):\n \"\"\"\n Train model with HBaR or CE only\n \"\"\"\n train_loader, test_loader = get_dataset_from_code(config_dict['data_code'], config_dict['batch_size'])\n torch.manual_seed(config_dict['seed'])\n\n model = model_distribution(config_dict)\n \n # load pretrained model\n if 'load_model' in config_dict:\n model = load_state_dict(model, get_model_path(\"{}\".format(config_dict['load_model'])))\n model = model.to(config_dict['device'])\n \n # construct single output model for test\n #config_dict['robustness'] = True\n #model_single_output = model_distribution(config_dict)\n #model_single_output = model_single_output.to(config_dict['device'])\n \n nepoch = config_dict['epochs']\n optimizer, scheduler = set_optimizer(config_dict, model, train_loader, \\\n config_dict['optimizer'], config_dict['learning_rate'], nepoch)\n\n best = 0\n log_dict = {}\n batch_log_list = []\n epoch_log_dict = collections.defaultdict(list)\n epoch_time = meter.AverageMeter()\n \n for cepoch in range(0, nepoch+1):\n if cepoch > 0:\n start_time = time.time() \n if config_dict['training_type'] == 'hsictrain':\n hsic_prune(cepoch, model, train_loader, optimizer, scheduler, config_dict)\n elif config_dict['training_type'] == 'backprop':\n standard_train(cepoch, model, train_loader, optimizer, scheduler, config_dict)\n else:\n raise ValueError(\"Unknown training type or not support [{}]\".format(config_dict['training_type']))\n epoch_time.update(time.time()-start_time)\n \n epoch_log_dict, best = eval_and_save(config_dict, model, test_loader, \n epoch_log_dict, cepoch, nepoch, best)\n \n log_dict['epoch_log_dict'] = epoch_log_dict\n log_dict['config_dict'] = config_dict\n filename = \"{}.npy\".format(os.path.splitext(config_dict['model_file'])[0])\n save_logs(log_dict, get_log_filepath(\"{}\".format(filename)))\n print('Overall training time is {:.2f}s.'.format(epoch_time.sum))\n \n return batch_log_list, epoch_log_dict\n\ndef training_hsic_prune(config_dict):\n \"\"\"\n Train hsic model parameters with hsic + hard pruning + masked retrain\n \"\"\"\n \n train_loader, test_loader = get_dataset_from_code(config_dict['data_code'], config_dict['batch_size'])\n torch.manual_seed(config_dict['seed'])\n \n # load pre-trained model\n model = model_distribution(config_dict)\n model = load_state_dict(model, get_model_path(\"{}\".format(config_dict['load_model'])))\n model = model.to(config_dict['device'])\n \n # construct single output model for test\n #config_dict['robustness'] = True\n #model_single_output = model_distribution(config_dict)\n #model_single_output = model_single_output.to(config_dict['device'])\n \n # distillation teacher model\n if config_dict['distill']:\n pretrained = deepcopy(model)\n 
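# Distillation setup sketch: the teacher is a frozen deepcopy of the\n # student architecture restored from config_dict['distill_model_path'] and\n # held in eval() mode; depending on 'distill_loss' the student is matched\n # with KLDivLoss(log_target=True) (both inputs as log-probabilities) or MSE.\n 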
#pretrained.load_state_dict(torch.load(config_dict['distill_model_path']))\n pretrained = load_state_dict(pretrained, config_dict['distill_model_path'])\n pretrained = pretrained.to(config_dict['device'])\n pretrained.eval()\n config_dict['pretrained'] = pretrained\n if config_dict['distill_loss'] == 'kl':\n distillation_criterion = torch.nn.KLDivLoss(log_target=True).to(config_dict['device'])\n elif config_dict['distill_loss'] in ('mse', 'mseml'):\n distillation_criterion = torch.nn.MSELoss().to(config_dict['device'])\n config_dict['distillation_criterion'] = distillation_criterion\n\n # optimizer\n nepoch = config_dict['epochs']\n re_nepoch = config_dict['retrain_ep']\n optimizer, scheduler = set_optimizer(config_dict, model, train_loader, \\\n config_dict['optimizer'], config_dict['learning_rate'], nepoch)\n best = 0\n log_dict = {}\n batch_log_list = []\n epoch_log_dict = collections.defaultdict(list)\n epoch_time = meter.AverageMeter()\n \n # Initializing ADMM; if not admm, do hard pruning only\n admm = ADMM(config_dict, model, rho=config_dict['rho']) if config_dict['admm'] else None\n \n for cepoch in range(0, nepoch+1):\n if cepoch > 0:\n start_time = time.time() \n if config_dict['training_type'] == 'hsictrain':\n # Train model parameters with (backprop + hsic) + pruning; at the end of each epoch, do ADMM\n hsic_prune(cepoch, model, train_loader, optimizer, scheduler, config_dict, ADMM=admm)\n elif config_dict['training_type'] == 'backprop':\n # Train model parameters with backprop + pruning; at the end of each epoch, do ADMM\n standard_train(cepoch, model, train_loader, optimizer, scheduler, config_dict, ADMM=admm)\n else:\n raise ValueError(\"Unknown or unsupported training type [{}]\".format(config_dict['training_type']))\n epoch_time.update(time.time()-start_time)\n \n if (cepoch-1) % config_dict['admm_epochs'] == 0:\n #model_single_output.load_state_dict(model.state_dict())\n #model_single_output.eval()\n epoch_log_dict, best = eval_and_save(config_dict, model, test_loader, epoch_log_dict, cepoch, nepoch, best, False)\n \n print(get_lr(optimizer))\n \n \n # If not admm, do hard pruning only\n filename = os.path.splitext(config_dict['model_file'])[0]\n save_model(model,get_model_path(\"{}.pt\".format(filename+'_beforeHardprune')))\n \n # hard prune\n hard_prune(admm, model, config_dict['sparsity_type'], option=None)\n if config_dict['sparsity_type']=='filter':\n test_filter_sparsity(model)\n else:\n test_irregular_sparsity(model)\n masks = get_model_mask(model=model)\n \n # masked retrain\n train_loader, test_loader = get_dataset_from_code(config_dict['data_code'], config_dict['retrain_bs'])\n optimizer, scheduler = set_optimizer(config_dict, model, train_loader, \\\n config_dict['retrain_opt'], config_dict['retrain_lr'], re_nepoch)\n for cepoch in range(0, re_nepoch+1):\n if cepoch > 0:\n start_time = time.time() \n # you can also re-write hsic_train function\n if config_dict['retraining_type'] == 'hsictrain':\n # Retrain model parameters with backprop + hsic under the pruning masks\n hsic_prune(cepoch, model, train_loader, optimizer, scheduler, config_dict, masks=masks)\n elif config_dict['retraining_type'] == 'backprop':\n # Retrain model parameters with backprop under the pruning masks\n standard_train(cepoch, model, train_loader, optimizer, scheduler, config_dict, masks=masks)\n else:\n raise ValueError(\"Unknown or unsupported training type [{}]\".format(config_dict['retraining_type']))\n epoch_time.update(time.time()-start_time)\n \n epoch_log_dict, best = eval_and_save(config_dict, 
model, test_loader, epoch_log_dict, cepoch, nepoch, best, True)\n \n log_dict['epoch_log_dict'] = epoch_log_dict\n filename = \"{}.npy\".format(os.path.splitext(config_dict['model_file'])[0])\n save_logs(log_dict, get_log_filepath(\"{}\".format(filename)))\n\n # Test pruning ratio\n test_irregular_sparsity(model)\n print('Overall training time is {:.2f}s.'.format(epoch_time.sum))\n \n return batch_log_list, epoch_log_dict","sub_path":"source/hsicbt/core/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"136912852","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_http_methods\nfrom .models import Posting, Comment\n\n# Posting\n# Create\n@require_http_methods(['GET', 'POST'])\ndef posting_new(request):\n if request.method == 'POST':\n posting = Posting()\n posting.title = request.POST.get('title')\n posting.content = request.POST.get('content')\n posting.save()\n return redirect('board_ad:posting_detail', posting.id)\n else:\n return render(request, 'board_ad/posting_new.html')\n\n\n# Read\n@require_http_methods(['GET'])\ndef posting_list(request):\n postings = Posting.objects.all()\n return render(request, 'board_ad/list.html', {\n 'postings': postings,\n })\n\n\n@require_http_methods(['GET'])\ndef posting_detail(request, posting_id):\n posting = get_object_or_404(Posting, id=posting_id)\n comments = Comment.objects.filter(posting_id__exact=posting_id)\n return render(request, 'board_ad/detail.html', {\n 'posting': posting,\n 'comments': comments,\n })\n\n\n# Update\n@require_http_methods(['GET', 'POST'])\ndef posting_edit(request, posting_id):\n if request.method == 'POST':\n posting = Posting.objects.get(id=posting_id)\n posting.title = request.POST.get('title')\n posting.content = request.POST.get('content')\n posting.save()\n return redirect('board_ad:posting_detail', posting.id)\n else:\n posting = Posting.objects.get(id=posting_id)\n return render(request, 'board_ad/posting_edit.html', {\n 'posting': posting,\n })\n\n\n# Delete\n@require_http_methods(['POST'])\ndef posting_delete(request, posting_id):\n posting = Posting.objects.get(id=posting_id)\n posting.delete()\n return redirect('board_ad:posting_list')\n\n\n# Comment\n# Create\n@require_http_methods(['POST'])\ndef comment_new(request, posting_id):\n comment = Comment()\n comment.posting_id = posting_id\n comment.content = request.POST.get('comment')\n comment.save()\n return redirect('board_ad:posting_detail', posting_id)\n\n\n# Delete\ndef comment_delete(request, posting_id, comment_id):\n comment = Comment.objects.get(id=comment_id)\n comment.delete()\n return redirect('board_ad:posting_detail', posting_id)\n","sub_path":"board_ad/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"528135281","text":"\"\"\"Module controlling the writing of ParticleSets to NetCDF file\"\"\"\nimport os\nimport random\nimport shutil\nimport string\nfrom abc import ABC\nfrom abc import abstractmethod\n\nimport netCDF4\nimport numpy as np\n\ntry:\n from mpi4py import MPI\nexcept:\n MPI = None\ntry:\n from parcels._version import version as parcels_version\nexcept:\n raise EnvironmentError('Parcels version can not be retrieved. 
Have you run ''python setup.py install''?')\ntry:\n from os import getuid\nexcept:\n # Windows does not have getuid(), so define to simply return 'tmp'\n def getuid():\n return 'tmp'\n\n\n__all__ = ['BaseParticleFile']\n\n\ndef _set_calendar(origin_calendar):\n if origin_calendar == 'np_datetime64':\n return 'standard'\n else:\n return origin_calendar\n\n\nclass BaseParticleFile(ABC):\n \"\"\"Initialise trajectory output.\n\n :param name: Basename of the output file\n :param particleset: ParticleSet to output\n :param outputdt: Interval which dictates the update frequency of file output\n while ParticleFile is given as an argument of ParticleSet.execute()\n It is either a timedelta object or a positive double.\n :param write_ondelete: Boolean to write particle data only when they are deleted. Default is False\n :param convert_at_end: Boolean to convert npy files to netcdf at end of run. Default is True\n :param tempwritedir: directories to write temporary files to during executing.\n Default is out-XXXXXX where Xs are random capitals. Files for individual\n processors are written to subdirectories 0, 1, 2 etc under tempwritedir\n :param pset_info: dictionary of info on the ParticleSet, stored in tempwritedir/XX/pset_info.npy,\n used to create NetCDF file from npy-files.\n \"\"\"\n write_ondelete = None\n convert_at_end = None\n outputdt = None\n lasttime_written = None\n dataset = None\n metadata = None\n name = None\n particleset = None\n parcels_mesh = None\n time_origin = None\n lonlatdepth_dtype = None\n var_names = None\n file_list = None\n var_names_once = None\n file_list_once = None\n maxid_written = -1\n tempwritedir_base = None\n tempwritedir = None\n\n def __init__(self, name, particleset, outputdt=np.infty, write_ondelete=False, convert_at_end=True,\n tempwritedir=None, pset_info=None):\n\n self.write_ondelete = write_ondelete\n self.convert_at_end = convert_at_end\n self.outputdt = outputdt\n self.lasttime_written = None # variable to check if time has been written already\n\n self.dataset = None\n self.metadata = {}\n if pset_info:\n for v in pset_info.keys():\n setattr(self, v, pset_info[v])\n else:\n self.name = name\n self.particleset = particleset\n self.parcels_mesh = 'spherical'\n if self.particleset.fieldset is not None:\n self.parcels_mesh = self.particleset.fieldset.gridset.grids[0].mesh\n self.time_origin = self.particleset.time_origin\n self.lonlatdepth_dtype = self.particleset.collection.lonlatdepth_dtype\n self.var_names = []\n self.var_names_once = []\n for v in self.particleset.collection.ptype.variables:\n if v.to_write == 'once':\n self.var_names_once += [v.name]\n elif v.to_write is True:\n self.var_names += [v.name]\n if len(self.var_names_once) > 0:\n self.written_once = []\n self.file_list_once = []\n\n self.file_list = []\n\n tmp_dir = tempwritedir\n if tempwritedir is None:\n tmp_dir = os.path.join(os.path.dirname(str(self.name)), \"out-%s\" % ''.join(random.choice(string.ascii_uppercase) for _ in range(8)))\n else:\n tmp_dir = tempwritedir\n\n if MPI:\n mpi_rank = MPI.COMM_WORLD.Get_rank()\n self.tempwritedir_base = MPI.COMM_WORLD.bcast(tmp_dir, root=0)\n else:\n self.tempwritedir_base = tmp_dir\n mpi_rank = 0\n self.tempwritedir = os.path.join(self.tempwritedir_base, \"%d\" % mpi_rank)\n\n if not os.path.exists(self.tempwritedir):\n os.makedirs(self.tempwritedir)\n elif pset_info is None:\n raise IOError(\"output directory %s already exists. 
Please remove the directory.\" % self.tempwritedir)\n\n @abstractmethod\n def _reserved_var_names(self):\n \"\"\"\n returns the reserved dimension names not to be written just once.\n \"\"\"\n pass\n\n def open_netcdf_file(self, data_shape):\n \"\"\"Initialise NetCDF4.Dataset for trajectory output.\n The output follows the format outlined in the Discrete Sampling Geometries\n section of the CF-conventions:\n http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#discrete-sampling-geometries\n The current implementation is based on the NCEI template:\n http://www.nodc.noaa.gov/data/formats/netcdf/v2.0/trajectoryIncomplete.cdl\n\n :param data_shape: shape of the variables in the NetCDF4 file\n \"\"\"\n extension = os.path.splitext(str(self.name))[1]\n fname = self.name if extension in ['.nc', '.nc4'] else \"%s.nc\" % self.name\n if os.path.exists(str(fname)):\n os.remove(str(fname))\n\n coords = self._create_trajectory_file(fname=fname, data_shape=data_shape)\n self._create_trajectory_records(coords=coords)\n self._create_metadata_records()\n\n def close_netcdf_file(self):\n self.dataset.close()\n\n def _create_trajectory_file(self, fname, data_shape):\n self.dataset = netCDF4.Dataset(fname, \"w\", format=\"NETCDF4\")\n self.dataset.createDimension(\"obs\", data_shape[1])\n self.dataset.createDimension(\"traj\", data_shape[0])\n coords = (\"traj\", \"obs\")\n self.dataset.feature_type = \"trajectory\"\n self.dataset.Conventions = \"CF-1.6/CF-1.7\"\n self.dataset.ncei_template_version = \"NCEI_NetCDF_Trajectory_Template_v2.0\"\n self.dataset.parcels_version = parcels_version\n self.dataset.parcels_mesh = self.parcels_mesh\n return coords\n\n def _create_trajectory_records(self, coords):\n \"\"\"\n creates the NetCDF record structure of a trajectory.\n\n Attention:\n For ParticleSet structures other than SoA, and structures where ID != index, this has to be overridden.\n \"\"\"\n # Create ID variable according to CF conventions\n self.id = self.dataset.createVariable(\"trajectory\", \"i8\", coords, fill_value=-2**(63)) # minint64 fill_value\n self.id.long_name = \"Unique identifier for each particle\"\n self.id.cf_role = \"trajectory_id\"\n\n # Create time, lat, lon and z variables according to CF conventions:\n self.time = self.dataset.createVariable(\"time\", \"f8\", coords, fill_value=np.nan)\n self.time.long_name = \"\"\n self.time.standard_name = \"time\"\n if self.time_origin.calendar is None:\n self.time.units = \"seconds\"\n else:\n self.time.units = \"seconds since \" + str(self.time_origin)\n self.time.calendar = 'standard' if self.time_origin.calendar == 'np_datetime64' else self.time_origin.calendar\n self.time.axis = \"T\"\n\n if self.lonlatdepth_dtype is np.float64:\n lonlatdepth_precision = \"f8\"\n else:\n lonlatdepth_precision = \"f4\"\n\n if ('lat' in self.var_names):\n self.lat = self.dataset.createVariable(\"lat\", lonlatdepth_precision, coords, fill_value=np.nan)\n self.lat.long_name = \"\"\n self.lat.standard_name = \"latitude\"\n self.lat.units = \"degrees_north\"\n self.lat.axis = \"Y\"\n\n if ('lon' in self.var_names):\n self.lon = self.dataset.createVariable(\"lon\", lonlatdepth_precision, coords, fill_value=np.nan)\n self.lon.long_name = \"\"\n self.lon.standard_name = \"longitude\"\n self.lon.units = \"degrees_east\"\n self.lon.axis = \"X\"\n\n if ('depth' in self.var_names) or ('z' in self.var_names):\n self.z = self.dataset.createVariable(\"z\", lonlatdepth_precision, coords, fill_value=np.nan)\n self.z.long_name = \"\"\n self.z.standard_name = 
\"depth\"\n self.z.units = \"m\"\n self.z.positive = \"down\"\n\n for vname in self.var_names:\n if vname not in self._reserved_var_names():\n setattr(self, vname, self.dataset.createVariable(vname, \"f4\", coords, fill_value=np.nan))\n getattr(self, vname).long_name = \"\"\n getattr(self, vname).standard_name = vname\n getattr(self, vname).units = \"unknown\"\n\n for vname in self.var_names_once:\n setattr(self, vname, self.dataset.createVariable(vname, \"f4\", \"traj\", fill_value=np.nan))\n getattr(self, vname).long_name = \"\"\n getattr(self, vname).standard_name = vname\n getattr(self, vname).units = \"unknown\"\n\n def _create_metadata_records(self):\n for name, message in self.metadata.items():\n setattr(self.dataset, name, message)\n\n def __del__(self):\n if self.convert_at_end:\n self.close()\n\n def close(self, delete_tempfiles=True):\n \"\"\"Close the ParticleFile object by exporting and then deleting\n the temporary npy files\"\"\"\n self.export()\n mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0\n if mpi_rank == 0:\n if delete_tempfiles:\n self.delete_tempwritedir(tempwritedir=self.tempwritedir_base)\n self.convert_at_end = False\n\n def add_metadata(self, name, message):\n \"\"\"Add metadata to :class:`parcels.particleset.ParticleSet`\n\n :param name: Name of the metadata variabale\n :param message: message to be written\n \"\"\"\n if self.dataset is None:\n self.metadata[name] = message\n else:\n setattr(self.dataset, name, message)\n\n def dump_dict_to_npy(self, data_dict, data_dict_once):\n \"\"\"Buffer data to set of temporary numpy files, using np.save\"\"\"\n\n if not os.path.exists(self.tempwritedir):\n os.makedirs(self.tempwritedir)\n\n if len(data_dict) > 0:\n tmpfilename = os.path.join(self.tempwritedir, str(len(self.file_list)) + \".npy\")\n with open(tmpfilename, 'wb') as f:\n np.save(f, data_dict)\n self.file_list.append(tmpfilename)\n\n if len(data_dict_once) > 0:\n tmpfilename = os.path.join(self.tempwritedir, str(len(self.file_list)) + '_once.npy')\n with open(tmpfilename, 'wb') as f:\n np.save(f, data_dict_once)\n self.file_list_once.append(tmpfilename)\n\n @abstractmethod\n def get_pset_info_attributes(self):\n \"\"\"\n returns the main attributes of the pset_info.npy file.\n\n Attention:\n For ParticleSet structures other than SoA, and structures where ID != index, this has to be overridden.\n \"\"\"\n return None\n\n def dump_psetinfo_to_npy(self):\n \"\"\"\n function writes the major attributes and values to a pset information file (*.npy).\n \"\"\"\n pset_info = {}\n attrs_to_dump = self.get_pset_info_attributes()\n if attrs_to_dump is None:\n return\n for a in attrs_to_dump:\n if hasattr(self, a):\n pset_info[a] = getattr(self, a)\n with open(os.path.join(self.tempwritedir, 'pset_info.npy'), 'wb') as f:\n np.save(f, pset_info)\n\n def write(self, pset, time, deleted_only=False):\n \"\"\"Write all data from one time step to a temporary npy-file\n using a python dictionary. 
The data is saved in the folder 'out'.\n\n :param pset: ParticleSet object to write\n :param time: Time at which to write ParticleSet\n :param deleted_only: Flag to write only the deleted Particles\n \"\"\"\n\n data_dict, data_dict_once = pset.to_dict(self, time, deleted_only=deleted_only)\n self.dump_dict_to_npy(data_dict, data_dict_once)\n self.dump_psetinfo_to_npy()\n\n @abstractmethod\n def read_from_npy(self, file_list, time_steps, var):\n \"\"\"\n Read NPY-files for one variable using a loop over all files.\n\n Attention:\n For ParticleSet structures other than SoA, and structures where ID != index, this has to be overridden.\n\n :param file_list: List that contains all file names in the output directory\n :param time_steps: Number of time steps that were written in out directory\n :param var: name of the variable to read\n \"\"\"\n return None\n\n @abstractmethod\n def export(self):\n \"\"\"\n Exports outputs in temporary NPY-files to NetCDF file\n\n Attention:\n For ParticleSet structures other than SoA, and structures where ID != index, this has to be overridden.\n \"\"\"\n pass\n\n def delete_tempwritedir(self, tempwritedir=None):\n \"\"\"Deleted all temporary npy files\n\n :param tempwritedir Optional path of the directory to delete\n \"\"\"\n if tempwritedir is None:\n tempwritedir = self.tempwritedir\n if os.path.exists(tempwritedir):\n shutil.rmtree(tempwritedir)\n","sub_path":"parcels/particlefile/baseparticlefile.py","file_name":"baseparticlefile.py","file_ext":"py","file_size_in_byte":13398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"149307980","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\n\nimport time\nfrom datetime import datetime, timedelta\nfrom datetime import time as datetime_time\nfrom dateutil import relativedelta\n\nimport babel\n\nfrom odoo import api, fields, models, tools, _\nfrom odoo.addons import decimal_precision as dp\nfrom odoo.exceptions import UserError, ValidationError\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT,DEFAULT_SERVER_DATE_FORMAT\n\nclass hr_employee(models.Model):\n _inherit = 'hr.employee'\n\n warning_ids = fields.One2many('hr.warning', 'employee_id', string='Warnings')\n warnings_count = fields.Integer(compute='_compute_warning_count', string='Warnings')\n\n def _compute_warning_count(self):\n warning_data = self.env['hr.warning'].sudo().read_group([('employee_id', 'in', self.ids)], ['employee_id'], ['employee_id'])\n result = dict((data['employee_id'][0], data['employee_id_count']) for data in warning_data)\n for employee in self:\n employee.warnings_count = result.get(employee.id, 0)\n\n\nclass hr_warning_stage(models.Model):\n _name = 'hr.warning.stage'\n _description = 'Employees Warning Stages'\n\n name = fields.Char(string='Name', required=True)\n type = fields.Selection([('amount','Amount'), ('hour', 'Hours'), ('wage', 'Wage')], string='Type')\n fine = fields.Float(string='Fine', digits=(16,6))\n expiry_days = fields.Integer(string='Expiry After(Days)')\n stage_id = fields.Many2one('hr.warning.type', string='Type')\n company_id = fields.Many2one('res.company', string='Company', default=lambda self: self.env.user.company_id)\n\nclass hr_warning_type(models.Model):\n _name = 'hr.warning.type'\n _description = 'Employees Warning Types'\n _inherit = 'mail.thread'\n\n name = fields.Char(string='Name', track_visibility='onchange', required=True)\n stage_ids = fields.One2many('hr.warning.stage', 
'stage_id', string='Stages')\n company_id = fields.Many2one('res.company', string='Company', default=lambda self: self.env.user.company_id)\n\nclass hr_warning(models.Model):\n _name = 'hr.warning'\n _description = 'Employees Warning'\n _inherit = 'mail.thread'\n\n @api.depends('paid', 'amount')\n def _balance(self):\n for line in self:\n line.balance = line.amount - line.paid\n\n name = fields.Char(string='Description', readonly=True, track_visibility='onchange', states={'new': [('readonly', False)]}, required=True)\n type = fields.Many2one('hr.warning.type', string='Type', readonly=True, track_visibility='onchange', states={'new': [('readonly', False)]}, required=True)\n employee_id = fields.Many2one('hr.employee', string='Employee', readonly=True, track_visibility='onchange', states={'new': [('readonly', False)]}, required=True)\n date = fields.Date(string='Date', default=fields.Date.today, readonly=True, track_visibility='onchange', copy=False, states={'new': [('readonly', False)]}, required=True)\n amount = fields.Float(string='Amount', digits=dp.get_precision('Payroll'), readonly=True, track_visibility='onchange', copy=False)\n paid = fields.Float(string='Paid', digits=dp.get_precision('Payroll'), readonly=True, track_visibility='onchange', copy=False)\n balance = fields.Float(compute='_balance', string='Amount Due', digits=dp.get_precision('Payroll'), readonly=True, copy=False)\n expiry_date = fields.Date(string='Expiry Date', readonly=True, copy=False)\n state = fields.Selection([('new', 'New'), ('open', 'Running'), ('expire', 'Expired'), ('reject', 'Rejected'), ('cancel','Cancelled')], string='State', default='new', readonly=True, track_visibility='onchange', copy=False)\n company_id = fields.Many2one('res.company', string='Company', default=lambda self: self.env.user.company_id)\n\n @api.model\n def check_expire(self):\n records = self.search(['|',('expiry_date','<',datetime.today().strftime(DEFAULT_SERVER_DATE_FORMAT)), ('expiry_date','=',False)])\n for record in records:\n record.state = 'expire'\n return True\n\n @api.multi\n def action_reject(self):\n for rec in self:\n if rec.state != 'new':\n raise UserError(_(\"Only a new warning can be rejected.\"))\n rec.write({'state': 'reject'})\n return True\n\n @api.multi\n def action_draft(self):\n for rec in self:\n if rec.state not in ('reject', 'cancel'):\n raise UserError(_(\"Only a cancelled or rejected warning can be set to new.\"))\n rec.write({'state': 'new'})\n return True\n\n @api.multi\n def action_cancel(self):\n for rec in self:\n if rec.state != 'new':\n raise UserError(_(\"Only a new warning can be cancelled.\"))\n rec.write({'state': 'cancel'})\n return True\n\n @api.multi\n def action_confirm(self):\n for rec in self:\n if rec.state != 'new':\n raise UserError(_(\"Only a new warning can be confirmed.\"))\n rec.write({'state': 'open'})\n amount = 0\n warnings = self.search([('state','=','open'), ('type', '=', rec.type.id), ('employee_id', '=', rec.employee_id.id)])\n count = len(warnings) - 1\n if 0 <= count < len(rec.type.stage_ids):\n if rec.type.stage_ids[count].type=='amount':\n amount = rec.type.stage_ids[count].fine\n elif rec.type.stage_ids[count].type=='hour':\n amount = rec.employee_id.contract_id.wage*((12/365)/8)*rec.type.stage_ids[count].fine\n elif rec.type.stage_ids[count].type=='wage':\n amount = rec.employee_id.contract_id.wage*rec.type.stage_ids[count].fine/100\n rec.expiry_date = fields.Date.from_string(rec.date) + timedelta(days=rec.type.stage_ids[count].expiry_days)\n rec.amount = 
amount\n return True\n\n @api.multi\n def unlink(self):\n if any(rec.state in ('open') for rec in self):\n raise UserError(_('It is not allowed to delete a warning that already confirmed.'))\n return super(hr_warning, self).unlink()\n\nclass hr_payslip(models.Model):\n _inherit = 'hr.payslip'\n\n @api.multi\n def action_payslip_done(self):\n for payslip in self:\n amount = 0\n for line in payslip.line_ids:\n if line.code == 'WA':\n amount += line.amount\n warnings = self.env['hr.warning'].search([('employee_id','=',self.employee_id.id), ('state','in',['open','expire']), ('balance','>',0)])\n for warning in warnings:\n if warning.balance>=(amount*-1):\n warning.paid += (amount*-1)\n amount = 0\n elif warning.balance<(amount*-1):\n warning.paid += warning.balance\n amount += warning.balance\n return super(hr_payslip, self).action_payslip_done()\n\n @api.model\n def get_inputs(self, contracts, date_from, date_to):\n res = super(hr_payslip, self).get_inputs(contracts, date_from, date_to)\n warnings = self.env['hr.warning'].search([('employee_id','=',self.employee_id.id), ('balance','>',0), ('state','in',['open','expire'])])\n warning = 0\n for w in warnings:\n warning += w.amount\n res += [{'name': 'Warning', 'code': 'Warning', 'contract_id': self.contract_id.id, 'amount':warning*-1}]\n return res\n","sub_path":"hr_warning/models/hr_warning.py","file_name":"hr_warning.py","file_ext":"py","file_size_in_byte":7449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"154127368","text":"#!/usr/bin/python3\n\"\"\" Square Module \"\"\"\n\n\nclass Square:\n \"\"\" This is a Square Class \"\"\"\n def __init__(self, size=0, position=(0, 0)):\n \"\"\" instantiation of square with size & position \"\"\"\n self.size = size\n self.position = position\n\n @property\n def size(self):\n \"\"\" returns size variable of Square class instance \"\"\"\n return self.__size\n\n @size.setter\n def size(self, value):\n \"\"\" sets size variable of Square class instance \"\"\"\n if type(value) != int:\n raise TypeError('size must be an integer')\n if value < 0:\n raise ValueError('size must be >= 0')\n self.__size = value\n\n @property\n def position(self):\n \"\"\" returns position variable of Square class instance \"\"\"\n return self.__position\n\n @position.setter\n def position(self, value):\n \"\"\" sets position variable of Square class instance \"\"\"\n if (type(value) != tuple or len(value) != 2 or\n type(value[0]) != int or value[0] < 0 or\n type(value[1]) != int or value[1] < 0):\n raise TypeError('position must be a tuple of 2 positive integers')\n self.__position = value\n\n def area(self):\n \"\"\" returns area of a square \"\"\"\n return self.__size**2\n\n def my_print(self):\n \"\"\" prints a square of '#' \"\"\"\n if self.__size == 0:\n print()\n else:\n if (self.__position):\n for i in range(self.__position[1]):\n print()\n for i in range(self.__size):\n print(' ' * self.__position[0], end='')\n print('#' * self.__size)\n else:\n for i in range(self.__size):\n print('#' * self.__size)\n\n def __str__(self):\n \"\"\" converts square to string of '#' \"\"\"\n s = ''\n if self.__size != 0:\n if (self.__position):\n for i in range(self.__position[1]):\n s += '\\n'\n for i in range(self.__size):\n s += str(' ' * self.__position[0])\n s += str(('#' * self.__size) + '\\n')\n s = s[:-1]\n else:\n for i in range(self.__size):\n s += str(('#' * self.__size) + '\\n')\n s = s[:-1]\n return 
s\n","sub_path":"0x07-python-classes/101-square.py","file_name":"101-square.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"180298355","text":"import time\nimport unittest\n\n\nclass TestCaseListClearSearchT426(unittest.TestCase):\n \"\"\"\n User shall be able to clear a search filter to display all patients\n \"\"\"\n def setUp(self):\n self.page = get_page(myRemote, myBrowser, myUrl)\n\n def case_list_clear_search(self, page):\n cant_cases_before = page.get_cant_cases()\n page.filter_by_text(\"fake filter text\")\n assert page.no_patients_match_is_displayed()\n page.filter_by_text(\"\")\n cant_cases_after = page.get_cant_cases()\n assert page.get_filter_text() == \"\"\n assert cant_cases_after == cant_cases_before\n\n\n def test_case_list_clear_search_as_customer_T426(self):\n page_cases = self.page.login_as_customer()\n self.case_list_clear_search(page_cases)\n\n def test_case_list_clear_search_as_customer_site_admin_T426(self):\n page_cases = self.page.login_as_customer_site_admin()\n self.case_list_clear_search(page_cases)\n\n def tearDown(self):\n time.sleep(1)\n self.page.stop()\n\nif __name__ == '__main__':\n if __package__ is None:\n import sys\n from os import path\n sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n from Pages.utilities import get_page, arg_parse\n else:\n from ..Pages.utilities import get_page, arg_parse\n myRemote, myBrowser, myUrl = arg_parse(sys.argv)\n unittest.main()\n","sub_path":"frontend/tests/functional/clinical_ui/test_case_list_clear_search_T426.py","file_name":"test_case_list_clear_search_T426.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"144932608","text":"from django import template\nfrom django.template.defaultfilters import stringfilter\nimport re\nregister = template.Library()\n\n@register.filter(name='cleanTypes')\ndef cleanTypes(string):\n string = string.split(\"_\")\n cleaned_value = \"\"\n for data in string:\n cleaned_value += data.title() + \" \"\n return cleaned_value\n\n@register.filter(name='eveWhoConverter')\ndef eveWhoConverter(string):\n string = str(string)\n string = string.replace(' ', '+')\n url = \"https://evewho.com/pilot/\" + string\n return url\n","sub_path":"app/games/eveonline/templatetags/eve_filters.py","file_name":"eve_filters.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"106546731","text":"from sklearn import metrics\nimport numpy as np\nimport os\n\n\ndef compute_eer(target_scores, nontarget_scores):\n \"\"\"Calculate EER following the same way as in Kaldi.\n\n Args:\n target_scores (array-like): sequence of scores where the\n label is the target class\n nontarget_scores (array-like): sequence of scores where the\n label is the non-target class\n Returns:\n eer (float): equal error rate\n threshold (float): the value where the target error rate\n (the proportion of target_scores below\n threshold) is equal to the non-target\n error rate (the proportion of nontarget_scores\n above threshold)\n \"\"\"\n assert len(target_scores) != 0 and len(nontarget_scores) != 0\n tgt_scores = sorted(target_scores)\n nontgt_scores = sorted(nontarget_scores)\n\n target_size = float(len(tgt_scores))\n nontarget_size = len(nontgt_scores)\n target_position = 0\n for target_position, tgt_score in 
enumerate(tgt_scores[:-1]):\n nontarget_n = nontarget_size * target_position / target_size\n nontarget_position = int(nontarget_size - 1 - nontarget_n)\n if nontarget_position < 0:\n nontarget_position = 0\n if nontgt_scores[nontarget_position] < tgt_score:\n break\n threshold = tgt_scores[target_position]\n eer = target_position / target_size\n return eer, threshold\n\n\ndef get_metrics(prediction, label):\n \"\"\"Calculate several metrics for a binary classification task.\n\n Args:\n prediction (array-like): sequence of probabilities\n e.g. [0.1, 0.4, 0.35, 0.8]\n labels (array-like): sequence of class labels (0 or 1)\n e.g. [0, 0, 1, 1]\n Returns:\n auc: area-under-curve\n eer: equal error rate\n \"\"\" # noqa: H405, E261\n assert len(prediction) == len(label), (len(prediction), len(label))\n fpr, tpr, thresholds = metrics.roc_curve(label, prediction, pos_label=1)\n auc = metrics.auc(fpr, tpr)\n # from scipy.optimize import brentq\n # from scipy.interpolate import interp1d\n # fnr = 1 - tpr\n # eer = brentq(lambda x : 1. - x - interp1d(fpr, tpr)(x), 0., 1.)\n\n eer, thres = compute_eer(\n [pred for i, pred in enumerate(prediction) if label[i] == 1],\n [pred for i, pred in enumerate(prediction) if label[i] == 0],\n )\n return auc, eer\n\ndef ACC(pred, truth):\n return np.sum(pred == truth) / len(truth)\n\ndef evalPrint(pred, truth, message: str='', logfile=None):\n auc, eer = get_metrics(pred, truth)\n acc = ACC(pred, truth)\n log = message + 'auc = {}, eer = {}, acc = {}'.format(auc,eer,acc) + '\\n'\n print(log)\n if logfile is not None:\n with open(logfile, 'a') as f:\n f.write(log)\n \n\nif __name__ == \"__main__\":\n # first argument: the model predictions (either probabilities or binary decisions)\n # second argument: the corresponding ground-truth labels\n print(get_metrics([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]))\n # note: compute the final metrics over the whole dataset rather than per sample:\n # merge the frame-level predictions of all utterances into one list, merge the\n # corresponding labels into another list, then call get_metrics on those lists\n","sub_path":"utils/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"269036407","text":"from collections import defaultdict\r\nfrom prettytable import PrettyTable\r\nfrom Fileread import filereader\r\nimport os\r\nimport unittest\r\n\r\n\r\nclass Repositor:\r\n\r\n    def __init__(self,paths):\r\n        self.paths=paths\r\n        self.st=dict()\r\n        self.ins=dict()\r\n    \r\n    def student(self,path):\r\n        stpath=os.path.join(self.paths,path)\r\n\r\n        for cwid,name,major in filereader(stpath,3,sep='\\t',header=False):\r\n            if cwid in self.st:\r\n                raise Exception('Warning : cwid {} already read from the file'.format(cwid))\r\n            else:\r\n                self.st[cwid]=Student(cwid,name,major)\r\n\r\n    def instuctor(self,path):\r\n        inspath=os.path.join(self.paths,path)\r\n\r\n        for cwid,name,dept in filereader(inspath,3,sep='\\t',header=False):\r\n            if cwid in self.ins:\r\n                raise Exception('Warning : cwid {} already read from the file'.format(cwid))\r\n            else:\r\n                self.ins[cwid]=Instuctor(cwid,name,dept)\r\n\r\n    def grade(self,path):\r\n        gradepath=os.path.join(self.paths,path)\r\n\r\n        for stcwid,course,grade,inscwid in filereader(gradepath,4,sep='\\t',header=False):\r\n\r\n            if stcwid in self.st:\r\n                self.st[stcwid].add_course(course,grade)\r\n            else:\r\n                raise Exception('Warning : student cwid {} not found in the student file'.format(stcwid))\r\n\r\n            if inscwid in self.ins:\r\n                self.ins[inscwid].add_coursenum(course)\r\n            else:\r\n                raise Exception('Warning : instructor cwid {} not found in the instructor file'.format(inscwid))\r\n    \r\n    def stpt(self):\r\n        
stpt=PrettyTable(field_names=['cwid','name','completed course'])\r\n\r\n for key in self.st:\r\n stpt.add_row([key,self.st[key].name,list(self.st[key].cour.items())])\r\n print(stpt)\r\n \r\n def inspt(self):\r\n inspt=PrettyTable(field_names=['cwid','name','dept','course','student'])\r\n\r\n for key in self.ins:\r\n for key1 in self.ins[key].cournum:\r\n inspt.add_row([key,self.ins[key].name,self.ins[key].dept,key1,self.ins[key].cournum[key1]])\r\n print(inspt)\r\n\r\n\r\nclass Student:\r\n\r\n def __init__(self,cwid,name,major):\r\n self.cwid=cwid\r\n self.name=name\r\n self.major=major\r\n self.cour=dict()\r\n\r\n def add_course(self,course,grade):\r\n self.cour[course]=grade\r\n\r\n def stptstring(self):\r\n return [self.cwid,self.name,self.major,sorted(self.cour.items())]\r\n \r\n\r\nclass Instuctor:\r\n \r\n def __init__(self,cwid,name,dept):\r\n self.cwid=cwid\r\n self.name=name\r\n self.dept=dept\r\n self.cournum=defaultdict(int)\r\n\r\n def add_coursenum(self,course):\r\n self.cournum[course]+=1\r\n\r\n def insstring(self,course,student):\r\n return [self.cwid,self.name,self.dept,course,student]\r\n\r\n\r\nclass RepositorTest(unittest.TestCase):\r\n def testrepositor(self):\r\n stevens=Repositor(r'C:\\Users\\wangd\\学习和作业需要\\810\\week9资料')\r\n stevens.student('students.txt')\r\n stevens.instuctor('instructors.txt')\r\n stevens.grade('grades.txt')\r\n stevens.inspt()\r\n stevens.stpt()\r\n student_details=[s.stptstring() for s in stevens.st.values()]\r\n instucroe_details=[i.insstring(course,student) for i in stevens.ins.values() for course,student in i.cournum.items() ]\r\n students=[['10103', 'Baldwin, C', 'SFEN', [('CS 501', 'B'), ('SSW 564', 'A-'), ('SSW 567', 'A'), ('SSW 687', 'B')]],\\\r\n ['10115', 'Wyatt, X', 'SFEN', [('CS 545', 'A'), ('SSW 564', 'B+'), ('SSW 567', 'A'), ('SSW 687', 'A')]], \\\r\n ['10172', 'Forbes, I', 'SFEN', [('SSW 555', 'A'), ('SSW 567', 'A-')]],\\\r\n ['10175', 'Erickson, D', 'SFEN', [('SSW 564', 'A'), ('SSW 567', 'A'), ('SSW 687', 'B-')]],\\\r\n ['10183', 'Chapman, O', 'SFEN', [('SSW 689', 'A')]],\\\r\n ['11399', 'Cordova, I', 'SYEN', [('SSW 540', 'B')]],\\\r\n ['11461', 'Wright, U', 'SYEN', [('SYS 611', 'A'), ('SYS 750', 'A-'), ('SYS 800', 'A')]],\\\r\n ['11658', 'Kelly, P', 'SYEN', [('SSW 540', 'F')]],\\\r\n ['11714', 'Morton, A', 'SYEN', [('SYS 611', 'A'), ('SYS 645', 'C')]],\\\r\n ['11788', 'Fuller, E', 'SYEN', [('SSW 540', 'A')]]]\r\n instuctor=[['98765', 'Einstein, A', 'SFEN', 'SSW 567', 4],\\\r\n ['98765', 'Einstein, A', 'SFEN', 'SSW 540', 3], \\\r\n ['98764', 'Feynman, R', 'SFEN', 'SSW 564', 3],\\\r\n ['98764', 'Feynman, R', 'SFEN', 'SSW 687', 3],\\\r\n ['98764', 'Feynman, R', 'SFEN', 'CS 501', 1],\\\r\n ['98764', 'Feynman, R', 'SFEN', 'CS 545', 1],\\\r\n ['98763', 'Newton, I', 'SFEN', 'SSW 555', 1],\\\r\n ['98763', 'Newton, I', 'SFEN', 'SSW 689', 1],\\\r\n ['98760', 'Darwin, C', 'SYEN', 'SYS 800', 1],\\\r\n ['98760', 'Darwin, C', 'SYEN', 'SYS 750', 1],\\\r\n ['98760', 'Darwin, C', 'SYEN', 'SYS 611', 2],\\\r\n ['98760', 'Darwin, C', 'SYEN', 'SYS 645', 1]]\r\n self.assertEqual(student_details,students)\r\n self.assertEqual(instucroe_details,instuctor)\r\n\r\n\r\nif __name__ == '__main__':\r\n # note: there is no main(). 
Only test cases here\r\n unittest.main(exit=False, verbosity=2)\r\n\r\n","sub_path":"week9.1-Boyu Wang.py","file_name":"week9.1-Boyu Wang.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"556265156","text":"# -*- coding: utf-8 -*-\n# !/usr/bin/env python\n# @Time : 2019-08-09 14:35\n# @Author : lidong@immusician.com\n# @Site :\n# @File : ai-abtesting.py\n\nimport ujson\nfrom UnitTest.base import BaseRequest\n\n\nclass AIABTesting(BaseRequest):\n def __init__(self):\n # TODO: 记得修改\n # super().__init__(host=\"http://api.iguitar.immusician.com\", port=2525)\n super().__init__(port=22222)\n\n def upload_event_data(self):\n data = {\n \"data\": [\n {\n \"event_id\": \"5d494b8e191e7529d62b4add\",\n \"uid\": 1,\n \"create_time\": 10,\n \"item_id\": '1',\n \"item_type\": 1,\n \"duration\": None\n },\n {\n \"event_id\": \"5d494b8e191e7529d62b4add\",\n \"uid\": 1,\n \"create_time\": 10,\n \"item_id\": '1',\n \"item_type\": 1,\n \"duration\": None\n },\n {\n \"event_id\": \"5d494b8e191e7529d62b4add\",\n \"uid\": 1,\n \"create_time\": 10,\n \"item_id\": '1',\n \"item_type\": 1,\n \"duration\": None\n },\n ]\n }\n # data = {\"data\": \"a\"}\n url = \"/v3/abtesting/upload_event_data/\"\n ret = self.get(url, json=data)\n self._show_data(\"upload_event_data\", url, ret)\n\n def get_all_event(self):\n url = \"/v3/abtesting/get_all_event/\"\n ret = self.get(url)\n self._show_data(\"get_all_event\", url, ret)\n\n def create_event(self):\n data = {\n \"event_name\": \"点击 vip特权 按钮\",\n \"event_type\": 1,\n \"event_need_params\": {\n \"item_id\": {\n \"required\": False,\n \"value_type\": \"str\",\n \"description\": \"该类型的id\",\n \"default\": \"\"\n },\n \"item_type\": {\n \"required\": True,\n \"value_type\": \"int\",\n \"description\": \"类型(按钮)\",\n \"default\": 4\n },\n \"event_id\": {\n \"required\": True,\n \"value_type\": \"str\",\n \"description\": \"当前事件id\"\n },\n \"create_time\": {\n \"required\": True,\n \"value_type\": \"int\",\n \"description\": \"创建时间\"\n },\n \"uid\": {\n \"required\": True,\n \"value_type\": \"int\",\n \"description\": \"用户id\"\n },\n \"duration\": {\n \"required\": False,\n \"value_type\": \"int\",\n \"description\": \"时长\"\n },\n }\n }\n url = \"/v3/abtesting/create_event/\"\n ret = self.post(url, json=data)\n self._show_data(\"create_event\", url, ret)\n\n def upload_event_data_single(self):\n data = {\n \"event_id\": \"5d494b8e191e7529d62b4add\",\n \"uid\": 1,\n \"create_time\": 10,\n \"item_id\": \"\",\n \"item_type\": 1,\n # \"duration\": None\n }\n url = \"/v3/abtesting/upload_event_data/\"\n ret = self.get(url, json=data)\n self._show_data(\"upload_event_data_single\", url, ret)\n\n\nif __name__ == '__main__':\n # AIABTesting().run()\n AIABTesting().create_event()\n # AIABTesting().get_all_event()\n # AIABTesting().upload_event_data()\n AIABTesting().upload_event_data_single()\n","sub_path":"UnitTest/ai-abtesting.py","file_name":"ai-abtesting.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"327637860","text":"#coding=utf-8\nimport Image \nimport ImageEnhance \nimport ImageFilter\nimport Queue \nimport preprocessor\n\ndef split(pic, block_array):\n\tpic_deal = pic.copy()\n\tpic_visited_dict = {}\n\tloop = 0 \n\twhile loop < 10: \n\t\tblock = preprocessor.get_block(pic_deal, pic_visited_dict)\n\t\tblock_array.append(block)\n\t\tif len(pic_visited_dict) == 
preprocessor.get_pic_black_pixel_number(pic):\n\t\t\tbreak\n\t\tloop += 1\n\n\nif __name__ == '__main__':\n\tpic = Image.open('../../pics/dangtianjinrong/pics_orignal/0003.jpg')\n\tpic_preprocessed = preprocessor.preprocess(pic)\n\tblock_array = []\n\tsplit(pic_preprocessed, block_array)\n\tfor i in xrange(len(block_array)):\n\t\tblock_array[i].save('test_spliter_block_' + str(i) + '.jpg')\n\t\n","sub_path":"p2p_sign/recognizer_dtjr/spliter.py","file_name":"spliter.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"259587556","text":"import logging\nfrom logging import handlers\nfrom os import mkdir\nfrom os.path import isdir\n\nif not isdir(\"logs\"):\n    mkdir(\"logs\")\n\nlog_format = \"{%(levelname)s}[%(asctime)s]: %(name)s | %(message)s\"\n\nlogging.basicConfig(\n    format=log_format,\n    level=logging.INFO\n)\nlogger = logging.getLogger(\"TelegramEDT\")\nhandler = handlers.TimedRotatingFileHandler(\"logs/current.log\", when=\"d\", interval=1)\nhandler.suffix = \"%Y-%m-%d\"\nhandler.style = log_format\nhandler.setFormatter(logging.Formatter(log_format))\nlogger.addHandler(handler)\n","sub_path":"TelegramEDT/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"308278777","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov  3 01:43:13 2018\n\n@author: USER\n\"\"\"\n\nimport cv2\n#import numpy as np\n\ncamera = cv2.VideoCapture(0) # argument 0 selects the first camera\nbs = cv2.createBackgroundSubtractorKNN(detectShadows=True)\nes = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 7))\nes_1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))\n\nwhile True:\n    grabbed, frame_lwpCV = camera.read()\n    frame_lwpCV = cv2.resize(frame_lwpCV, None, fx = 1.5, fy = 1.5, interpolation = cv2.INTER_AREA)\n    fgmask = bs.apply(frame_lwpCV) # background subtractor: this call computes the foreground mask\n\n    th = cv2.threshold(fgmask, 240, 255, cv2.THRESH_BINARY)[1]\n    blurred = cv2.medianBlur(th, 3)\n    \n    dilated = cv2.dilate(blurred, es_1, iterations=4) \n    erode = cv2.erode(dilated, es, iterations=1)\n    \n    image, contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # find the contours of the objects in the image\n    for c in contours:\n        if cv2.contourArea(c) > 30000:\n            (x, y, w, h) = cv2.boundingRect(c)\n            cv2.rectangle(frame_lwpCV, (x, y), (x + w, y + h), (255, 255, 0), 2)\n            cv2.putText(frame_lwpCV, \"object\", (y, x), cv2.FONT_HERSHEY_SIMPLEX, 1, (223, 199, 22), 2)\n\n    cv2.imshow('blurred', blurred)\n    cv2.imshow('dilated', dilated)\n    cv2.imshow('detection', frame_lwpCV)\n    key = cv2.waitKey(1) & 0xFF\n    # press 'q' to exit the loop\n    if key == ord('q'):\n        break\n# When everything done, release the capture\ncamera.release()\ncv2.destroyAllWindows()\n","sub_path":"OpenCV/BackgroundSubtractor.py","file_name":"BackgroundSubtractor.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"28639613","text":"import sys\nimport importlib\nimport typing\n\n\nclass _HookedModule:\n\n\tdef __call__(self, *args, **kwargs) -> None:\n\t\treturn self.main_module(*args, **kwargs)\n\n\t@property\n\tdef main_module(self) -> typing.Any:\n\t\tname = self.__name__\n\t\treturn sys.modules[name].__getattribute__(name.split(\".\").pop())\n\n\nclass _IocageModule(sys.modules[\"iocage\"].__class__):\n\n\thooked_modules = 
[\n\t\t\"Host\",\n\t\t\"Distribution\",\n\t\t\"Jails\",\n\t\t\"Jail\",\n\t\t\"Releases\",\n\t\t\"Release\"\n\t]\n\n\tdef __getattribute__(self, key: str) -> typing.Any:\n\t\tif key.startswith(\"_\") is True:\n\t\t\treturn super().__getattribute__(key)\n\n\t\tif key not in sys.modules.keys():\n\t\t\tif key in object.__getattribute__(self, \"hooked_modules\"):\n\t\t\t\tself.__load_hooked_module(key)\n\t\t\telse:\n\t\t\t\tself.__load_module(key)\n\t\treturn super().__getattribute__(key)\n\n\tdef __load_module(self, name: str) -> None:\n\t\tmodule = importlib.import_module(f\"iocage.{name}\")\n\t\tsys.modules[name] = module\n\n\tdef __load_hooked_module(self, name: str) -> None:\n\t\tmodule = importlib.import_module(f\"iocage.{name}\")\n\t\tsys.modules[name] = self.__hook_module(module)\n\n\tdef __hook_module(self, module: typing.Any) -> None:\n\n\t\tclass _Module(module.__class__, _HookedModule):\n\n\t\t\tpass\n\n\n\t\tmodule.__class__ = _Module\n\t\treturn module\n\n\nsys.modules[\"iocage\"].__class__ = _IocageModule\n","sub_path":"iocage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"131864956","text":"from app import db\n\n# ================== POSTS TABLE ==================\n\nclass Post(db.Model):\n\n\t__tablename__ = \"posts\"\n\n\tkey = db.Column(db.Integer, nullable=False, unique=True, autoincrement=True, primary_key=True)\n\ttitle = db.Column(db.String, nullable=False)\n\tlink = db.Column(db.String, nullable=False)\n\tdate_posted = db.Column(db.DateTime(timezone=False), nullable=False)\n\tsite = db.Column(db.String, nullable=False)\n\torigin = db.Column(db.String, nullable=False)\n\torigin_airport = db.Column(db.String, nullable=True)\n\tdestination = db.Column(db.String, nullable=False)\n\tdestination_airport = db.Column(db.String, nullable=True)\n\tcarrier = db.Column(db.String, nullable=True)\n\tprice = db.Column(db.Integer, nullable=False)\n\tticket_type = db.Column(db.String, nullable=True)\n\tcurrency = db.Column(db.String, nullable=True)\n\treverse = db.Column(db.String, nullable=True)\n\tflights = db.Column(db.JSON, nullable=True)\n\taverage_route_price = db.Column(db.Integer, nullable=True)\n\n\tdef __init__(self, key, title, link, date_posted, site, origin, origin_airport, destination, destination_airport, carrier, price, ticket_type, currency, reverse, flights, average_route_price):\n\t\tself.key = key\n\t\tself.title = title\n\t\tself.link = link\n\t\tself.date_posted = date_posted\n\t\tself.site = site\n\t\tself.origin = origin\n\t\tself.origin_airport = origin_airport\n\t\tself.destination = destination\n\t\tself.destination_airport = destination_airport\n\t\tself.carrier = carrier\n\t\tself.price = price\n\t\tself.ticket_type = ticket_type\n\t\tself.currency = currency\n\t\tself.reverse = reverse\n\t\tself.flights = flights\n\t\tself.average_route_price = average_route_price\n\n\n# ================== FLIGHTS TABLE ==================\n\nclass Flight(db.Model):\n\n\t__tablename__ = \"flights\"\n\n\tkey = db.Column(db.Integer, nullable=False, autoincrement=True, primary_key=True)\n\tpost_id = db.Column(db.Integer, nullable=False) # Foreign key is the post key??\n\tpost_link = db.Column(db.String, nullable=False)\n\toutbound_origin = db.Column(db.String, nullable=False)\n\toutbound_destination = db.Column(db.String, nullable=False)\n\toutbound_carriers = db.Column(db.String, nullable=False)\n\toutbound_date = 
db.Column(db.DateTime(timezone=False), nullable=False)\n\tinbound_origin = db.Column(db.String, nullable=False)\n\tinbound_destination = db.Column(db.String, nullable=False)\n\tinbound_carriers = db.Column(db.String, nullable=False)\n\tinbound_date = db.Column(db.DateTime(timezone=False), nullable=False)\n\tquote_time = db.Column(db.DateTime(timezone=False), nullable=False)\n\tquote_price = db.Column(db.Integer, nullable=False)\n\tquote_currency = db.Column(db.String, nullable=False)\n\taverage_route_price = db.Column(db.Integer, nullable=True)\n\tbooking_link = db.Column(db.String, nullable=True)\n\n\tdef __init__(self, key, post_id, post_link, outbound_origin, outbound_destination, outbound_carriers, outbound_date, inbound_origin, inbound_destination, inbound_carriers, inbound_date, quote_time, quote_price, quote_currency, average_route_price, booking_link):\n\n\t\tself.key = key\n\t\tself.post_id = post_id\n\t\tself.post_link = post_link\n\t\tself.outbound_origin = outbound_origin\n\t\tself.outbound_destination = outbound_destination\n\t\tself.outbound_carriers = outbound_carriers\n\t\tself.outbound_date = outbound_date\n\t\tself.inbound_origin = inbound_origin\n\t\tself.inbound_destination = inbound_destination\n\t\tself.inbound_carriers = inbound_carriers\n\t\tself.inbound_date = inbound_date\n\t\tself.quote_time = quote_time\n\t\tself.quote_price = quote_price\n\t\tself.quote_currency = quote_currency\n\t\tself.average_route_price = average_route_price\n\t\tself.booking_link = booking_link\n\n\n\n\n\n\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"139434646","text":"def selectionSort(a):\n    # i : 0 ~ len(a) - 2\n    for i in range(len(a) - 1): # 0, 1, 2, 3\n        # find the index of the minimum value\n        min_idx = i\n        for j in range(i+1, len(a)):\n            if a[min_idx] > a[j]:\n                min_idx = j\n        a[i], a[min_idx] = a[min_idx], a[i]\n\narr = [64, 25, 10, 22, 11]\nselectionSort(arr)\nprint(arr)","sub_path":"python_bms/ALGORITHMS/SWEA/알고리즘수업/8월월말평가대비/선택정렬.py","file_name":"선택정렬.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"435637465","text":"import urllib.parse\nimport urllib.request\nfrom lxml import etree\nfrom bs4 import BeautifulSoup\nimport logging\nimport re\n\ncount_part = 1\nfile_log = './log_franch_1c.log'\nurl_part = 'http://1c.ru/rus/partners/franch-citylist.jsp?partv8=120'\nprint(url_part + str(count_part))\nlogging.basicConfig(level=logging.DEBUG, filename=file_log)\nreq = urllib.request.Request(url=url_part, headers={\n    'User-Agent': ' Mozilla/6.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/18.0'})\nhandler = urllib.request.urlopen(req, timeout=120)\nsoup = BeautifulSoup(handler.read(), 'html.parser')\nnn = soup.find_all('a', style=\"color: #333333;\")\nfor i in nn:\n    print(i['href'])\n    print(i.parent)\n    try:\n        name = i.parent.find('a', href=re.compile(\"franch.*\")).string\n        print(name)\n    except Exception:\n        name = i.parent.parent.find('a', href=re.compile(\"franch.*\")).string\n        print(name)","sub_path":"backup_ihor.py","file_name":"backup_ihor.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}