diff --git "a/4723.jsonl" "b/4723.jsonl" new file mode 100644--- /dev/null +++ "b/4723.jsonl" @@ -0,0 +1,590 @@ +{"seq_id":"570516790","text":"c=0\nn=[0]*300\nn2=[0]*300\nn[0]=int(input('numero do empregado:'))\nn2[0]=int(input('Meses de trabalho:'))\nrecente1=1\nrecente2=2\nrecente3=3\nwhile n[c]!=0 or n2[c]!=0:\n c=1\n if n2[c] 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect(f\"/books/{num}\")\n else:\n uid = int(request.session['user_id'])\n Book.objects.filter(id=num).update(title=request.POST['title'], desc=request.POST['description'])\n return redirect(f\"/books/{num}\")\n except:\n return redirect('/login')\n\ndef delete(request, num):\n# try:\n uid = int(request.session['user_id'])\n Book.objects.get(id=num).delete()\n return redirect(\"/books\")\n# except:\n return redirect('/login')\n\ndef fadd(request, num):\n try:\n uid = int(request.session['user_id'])\n book = Book.objects.get(id=num)\n user = User.objects.get(id=uid)\n user.favorite_books.add(book)\n return redirect(f'/books/{num}')\n except:\n return redirect('/login')\n\ndef remove_favorite(request, num):\n# try:\n uid = int(request.session['user_id'])\n book = Book.objects.get(id=num)\n User.objects.get(id=uid).favorite_books.remove(book)\n return redirect(f\"/books/{num}\")\n# except:\n return redirect('/login')","sub_path":"django/django_full_stack/DojoReads/apps/books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"637058321","text":"def countArrangement(self, N):\n def count(i, X, memo={}):\n if i < 2:\n return 1\n if X not in memo:\n memo[X] = sum(count(i - 1, X - {x})\n for x in X\n if x % i == 0 or i % x == 0)\n return memo[X]\n return count(N, frozenset(range(1, N + 1)))\n","sub_path":"problems/526.Beautiful_Arrangement/stefan.py","file_name":"stefan.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"168825825","text":"import torch\nimport torch.nn.functional as F\nfrom torch_geometric.data.dataloader import DataLoader\nfrom dataset.rescue_dataset import RescueDataset\nfrom dataset.inmemory_rescue_dataset import InMemoryRescueDataset\nfrom dataset.rescue_dataset_list import RescueDatasetList\nfrom models.topk_model import TopKNet\nimport test_model\n\n# parameters\nnode_classification = False\nbatch_size = 256\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef train():\n model.train()\n total_loss = 0\n total_graph = 0\n for data in train_loader:\n optimizer.zero_grad()\n data = data.to(device)\n output = model(data)\n if node_classification:\n class_weight = torch.tensor([data.num_graphs/data.num_nodes, (data.num_nodes-data.num_graphs)/data.num_nodes], device=device)\n loss = F.cross_entropy(output, data.y, weight=class_weight)\n # loss = soft_assignment_loss(output, data, device)\n else:\n loss = F.cross_entropy(output, data.y)\n loss.backward()\n total_loss += loss.item()\n optimizer.step()\n total_graph += data.num_graphs\n return total_loss / total_graph\n\n\nif __name__ == '__main__':\n # import analyze_dataset\n # train_dataset, test_dataset = get_datasets()\n # train_dataset, test_dataset = analyze_dataset.get_notrandom_notnull(train_dataset, test_dataset,\n # node_classification=node_classification)\n train_dataset = InMemoryRescueDataset([], node_classification=node_classification)\n 
train_dataset.load('dataset/train_notnull_notrandom_dataset.pt', device=device)\n\n test_dataset = InMemoryRescueDataset([], node_classification=node_classification)\n test_dataset.load('dataset/test_notnull_notrandom_dataset.pt', device=device)\n print(len(train_dataset))\n print(len(test_dataset))\n\n train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n\n if node_classification:\n testtrain_loader = DataLoader(train_dataset, batch_size=1, shuffle=False)\n else:\n testtrain_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False)\n\n model_filename = \"topk_gat_test_notnull_notrandom.pt\"\n # model = GCNNet(dataset.num_features, dataset.num_classes)\n model = TopKNet(train_dataset.num_features, train_dataset.num_classes)\n model.load_state_dict(torch.load(model_filename))\n # model = AGNNNet(dataset.num_features, dataset.num_classes)\n\n model = model.to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, weight_decay=1e-4)\n for epoch in range(190, 1001):\n loss = train()\n if epoch % 10 == 0:\n train_accuracy = test_model.test_model(testtrain_loader, model, node_classification, device)\n test_accuracy = test_model.test_model(test_loader, model, node_classification, device)\n log = 'Epoch: {:03d}, Train Loss: {:.8f} Train Accuracy: {:.8f} Test Accuracy: {:.8f}'\n print(log.format(epoch, loss, train_accuracy, test_accuracy))\n print(\"Saving model topk\")\n torch.save(model.state_dict(), model_filename)\n print('Model: ' + model_filename + ' is saved.')","sub_path":"rescue/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"552774644","text":"# From Wikipedia, the free encyclopaedia:\n# A happy number is defined by the following process:\n# Starting with any positive integer, replace the number by the sum of the squares of its digits, and repeat the process until the number equals 1 (where it will stay), or it loops endlessly in a cycle which does not include 1. 
Those numbers for which this process ends in 1 are happy numbers, while those that do not end in 1 are unhappy numbers.\n# Write a Python program to check whether a number is \"happy\" or not.\n\n\ndef is_Happy_num(n):\n past = set()\n while n != 1:\n n = sum(int(i)**2 for i in str(n))\n if n in past:\n return False\n past.add(n)\n return True\nprint(is_Happy_num(7))\nprint(is_Happy_num(932))\nprint(is_Happy_num(6))\n","sub_path":"Part-II/check_num_is_happy_or_not.py","file_name":"check_num_is_happy_or_not.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"482201973","text":"\"\"\"\nClasses and functions for laying out graphs for visualisation.\n\"\"\"\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\nimport numpy as np\nimport numpy.typing as npt\n\nfrom napari_arboretum.graph import TreeNode\n\n# colormaps\nWHITE = np.array([1.0, 1.0, 1.0, 1.0])\n\n# napari specifies colours as a RGBA tuple in the range [0, 1], so mirror\n# that convention throughout arboretum.\nColorType = npt.ArrayLike\n\n\n@dataclass\nclass Annotation:\n x: float\n y: float\n label: str\n color: ColorType = WHITE\n\n\n@dataclass\nclass Edge:\n x: tuple[float, float]\n y: tuple[float, float]\n color: np.ndarray = WHITE\n track_id: int | None = None\n node: TreeNode | None = None\n\n\ndef layout_tree(nodes: list[TreeNode]) -> tuple[list[Edge], list[Annotation]]:\n \"\"\"Build and layout the edges of a lineage tree, given the graph nodes.\n\n Parameters\n ----------\n nodes :\n A list of graph.TreeNode objects encoding a single lineage tree.\n\n Returns\n -------\n edges :\n A list of edges to be drawn.\n annotations :\n A list of annotations to be added to the graph.\n \"\"\"\n # put the start vertex into the queue, and the marked list\n root = nodes[0]\n\n queue = [root]\n marked = [root]\n y_pos = [0.0]\n\n # store the line coordinates that need to be plotted\n edges = []\n annotations = []\n\n # now step through\n while queue:\n # pop the root from the tree\n node = queue.pop(0)\n y = y_pos.pop(0)\n\n # draw the root of the tree\n edges.append(\n Edge(y=(y, y), x=(node.t[0], node.t[-1]), track_id=node.ID, node=node)\n )\n\n if node.is_root:\n annotations.append(Annotation(y=y, x=node.t[0], label=str(node.ID)))\n\n # mark if this is an apoptotic tree\n if node.is_leaf:\n annotations.append(Annotation(y=y, x=node.t[-1], label=str(node.ID)))\n continue\n\n children = [t for t in nodes if t.ID in node.children]\n\n for child in children:\n if child not in marked:\n # mark the children\n marked.append(child)\n queue.append(child)\n\n # calculate the depth modifier\n depth_mod = 2.0 / (2.0 ** (node.generation))\n\n if child == children[0]:\n y_pos.append(y + depth_mod)\n else:\n y_pos.append(y - depth_mod)\n\n # plot a linking line to the children\n edges.append(Edge(y=(y, y_pos[-1]), x=(node.t[-1], child.t[0])))\n\n # if it's a leaf don't plot the annotation\n if child.is_leaf:\n continue\n\n annotations.append(\n Annotation(\n y=y_pos[-1],\n x=child.t[-1] - (child.t[-1] - child.t[0]) / 2.0,\n label=str(child.ID),\n )\n )\n\n return edges, annotations\n","sub_path":"src/napari_arboretum/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"356542523","text":"import logging\nfrom logging import LogRecord\nfrom multiprocessing import Queue\nfrom queue import Empty\nimport sys\nfrom 
threading import Thread\nfrom typing import List, Union\n\nfrom rich.console import Console\nfrom rich.logging import RichHandler\nfrom tblib import pickling_support\n\n\nconsole = Console(stderr=True)\nlogging.lastResort = RichHandler(console=console,\n level='WARNING',\n rich_tracebacks=True,\n tracebacks_extra_lines=2,\n show_path=False)\n\npickling_support.install()\n\n\nclass QueueHandler(logging.Handler):\n \"\"\"\n This handler sends events to a queue. Typically, it would be used together\n with a multiprocessing Queue to centralise logging to file in one process\n (in a multi-process application), so as to avoid file write contention\n between processes.\n This code is new in Python 3.2, but this class can be copy pasted into\n user code for use with earlier Python versions.\n \"\"\"\n\n def __init__(self, queue: Queue):\n \"\"\"\n Initialise an instance, using the passed queue.\n \"\"\"\n logging.Handler.__init__(self)\n self.queue = queue\n\n def enqueue(self, record: List[LogRecord]):\n \"\"\"\n Enqueue a record.\n The base implementation uses put_nowait. You may want to override\n this method if you want to use blocking, timeouts or custom queue\n implementations.\n \"\"\"\n self.queue.put_nowait(record)\n\n def prepare(self, record: LogRecord):\n \"\"\"\n Prepares a record for queuing. The object returned by this method is\n enqueued.\n The base implementation formats the record to merge the message\n and arguments, and removes unpickleable items from the record\n in-place.\n You might want to override this method if you want to convert\n the record to a dict or JSON string, or send a modified copy\n of the record while leaving the original intact.\n \"\"\"\n # The format operation gets traceback text into record.exc_text\n # (if there's exception data), and also returns the formatted\n # message. We can then use this to replace the original\n # msg + args, as these might be unpickleable. 
We also zap the\n # exc_info and exc_text attributes, as they are no longer\n # needed and, if not None, will typically not be pickleable.\n\n # Not nedded, since we use tblib\n # msg = self.format(record)\n # # bpo-35726: make copy of record to avoid affecting other handlers in the chain.\n # record = copy.copy(record)\n # record.message = msg\n # record.msg = msg\n # record.args = None\n # record.exc_info = None\n # record.exc_text = None\n return ['log_msg', record]\n\n def emit(self, record: LogRecord):\n \"\"\"\n Emit a record.\n Writes the LogRecord to the queue, preparing it for pickling first.\n \"\"\"\n try:\n self.enqueue(self.prepare(record))\n except Exception:\n self.handleError(record)\n\n\nclass StdoutHandler:\n def __init__(self, queue: Queue):\n self.queue = queue\n\n def write(self, msg):\n self.queue.put(['stdout_msg', msg])\n\n @staticmethod\n def flush():\n sys.__stdout__.flush()\n\n\nclass StderrHandler:\n def __init__(self, queue: Queue):\n self.queue = queue\n\n def write(self, msg):\n self.queue.put(['stderr_msg', msg])\n\n @staticmethod\n def flush():\n sys.__stderr__.flush()\n\n\nclass LoggingListener(Thread):\n \"\"\" Listens to and handles child process log messages\n This class, when instantiated, listens to the logging queue to receive log messages from child processes\n and handles these messages using the configured root logger in the main process.\n \"\"\"\n\n def __init__(self,\n logging_queue: Queue\n ):\n super().__init__(target=self.work,\n args=())\n self._logging_queue = logging_queue\n\n self.record_type_to_handle_fn = {'log_msg': LoggingListener._handle_log_msg,\n 'stdout_msg': LoggingListener._handle_stdout_msg,\n 'stderr_msg': LoggingListener._handle_stderr_msg}\n\n @staticmethod\n def _handle_log_msg(record: LogRecord):\n logger = logging.getLogger(record.name)\n logger.handle(record)\n\n @staticmethod\n def _handle_stdout_msg(record: str):\n sys.stdout.write(record)\n sys.stdout.flush()\n\n @staticmethod\n def _handle_stderr_msg(record: str):\n sys.stderr.write(record)\n sys.stderr.flush()\n\n def work(self):\n while True:\n # Using queue.get(block=False) is necessary for python 3.6. 
queue.get() sometimes\n # leads to weird deadlocks when waiting for logging messages from child processes.\n try:\n record = self._logging_queue.get(block=False, timeout=0.01)\n except Empty:\n continue\n\n if record is None:\n break\n record_type, record = record\n\n handle_record = self.record_type_to_handle_fn[record_type]\n handle_record(record)\n\n\ndef configure_logging(level: Union[str, int] = 'INFO'):\n assert level in ['DEBUG', 'INFO', 'WARNING', 'WARN', 'ERROR', 'FATAL', 'CRITICAL',\n 10, 20, 30, 40, 50]\n logger = logging.getLogger()\n formatter = logging.Formatter('%(processName)-13s%(message)s')\n stream_handler = RichHandler(\n rich_tracebacks=True,\n tracebacks_extra_lines=2,\n show_path=False\n )\n stream_handler.setLevel(level)\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n logger.setLevel(level)\n","sub_path":"fluidml/common/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":5875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"521240244","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 14 14:00:49 2019\r\n\r\nRef: https://machinelearningmastery.com/tutorial-to-implement-k-nearest-neighbors-in-python-from-scratch/\r\n@author: 105502506\r\n\"\"\"\r\n\r\nimport math\r\ndef euclideanDistance(instance1, instance2, length):\r\n\tdistance = 0\r\n\tfor x in range(length):\r\n\t\tdistance += pow((instance1[x] - instance2[x]), 2)\r\n\treturn math.sqrt(distance)\r\n\r\n# test\r\ndef main():\r\n data1 = [1, 2, 3, 'a']\r\n data2 = [3, 2, 1, 'b']\r\n distance = euclideanDistance(data1, data2, 3)\r\n print ('Distance: ' + repr(distance))\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"hw2/euclideanDistance.py","file_name":"euclideanDistance.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"138879329","text":"import os, re\nimport time\nfrom datetime import datetime, timedelta\nimport logging\nimport logging.handlers\n\n\nclass ACELogger(object):\n @property\n def logger(self):\n if not self._logger:\n self.setLogger()\n return self._logger\n\n def setLogger(self, ):\n # create logger\n logger = logging.getLogger('ACE')\n logger.setLevel(logging.DEBUG)\n\n # create RQ handler with a higher log level\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # create formatter and add it to the handlers\n formatter = logging.Formatter(fmt='%(levelname)s - %(message)s - %(asctime)s', datefmt='%H:%M:%S')\n ch.setFormatter(formatter)\n # add the handlers to the logger\n logger.addHandler(ch)\n self._logger = logger\n\n def __init__(self, task=None):\n self._logger = None","sub_path":"6.2.stim/docker/ACE/Lib/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"455593987","text":"#!/user/bin/python\n# -*- coding: utf-8 -*-\n\nimport maya.cmds as cmds\nimport maya.mel as mm\n\ndef deleteUI(WinObject):\n \n if cmds.window(WinObject,q=True,exists=True):\n cmds.deleteUI(WinObject,wnd=True)\n else:\n pass\n \ndef create():\n \n \n if cmds.objExists(\"face_base\"):\n query = \"face_base\"\n if cmds.objExists(\"Set_allFaceBaseLocSet\"):\n cmds.select(\"Set_allFaceBaseLocSet\",hi=True,r=True)\n locSelect = []\n for i in cmds.ls(sl=True):\n if \"Shape\" in i:\n cmds.select(i,tgl=1)\n else:\n locSelect.append(i)\n \n newBase_skinMod = 
cmds.rename(query,query + \"_skin_geo\")\n newFix_follicleMod = cmds.duplicate(newBase_skinMod,n=newBase_skinMod.replace(\"skin\",\"follice\"),rr=True)\n cmds.polyTriangulate(newFix_follicleMod[0],ch=1)\n cmds.DeleteHistory(newFix_follicleMod[0])\n faceBaseBlend = cmds.blendShape(newBase_skinMod,newFix_follicleMod[0],n=\"faceBase_follicle_bShp\")\n cmds.setAttr(faceBaseBlend[0] + '.' + newBase_skinMod,1,l=True)\n cmds.setAttr(newFix_follicleMod[0] + \".visibility\",0) \n \n faceSecondaryCtrls = []\n locList = []\n djRivet = []\n allJoint = []\n rightJointList = []\n sel = cmds.ls(sl=True)\n listLocGP = cmds.listRelatives(sel[0],p=True)\n \n faceLocGrp = cmds.createNode(\"transform\",n=\"face_loc_Grp\")\n faceJointGrp = cmds.createNode(\"transform\",n=\"face_joint_Grp\")\n faceJointCtlGrp = cmds.createNode(\"transform\",n=\"face_joint_ctls_Grp\")\n for i in sel:\n if i[0] != \"f\":\n locList.append(i)\n if \"_l_\" in i:\n translateX = cmds.getAttr(i + \".tx\")\n translateY = cmds.getAttr(i + \".ty\")\n translateZ = cmds.getAttr(i + \".tz\")\n\n right_Object = cmds.duplicate(i,n=i.replace(\"_l_\",\"_r_\"),rr=True)\n cmds.setAttr(right_Object[0] + \".ty\",translateY)\n cmds.setAttr(right_Object[0] + \".tz\",translateZ)\n if translateX < 0:\n cmds.setAttr(right_Object[0] + \".tx\",translateX)\n elif translateX > 0:\n cmds.setAttr(right_Object[0] + \".tx\",-translateX)\n locList.append(i.replace(\"_l_\",\"_r_\"))\n\n cmds.select(d=True)\n cmds.parent(locList,faceLocGrp)\n cmds.setAttr(faceLocGrp + \".visibility\",0)\n jt = cmds.createNode(\"joint\")\n crv = cmds.circle(r=0.05,nr=(0,1,0))\n crv1 = cmds.circle(r=0.05,nr=(1,0,0))\n crv2 = cmds.circle(r=0.05,nr=(0,0,1))\n cmds.select(crv1,crv2,r=True)\n cmds.pickWalk(d=\"down\")\n cmds.select(crv,tgl=1)\n cmds.parent(r=1,s=1)\n cmds.select(d=1)\n cmds.delete(crv1,crv2)\n \n f = cmds.listRelatives(crv[0],shapes=True)\n for i in f:\n cmds.setAttr(i + \".overrideEnabled\",1)\n cmds.setAttr(i + \".overrideColor\",17)\n \n for i in locList:\n JntName = i.replace(\"_loc\",\"_jnt\")\n dup_jnt = cmds.duplicate(jt,rr=True)\n newJointName = cmds.rename(dup_jnt[0],JntName)\n cmds.delete(cmds.parentConstraint(i,newJointName,mo=False))\n allJoint.append(newJointName)\n \n for i in allJoint:\n \n ctlName = i.replace(\"_jnt\",\"_ctl\")\n drv_gp = cmds.createNode(\"transform\",n=i.replace(\"_jnt\",\"_Drv\") + \"_grp\")\n const_gp = cmds.createNode(\"transform\",n=i.replace(\"_jnt\",\"_Const\") + \"_grp\")\n object_gp = cmds.createNode(\"transform\",n=i.replace(\"_jnt\",\"_ctl\") + \"_grp\")\n cmds.parent(drv_gp,const_gp)\n cmds.parent(const_gp,object_gp)\n \n dup_ctl = cmds.duplicate(crv[0],n=ctlName,rr=True)\n faceSecondaryCtrls.append(dup_ctl[0])\n cmds.parent(dup_ctl,drv_gp)\n cmds.parent(object_gp,faceJointCtlGrp)\n cmds.delete(cmds.parentConstraint(i,object_gp,mo=False))\n cmds.parentConstraint(dup_ctl,i,mo=True)\n cmds.select(object_gp,newFix_follicleMod[0],r=True)\n djR = mm.eval(\"source djRivet.mel;djRivet;\")\n if cmds.objExists(\"djRivetX\"):\n cmds.setAttr(\"djRivetX.visibility\",0)\n faceSecondaryCtrlsSet = cmds.sets(faceSecondaryCtrls,n=\"Set_faceSecondaryCtrlsSet\")\n faceSecondaryJointSet = cmds.sets(allJoint,n=\"Set_faceSecondaryJointSet\")\n cmds.parent(faceLocGrp,listLocGP)\n cmds.delete(jt,crv,\"Set_allFaceBaseLocSet\")\n cmds.parent(allJoint,faceJointGrp)\n cmds.select(d=True)\n else:\n promptSelectLocWin = cmds.window('promptLoc',tlb=True,maximizeButton=False,minimizeButton=False)\n cmds.columnLayout(adj=True)\n cmds.text('Please select all 
fixFaceLoc.')\n cmds.button(l='Closed',c=lambda *args:deleteUI(\"promptLoc\"))\n cmds.showWindow(\"promptLoc\")\n \n else:\n promptFaceBaseWin = cmds.window('promptFaceBase',tlb=True,maximizeButton=False,minimizeButton=False)\n cmds.columnLayout(adj=True)\n cmds.text('Please look \"face_base\" model .')\n cmds.button(l='Closed',c=lambda *args:deleteUI(\"promptFaceBase\"))\n cmds.showWindow(\"promptFaceBase\")\n \n \n \n \n \n\n","sub_path":"maya-mel_py_ui/python_tools/createJointFollowFaceTool.py","file_name":"createJointFollowFaceTool.py","file_ext":"py","file_size_in_byte":5792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"437958909","text":"# -*- coding: utf-8 -*-\n#\n# Inv Unit Tests\n#\n# To run this script use:\n# python web2py.py -S eden -M -R applications/eden/modules/unit_tests/eden/inv.py\n#\nimport unittest\nimport datetime\n\nfrom gluon import *\nfrom gluon.storage import Storage\n\n# =============================================================================\nclass InvTests(unittest.TestCase):\n \"\"\" Inv Tests \"\"\"\n\n def setUp(self):\n \"\"\" Set up location records \"\"\"\n auth = current.auth\n auth.override = True\n\n self.location_code = Storage()\n self.location_ids = Storage()\n s3db = current.s3db\n\n\n #---------------------------------------------------------------------\n\n\n \n def tearDown(self):\n\n current.db.rollback()\n current.auth.override = False\n\n# =============================================================================\ndef run_suite(*test_classes):\n \"\"\" Run the test suite \"\"\"\n\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n for test_class in test_classes:\n tests = loader.loadTestsFromTestCase(test_class)\n suite.addTests(tests)\n if suite is not None:\n unittest.TextTestRunner().run(suite)\n return\n\nif __name__ == \"__main__\":\n\n run_suite(\n StatsTests,\n )\n\n# END ========================================================================\n","sub_path":"modules/unit_tests/eden/inv.py","file_name":"inv.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"356485149","text":"import time \r\naya=input(\"암호화에 쓸 키!\\n:\")\r\nmoae=input(\"암호화할 문장!\\n:\")\r\nK=[]\r\nC=\"\"\r\nM=''\r\nA=0\r\nfor i in range(len(aya)):\r\n C=aya[i]\r\n B=ord(C)\r\n K.append(B)\r\nprint(\"암호키 추가 완료\")\r\nwhile True:\r\n print(\"While문 시작\")\r\n if len(K)= color_thresholds[0]) & (s_channel <= color_thresholds[1])] = 1\n l_binary = np.zeros_like(l_channel)\n l_binary[(l_channel >= 60) & (l_channel <= 230)] = 1\n \n combined = np.zeros_like(s_channel)\n combined[((s_binary == 1) & (l_binary == 1)) | (sxbinary_s == 1)|(sxbinary_l==1)] = 1 \n\n return combined\ndef generate_binary_image_v2(image, sobel_thresholds=(30, 140), color_thresholds=(150, 255)):\n image = np.copy(image)\n\n # Convert to HLS color space and separate the V channel\n hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n l_channel = hls[:,:,1]\n s_channel = hls[:,:,2]\n \n sxbinary_l= abs_sobel_thresh(l_channel, orient='x', sobel_kernel=3, thresh=sobel_thresholds)\n sxbinary_s= abs_sobel_thresh(s_channel, orient='x', sobel_kernel=3, thresh=sobel_thresholds) \n \n # Thresholding on the color channels\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= color_thresholds[0]) & (s_channel <= color_thresholds[1])] = 1\n \n combined = np.zeros_like(s_channel)\n combined[(s_binary == 1) | (sxbinary_s == 1)|(sxbinary_l==1)] 
= 1 \n\n return combined\ndef generate_binary_image_v3(image, sobel_thresholds=(30, 140), color_thresholds=(150, 255)):\n # Convert to YCrCb color space\n img = np.copy(image)\n YCrCb = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n mask_YCrCb = cv2.bitwise_or(cv2.inRange(YCrCb[:, :, 2], 0, 100), cv2.inRange(YCrCb[:, :, 1], 150, 255)) \n \n LAB = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)\n mask_LAB = cv2.bitwise_and(cv2.inRange(LAB[:, :, 0], 210, 255),cv2.inRange(LAB[:, :, 1], 123, 132), cv2.inRange(LAB[:, :, 2], 123, 132)) \n return cv2.bitwise_or(mask_YCrCb,mask_LAB)\n\n\n\n\ndef generate_binary_image(image, sobel_thresholds=(30, 140), color_thresholds=(150, 255)):\n return generate_binary_image_v3(image, sobel_thresholds=(30, 140), color_thresholds=(150, 255))\n\n\n# %%\n# Display the before and after\nfor fname in Test_images:\n example_distorted_test_image = cv2.imread(fname)\n example_distorted_test_image_RGB = cv2.cvtColor(example_distorted_test_image, cv2.COLOR_BGR2RGB)\n example_undistorted_test_image = cv2.undistort(example_distorted_test_image_RGB,mtx,dist) \n result1=generate_binary_image_v1(example_undistorted_test_image)\n result2=generate_binary_image_v2(example_undistorted_test_image)\n result3=generate_binary_image_v3(example_undistorted_test_image)\n result=generate_binary_image(example_undistorted_test_image)\n if showimages:\n fig= plt.figure(figsize=(20,10))\n plt.subplot(141),plt.imshow(example_undistorted_test_image)\n plt.title('Before', fontsize=30)\n plt.subplot(142),plt.imshow(result1, cmap='gray')\n plt.title('v1', fontsize=30)\n plt.subplot(143),plt.imshow(result2, cmap='gray')\n plt.title('v2', fontsize=30)\n plt.subplot(144),plt.imshow(result3, cmap='gray')\n plt.title('v3', fontsize=30)\n # fig.suptitle(fname, fontsize=16)\n\n if saveimages:\n mpimg.imsave(\"Transformed_\"+fname,result, cmap='gray')\n\n# %% [markdown]\n# ## 2.3 Perspective Transform\n\n# %%\ndef warper(image):\n image_size = image.shape[:2]\n img_size = (image_size[1], image_size[0]) \n \n src = np.float32(\n [[259, 692],\n [608, 444],\n [672, 444],\n [1160, 692]])\n\n dst = np.float32(\n [[200,720],\n [200,0],\n [1047,0],\n [1047,720]])\n \n perspective_matrix = cv2.getPerspectiveTransform(src, dst)\n inverse_warp_matrix = cv2.getPerspectiveTransform(dst, src)\n warped = cv2.warpPerspective(image, perspective_matrix, img_size)\n return warped, inverse_warp_matrix\n\n#https://nikolasent.github.io/opencv/2017/05/07/Bird's-Eye-View-Transformation.html\n#https://nikolasent.github.io/proj/proj1\ndef warper_V2(image):\n image_size = image.shape[:2]\n img_size = (1280, 223) \n src = np.float32([[0, 673], [1207, 673],[0, 450], [1280, 450]])\n dst = np.float32([[569, 223], [711, 223], [0, 0], [1280, 0]])\n M = cv2.getPerspectiveTransform(src, dst)\n M = cv2.getPerspectiveTransform(src, dst) # The transformation matrix\n Minv = cv2.getPerspectiveTransform(dst, src) # Inverse transformation\n\n img = np.copy(image) # Read the test img\n # img = img[450:(450+IMAGE_H), 0:IMAGE_W] # Apply np slicing for ROI crop\n warped_img = cv2.warpPerspective(img, M,img_size) # Image warping\n #plt.imshow(cv2.cvtColor(warped_img, cv2.COLOR_BGR2RGB)) # Show results\n #plt.show()\n return warped_img, Minv\n\n\n# %%\n# Display the before and after\nfor fname in Test_images:\n example_distorted_test_image = cv2.imread(fname)\n example_distorted_test_image_RGB = cv2.cvtColor(example_distorted_test_image, cv2.COLOR_BGR2RGB)\n example_undistorted_test_image = cv2.undistort(example_distorted_test_image_RGB,mtx,dist) \n warped, 
inverse_warp_matrix =warper_V2(example_undistorted_test_image) \n\n if showimages:\n fig=plt.figure(figsize=(20,10))\n plt.subplot(121),plt.imshow(example_undistorted_test_image)\n plt.title('Before', fontsize=30)\n plt.subplot(122),plt.imshow(warped)\n plt.title('After', fontsize=30)\n fig.suptitle(fname, fontsize=16)\n\n if saveimages:\n mpimg.imsave(\"Warped_\"+fname,warped)\n\n# %% [markdown]\n# ## 2.4 Identify lane-line pixels and fit a polynomial\n\n# %%\n# Display the before and after\nfor fname in Test_images:\n example_distorted_test_image = cv2.imread(fname)\n example_distorted_test_image_RGB = cv2.cvtColor(example_distorted_test_image, cv2.COLOR_BGR2RGB)\n example_undistorted_test_image = cv2.undistort(example_distorted_test_image_RGB,mtx,dist)\n binary = generate_binary_image(example_undistorted_test_image)\n warped, inverse_warp_matrix =warper_V2(binary) \n\n if showimages:\n fig=plt.figure(figsize=(20,10))\n plt.subplot(121),plt.imshow(example_undistorted_test_image)\n plt.title('Before', fontsize=30)\n plt.subplot(122),plt.imshow(warped, cmap='gray')\n plt.title('After', fontsize=30)\n fig.suptitle(fname, fontsize=16)\n if saveimages:\n mpimg.imsave(\"Transformed_Warped_\"+fname,warped, cmap='gray')\n\n\n# %%\ndef sliding_window_lane_search(image, left_fit, right_fit):\n output_buffer = np.dstack((image, image, image))*255\n \n # Get a histogram of the bottom part of the image\n histogram = np.sum(image[np.int(image.shape[0]*1 / 2):, 569:711], axis=0)\n #plt.figure()\n #plt.plot(histogram)\n\n # Discover possible starting points for the left and right lines \n midpoint = np.int(histogram.shape[0]/2)\n left_x_base = np.argmax(histogram[:midpoint])+569\n right_x_base = np.argmax(histogram[midpoint:]) + midpoint+569\n\n num_windows = 9\n window_height = np.int(image.shape[0]/num_windows)\n\n # x/y positions of all nonzeros in the image\n nonzero = image.nonzero()\n nonzero_y = np.array(nonzero[0])\n nonzero_x = np.array(nonzero[1])\n\n # Position buffers for use when moving from window to window\n # Starts at our possible starting points\n current_left_x = left_x_base\n current_right_x = right_x_base\n\n # Margine on each side of each window?\n margin = 10\n\n # Minimum pixels to recenter a window?\n recenter_minimum = 3\n\n # Buffers for left and right lane pixel indices\n left_lane_indices = []\n right_lane_indices = []\n\n for window in range(num_windows):\n # Identify window boundaries in x and y (and right and left)\n win_y_low = image.shape[0] - (window + 1) * window_height\n win_y_high = image.shape[0] - window * window_height\n win_xleft_low = current_left_x - margin\n win_xleft_high = current_left_x + margin\n win_xright_low = current_right_x - margin\n win_xright_high = current_right_x + margin\n \n # Draw the windows on the visualization image\n cv2.rectangle(\n output_buffer,\n (win_xleft_low, win_y_low),\n (win_xleft_high, win_y_high),\n (0, 255, 0),\n 2\n ) \n cv2.rectangle(\n output_buffer,\n (win_xright_low, win_y_low),\n (win_xright_high, win_y_high),\n (0, 255, 0),\n 2\n ) \n # Identify the nonzero pixels in x and y within the window\n good_left_indices = (\n (nonzero_y >= win_y_low) & \n (nonzero_y < win_y_high) & \n (nonzero_x >= win_xleft_low) & \n (nonzero_x < win_xleft_high)).nonzero()[0]\n good_right_indices = (\n (nonzero_y >= win_y_low) & \n (nonzero_y < win_y_high) & \n (nonzero_x >= win_xright_low) & \n (nonzero_x < win_xright_high)).nonzero()[0]\n # Append these indices to the lists\n left_lane_indices.append(good_left_indices)\n 
right_lane_indices.append(good_right_indices)\n # If you found > minpix pixels, recenter next window on their mean position\n if len(good_left_indices) > recenter_minimum:\n current_left_x = np.int(np.mean(nonzero_x[good_left_indices]))\n if len(good_right_indices) > recenter_minimum: \n current_right_x = np.int(np.mean(nonzero_x[good_right_indices]))\n \n left_lane_indices = np.concatenate(left_lane_indices)\n right_lane_indices = np.concatenate(right_lane_indices)\n \n left_x = nonzero_x[left_lane_indices]\n left_y = nonzero_y[left_lane_indices]\n right_x = nonzero_x[right_lane_indices]\n right_y = nonzero_y[right_lane_indices]\n \n try:\n new_left_fit = np.polyfit(left_y, left_x, 2)\n except TypeError:\n new_left_fit = left_fit\n try:\n new_right_fit = np.polyfit(right_y, right_x, 2)\n except TypeError:\n new_right_fit = right_fit\n \n # Do visualization\n\n output_buffer[nonzero_y[left_lane_indices], nonzero_x[left_lane_indices]] = [255, 0, 0]\n output_buffer[nonzero_y[right_lane_indices], nonzero_x[right_lane_indices]] = [0, 0, 255]\n \n return new_left_fit, new_right_fit, output_buffer, right_x, right_y, left_x, left_y\n\n\ndef preexisting_lane_search(image, left_fit, right_fit):\n output_buffer = np.dstack((image, image, image))*255\n nonzero = image.nonzero()\n nonzero_y = np.array(nonzero[0])\n nonzero_x = np.array(nonzero[1])\n margin = 10\n left_lane_indices = ((nonzero_x > (left_fit[0]*(nonzero_y**2) + left_fit[1]*nonzero_y + left_fit[2] - margin)) & (nonzero_x < (left_fit[0]*(nonzero_y**2) + left_fit[1]*nonzero_y + left_fit[2] + margin))) \n right_lane_indices = ((nonzero_x > (right_fit[0]*(nonzero_y**2) + right_fit[1]*nonzero_y + right_fit[2] - margin)) & (nonzero_x < (right_fit[0]*(nonzero_y**2) + right_fit[1]*nonzero_y + right_fit[2] + margin))) \n\n # Again, extract left and right line pixel positions\n left_x = nonzero_x[left_lane_indices]\n left_y = nonzero_y[left_lane_indices] \n right_x = nonzero_x[right_lane_indices]\n right_y = nonzero_y[right_lane_indices]\n \n # Fit a second order polynomial to each\n try:\n left_fit = np.polyfit(left_y, left_x, 2)\n right_fit = np.polyfit(right_y, right_x, 2)\n except TypeError:\n left_fit, right_fit, output_buffer, right_x, right_y, left_x, left_y = sliding_window_lane_search(image, left_fit, right_fit)\n\n output_buffer[nonzero_y[left_lane_indices], nonzero_x[left_lane_indices]] = [255, 0, 0]\n output_buffer[nonzero_y[right_lane_indices], nonzero_x[right_lane_indices]] = [0, 0, 255]\n return left_fit, right_fit,output_buffer, right_x, right_y, left_x, left_y\n\n\n# %%\nfor fname in Test_images:\n example_distorted_test_image = cv2.imread(fname)\n example_distorted_test_image_RGB = cv2.cvtColor(example_distorted_test_image, cv2.COLOR_BGR2RGB)\n example_undistorted_test_image = cv2.undistort(example_distorted_test_image_RGB,mtx,dist)\n binary = generate_binary_image(example_undistorted_test_image)\n warped, inverse_warp_matrix =warper_V2(binary)\n left_fit, right_fit, out_image, _, _, _, _ = sliding_window_lane_search(warped, None, None)\n\n if showimages:\n # Generate x and y values for plotting\n ploty = np.linspace(100, out_image.shape[0]-1, out_image.shape[0] )\n left_fit_x = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n right_fit_x = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n plt.figure(figsize=(20, 20))\n plt.imshow(out_image)\n plt.plot(left_fit_x, ploty, color='yellow')\n plt.plot(right_fit_x, ploty, color='yellow')\n plt.xlim(0, 1280)\n plt.ylim(223, 0)\n fig=plt.figure(figsize=(20,10)) \n\n# 
%% [markdown]\n# ## 5. Computing Radius of Curvature\n\n# %%\nmeters_per_pixel_y = 30/720 \nmeters_per_pixel_x = 3.7/700 \n\ndef radius_of_curvature(y_value, right_x, left_x, right_y, left_y):\n # Fit new polynomials to x,y in world space\n left_fit_cr = np.polyfit(left_y, left_x, 2)\n right_fit_cr = np.polyfit(right_y, right_x, 2)\n al, bl, cl = left_fit_cr\n ar, br, cr = left_fit_cr\n left = (1 + (((2 * al * y_value * meters_per_pixel_y) + bl) ** 2) ** (1.5))/ np.absolute(2 * al)\n right = (1 + (((2 * ar * y_value * meters_per_pixel_y) + br) ** 2) ** (1.5))/ np.absolute(2 * ar)\n return left, right\n\ndef distance_from_center(left_fit, right_fit, y_value, x_size):\n left_fit_x = left_fit[0]*y_value* meters_per_pixel_y**2 + left_fit[1]*y_value*meters_per_pixel_y + left_fit[2]\n right_fit_x = right_fit[0]*y_value* meters_per_pixel_y**2 + right_fit[1]*y_value*meters_per_pixel_y + right_fit[2]\n \n center_of_car = x_size / 2\n center_of_lane = (left_fit_x + right_fit_x) / 2\n return (center_of_lane - center_of_car) * meters_per_pixel_x\n\n# %% [markdown]\n# ## 6. Drawing Detected Lanes\n\n# %%\ndef draw_lane_lines(warped_image, left_fit, right_fit, inverse_warp_matrix, destination_image):\n # Image for drawing\n destination_image = np.copy(destination_image)\n warp_zero = np.zeros_like(warped_image).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n \n \n ploty = np.linspace(100, warp_zero.shape[0]-1, warp_zero.shape[0] )\n left_fit_x = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n right_fit_x = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n \n # Convert x & y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fit_x, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fit_x, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))\n \n # Warp the blank back to original image space using inverse perspective matrix (Minv)\n newwarp = cv2.warpPerspective(color_warp, inverse_warp_matrix, ( 1280,720)) \n # Combine the result with the original image\n result = cv2.addWeighted(destination_image, 1, newwarp, 0.3, 0)\n \n return result\n \nimport datetime\n \ndef draw_radius_of_curvature(image, radius, last_draw):\n \n if (datetime.datetime.now() - last_draw).total_seconds() > 1:\n last_draw = datetime.datetime.now()\n radius = min_radius\n \n cv2.putText(\n image,\n \"Radius of Curvature: {radius} m\".format(radius=round(radius, 3)),\n (10, 50),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1.1,\n (255, 255, 255),\n 2,\n )\n return last_draw\n\ndef draw_center_offset(left_fit, right_fit, image):\n y_value = image.shape[0]\n distance = distance_from_center(left_fit, right_fit, y_value, image.shape[1])\n \n cv2.putText(\n image,\n \"Offset from center of lane: {distance} m\".format(distance=round(distance, 3)),\n (10, 150),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1.1,\n (255, 255, 255),\n 2,\n )\n \n\n\n# %%\nfor fname in Test_images:\n example_distorted_test_image = cv2.imread(fname)\n example_distorted_test_image_RGB = cv2.cvtColor(example_distorted_test_image, cv2.COLOR_BGR2RGB)\n example_undistorted_test_image = cv2.undistort(example_distorted_test_image_RGB,mtx,dist)\n binary = generate_binary_image(example_undistorted_test_image)\n warped, inverse_warp_matrix =warper_V2(binary)\n left_fit, right_fit, out, right_x, right_y, left_x, left_y = sliding_window_lane_search(warped, None, None)\n new_image = 
draw_lane_lines(warped, left_fit, right_fit, inverse_warp_matrix, example_undistorted_test_image)\n last_draw = datetime.datetime.now()\n y_value = new_image.shape[0]\n radius_left, radius_right = radius_of_curvature(y_value, right_x, left_x, right_y, left_y)\n min_radius = min([radius_left, radius_right])\n last_draw = draw_radius_of_curvature(new_image, min_radius, last_draw)\n draw_center_offset(left_fit, right_fit, new_image)\n\n if showimages:\n fig=plt.figure(figsize=(20,10))\n plt.subplot(121),plt.imshow(example_undistorted_test_image)\n plt.title('Before', fontsize=30)\n plt.subplot(122),plt.imshow(new_image)\n plt.title('draw_lane_lines', fontsize=30)\n\n\n \n\n# %% [markdown]\n# # III. Pipelining for Video\n\n# %%\nglobal last_5_radius\nlast_5_radius = []\ndef lane_detector_pipeline(image, left, right):\n image = np.copy(image)\n undistorted_image = cv2.undistort(\n image,\n mtx,\n dist\n )\n binary = generate_binary_image(undistorted_image)\n warped, inverse_warp_matrix = warper_V2(binary)\n if left is None and right is None:\n left_fit, right_fit, out, right_x, right_y, left_x, left_y = sliding_window_lane_search(warped, left, right)\n else:\n left_fit, right_fit, out, right_x, right_y, left_x, left_y = preexisting_lane_search(warped, left, right)\n drawn = draw_lane_lines(\n warped,\n left_fit,\n right_fit,\n inverse_warp_matrix,\n undistorted_image\n )\n last_draw = datetime.datetime.now()\n y_value = new_image.shape[0]\n min_radius = min(radius_of_curvature(y_value, right_x, left_x, right_y, left_y))\n\n\n if len(last_5_radius) == 5:\n last_5_radius.pop()\n last_5_radius.insert(0, min_radius)\n\n last_draw = draw_radius_of_curvature(drawn, sum(last_5_radius) / len(last_5_radius), last_draw)\n draw_center_offset(left_fit, right_fit, drawn)\n color_binary = binary * 255\n return drawn, left_fit, right_fit\n\n\n# %%\n# Import everything needed to edit/save/watch video clips\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\n\n\n# %%\nclass ImageProcessor:\n per_image_fit_left = None\n per_image_fit_right = None\n def process_image(self, image):\n drawn, self.per_image_fit_left, self.per_image_fit_right = lane_detector_pipeline(image, self.per_image_fit_left, self.per_image_fit_right)\n return drawn\n\nprocessor = ImageProcessor()\n \noutput = 'Project_Output/project_output_v3.mp4'\nclip1 = VideoFileClip(\"project_video.mp4\")#.subclip(0,10)\nproject_clip = clip1.fl_image(processor.process_image) #NOTE: this function expects color images!!\nget_ipython().run_line_magic('time', 'project_clip.write_videofile(output, audio=False)')\n\n","sub_path":"Image_Processing.py","file_name":"Image_Processing.py","file_ext":"py","file_size_in_byte":21354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"331698907","text":"# -*- coding: latin-1 -*-\nimport os\nimport re\nimport sys\nimport time\nimport telepot\t\nimport pickle\n\nfrom datetime import date\nfrom flask import Flask, request\n\ntry:\n\tfrom Queue import Queue\nexcept ImportError:\n\tfrom queue import Queue\n\n#GRUPO_SALA = -1001045780811\n#GRUPO_SALA = -126875187\nGRUPO_SALA = int(os.environ['TELEGRAM_GRUPO_SALA'])\nSUBJECTS = ['portugues', 'redacao', 'literatura', 'fisica', 'quimica', 'biologia', 'geografia', 'historia', 'matematica', 'filosofia', 'sociologia', 'ingles', 'espanhol', 'artes']\n\nhomework = {}\nis_openshift = 'OPENSHIFT_PYTHON_IP' in os.environ\nif is_openshift: homework_path = os.path.join(os.path.expanduser('~'), 
'app-root/data/homework.pickle')\nelse: homework_path = 'homework.pickle'\n\ndef init_homework():\n\thomework.clear()\n\thomework['version'] = '1.0'\n\tfor subject in SUBJECTS:\n\t\thomework[subject] = {}\n\t\thomework[subject]['provas'] = []\n\t\thomework[subject]['deveres'] = []\ndef save_homework():\n\twith open(homework_path, 'wb') as f:\n\t\tpickle.dump(homework, f, protocol=3)\ndef load_homework():\n\tif os.path.exists(homework_path):\n\t\twith open(homework_path, 'rb') as f:\n\t\t\thomework.update(pickle.load(f))\n\ndef on_chat_message(msg):\n\tcontent_type, chat_type, chat_id = telepot.glance(msg)\n\n\tif content_type == 'text':\n\t\tif chat_id == GRUPO_SALA or chat_id == -126875187:\n\t\t\traw_message = msg['text'].strip()\n\n\t\t\tadd_regex = r\"^/nov(odever|amateria)(?:@mariachi_bot)?\\s?([\\w]+)?\\s?(.+)?$\"\n\t\t\tif re.search(add_regex, raw_message, flags=re.ASCII):\n\t\t\t\tmatch = re.search(add_regex, raw_message, flags=re.UNICODE)\n\t\t\t\tfunc = match.group(1)\n\t\t\t\tif match.group(2) and match.group(3) != None:\n\t\t\t\t\tsubject = match.group(2)\n\t\t\t\t\tif not subject in SUBJECTS:\n\t\t\t\t\t\tbot.sendMessage(chat_id, u'Matéria inválida. Conheço as seguintes matérias: %s' % ', '.join(['_%s_' % str(x) for x in SUBJECTS]), 'Markdown')\n\t\t\t\t\telse:\n\t\t\t\t\t\ttoday = date.today()\n\n\t\t\t\t\t\tif func == 'odever':\n\t\t\t\t\t\t\tdever = {}\n\t\t\t\t\t\t\tdever['data'] = today.strftime(\"%d/%m/%Y\")\n\t\t\t\t\t\t\tdever['conteudo'] = match.group(3)\n\t\t\t\t\t\t\thomework[subject]['deveres'].append(dever)\n\n\t\t\t\t\t\t\tbot.sendMessage(chat_id, 'Dever de %s adicionado!' % subject)\n\t\t\t\t\t\t\tsave_homework()\n\t\t\t\t\t\telif func == 'amateria':\n\t\t\t\t\t\t\tmateria = {}\n\t\t\t\t\t\t\tmateria['data'] = today.strftime(\"%d/%m/%Y\")\n\t\t\t\t\t\t\tmateria['conteudo'] = match.group(3)\n\t\t\t\t\t\t\thomework[subject]['provas'].append(materia)\n\n\t\t\t\t\t\t\tbot.sendMessage(chat_id, 'Matéria de %s adicionada!' 
% subject)\n\t\t\t\t\t\t\tsave_homework()\n\n\t\t\t\telse:\n\t\t\t\t\tbot.sendMessage(chat_id, 'Exemplo de uso:\\r\\n*/novodever* historia Explicar por quê o Jonathan é o melhor professor\\r\\n*/novamateria* historia Império Romano', 'Markdown')\n\t\t\tlist_regex = r\"/listar(deveres|materias)(?:@mariachi_bot)?\\s?([\\w]+)?$\"\n\t\t\tif re.search(list_regex, raw_message, flags=re.ASCII):\n\t\t\t\tmatch = re.search(list_regex, raw_message, flags=re.UNICODE)\n\t\t\t\tfunc = match.group(1)\n\t\t\t\tif func == 'materias': func = 'provas'\n\t\t\t\ttext_deveres = 'Deveres'\n\t\t\t\tif func == 'provas': text_deveres = 'Matérias'\n\t\t\t\tif match.group(2) != None:\n\t\t\t\t\tsubject = match.group(2)\n\t\t\t\t\tif subject in homework:\n\n\t\t\t\t\t\treply_deveres = ''\n\t\t\t\t\t\tfor dever in homework[subject][func]:\n\t\t\t\t\t\t\treply_deveres += '*%s*\\r\\n%s\\r\\n\\r\\n' % (dever['data'], dever['conteudo'])\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif reply_deveres != '': bot.sendMessage(chat_id, '%s de %s:\\r\\n%s' % (text_deveres, subject, reply_deveres), 'Markdown')\n\t\t\t\telse:\n\t\t\t\t\treply_deveres = ''\n\t\t\t\t\tfor subject in SUBJECTS:\n\t\t\t\t\t\tif subject in homework:\n\t\t\t\t\t\t\tif homework[subject][func] != []:\n\t\t\t\t\t\t\t\treply_deveres += '*%s de %s*:\\r\\n' % (text_deveres, subject)\n\t\t\t\t\t\t\t\tfor dever in homework[subject][func]:\n\t\t\t\t\t\t\t\t\treply_deveres += '*%s* - #%d\\r\\n%s\\r\\n\\r\\n' % (dever['data'], homework[subject][func].index(dever) + 1, dever['conteudo'])\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\treply_deveres += '------------\\r\\n'\n\t\t\t\t\tif reply_deveres is not '': bot.sendMessage(chat_id, reply_deveres, 'Markdown')\n\n\t\t\tdel_regex = r\"/apagar(deveres|materias)(?:@mariachi_bot)?\\s([\\w]+)\\s?([0-9]+)?$\"\n\t\t\tif re.search(del_regex, raw_message, flags=re.ASCII):\n\t\t\t\tmatch = re.search(del_regex, raw_message, flags=re.UNICODE)\n\t\t\t\tfunc = match.group(1)\n\t\t\t\tif match.group(2) != None:\n\t\t\t\t\tsubject = match.group(2)\n\t\t\t\t\tidx = int(match.group(3)) if match.group(3) != None else None\n\t\t\t\t\tif func == 'deveres':\n\t\t\t\t\t\tif idx != None:\n\t\t\t\t\t\t\tif idx > len(homework[subject]['deveres']) or idx <= 0:\n\t\t\t\t\t\t\t\tbot.sendMessage(chat_id, 'O item especificado não existe na lista.')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\thomework[subject]['deveres'].pop(idx-1)\n\t\t\t\t\t\t\t\tbot.sendMessage(chat_id, 'O dever #%d de %s foi removido com sucesso!' % (idx, subject))\n\t\t\t\t\t\t\t\tsave_homework()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\thomework[subject]['deveres'].clear()\n\t\t\t\t\t\t\tsave_homework()\n\t\t\t\t\t\t\tbot.sendMessage(chat_id, 'Os deveres de %s foram removidos com sucesso!' % (subject))\n\t\t\t\t\telif func == 'materias':\n\t\t\t\t\t\tif idx != None:\n\t\t\t\t\t\t\tif idx > len(homework[subject]['provas']) or idx <= 0:\n\t\t\t\t\t\t\t\tbot.sendMessage(chat_id, 'O item especificado não existe na lista.')\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\thomework[subject]['provas'].pop(idx-1)\n\t\t\t\t\t\t\t\tbot.sendMessage(chat_id, 'A matéria #%d de %s foi removida com sucesso!' % (idx, subject))\n\t\t\t\t\t\t\t\tsave_homework()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\thomework[subject]['provas'].clear()\n\t\t\t\t\t\t\tsave_homework()\n\t\t\t\t\t\t\tbot.sendMessage(chat_id, 'As matérias de %s foram removidas com sucesso!' 
% (subject))\n\t\t\t\telse:\n\t\t\t\t\tbot.sendMessage(chat_id, 'Exemplo de uso:\\r\\n*/apagardeveres* historia 1 - apaga o primeiro item na lista de deveres de história\\r\\n*/apagardeveres* historia - apaga todos os deveres de história\\r\\n*/apagarmaterias* historia 1 - apaga o primeiro item na lista de matérias de história\\r\\n*/apagarmaterias* historia - apaga todas as matérias de história', 'Markdown')\n\n\ndef on_callback_query(msg):\n query_id, from_id, data = telepot.glance(msg, flavor='callback_query')\ndef on_inline_query(msg):\n query_id, from_id, query_string = telepot.glance(msg, flavor='inline_query')\ndef on_chosen_inline_result(msg):\n result_id, from_id, query_string = telepot.glance(msg, flavor='chosen_inline_result')\n\napp = Flask(__name__)\nbot = telepot.Bot(os.environ['TELEGRAM_TOKEN'])\nupdate_queue = Queue()\ninit_homework()\nload_homework()\n\nSECRET_URL = '/bot' + os.environ['TELEGRAM_TOKEN']\n\nbot.message_loop({\n 'chat': on_chat_message,\n 'callback_query': on_callback_query,\n 'inline_query': on_inline_query,\n 'chosen_inline_result': on_chosen_inline_result\n}, source=update_queue)\n\n@app.route(SECRET_URL, methods=['GET', 'POST'])\ndef pass_update():\n\tupdate_queue.put(request.data)\n\treturn 'OK'\n\n@app.route('/')\ndef hello_world():\n\treturn 'It works!'\n\n@app.route('/deveres')\ndef show_homework():\n\treturn str(homework)\n\nif __name__ == '__main__':\n\tif not 'OPENSHIFT_PYTHON_IP' in os.environ: ip = '127.0.0.1'\n\telse: ip = os.environ['OPENSHIFT_PYTHON_IP']\n\ttime.sleep(5)\n\tbot.setWebhook()\n\ttime.sleep(5)\n\tbot.setWebhook('https://pymariachi-xinayder.rhcloud.com' + SECRET_URL)\n\tapp.run(host=ip, port=8080, debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"488079728","text":"import numpy as np\nimport nltk\nfrom math import sqrt\nfrom Task1_datahelper import word_indices_to_char_indices\ndef get_feed_dict(sentence, max_word_len, char_dict, lookup_table, map_id_word, map_word_id):\n sentence = nltk.word_tokenize(sentence)\n sentence_in_indices = np.zeros(shape=(1, len(sentence)), dtype=np.int32)\n\n label = [0]*len(sentence)\n label = np.array(label).reshape(1, -1)\n dims = lookup_table.shape[1]\n\n vectors = np.zeros(shape=(1, len(sentence), dims), dtype=np.float32)\n new_idx = lookup_table.shape[0]\n map_word_id_new = dict()\n map_id_word_new = dict()\n for k in map_id_word:\n map_id_word_new[k] = map_id_word[k]\n for k in map_word_id:\n map_word_id_new[k] = map_word_id[k]\n\n for idx, w in enumerate(sentence):\n if w in map_word_id:\n vectors[0, idx, :] = lookup_table[map_word_id_new[w], :]\n else:\n vectors[0, idx, :] = np.random.uniform(-sqrt(3.0/dims), sqrt(3.0/dims), dims)\n map_id_word_new[new_idx] = w\n map_word_id_new[w] = new_idx\n new_idx += 1\n sentence_in_indices[0, idx] = map_word_id_new[w]\n sequence_length = np.array([len(sentence)])\n max_sentences_length_in_batch = len(sentence)\n\n \n chars_indices = word_indices_to_char_indices(sentence_in_indices, sequence_length, max_sentences_length_in_batch, max_word_len, char_dict, map_id_word_new)\n \n feed_dict = {\n \"labels_placeholder\": label,\n \"vectors\": vectors,\n \"sequence_lengths_placeholder\": sequence_length,\n \"chars_placeholder\": chars_indices,\n \"max_sentences_length_placeholder\": max_sentences_length_in_batch,\n \"dropout_prob_placeholder\": 1.0\n }\n return sentence, 
feed_dict","sub_path":"code/server_helper.py","file_name":"server_helper.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"632492287","text":"import praw\nfrom praw.models import MoreComments\n\nreddit = praw.Reddit(\n client_id='',\n client_secret='',\n username='',\n password='',\n user_agent='Comment Extraction')\n\nfile_name = \"5UTCFortyNine.txt\"\n\nthreads = [\n 'https://old.reddit.com/r/millionairemakers/comments/edhj2m/not_only_is_it_about_to_be_christmas_but_today/',\n 'https://old.reddit.com/r/millionairemakers/comments/edq7ov/apparently_users_came_by_so_fast_that_reddits_hug/'\n]\n\ndef fetch_ids(thread_url):\n submission = reddit.submission(url=thread_url)\n submission.comment_sort = 'old'\n li = []\n for top_level_comment in submission.comments:\n if isinstance(top_level_comment, MoreComments):\n li += top_level_comment.children\n li.append(str(top_level_comment.id))\n return li\n\ntid = []\nfor thread in threads:\n tid += fetch_ids(thread)\nprint(f\"Found {len(tid)} comments\")\n\nwith open(file_name, \"w\") as f:\n f.write(\"\\n\".join(sorted(tid)))\nprint(f\"Comments saved in {file_name}\")\n","sub_path":"Legacy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"297435440","text":"import os\r\nimport time\r\nimport pickle\r\nimport random\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nimport sys\r\nimport csv\r\nimport eval\r\nfrom input import DataInput\r\nfrom model import Model\r\n\r\n#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\r\nrandom.seed(1234)\r\nnp.random.seed(1234)\r\ntf.random.set_seed(1234)\r\n\r\nlearning_rate = 0.1\r\nkeep_prob = 0.5\r\nlambda1 = 0.001\r\nlambda2 = 0.001\r\ntrunc_len = 10\r\ntrain_batch_size = 64\r\ntest_batch_size = 64\r\n\r\nworkdir = 'DANSER-WWW-19' # change to your workdir\r\nwith open('data/dataset.pkl', 'rb') as f:\r\n\ttrain_set = pickle.load(f)\r\n\ttest_set = pickle.load(f)\r\nwith open('data/list.pkl', 'rb') as f:\r\n u_friend_list = pickle.load(f)\r\n u_read_list = pickle.load(f)\r\n uf_read_list = pickle.load(f) \r\n i_friend_list = pickle.load(f)\r\n i_read_list = pickle.load(f)\r\n if_read_list = pickle.load(f)\r\n i_link_list = pickle.load(f)\r\n user_count, item_count = pickle.load(f)\r\n\r\ndef calc_metric(score_label_u):\r\n\tscore_label_u = sorted(score_label_u, key=lambda d:d[0], reverse=True)\r\n\tprecision = np.array([eval.precision_k(score_label_u, k) for k in range(1, 21)])\r\n\tndcg = np.array([eval.ndcg_k(score_label_u, k) for k in range(1, 21)])\r\n\tauc = eval.auc(score_label_u)\r\n\tmae = eval.mae(score_label_u)\r\n\trmse = eval.rmse(score_label_u)\r\n\treturn precision, ndcg, auc, mae, rmse\r\n\r\ndef get_metric(score_label):\r\n\tPrecision = np.zeros(20)\r\n\tNDCG = np.zeros(20)\r\n\tAUC = 0.\r\n\tscore_df = pd.DataFrame(score_label, columns=['uid', 'score', 'label'])\r\n\tnum = 0\r\n\tscore_label_all = []\r\n\tfor uid, hist in score_df.groupby('uid'):\r\n\t\tif hist.shape[0]<10:\r\n\t\t\tcontinue\r\n\t\tscore = hist['score'].tolist()\r\n\t\tlabel = hist['label'].tolist()\r\n\t\tscore_label_u = []\r\n\t\tfor i in range(len(score)):\r\n\t\t\tscore_label_u.append([score[i], label[i]])\r\n\t\t\tscore_label_all.append([score[i], label[i]])\r\n\t\tprecision, ndcg, auc, mae, rmse = calc_metric(score_label_u)\r\n\t\tPrecision += precision\r\n\t\tNDCG += ndcg\r\n\t\tAUC 
+= auc\r\n\t\tnum += 1\r\n\tscore_label_all = sorted(score_label_all, key=lambda d:d[0], reverse=True)\r\n\tGPrecision = np.array([eval.precision_k(score_label_all, k*len(score_label_all)/100) for k in range(1, 21)])\r\n\tGAUC = eval.auc(score_label_all)\r\n\tMAE = eval.mae(score_label_all)\r\n\tRMSE = eval.rmse(score_label_all)\r\n\treturn Precision / num, NDCG / num, AUC / num, GPrecision, GAUC, MAE, RMSE\r\n\t\t\r\ndef _eval(sess, model):\r\n\tloss_sum = 0.\r\n\tbatch = 0\r\n\tscore_label = []\r\n\tfor _, datainput, u_readinput, u_friendinput, uf_readinput, u_read_l, u_friend_l, uf_read_linput, \\\r\n\t\ti_readinput, i_friendinput, if_readinput, i_linkinput, i_read_l, i_friend_l, if_read_linput in \\\r\n\tDataInput(test_set, u_read_list, u_friend_list, uf_read_list, i_read_list, i_friend_list, if_read_list, \\\r\n\t\ti_link_list, test_batch_size, trunc_len):\r\n\t\tscore_, loss = model.eval(sess, datainput, u_readinput, u_friendinput, uf_readinput, u_read_l, \\\r\n\t\tu_friend_l, uf_read_linput, i_readinput, i_friendinput, if_readinput, i_linkinput, i_read_l, i_friend_l, if_read_linput, lambda1, lambda2)\r\n\t\tfor i in range(len(score_)):\r\n\t\t\tscore_label.append([datainput[1][i], score_[i], datainput[2][i]])\r\n\t\tloss_sum += loss\r\n\t\tbatch += 1\r\n\tPrecision, NDCG, AUC, GPrecision, GAUC, MAE, RMSE = get_metric(score_label) \r\n\treturn loss_sum/batch, Precision, NDCG, MAE, RMSE\r\n\r\ngpu_options = tf.compat.v1.GPUOptions(allow_growth=True)\r\nwith tf.compat.v1.Session() as sess:\r\n\tmodel = Model(user_count, item_count)\r\n\tmodel.restore(sess, 'model/DUAL_GAT.ckpt')\r\n\r\n\tTest_loss, P, N, MAE, RMSE = _eval(sess, model)\r\n\tprint('Test_loss: %.4f P@3: %.4f P@5: %.4f P@10: %.4f NDCG@3: %.4f NDCG@5: %.4f NDCG@10: %.4f MAE: %.4f RMSE: %.4f' %\r\n\t(Test_loss, P[2], P[4], P[9], N[2], N[4], N[9], MAE, RMSE))\r\n\r\n\tsys.stdout.flush()\r\n\t\r\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"15154455","text":"\nimport smart_imports\n\nsmart_imports.all()\n\n\nclass UsePvPAbilityTests(utils_testcase.TestCase):\n\n def setUp(self):\n super(UsePvPAbilityTests, self).setUp()\n\n self.p1, self.p2, self.p3 = game_logic.create_test_map()\n\n self.account_1 = self.accounts_factory.create_account()\n self.account_2 = self.accounts_factory.create_account()\n\n self.storage = game_logic_storage.LogicStorage()\n self.storage.load_account_data(self.account_1)\n self.storage.load_account_data(self.account_2)\n\n self.hero_1 = self.storage.accounts_to_heroes[self.account_1.id]\n self.hero_2 = self.storage.accounts_to_heroes[self.account_2.id]\n\n self.battle = prototypes.Battle1x1Prototype.create(self.account_1)\n self.battle.set_enemy(self.account_2)\n self.battle.save()\n\n self.ability = random.choice(list(abilities.ABILITIES.values()))\n\n self.task = postponed_tasks.UsePvPAbilityTask(battle_id=self.battle.id, account_id=self.account_1.id, ability_id=self.ability.TYPE)\n\n self.meta_action_battle = actions_meta_actions.ArenaPvP1x1.create(self.storage, self.hero_1, self.hero_2)\n self.meta_action_battle.set_storage(self.storage)\n\n actions_prototypes.ActionMetaProxyPrototype.create(hero=self.hero_1, _bundle_id=self.hero_1.actions.current_action.bundle_id, meta_action=self.meta_action_battle)\n actions_prototypes.ActionMetaProxyPrototype.create(hero=self.hero_2, _bundle_id=self.hero_1.actions.current_action.bundle_id, 
meta_action=self.meta_action_battle)\n\n def test_create(self):\n self.assertEqual(self.task.state, postponed_tasks.USE_PVP_ABILITY_TASK_STATE.UNPROCESSED)\n self.assertEqual(self.task.battle_id, self.battle.id)\n self.assertEqual(self.task.account_id, self.account_1.id)\n self.assertEqual(self.task.ability_id, self.ability.TYPE)\n\n def test_serialize(self):\n self.assertEqual(self.task.serialize(), postponed_tasks.UsePvPAbilityTask.deserialize(self.task.serialize()).serialize())\n\n def test_process_battle_not_found(self):\n prototypes.Battle1x1Prototype._db_all().delete()\n self.task.process(postponed_tasks_helpers.FakePostpondTaskPrototype(), self.storage)\n self.assertEqual(self.task.state, postponed_tasks.USE_PVP_ABILITY_TASK_STATE.BATTLE_FINISHED)\n\n def test_process_hero_not_found(self):\n self.storage.release_account_data(self.account_1.id)\n self.task.process(postponed_tasks_helpers.FakePostpondTaskPrototype(), self.storage)\n self.assertEqual(self.task.state, postponed_tasks.USE_PVP_ABILITY_TASK_STATE.HERO_NOT_FOUND)\n\n def test_wrong_ability_id(self):\n task = postponed_tasks.UsePvPAbilityTask(battle_id=self.battle.id, account_id=self.account_1.id, ability_id='wrong_ability_id')\n task.process(postponed_tasks_helpers.FakePostpondTaskPrototype(), self.storage)\n self.assertEqual(task.state, postponed_tasks.USE_PVP_ABILITY_TASK_STATE.WRONG_ABILITY_ID)\n\n def test_no_resources(self):\n self.task.process(postponed_tasks_helpers.FakePostpondTaskPrototype(), self.storage)\n self.assertEqual(self.task.state, postponed_tasks.USE_PVP_ABILITY_TASK_STATE.NO_ENERGY)\n\n def test_process_success(self):\n self.meta_action_battle.hero_1_pvp.set_energy(1)\n\n old_hero_1_last_message = self.hero_1.journal.messages[-1]\n old_hero_2_last_message = self.hero_2.journal.messages[-1]\n\n self.assertEqual(self.task.process(postponed_tasks_helpers.FakePostpondTaskPrototype(), self.storage), POSTPONED_TASK_LOGIC_RESULT.SUCCESS)\n self.assertEqual(self.task.state, postponed_tasks.USE_PVP_ABILITY_TASK_STATE.PROCESSED)\n\n self.assertNotEqual(old_hero_1_last_message, self.hero_1.journal.messages[-1])\n self.assertNotEqual(old_hero_2_last_message, self.hero_2.journal.messages[-1])\n\n self.assertNotEqual(old_hero_1_last_message.ui_info(), self.hero_1.journal.ui_info()[-1])\n self.assertEqual(old_hero_2_last_message.ui_info(), self.hero_2.journal.ui_info()[-1])\n\n self.assertEqual(self.meta_action_battle.hero_1_pvp.energy, 0)\n","sub_path":"src/the_tale/the_tale/game/pvp/tests/test_use_pvp_ability_task.py","file_name":"test_use_pvp_ability_task.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"101893184","text":"# coding=utf-8\r\nimport pyAudioAnalysis\r\nimport pyAudioAnalysis.audioBasicIO as audioBasicIO\r\nimport pyAudioAnalysis.ShortTermFeatures as sF\r\nimport os\r\nimport glob\r\nfrom pydub import AudioSegment\r\nimport re\r\n\r\ndef loadAudio(path):\r\n Fs, x = audioBasicIO.read_audio_file(path)\r\n x = audioBasicIO.stereo_to_mono(x)\r\n return Fs, x\r\n\r\ndef getTXT(file):\r\n pattern = re.compile(r'([^<>/\\\\\\|:\"\"\\*\\?]+)\\.\\w+$')\r\n fileName = pattern.findall(file)[0]\r\n\r\n # mp4 to wav\r\n wav_filename = fileName + '.wav'\r\n AudioSegment.from_file(file).export('store/audioStore/' + wav_filename, format='wav')\r\n\r\n # wav to txt\r\n Fs, x = loadAudio('store/audioStore/' + wav_filename)\r\n print(Fs, x)\r\n st_features, st_features_name = sF.feature_extraction(x, Fs, 0.050 * Fs, 0.025 
* Fs, deltas=False)\r\n outputFile = open('store/audioEvaluationTxt/' + fileName + '.txt', 'w')\r\n for col in range(st_features.shape[1]):\r\n sampleFeature = []\r\n for row in range(st_features.shape[0]):\r\n feature = st_features[row][col]\r\n sampleFeature.append(feature)\r\n sampleString = str(sampleFeature).replace('[', '').replace(']', '')\r\n outputFile.write(sampleString + '\\n')\r\n outputFile.close()\r\n outPath = 'store/audioEvaluationTxt/' + fileName + '.txt'\r\n return outPath\r\n","sub_path":"Server4Lipreader/AudioEvaluation/WordMode/DataPreProcess/Video2Txt.py","file_name":"Video2Txt.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"112271743","text":"import os\nfrom timeit import default_timer as timer\nos.environ['TF_KERAS'] = '1'\n\n#1.5b\n#hparams = {\n# \"n_vocab\": 50257,\n# \"n_ctx\": 1024,\n# \"n_embd\": 1600,\n# \"n_head\": 25,\n# \"n_layer\": 48\n#}\n\n#774M\nhparams = { \n \"n_vocab\": 50257, \n \"n_ctx\": 1024, \n \"n_embd\": 1280, \n \"n_head\": 20, \n \"n_layer\": 36 \n} \n\nimport keras_gpt_2\n#import split\nimport numpy as np\n\n# inititalize strategies\nimport tensorflow as tf\nfrom tensorflow.python.tpu import device_assignment as device_assignment_lib\nfrom tensorflow.python.distribute import tpu_strategy as tpu_lib\nif 'COLAB_TPU_ADDR' in os.environ:\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver()\nelse:\n from resolver import res as resolver\n\ntf.config.experimental_connect_to_cluster(resolver)\ntopology = tf.tpu.experimental.initialize_tpu_system(resolver)\n# strategy = tf.distribute.experimental.TPUStrategy(resolver)\n\ndevice_assignment = device_assignment_lib.DeviceAssignment.build(\n topology, num_replicas=4)\n\nfirst_strategy = tpu_lib.TPUStrategy(\n resolver, device_assignment=device_assignment)\n\n# Computation on the 2nd half.\ndevice_assignment2 = device_assignment_lib.DeviceAssignment(\n topology, [[[1, 0, 0]],[[1, 0, 1]],[[1, 1, 0]],[[1, 1, 1]]])\n\nsecond_strategy = tpu_lib.TPUStrategy(\n resolver, device_assignment=device_assignment2)\n\n\nprint('initialized strategies')\n#breakpoint()\n\nsplit_n=20\nwith first_strategy.scope():\n m1, l_shape, embd_shape = keras_gpt_2.model.get_model_first(split_n=split_n, **hparams)\n #m1, _ = split.split_model(model, split_n=23)\n #del model\n opt1 = tf.keras.optimizers.Adagrad(learning_rate=1e-4)\nwith second_strategy.scope():\n m2 = keras_gpt_2.model.get_model_second(split_n=split_n,\n l_shape=l_shape, embeddings_shape=embd_shape,\n **hparams)\n #_, m2 = split.split_model(model, split_n=23)\n #del model\n opt2 = tf.keras.optimizers.Adagrad(learning_rate=1e-4)\n\nprint('created models')\n#breakpoint()\n\n\n@tf.function\ndef first_half(x, output_grads, embd_grads):\n with tf.GradientTape() as tape:\n intermed, embeddings = m1(x)\n intermed_loss = tf.reduce_sum(intermed*output_grads)\n embd_loss = tf.reduce_sum(embeddings*embd_grads)\n grads = tape.gradient(intermed_loss + embd_loss, m1.trainable_variables)\n opt1.apply_gradients(zip(grads, m1.trainable_variables))\n return intermed, embeddings\n\n@tf.function\ndef second_half(inter, embd):\n with tf.GradientTape() as tape:\n tape.watch([inter, embd])\n output_data = m2([inter, embd])\n loss = tf.reduce_mean(output_data) #fake loss for dev purposes\n local_grads, input_grads, embd_grads= tape.gradient(loss,[m2.trainable_variables, inter, embd])\n opt2.apply_gradients(zip(local_grads, m2.trainable_variables))\n #input_grads, embd_grads = 
tape.gradient(loss, [inter, embd])\n\n return output_data, input_grads, embd_grads\n\n# fake input data for now\ninput_data = np.random.randint(0, 50000, size=(1,1024))\n\n@tf.function\ndef fake_get_data(): #wrap fake input data into perreplica objects\n intermed, embeds = m1(input_data)\n return input_data, intermed, embeds\n\n@tf.function\ndef first_iter_intermed_embd():\n return tf.zeros()\n\n#@tf.function\ndef test():\n per_replica_data, fake_intermed_grad, fake_embeds_grad = first_strategy.experimental_run_v2(fake_get_data)\n intermed1, embd1 = first_strategy.experimental_run_v2(\n first_half, (per_replica_data, fake_intermed_grad, fake_embeds_grad))\n #breakpoint()\n print('intermed', intermed1.values[0].shape)\n print('embd', embd1.values[0].shape)\n out, intermed_grads1, embd_grads1 = second_strategy.experimental_run_v2(second_half, (intermed1, embd1))\n print('output', out.values[0].shape)\n print('interemd_grads', intermed_grads1.values[0].shape)\n print('embd_grads', embd_grads1.values[0].shape)\n\n start = timer()\n for i in range(800):\n intermed2, embd2 = first_strategy.experimental_run_v2(\n first_half, (per_replica_data, intermed_grads1, embd_grads1))\n out, intermed_grads2, embd_grads2 = second_strategy.experimental_run_v2(second_half, (intermed1, embd1))\n intermed1, embd1 = first_strategy.experimental_run_v2(\n first_half, (per_replica_data, intermed_grads2, embd_grads2))\n #out, intermed_grads1, embd_grads1 = second_strategy.experimental_run_v2(second_half, (intermed2, embd2))\n print('output', out.values[0].shape)\n end = timer()\n print('time:', end - start)\n sum = tf.function(tf.reduce_sum)\n return second_strategy.experimental_run_v2(sum, (out,))\n\n#%%time\nout = test()\nprint(out)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"638427955","text":"from alchemy import Session\nfrom flask import Blueprint, request\nfrom flask_restplus import Namespace, fields, Resource\nfrom tahelka.models.Usage import Usage\nfrom tahelka.auth.token_authenticator import TokenAuthenticator\nfrom tahelka.analytics.date_converter import DateConverter\nfrom tahelka.analytics.summarizer import Summarizer\nfrom tahelka.analytics.recorder import Recorder\n\napi = Namespace('API Usage Summary', path='/usage_summary',\n description='Summary of recorded usage of the API')\n\n@api.route('')\nclass UsageSummary(Resource):\n description='''\\\n Shows a summary of the recorded usage of the API with specified query parameters.\n The user could specify the date interval of the records to be considered.\n This endpoint is also able to show a summary of the API usage by a particular user.
\n The summary includes:\n - The total count of usage of the API\n - The usage counts of different endpoints\n - Counts of different HTTP response status codes given by the service\n '''\n @api.doc(description=description)\n @api.param('start_date', type=str, description='Only consider records starting from this date (Y-m-d)', format='date')\n @api.param('end_date', type=str, description='Only consider records ending on this date. (Y-m-d)', format='date')\n @api.param('user_id', type=int, description='Only consider usage by this user ID.')\n @api.response(200, \"API usage summary has been successfully shown.\")\n @api.response(401, \"The JWT provided is incorrect or expired.\")\n @api.response(403, \"You are not authorized to access this resource.\")\n def get(self):\n '''\n Shows a summary of the recorded usage of the API\n '''\n auth_header = request.headers.get('Authorization')\n TokenAuthenticator(auth_header, True).authenticate()\n\n start_date_string = request.args.get('start_date')\n end_date_string = request.args.get('end_date')\n user_id = request.args.get('user_id')\n\n start_date = DateConverter(start_date_string).convert()\n end_date = DateConverter(end_date_string).convert()\n\n summarizer = Summarizer(user_id=user_id, start_date=start_date,\n end_date=end_date)\n summary = summarizer.summarize()\n\n status_code = 200\n record = Recorder('usage_summary', status_code)\n record.recordUsage()\n\n return summary, status_code\n","sub_path":"apis/usage_summary.py","file_name":"usage_summary.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"13049759","text":"\"\"\"\nIn the game of ten-pin bowling, a player rolls a bowling ball down a lane to knock over pins. There are ten pins set at the end of the bowling lane. Each player has 10 frames to roll a bowling ball down a lane and knock over as many pins as possible. The first nine frames are ended after two rolls or when the player knocks down all the pins. In the last frame a player will receive an extra roll every time they knock down all ten pins; up to a maximum of three total rolls.\n\nThe Challenge\n\nIn this challenge you will be given a string representing a player's ten frames. It will look something like this: 'X X 9/ 80 X X 90 8/ 7/ 44' (in Java: \"X X 9/ 80 X X 90 8/ 7/ 44\"), where each frame is space-delimited, 'X' represents strikes, and '/' represents spares. Your goal is to take this string of frames into a function called bowlingScore and return the player's total score.\n\nScoring\n\nThe scoring for ten-pin bowling can be difficult to understand, and if you're like most people, easily forgotten if you don't play often. Here is a quick breakdown:\n\nFrames\n\nIn Ten-Pin Bowling there are ten frames per game. Frames are the player's turn to bowl, which can be multiple rolls. In the first 9 frames you get 2 rolls maximum to try to get all 10 pins down. On the 10th or last frame a player will receive an extra roll each time they get all ten pins down to a maximum of three total rolls. Also on the last frame bonuses are not awarded for strikes and spares moving forward.\n\nIn this challenge, three frames might be represented like this: 54 72 44. In this case, the player has had three frames. On their first frame they scored 9 points (5 + 4), on their second frame they scored 9 points (7 + 2) and on their third frame they scored 8 points (4 + 4). This is a very simple example of bowling scoring. 
It gets more complicated when we introduce strikes and spares.\n\nStrikes\n\nRepresented in this challenge as 'X'\n\nA strike is scored when a player knocks all ten pins down in one roll. In the first 9 frames this will conclude the players turn and it will be scored as 10 points plus the points received from the next two rolls. So if a player were to have two frames X 54, the total score of those two frames would be 28. The first frame would be worth 19 (10 + 5 + 4) and the second frame would be worth 9 (5 + 4).\n\nA perfect game in bowling is 12 strikes in a row and would be represented like this 'X X X X X X X X X XXX' (in Java: \"X X X X X X X X X XXX\"). This adds up to a total score of 300.\n\nSpares\n\nRepresented in this challenge as '/'\n\nA spare is scored when a player knocks down all ten pins in two rolls. In the first 9 frames this will be scored as 10 points plus the next roll. So if a player were to have two frames 9/ 54, the total score of the two frames would be 24. The first frame would be worth 15 (10 + 5) and the second frame would be worth 9 (5 + 4).\n\nFor a more detailed explanation see Wikipedia:\n\nhttp://en.wikipedia.org/wiki/Ten-pin_bowling#Scoring\n\"\"\"\n\ndef bowling_score(frames):\n balls = []\n multiply = []\n frame = frames.split(' ')\n ten_frame = 0\n for num, fr in enumerate(frame):\n temp_score = 0\n if fr == 'X':\n multiply.append(2)\n balls.append(10)\n elif num == 9:\n temp_score = 0\n all_balls = len(balls)\n for scr in fr:\n if scr == 'X':\n ten_frame += 10\n balls.append(10)\n multiply.append(2)\n elif scr == '/':\n spare = 10 - temp_score\n ten_frame += spare\n balls.append(spare)\n multiply.append(1)\n else:\n temp_score = int(scr)\n ten_frame += int(scr)\n balls.append(int(scr))\n multiply.append(0)\n else:\n temp_score = 0\n for scr in fr:\n if scr == '/':\n spare = 10 - temp_score\n balls.append(spare)\n multiply.append(1)\n else:\n temp_score = int(scr)\n balls.append(int(scr))\n multiply.append(0)\n\n\n\n score = 0\n score += ten_frame\n for num in range(0, all_balls):\n if multiply[num] == 2:\n score += balls[num] + balls[num+1] + balls[num+2]\n elif multiply[num] == 1:\n score += balls[num] + balls[num+1]\n else:\n score += balls[num]\n\n return score\n\n\nif __name__ == '__main__':\n print('Basic Tests')\n print('maybe this bowler should put bumpers on')\n print(bowling_score('11 11 11 11 11 11 11 11 11 11'), 20)\n print('woah! 
Perfect game!')\n print(bowling_score('X X X X X X X X X XXX'), 300)\n\n","sub_path":"Kata/Ten-Pin Bowling.py","file_name":"Ten-Pin Bowling.py","file_ext":"py","file_size_in_byte":4810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"22332799","text":"from .fetching import (\n fetch_drug_directory,\n fetch_employee_salaries,\n fetch_medical_charge,\n fetch_midwest_survey,\n fetch_open_payments,\n fetch_road_safety,\n fetch_traffic_violations,\n get_data_dir,\n)\n\n__all__ = [\n \"get_data_dir\",\n \"fetch_medical_charge\",\n \"fetch_midwest_survey\",\n \"fetch_employee_salaries\",\n \"fetch_road_safety\",\n \"fetch_open_payments\",\n \"fetch_drug_directory\",\n \"fetch_traffic_violations\",\n]\n","sub_path":"dirty_cat/datasets/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"306041097","text":"import numpy as np \n\nimport random \n\n \n \n\na= np.zeros((3,12)) \n\n\n\n \n\nfor i in range(len(a)): \n\n for j in range(len(a[i])): \n\n if(i == 0): \n\n dep = \"Santander\" \n\n elif(i == 1): \n\n dep = \"Guajira\" \n\n else: \n\n dep = \"Nariño\" \n\n a[i][j] = (random.randint(18,28))\n\n#para observar la matriz\nprint(a) \n\n#para \"separar\" la matriz como \"listas\"\n\ns = a[[0,], :]\ng = a[[1,], :]\nn = a[[2,], :]\nprint(\"santander: \",s)\nprint(\"Guajira: \",g)\nprint(\"Nariño: \",n)\n\n#para hallar la temperatura mas alta por departamento\ns_1=(np.max(s))\ns_2=(np.max(g))\ns_3=(np.max(n))\nprint(\"la temperatura mayor en Santander :\",(np.max(s)))\nprint(\"la temperatura mayor en Guajira:\",(np.max(g)))\nprint(\"la temperatura mayor en Nariño:\",(np.max(n)))\n\n#para hallar la posicion de las temperaturas en cada departamento\nnp_array = np.array((s))\nposicion_santan = (np.where(np.max(np_array)== np_array))\nprint(\"numero del mes en que la temperatura fue maxima en Santander: \",posicion_santan)\n\nnp_array = np.array((g))\nposicion_guaji = (np.where(np.max(np_array)== np_array))\nprint(\"numero del mes en que la temperatura fue maxima en la Guajira: \",posicion_guaji)\n\nnp_array = np.array((n))\nposicion_nariño = (np.where(np.max(np_array)== np_array))\nprint(\"numero del mes en que la temperatura fue maxima en Nariño: \",posicion_nariño)\n\n#diccionario para saber el mes por posicion\nmeses = {\"0\":\"enero\",\"1\":\"febrero\",\"2\":\"marzo\",\"3\":\"abril\",\n \"4\":\"mayo\",\"5\":\"junio\",\"6\":\"julio\",\"7\":\"agosto\",\n \"8\":\"septiembre\",\"9\":\"octubre\",\"10\":\"noviembre\",\"11\":\"diciembre\"}\nprint(meses)\n\n#entrada para saber si hubo mas de 1 mes con maxima temperatura\nb = int(input(\"digite la cantidad de meses en los que la temperatura fue maxima en Santander:\"))\nsb = s_1*b\nprint(\"total de la suma de los meses en los que la temperatura fue maxima en Santander: \",sb)\n\nc = int(input(\"digite la cantidad de meses en los que la temperatura fue maxima en la Guajira:\"))\ngc = s_2*c\nprint(\"total de la suma de los meses en los que la temperatura fue maxima en la Guajira: \",gc)\n\nd = int(input(\"digite la cantidad de meses en los que la temperatura fue maxima en Nariño:\"))\nnd = s_3*d\nprint(\"total de la suma de los meses en los que la temperatura fue maxima en Nariño \",nd)\n\n#para sacar el promededio de los meses mas calientes de los 3 departamentos\ntemp_max_prom = (sb+gc+nd)/3\nprint(\"promedio de los meses mas calientes de los 3 departamentos: 
\",temp_max_prom)\n\n#para saber cual de los 3 departamentos tiene el \"promedio\" mas alto o si hay mas de 1 con el \"promedio\" mas alto\nif(sb > gc and sb > nd):\n print(\"El promedio mayor es el de Santander\" + str(sb))\nelif(sb == gc and sb == nd):\n print(\"todos tienen el mismo promedio \")\nelif(gc > sb and gc > nd):\n print(\"El promedio mayor es el de la Guajira \" + str(gc))\nelif(sb == gc ):\n print(\"Santander y Guajira tienen los promedios mas altos \") \nelif(nd > sb and nd>gc):\n print(\"El promedio mayor es el de Nariño \" + str(nd))\nelif(sb==nd):\n print(\"Santander y Nariño tienen los promedios mas altos\")\nelif(gc == nd):\n print(\"Guajira y Nariño tienen los promedios mas altos\")\n \n\n#contadores\n\n\n\ntempSan = 0 \n\ntempGua = 0 \n\ntempNar = 0\n\n \n \n\nfor i in range(3): \n\n for j in range(12): \n\n if(i==0): \n\n tempSan += a[i][j] \n\n if(i==1): \n\n tempGua += a[i][j] \n\n elif(i==2): \n\n tempNar += a[i][j] \n\n\n\npromSan = tempSan/12 \n\npromGua = tempGua/12 \n\npromNar = tempNar/12 \n\npromNac = (promSan+promGua+promNar)/3 \n\n\n\nprint(\"Promedio de la temperatura de santander: \"+str(promSan)+\"\\n\", \n\n \"Promedio de la temperatura de Guajira: \"+str(promGua)+\"\\n\", \n\n \"Promedio de la temperatura de Nariño: \"+str(promNar)+\"\\n\", \n\n \"Promedio de la temperatura Nacional: \"+str(promNac)) \n\n","sub_path":"custom_functions/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"554405023","text":"# -*- coding: utf-8 -*-\nimport utils, matrix_builder, numpy, os, pyprind\nfrom scipy.sparse import find\n\nversion = '3.5'\nsystem_model = 'Allocation, cut-off'\nmax_iteration = 1000\nconvergence_criterion = .00001\n\nfolder = utils.version_system_model_path(version, system_model)\nA, B, C, indexes, Z = utils.load_matrices(folder)\nfor ie_index in pyprind.prog_bar(range(len(indexes.ie))):\n rows = [ie_index]\n columns = [0]\n coefficients = [1.]\n f = matrix_builder.matrix_from_list(rows, columns, coefficients, len(indexes.ie), 1)\n s = f.copy()\n for i in range(max_iteration):\n f_ = Z*f\n s = s + f_\n if s.nnz == f.nnz:\n #only check convergence of values if number of non-zero amount has converged\n _, _, ss = find(s)\n _, _, ff = find(f_)\n rel = abs(numpy.divide(ff, ss))\n if rel.max() < convergence_criterion:\n break\n f = f_.copy()\n \n h_power = C*B*s\n h_exact = utils.pkl_load(os.path.join(folder, 'pkl', 'LCIA'), str(ie_index))\n h_power = numpy.resize(h_power.todense(), h_exact.shape)\n delta = h_exact - h_power\n rel_delta = abs(numpy.divide(delta, h_exact))\n print(rel_delta.max())","sub_path":"modules/miscelanous/solve_with_power_series.py","file_name":"solve_with_power_series.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"273398550","text":"from splinter import Browser\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\nfrom time import sleep\n\n\nexecutable_path = {'executable_path': 'C:/Users/kuhnie/Bootcamp_Repo/chromedriver.exe'}\nbrowser = Browser('chrome', **executable_path, headless=False)\n\n\ndef scrape(): \n m_dict = {}\n \n mars_news(m_dict)\n jpl_img(m_dict)\n mars_weather(m_dict)\n mars_facts(m_dict)\n mars_hemi(m_dict)\n \n return m_dict\n\ndef mars_news(d):\n from splinter import Browser\n from bs4 import BeautifulSoup as bs\n \n url = \"https://mars.nasa.gov/news/\"\n 
browser.visit(url)\n sleep(2)\n html = browser.html\n soup = bs(html, 'html.parser')\n\n slide = soup.find('div', class_='list_text')\n title = slide.find('a').get_text()\n p = slide.find('div', class_='article_teaser_body').text\n\n d['news'] = {'title':title,'p':p}\n \ndef jpl_img(d):\n from splinter import Browser\n from bs4 import BeautifulSoup as bs\n \n base_url = \"https://www.jpl.nasa.gov\" \n url = base_url + \"/spaceimages/?search=&category=Mars\"\n browser.visit(url)\n sleep(2)\n link = browser.click_link_by_id('full_image')\n sleep(2)\n html = browser.html\n soup = bs(html, 'html.parser')\n featured_img_url = base_url + soup.find('img',class_='fancybox-image')['src']\n\n d['jpl'] = featured_img_url\n\ndef mars_weather(d):\n import time\n from splinter import Browser\n from bs4 import BeautifulSoup as bs\n \n url = \"https://twitter.com/marswxreport?lang=en\"\n browser.visit(url)\n\n time.sleep(2)\n\n html = browser.html\n soup = bs(html, 'html.parser')\n\n\n tweet = soup.article.get_text()\n\n tweet == tweet #tweetology\n\n d['weather']=tweet\n \ndef mars_facts(d):\n import pandas as pd\n url = 'https://space-facts.com/mars/'\n sleep(2)\n tables = pd.read_html(url)\n df = tables[0]\n df = df.set_index(0)\n df = df.rename(columns={1:'Values'})\n df.index.name = 'Metrics'\n df_html = df.to_html()\n \n d['facts']=df_html\n \ndef mars_hemi(d):\n from splinter import Browser\n from bs4 import BeautifulSoup as bs\n \n url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n browser.visit(url)\n sleep(2)\n \n html = browser.html\n soup = bs(html, 'html.parser')\n\n main = soup.find('div', class_='collapsible results')\n\n links = main.find_all('div',class_='description')\n\n hemisphere_image_urls = []\n base_url = 'https://astrogeology.usgs.gov'\n\n for link in links:\n title = link.h3.text\n img_url = link.a['href']\n img_dict = {'title':title,'img_url':img_url}\n hemisphere_image_urls.append(img_dict)\n\n try:\n for title in hemisphere_image_urls:\n link_title = title['title']\n #browser.links.find_by_partial_text(link_title)\n browser.visit(base_url+title['img_url'])\n sleep(5)\n html = browser.html\n soup = bs(html, 'html.parser')\n img_url = soup.find('img',class_='wide-image')\n title['img_url'] = base_url + img_url['src']\n browser.back()\n except Exception as e:\n print(link_title)\n print(e)\n\n d['hemi'] = hemisphere_image_urls","sub_path":"Missions_to_Mars/.ipynb_checkpoints/scrape_mars-checkpoint.py","file_name":"scrape_mars-checkpoint.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"348307517","text":"\"\"\"This is a module for sending email using Amazon Simple Email Service.\"\"\"\nfrom string import Template\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport boto3\n\n\n# Uses Amazon SES for sending emails\nclass EmailUtil:\n def __init__(self, to, subject, attachment=False):\n self.to = to\n self.subject = subject\n self.attachment = attachment\n self._html = None\n self._text = None\n\n def _render(self, filename, context):\n with open(filename, 'r', encoding='utf-8') as template_file:\n template_file_content = template_file.read()\n message_template = Template(template_file_content)\n message = message_template.safe_substitute(context)\n return message\n\n def html(self, filename, context):\n self._html = self._render(filename, context)\n\n def text(self, filename, context):\n self._text = 
self._render(filename, context)\n\n def send(self, from_addr=None):\n if isinstance(self.to, str):\n self.to = [self.to]\n if not from_addr:\n from_addr = 'no-reply@gmail.net'\n if not self._html and not self._text:\n raise Exception('You must provide a text or html body.')\n\n # preferred\n session = boto3.Session(profile_name='default')\n\n # alternative\n # session = boto3.Session(\n # region_name='',\n # aws_secret_access_key=\"\",\n # aws_access_key_id=\"\"\n # )\n\n email_service = session.client('ses')\n\n msg = MIMEMultipart()\n msg['Subject'] = self.subject\n msg['From'] = from_addr\n msg['To'] = ', '.join(self.to)\n\n body = \"\"\n email_type = \"\"\n\n # the message body\n if self._text:\n body = self._text\n email_type = 'plain'\n if self._html:\n body = self._html\n email_type = 'html'\n\n part = MIMEText(body, email_type)\n msg.attach(part)\n\n raw_message = {\n 'Data': msg.as_string()\n }\n\n try:\n email_service.send_raw_email(\n RawMessage=raw_message,\n Source=from_addr,\n Destinations=self.to)\n except Exception as e:\n raise Exception(\"Email ERROR : {}\".format(e))\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"util/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"440460933","text":"from typing import NoReturn\n\nfrom PyQt5.QtCore import QLocale\nfrom PyQt5.QtWidgets import QMenu, QWidget\n\nfrom ...model.manager.ManagerModel import ManagerModel\nfrom ..config.ConfigView import ConfigView\nfrom ..MenuBar.AboutWindow import AboutWindow\nfrom ..Translator import Translator\nfrom ..Workspace.WorkspaceView import WorkspaceView\nfrom .LanguageAction import LanguageAction\n\n\nclass FileMenuView(QMenu):\n \"\"\"This class represents the file menu of the main menu bar\"\"\"\n def __init__(self, parent: QWidget):\n super().__init__(parent)\n\n self.addAction(\"\", self.__clear_workspace)\n\n Translator.language_changed.signal.connect(self.__update_text)\n self.__update_text()\n\n def __update_text(self):\n self.setTitle(Translator.tr(\"Datei\"))\n self.actions()[0].setText(Translator.tr(\"Arbeitsfläche leeren\"))\n\n def __clear_workspace(self) -> NoReturn:\n WorkspaceView.delete_all()\n\n\nclass SettingsMenuView(QMenu):\n \"\"\"This class represents the settings menu of the main menu bar\"\"\"\n def __init__(self, parent: QWidget):\n super().__init__(parent)\n\n self.addAction(\"\", self.__open_global_options)\n self.__settings_window: ConfigView = None\n ManagerModel.init_config()\n\n self.__language_menu = QMenu(\"\", self)\n self.addMenu(self.__language_menu)\n\n # Add languages here\n self.__language_menu.addAction(LanguageAction(self, QLocale.German))\n self.__language_menu.addAction(LanguageAction(self, QLocale.English))\n\n Translator.language_changed.signal.connect(self.__update_text)\n self.__update_text()\n\n def __update_text(self):\n self.setTitle(Translator.tr(\"Einstellungen\"))\n self.actions()[0].setText(Translator.tr(\"Globale Einstellungen\"))\n self.__language_menu.setTitle(Translator.tr(\"Sprache\"))\n self.__language_menu.actions()[0].setText(Translator.tr(\"Deutsch\"))\n self.__language_menu.actions()[1].setText(Translator.tr(\"Englisch\"))\n\n def __open_global_options(self):\n self.__settings_window = ConfigView(\"Globales\", ManagerModel.get_settings())\n\n\nclass HelpMenuView(QMenu):\n \"\"\"This class represents the help menu of the main menu bar\"\"\"\n def __init__(self, parent: QWidget):\n 
super().__init__(parent)\n\n self.addAction(\"\", self.__show_about)\n self.__about_window: AboutWindow = None\n\n Translator.language_changed.signal.connect(self.__update_text)\n self.__update_text()\n\n def __update_text(self):\n self.setTitle(Translator.tr(\"Hilfe\"))\n self.actions()[0].setText(Translator.tr(\"Über\"))\n\n def __show_about(self) -> NoReturn:\n self.__about_window = AboutWindow()\n self.__about_window.show()\n","sub_path":"phypigui/python/src/view/MenuBar/MenuView.py","file_name":"MenuView.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"39693716","text":"from __future__ import unicode_literals\n\nfrom django.conf.urls import url\n\nfrom extras.views import ObjectChangeLogView\nfrom ipam.views import ServiceCreateView\nfrom . import views\nfrom .models import Cluster, ClusterGroup, ClusterType, VirtualMachine\n\napp_name = 'virtualization'\nurlpatterns = [\n\n # Cluster types\n url(r'^cluster-types/$', views.ClusterTypeListView.as_view(), name='clustertype_list'),\n url(r'^cluster-types/add/$', views.ClusterTypeCreateView.as_view(), name='clustertype_add'),\n url(r'^cluster-types/import/$', views.ClusterTypeBulkImportView.as_view(), name='clustertype_import'),\n url(r'^cluster-types/delete/$', views.ClusterTypeBulkDeleteView.as_view(), name='clustertype_bulk_delete'),\n url(r'^cluster-types/(?P<slug>[\\w-]+)/edit/$', views.ClusterTypeEditView.as_view(), name='clustertype_edit'),\n url(r'^cluster-types/(?P<slug>[\\w-]+)/changelog/$', ObjectChangeLogView.as_view(), name='clustertype_changelog', kwargs={'model': ClusterType}),\n\n # Cluster groups\n url(r'^cluster-groups/$', views.ClusterGroupListView.as_view(), name='clustergroup_list'),\n url(r'^cluster-groups/add/$', views.ClusterGroupCreateView.as_view(), name='clustergroup_add'),\n url(r'^cluster-groups/import/$', views.ClusterGroupBulkImportView.as_view(), name='clustergroup_import'),\n url(r'^cluster-groups/delete/$', views.ClusterGroupBulkDeleteView.as_view(), name='clustergroup_bulk_delete'),\n url(r'^cluster-groups/(?P<slug>[\\w-]+)/edit/$', views.ClusterGroupEditView.as_view(), name='clustergroup_edit'),\n url(r'^cluster-groups/(?P<slug>[\\w-]+)/changelog/$', ObjectChangeLogView.as_view(), name='clustergroup_changelog', kwargs={'model': ClusterGroup}),\n\n # Clusters\n url(r'^clusters/$', views.ClusterListView.as_view(), name='cluster_list'),\n url(r'^clusters/add/$', views.ClusterCreateView.as_view(), name='cluster_add'),\n url(r'^clusters/import/$', views.ClusterBulkImportView.as_view(), name='cluster_import'),\n url(r'^clusters/edit/$', views.ClusterBulkEditView.as_view(), name='cluster_bulk_edit'),\n url(r'^clusters/delete/$', views.ClusterBulkDeleteView.as_view(), name='cluster_bulk_delete'),\n url(r'^clusters/(?P<pk>\\d+)/$', views.ClusterView.as_view(), name='cluster'),\n url(r'^clusters/(?P<pk>\\d+)/edit/$', views.ClusterEditView.as_view(), name='cluster_edit'),\n url(r'^clusters/(?P<pk>\\d+)/delete/$', views.ClusterDeleteView.as_view(), name='cluster_delete'),\n url(r'^clusters/(?P<pk>\\d+)/changelog/$', ObjectChangeLogView.as_view(), name='cluster_changelog', kwargs={'model': Cluster}),\n url(r'^clusters/(?P<pk>\\d+)/devices/add/$', views.ClusterAddDevicesView.as_view(), name='cluster_add_devices'),\n url(r'^clusters/(?P<pk>\\d+)/devices/remove/$', views.ClusterRemoveDevicesView.as_view(), name='cluster_remove_devices'),\n\n # Virtual machines\n url(r'^virtual-machines/$', views.VirtualMachineListView.as_view(), name='virtualmachine_list'),\n 
url(r'^virtual-machines/add/$', views.VirtualMachineCreateView.as_view(), name='virtualmachine_add'),\n url(r'^virtual-machines/import/$', views.VirtualMachineBulkImportView.as_view(), name='virtualmachine_import'),\n url(r'^virtual-machines/edit/$', views.VirtualMachineBulkEditView.as_view(), name='virtualmachine_bulk_edit'),\n url(r'^virtual-machines/delete/$', views.VirtualMachineBulkDeleteView.as_view(), name='virtualmachine_bulk_delete'),\n url(r'^virtual-machines/(?P<pk>\\d+)/$', views.VirtualMachineView.as_view(), name='virtualmachine'),\n url(r'^virtual-machines/(?P<pk>\\d+)/edit/$', views.VirtualMachineEditView.as_view(), name='virtualmachine_edit'),\n url(r'^virtual-machines/(?P<pk>\\d+)/delete/$', views.VirtualMachineDeleteView.as_view(), name='virtualmachine_delete'),\n url(r'^virtual-machines/(?P<pk>\\d+)/config-context/$', views.VirtualMachineConfigContextView.as_view(), name='virtualmachine_configcontext'),\n url(r'^virtual-machines/(?P<pk>\\d+)/changelog/$', ObjectChangeLogView.as_view(), name='virtualmachine_changelog', kwargs={'model': VirtualMachine}),\n url(r'^virtual-machines/(?P<pk>\\d+)/services/assign/$', ServiceCreateView.as_view(), name='virtualmachine_service_assign'),\n\n # VM interfaces\n url(r'^virtual-machines/interfaces/add/$', views.VirtualMachineBulkAddInterfaceView.as_view(), name='virtualmachine_bulk_add_interface'),\n url(r'^virtual-machines/(?P<pk>\\d+)/interfaces/add/$', views.InterfaceCreateView.as_view(), name='interface_add'),\n url(r'^virtual-machines/(?P<pk>\\d+)/interfaces/edit/$', views.InterfaceBulkEditView.as_view(), name='interface_bulk_edit'),\n url(r'^virtual-machines/(?P<pk>\\d+)/interfaces/delete/$', views.InterfaceBulkDeleteView.as_view(), name='interface_bulk_delete'),\n url(r'^vm-interfaces/(?P<pk>\\d+)/edit/$', views.InterfaceEditView.as_view(), name='interface_edit'),\n url(r'^vm-interfaces/(?P<pk>\\d+)/delete/$', views.InterfaceDeleteView.as_view(), name='interface_delete'),\n\n]\n","sub_path":"netbox/virtualization/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"123765253","text":"\nimport unittest\nfrom nessus import Nessus, NessusHelpers\nimport config \nfrom types import *\n\nclass NessusTest(unittest.TestCase): \n\n    def setUp(self): \n        self.nh = NessusHelpers(config)\n        self.nessus = Nessus(config.url, config.username, config.password, config.verify)\n        self.nessus.login()\n        self.assertIsNotNone(self.nessus.token, None)\n\n    def tearDown(self): \n        self.nessus.logout()\n        self.assertEqual(self.nessus.token, '')\n\n    def test_get_scans(self):\n        scans = self.nessus.get_scans()\n        self.assertIs(ListType, type(scans))\n\n    def test_export_scan(self):\n        scans = self.nessus.get_scans()\n        scan = scans[0]\n\n        history_ids = self.nessus.get_history_ids(scan['id'])\n        history_id = history_ids[scan['uuid']]\n\n        file_id = self.nessus.export_scan(scan['id'], history_id, 'nessus')\n        self.assertGreater(file_id,0)\n\n    def test_get_last_import_epoch(self):\n        self.assertIs(type(self.nh.get_last_import_epoch()), IntType)\n\n    def test_convert_scan_to_json(self):\n        scans = self.nessus.get_scans()\n        scan = scans[0]\n        scan_id = scan['id']\n\n        history_ids = self.nessus.get_history_ids(scan_id)\n        history_id = history_ids[scan['uuid']]\n\n        file_format = 'nessus'\n\n        file_id = self.nessus.export_scan(scan['id'], history_id, file_format)\n        file_name = 'nessus_{sid}_{fid}.{fformat}'.format(sid=scan_id, fid=file_id, fformat=file_format)\n        file_path = '/tmp/{0}'.format(file_name)\n\n        
self.nessus.download_scan(scan['id'], file_id, file_path, 'nessus')\n\n        xml = NessusHelpers(file_path)\n        json_str = xml.json_str(file_path) # returns json string\n        self.assertIsNotNone(json_str)\n\n\nif __name__ == '__main__':\n    unittest.main()","sub_path":"testnessus.py","file_name":"testnessus.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"437046153","text":"from django.conf.urls.defaults import *\nfrom django.contrib import admin\nfrom django.conf import settings\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'^account/', include('account.urls')),\n    url(r'^meeting/', include('meeting.urls')),\n    url(r'^papervote/', include('papervote.urls')),\n    url(r'^category/', include('category.urls')),\n    url(r'^lab/', include('lab.urls')),\n    url(r'^info/', include('info.urls')),\n    url(r'^ueditor/',include('DjangoUeditor.urls' )),\n    (r'^images/upload$', 'utils.views.image_upload_handler'),\n    (r'^attachments/', include('attachments.urls')),\n#    (r'^bbs/', include('lbforum.urls')),\n)\n\nurlpatterns += patterns('django.views.generic.simple',\n    (r'^$', 'direct_to_template', {'template': 'homepage.html'}),\n    (r'^guide$', 'direct_to_template', {'template': 'guide.html'}),\n    (r'^submit$', 'direct_to_template', {'template': 'submit.html'}),\n    (r'^downloadsubmission$', 'direct_to_template', {'template': 'downloadsubmission.html'}),\n)\n\nif settings.DEBUG:\n    urlpatterns = patterns('',\n        url(r'^media/(?P<path>.*)$', 'django.views.static.serve',\n            {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),\n        url(r'', include('django.contrib.staticfiles.urls')),\n    ) + urlpatterns\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"635619311","text":"# %% IMPORTS\nimport argparse\nimport os\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\n\nfrom config import DEVICE\nfrom src.data_processing import (Ngram, randomized_ngram,\n                                 sequence_loader_MNIST, test_loader_MNIST,\n                                 train_loader_MNIST)\nfrom src.history import History\nfrom src.model import Model\nfrom src.remote import mpl\nfrom src.statistics import get_statistics\nfrom src.training import SGD, SPDG\nimport matplotlib.pyplot as plt\n\ntorch.manual_seed(580972560)\nnp.random.seed(226923340)\n\n# torch.manual_seed(40872390)\n# np.random.seed(89237482)\n\n# torch.manual_seed(89127349)\n# np.random.seed(479238478)\n\n\n\ndef save(history, model, ngram, optimizer_primal, optimizer_dual, primal_lr, dual_lr, comment=''):\n    if not os.path.exists('data'):\n        os.mkdir('data')\n    fname = 'data/t3-1-5'\n\n    np.save(fname + '_hist', history)\n    np.save(fname + '_model', model)\n    np.save(fname + '_ngram', ngram)\n    np.save(fname + '_opt_primal', optimizer_primal)\n    np.save(fname + '_opt_dual', optimizer_dual)\n    with open(fname + '_doc', \"w+\") as doc:\n        doc.write(\"primal_lr: {}\\ndual_lr: {}\\nn: {}\\n{}\".format(primal_lr, dual_lr, ngram.n, comment))\n\n\ndef load():\n    fname = 'data/t3-1-5'\n    hist = np.load(fname + '_hist.npy').item()\n    model = np.load(fname + '_model.npy').item()\n    ngram = np.load(fname + '_ngram.npy').item()\n    opt_primal = np.load(fname + '_opt_primal.npy').item()\n    opt_dual = np.load(fname + '_opt_dual.npy').item()\n    return hist, model, ngram, opt_primal, opt_dual\n\n\ncontinuation = False\nnum_epochs = 1000\nsave_every = 100\nlog_every 
= 100\ntest_every = 1\nprimal_lr = 1e-6\ndual_lr = 1e-4\n\nshow_dual = False\npredictions_on_sequences = True\npredictions_on_data = False\nngram_data_stats = True\nngram_test_stats = True\nloss_on_test = True\n\n# %% GENERATING DATASET\n# ngram = randomized_ngram(3, 2, out_dim=4, min_var=5e-2)\nngram = Ngram(3)\nngram[(0, 1, 2)] = 9.\nngram[(1, 2, 3)] = 1.\nngram.norm()\nngram.show()\n\n# %% CREATING MODEL\ndata_loader = train_loader_MNIST()\ntest_loader = test_loader_MNIST()\nsequence_loader = sequence_loader_MNIST(ngram, num_samples=100000)\nsequence_test_loader = sequence_loader_MNIST(ngram, num_samples=10000, train=False)\n\n\n# %% REGULAR TRAINING (SGD)\n# model = Model(ngram)\n# model.to(DEVICE)\n# model.init_weights()\n\n# optimizer = torch.optim.Adam(model.primal.parameters())\n# history = SGD(model, optimizer, data_loader, test_loader, num_epochs=1, log_every=50, test_every=1)\n\n# %% DUAL TRAINING\nif continuation:\n history, model, ngram, optimizer_primal, optimizer_dual = load()\n print(model.ngram.n)\nelse:\n model = Model(ngram, output_size=4)\n model.to(DEVICE)\n model.init_weights()\n\n optimizer_primal = torch.optim.Adam(model.primal.parameters(), lr=primal_lr)\n optimizer_dual = torch.optim.Adam(model.dual.parameters(), lr=dual_lr)\n\n history = History()\n for idx in model.dual:\n history['dual ' + str(idx)] = []\n\n\nepochs_done = 0\nwhile epochs_done < num_epochs:\n history = SPDG(model, optimizer_primal, optimizer_dual, \n sequence_loader,\n data_loader, test_loader, save_every, log_every,\n test_every,\n sequence_test_loader=sequence_test_loader, predictions_on_data=predictions_on_data,\n show_dual=show_dual, predictions_on_sequences=predictions_on_sequences,\n ngram_data_stats=ngram_data_stats, ngram_test_stats=ngram_test_stats, loss_on_test=loss_on_test,\n history=history)\n save(history, model, ngram, optimizer_primal, optimizer_dual, primal_lr, dual_lr)\n epochs_done += save_every\n\n\n# %% PLOTTING TEST\n\nxs = np.arange(len(history['predictions'])) * test_every\nys = [[100.0 - preds[i, i] / preds[i].sum() * 100 for preds in history['predictions']] for i in range(model.output_size)]\nmpl.rc('savefig', format='svg')\nmpl.rc('lines', linewidth=0.5)\nmpl.style.use('seaborn')\nfor i in range(model.output_size):\n plt.plot(xs, ys[i], label=str(i))\nplt.legend()\nplt.xlabel('Epoch')\nplt.ylabel('Error (%)')\nplt.savefig(\"predictions_test_error\")\nplt.close()\n\n# %% PLOTTING DATA\n\nys = [[100.0 - preds[i, i] / preds[i].sum() * 100 for preds in history['predictions_data']] for i in range(model.output_size)]\nmpl.rc('savefig', format='svg')\nmpl.rc('lines', linewidth=0.5)\nmpl.style.use('seaborn')\nfor i in range(model.output_size):\n plt.plot(xs, ys[i], label=str(i))\nplt.legend()\nplt.xlabel('Epoch')\nplt.ylabel('Error (%)')\nplt.savefig(\"predictions_data_error\")\nplt.close()\n\n# %% STATISTICS\nstats = history['predictions'][-1]\nprint(stats)\nprint(\"\\nn | acc\\n--+------\")\nfor i, x in zip(range(10), np.pad(np.diag(stats), (0, 10 - model.output_size), 'constant') / stats.sum(axis=1) * 100.0):\n print(\"{} | {:>5.2f}\".format(i, x))\n\n# %% RESTORE\n\nfname = 't3-1-5'\nhistory = np.load(fname + '_hist.npy').item()\n","sub_path":"tests/t3-1-5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"181550562","text":"# -*- coding: utf-8 -*-\nimport pygame\nfrom pygame.locals import *\nfrom src.constants import GRAVITY\n\nclass 
Player(pygame.sprite.Sprite):\n    IMAGE_LEFT,IMAGE_RIGHT = xrange(2)\n    MAX_JUMP_COUNT = 2\n\n    def __init__(self, pos, blocks):\n        pygame.sprite.Sprite.__init__(self, self.containers)\n        self.image = self.images[self.IMAGE_RIGHT]\n        self.rect = self.image.get_rect()\n        self.rect.center = pos\n        self._blocks = blocks\n        self._move_speed = 5.0\n        self._jump_speed = 5.0\n        self._on_floor = False\n        self._jump_count = 0\n        self._prev_button = 0\n        self._fx = float(self.rect.x)\n        self._fy = float(self.rect.y)\n        self._fvx = 0.0\n        self._fvy = self._jump_speed #start in a falling state\n\n    def update(self):\n        self._input()\n        self._fall()\n        self._collide_x()\n        self._collide_y()\n        self.rect.x = int(self._fx)\n        self.rect.y = int(self._fy)\n\n    def _input(self):\n        pressed_keys = pygame.key.get_pressed()\n        if pressed_keys[K_RIGHT]:\n            self.image = self.images[self.IMAGE_RIGHT]\n            self._fvx = self._move_speed\n        elif pressed_keys[K_LEFT]:\n            self.image = self.images[self.IMAGE_LEFT]\n            self._fvx = self._move_speed * -1\n        else:\n            self._fvx = 0.0\n        if pressed_keys[K_z]:\n            if self._on_floor:\n                self._fvy = self._jump_speed * -1\n                self._on_floor = False\n                self._jump_count += 1\n            elif not self._prev_button and self._jump_count < self.MAX_JUMP_COUNT:\n                self._fvy = self._jump_speed * -1\n                self._jump_count += 1\n        self._prev_button = pressed_keys[K_z]\n\n    def _fall(self):\n        if not self._on_floor:\n            self._fvy += GRAVITY\n\n    def _collide_x(self):\n        new_rect = Rect(self._fx + self._fvx, self._fy, self.rect.width, self.rect.height)\n        for block in self._blocks:\n            if new_rect.colliderect(block.rect):\n                if self._fvx > 0:#moving right\n                    self._fx = block.rect.left - self.rect.width\n                    self._fvx = 0.0\n                elif self._fvx < 0:#moving left\n                    self._fx = block.rect.right\n                    self._fvx = 0.0\n                return\n        self._fx += self._fvx\n\n    def _collide_y(self):\n        new_rect = Rect(self._fx, self._fy + self._fvy, self.rect.width, self.rect.height)\n        for block in self._blocks:\n            if new_rect.colliderect(block.rect):\n                if self._fvy > 0:#falling\n                    self._fy = block.rect.top - self.rect.height\n                    self._fvy = 0.0\n                    self._on_floor = True\n                    self._jump_count = 0\n                elif self._fvy < 0:#rising\n                    self._fy = block.rect.bottom\n                    self._fvy = 0.0\n                return\n        self._fy += self._fvy\n        self._on_floor = False\n","sub_path":"2DActionStudy/src/sprite/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"616807990","text":"from Doberman import LANDevice\nimport struct\nimport time\nimport socket\n\n\nclass n2_lmbox_lan(LANDevice):\n    \"\"\"\n    Custom level meter box for pancake. The device is read out through an RS485 to ETH adapter, that's why\n    it inherits from LANDevice. 
send_recv() is modified to sleep longer since the device reacts slower than\n    a standard LAN device.\n    \"\"\"\n\n    msg_wait = 2\n    eol = b'\\r'\n    split = b'\\x06'\n\n    def process_one_value(self, name, data):\n        \"\"\"\n        Data structure: 6 times 4 integers divided by the split character plus the EOL-character.\n        \"\"\"\n        with open('/global/logs/pancake/special/lmtest.bin', 'ab') as f:\n            f.write(data + b'\\n')\n        if not data.endswith(self.eol):\n            self.logger.info(f'Data does not end with EOL but with {data[-1]}')\n        if len(data) == 55:\n            # If it is the right length, split by position since reading might contain \\x06\n            data = [data[i:i+8] for i in range(0,54,9)]\n        else:\n            # Otherwise split by splitting character\n            data = data.split(self.split)[:-1] # Remove EOL\n        if len(data) != 6:\n            self.logger.debug(f'Data contains {len(data)} readings, not 6')\n            return None\n\n        c_meas = []\n        for i, readingdata in enumerate(data):\n            try:\n                lm_values = struct.unpack('\\d+)$', 'createbox.views.details'),\n    (r'^boxes/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.ROOT_DIR + 'boxDXFs'}),\n    (r'^mazePNGs/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.ROOT_DIR + 'mazePNGs/'}),\n    (r'^external/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.ROOT_DIR + 'external/'}),\n    (r'^admin/', include(admin.site.urls)),\n    (r'^about', direct_to_template, {'template': 'about.html', 'extra_context': { 'extHTTP' : settings.EXTHTTP }}),\n    (r'^links', direct_to_template, {'template': 'links.html', 'extra_context': { 'extHTTP' : settings.EXTHTTP }}),\n    (r'^tips', direct_to_template, {'template': 'tips.html', 'extra_context': { 'extHTTP' : settings.EXTHTTP }}),\n    (r'^contact/thankyou', direct_to_template, {'template': 'thankyou.html', 'extra_context': { 'extHTTP' : settings.EXTHTTP }}),\n    (r'^contact', 'createbox.views.contactview'),\n)\n","sub_path":"mazepuzzlebox/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"311050113","text":"\n#23. Merge k Sorted Lists\n#I1. downheap loses break condition\n#I2. 
\n\ndef swap(data, i, j):\n tmp = data[i]\n data[i] = data[j]\n data[j] = tmp\n \ndef upheap(heap):\n i = len(heap)-1\n while(i != 1):\n p = i // 2\n if (heap[p][0] > heap[i][0]):\n swap(heap, p, i)\n i = p\n\ndef downheap(heap):\n count = len(heap)-1\n i = 1\n c = i+i\n while(c <= count):\n if (c < count and heap[c][0] > heap[c+1][0]): c+= 1\n if (heap[i][0] > heap[c][0]):\n swap(heap, i, c) \n i = c\n c = i+i\n else:\n break \n \ndef mergeSorts(sorts):\n count = len(sorts)\n index = [0 for i in range(count)]\n heap = [0]\n max = 0\n allcount = 0\n \n for i in range(count):\n allcount += len(sorts[i])\n if (max < sorts[i][len(sorts[i])-1]):\n max = sorts[i][len(sorts[i])-1] \n \n max += 1\n for i in range(count):\n sorts[i].append(max)\n heap.append((sorts[i][0], i))\n upheap(heap) \n \n result = [] \n for j in range(allcount):\n v = heap[1]\n result.append(v[0])\n i = v[1]\n index[i] += 1\n heap[1] = (sorts[i][index[i]], i)\n downheap(heap)\n \n for i in range(count):\n sorts[i].pop()\n \n return result\n \nsort1 = [1,3,6]\nsort2 = [3,7,8]\nsort3 = [0,1,2]\nsort4 = [5,9,10]\nsort5 = [2,3,9]\n\nsorts = [sort1, sort2, sort3, sort4, sort5]\nprint(mergeSorts(sorts))","sub_path":"Leetcode/23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"646070355","text":"'''\nWrite a program using a while statement, that given a series of numbers as input, adds them up until \nthe input is 10 and then prints the total.\nDo not add the final 10.\nFor example, if the following numbers are input\n8\n3\n11\n10\n\n\n22\n'''\nnum = int(input(\"Input an int: \")) # Do not change this line\n\nsumm = 0\n\nwhile num != 10:\n summ += num\n num = int(input(\"Input an int: \"))\n \nprint(summ)\n\n","sub_path":"assignment_3_3.py","file_name":"assignment_3_3.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"133963088","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.9-x86_64/egg/islatu/stitching.py\n# Compiled at: 2020-04-21 10:47:08\n# Size of source mod 2**32: 3893 bytes\n\"\"\"\nAs reflectometry measurements typicall consist of multiple scans at different attenutation, we must stitch these together.\n\"\"\"\nimport numpy as np\nfrom uncertainties import unumpy as unp\n\ndef correct_attentuation(scan_list):\n \"\"\"\n Correct the attentuation level between a a series of elements in lists.\n\n Args:\n scans (list of islatu.refl_data.Scan): Reflectometry scans.\n\n Returns:\n (list of islatu.refl_data.Scan): Reflectometry scans with attenuation corrected.\n \"\"\"\n for i in range(len(scan_list) - 1):\n overlap_start = scan_list[(i + 1)].q[0].n\n overlap_end = scan_list[i].q[(-1)].n\n overlap_start_index = np.argmin(np.abs(scan_list[i].q - overlap_start))\n overlap_end_index = np.argmin(np.abs(scan_list[(i + 1)].q - overlap_end))\n target_r = scan_list[i].R[overlap_start_index:]\n vary_r = scan_list[(i + 1)].R[:overlap_end_index + 1]\n ratio = target_r.mean() / vary_r.mean()\n scan_list[(i + 1)].R *= ratio\n\n return scan_list\n\n\ndef concatenate(scan_list):\n \"\"\"\n Concatenate each of the datasets together.\n\n Args:\n scans (list of islatu.refl_data.Scan): Reflectometry scans.\n\n Returns:\n (tuple): tuple containing:\n - (np.ndarray): q-values.\n - 
(np.ndarray): Reflected intensities.\n \"\"\"\n reflected_intensity = np.array([])\n q_vectors = np.array([])\n for i in range(len(scan_list)):\n reflected_intensity = np.append(reflected_intensity, scan_list[i].R)\n q_vectors = np.append(q_vectors, scan_list[i].q)\n\n return (\n q_vectors, reflected_intensity)\n\n\ndef normalise_ter(q_vectors, reflected_intensity, max_q=0.1):\n \"\"\"\n Find the total external reflection region and normalise this to 1.\n\n Args:\n max_q (float): The maximum q to be included in finding the critical angle.\n\n Returns:\n (np.ndarray): Reflected intensities.\n \"\"\"\n q = unp.nominal_values(q_vectors)\n max_q_idx = q[np.where(q < max_q)].size\n if max_q_idx <= 1:\n end_of_ter_index = 1\n else:\n end_of_ter_index = np.argmax(np.abs(np.gradient(unp.nominal_values(reflected_intensity)[:max_q_idx])))\n if end_of_ter_index == 0:\n end_of_ter_index = 1\n ter_region_mean_r = reflected_intensity[:end_of_ter_index].mean()\n reflected_intensity /= ter_region_mean_r\n return reflected_intensity\n\n\ndef rebin(q_vectors, reflected_intensity, new_q=None, number_of_q_vectors=400):\n \"\"\"\n Rebin the data on a logarithmic q-scale.\n\n Args:\n new_q (np.ndarray): Array of potential q-values. Defaults to ``None``.\n number_of_q_vectors (int, optional): The max number of\n q-vectors to be using initially in the rebinning of the data. Defaults to ``400``.\n\n Returns:\n (tuple): tuple containing:\n - (np.ndarray): q-values.\n - (np.ndarray): Reflected intensities.\n \"\"\"\n if new_q is not None:\n new_q = new_q\n else:\n new_q = np.logspace(np.log10(q_vectors[0].n), np.log10(q_vectors[(-1)].n), number_of_q_vectors)\n binned_q = unp.uarray(np.zeros_like(new_q), np.zeros_like(new_q))\n binned_r = unp.uarray(np.zeros_like(new_q), np.zeros_like(new_q))\n for i in range(len(new_q) - 1):\n count = 0\n for j, q in enumerate(q_vectors):\n if new_q[i] <= q < new_q[(i + 1)]:\n binned_q[i] += q\n binned_r[i] += reflected_intensity[j]\n count += 1\n\n if count > 0:\n binned_q[i] /= count\n binned_r[i] /= count\n\n cleaned_q = np.delete(binned_q, np.argwhere(binned_r == 0))\n cleaned_r = np.delete(binned_r, np.argwhere(binned_r == 0))\n return (\n cleaned_q, cleaned_r)","sub_path":"pycfiles/islatu-0.0.1-py3.7/stitching.cpython-37.py","file_name":"stitching.cpython-37.py","file_ext":"py","file_size_in_byte":4020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"92958214","text":"import keyword\n\n# print(\"hello\")\n# print(keyword.kwlist)\n# keyword.kwlist;\n\nif True:\n print(\"answer\")\n print(\"True\")\nelse:\n print(\"Answer\")\n print(\"False\")\n\n#total=item_one+\\\n # item_two+\\\n # item_three\n\ntotal={'item_one','item_two','item_three'}\n\nword='字符串'\nsentence=\"这是一个句子\"\nparagraph=\"\"\"这是一个段落\n可以由多行组成\"\"\"\n\n#input(\"\\n\\n按下 enter 键后退出\")\n\nimport sys;x='runoob';sys.stdout.write(x+'n')\n\nx=\"a\"\ny=\"b\"\nprint(x)\nprint(y)\n\n\n\n\n\n","sub_path":"test1/src/main/Test1.py","file_name":"Test1.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"464391016","text":"# -*- coding: utf-8 -*-\n# vi:si:et:sw=4:sts=4:ts=4\n\nimport asyncio\nfrom functools import partial\nimport logging\n\nimport boto3\nimport botocore.exceptions\n\nfrom ..exceptions import ConsumerError\n\nlogger = logging.getLogger(__name__)\n\n\nclass Consumer(object):\n\n def __init__(self, source, endpoint_url=None, use_ssl=True, options=None, loop=None):\n 
self.source = source\n self.endpoint_url = endpoint_url\n self.use_ssl = use_ssl\n self._loop = loop or asyncio.get_event_loop()\n self._client = None\n self._consumer_options = options\n\n def get_client(self):\n if self._client is None:\n self._client = boto3.client('sqs', endpoint_url=self.endpoint_url, use_ssl=self.use_ssl)\n return self._client\n\n async def get_queue_url(self):\n fn = partial(self.get_client().get_queue_url, QueueName=self.source)\n # XXX: Refactor this when boto support asyncio\n response = await self._loop.run_in_executor(None, fn)\n return response['QueueUrl']\n\n async def confirm_message(self, message):\n logger.info('Confirm message (ACK/Deletion)')\n\n receipt = message['ReceiptHandle']\n logger.debug('receipt={}'.format(receipt))\n\n queue_url = await self.get_queue_url()\n fn = partial(self.get_client().delete_message, QueueUrl=queue_url, ReceiptHandle=receipt)\n # XXX: Refactor this when boto support asyncio\n return await self._loop.run_in_executor(None, fn)\n\n async def fetch_messages(self):\n queue_url = await self.get_queue_url()\n logger.info('Fetching messages on {}'.format(queue_url))\n\n options = self._consumer_options or {}\n fn = partial(self.get_client().receive_message, QueueUrl=queue_url, **options)\n # XXX: Refactor this when boto support asyncio\n response = await self._loop.run_in_executor(None, fn)\n return response.get('Messages', [])\n\n async def consume(self):\n try:\n messages = await self.fetch_messages()\n except botocore.exceptions.ClientError as exc:\n logger.exception(exc)\n raise ConsumerError('Error when fetching messages') from exc\n\n return messages\n","sub_path":"loafer/aws/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"517536738","text":"from subprocess import run, PIPE, STDOUT\n\n\ndef shell(cmd):\n print(f\"Now running: {cmd}\")\n process = run(cmd, stdout=PIPE, stderr=STDOUT, shell=True)\n print(process.stdout.decode('utf-8'))\n if process.returncode == 0:\n return True\n else:\n print(\"An error occurred. 
Aborting\")\n return False\n\ndef main():\n ok = shell(\"echo 'OK'; exit 0\")\n if ok:\n ok = shell(\"echo 'Again OK'; exit 0\")\n if ok:\n ok = shell(\"echo 'Problem!'; exit 1\")\n if ok:\n ok = shell(\"echo 'Should not execute'; exit 0\")\n if ok:\n print(\"Done\")\n\nif __name__ == '__main__': main()\n","sub_path":"code/post55/naive2.py","file_name":"naive2.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"259374299","text":"__author__ = 'pehladik'\n\nfrom Programme import Programme\nfrom constantes import *\nfrom constraint import *\n\n\nclass Universite:\n def __init__(self, nom):\n self.nom = nom\n self.listeDesProgrammes = []\n self.listePlaceOccuppeesParFiliere = {}\n for f in listeFilieres:\n self.listePlaceOccuppeesParFiliere[f] = -1\n self.listeEtudiantsAffectes = []\n\n def __cmp__(self, nom):\n return cmp(self.nom, nom)\n\n def __str__(self):\n st = self.nom\n for p in self.listeDesProgrammes:\n st += '| '\n st += str(p)\n return st\n\n def est_present(self, nom):\n return (self.nom == nom)\n\n def ajouterProgramme(self, programm):\n self.listeDesProgrammes.append(programm)\n for f in programm.listeDesFilieres:\n self.listePlaceOccuppeesParFiliere[f] = 0\n\n def findProgramm(self, filiere):\n programme = None\n for p in self.listeDesProgrammes:\n if filiere in p.listeDesFilieres:\n programme = p\n return programme\n\n def ajouterEtudiant(self, etudiant):\n self.listeEtudiantsAffectes.append(etudiant)\n self.listePlaceOccuppeesParFiliere[etudiant.filiere] += 1\n\n def isPlace(self, filiere):\n result = 0\n if self.listePlaceOccuppeesParFiliere[filiere] == -1: # Pas de programme disponible dans la filiere\n result = PAS_DE_PROGAMME_ADEQUAT\n else: # le programme existe pour la filiere\n if len(self.listeDesProgrammes) == 1: # cas simple d'un seul programme dans l'universite\n nbPlacesOccupees = 0\n for f in self.listeDesProgrammes[0].listeDesFilieres:\n nbPlacesOccupees += self.listePlaceOccuppeesParFiliere[f]\n result = self.listeDesProgrammes[0].nbDePlaces - nbPlacesOccupees\n\n else: # cas avec plusieurs programmes\n problem = Problem()\n i = 0\n # Pour chaque programme p de l'universite il faut que la somme des places occupees\n # ne depasse pas la capacite de p :\n # sum_filiere(place[filiere][p]) < place_disponible[p]\n for p in self.listeDesProgrammes:\n i += 1\n lvar = []\n for f in p.listeDesFilieres:\n varname = f + str(i)\n lvar.append(varname)\n problem.addVariable(varname, range(0, p.nbDePlaces + 1))\n # Contraintes sum (filiere) < nb places programme\n # print lvar\n v = p.nbDePlaces\n #print \"<= \" + str(v)\n problem.addConstraint(MaxSumConstraint(v), lvar)\n\n # Il faut que le nombre de place prises par filiere f soit exactement egal au nombre\n # d'etudiants qui occupent cette filiere (+1 pour la filiere a tester) :\n # sum(place[filiere]) = v\n for f in self.listePlaceOccuppeesParFiliere:\n v = self.listePlaceOccuppeesParFiliere[f]\n if self.listePlaceOccuppeesParFiliere[f] != -1:\n lvar = []\n i = 0\n for p in self.listeDesProgrammes:\n i += 1\n if f in p.listeDesFilieres:\n varname = f + str(i)\n lvar.append(varname)\n if len(lvar) != 0:\n # print lvar\n\n if f == filiere:\n #print \"= \" + str(v+1)\n problem.addConstraint(ExactSumConstraint(v + 1), lvar)\n else:\n #print \"= \" + str(v)\n problem.addConstraint(ExactSumConstraint(v), lvar)\n\n solution = problem.getSolution() # recherche d'une solution\n if solution != None:\n result = 1\n # print solution\n 
return result","sub_path":"Universite.py","file_name":"Universite.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"642125946","text":"# Copyright 2013 Carl Simon Adorf, MST, RWTH-Aachen University\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport constants as C\nfrom collections import deque\n\nlogger = logging.getLogger('run')\n\nclass Run(object):\n\n def __init__(self, run_order, package = None):\n self._timesteps = 0\n self._commands = dict()\n self._custom_commands = dict()\n self._run_order = run_order\n\n def package(self):\n raise NotImplementedError()\n\n def version(self):\n raise NotImplementedError()\n\n @classmethod\n def from_package(cls, run_order, package, version = None):\n if version is not None:\n logger.warning(\"Version selection is ignored.\")\n if package == C.PACKAGE_HOOMD:\n new_run = HoomdRun(run_order)\n elif package == C.PACKAGE_LAMMPS:\n new_run = LammpsRun(run_order)\n else:\n msg = \"Unknown package: '{}'.\".format(package)\n raise LookupError(msg)\n return new_run\n\n @classmethod\n def _run_command(self, timesteps):\n raise NotImplementedError()\n\n @classmethod\n def _comment_command(self, comment):\n raise NotImplementedError()\n\n @property\n def timesteps(self):\n return self._timesteps\n\n @timesteps.setter\n def timesteps(self, value):\n self._timesteps = int(value)\n\n def dump(self):\n buffer = []\n self._commands[C.RUN_KEY] = [self._run_command(self.timesteps)]\n# for KEY in C.RUN_ORDER:\n for KEY in self._run_order:\n current = self._commands.get(KEY)\n if current:\n buffer.append(current)\n for i,c in self._custom_commands.items():\n buffer.insert(i, c)\n # Return a flat list\n return [item for sublist in buffer for item in sublist]\n\n def add_code(self,\n key, code, source,\n custom_index = None,):\n headline = \"--- {} [{}] ---\\n\".format(str(key), source)\n comment_str = self._comment_command(headline)\n if custom_index is not None:\n tag = int(custom_index)\n commands = self._custom_commands\n else:\n tag = key[C.TAG_KEY]\n commands = self._commands\n if tag == C.RUN_KEY:\n msg = \"{} is not a valid tag for a chunk.\".format(tag)\n raise KeyError(msg)\n# elif custom_index < 0 and not tag in C.RUN_ORDER:\n elif custom_index < 0 and not tag in self._run_order:\n msg = \"Chunk tag '{}' is not supported.\".format(tag)\n raise NotImplementedError(msg)\n if commands.get(tag) is None:\n commands[tag] = deque()\n commands[tag].append(comment_str + code)\n self._dirty_flag = True\n\n def add_bibliography(self, bibliography):\n bib_commands = self._commands['_bib'] = deque()\n for line in bibliography:\n bib_commands.append(self._comment_command(line))\n bib_commands.append(\"\")\n\n def code_iterator(self):\n for t in self._commands.itervalues():\n for c in t:\n yield c\n for i in self._custom_commands.itervalues():\n for c in i.itervalues():\n yield c\n\nclass HoomdRun(Run):\n\n def package(self):\n return 
C.PACKAGE_HOOMD\n\n def version(self):\n return C.UNDEFINED\n\n @classmethod\n def _comment_command(self, comment):\n return \"# {}\".format(comment)\n\n @classmethod\n def _run_command(self, timesteps):\n fancy = \"# --- RUN FOR {0:e} TIMESTEPS ---\\n\".format(timesteps)\n return fancy + \"run({0:e})\\n\".format(timesteps)\n\nclass LammpsRun(Run):\n\n def package(self):\n return C.PACKAGE_LAMMPS\n\n def version(self):\n return C.UNDEFINED\n\n @classmethod\n def _comment_command(self, comment):\n return \"# {}\".format(comment)\n\n @classmethod\n def _run_command(self, timesteps):\n return \"run {}\".format(timesteps)\n","sub_path":"simulation_manager/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"463713042","text":"\"\"\"empty message\n\nRevision ID: 43fd6857bf9a\nRevises: d318a3f9d800\nCreate Date: 2017-11-26 14:37:45.214887\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '43fd6857bf9a'\ndown_revision = 'd318a3f9d800'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('recipe', sa.Column('category_id', sa.Integer(), nullable=True))\n op.drop_constraint(u'recipe_catrgory_id_fkey', 'recipe', type_='foreignkey')\n op.create_foreign_key(None, 'recipe', 'category', ['category_id'], ['category_id'])\n op.drop_column('recipe', 'catrgory_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('recipe', sa.Column('catrgory_id', sa.INTEGER(), autoincrement=False, nullable=True))\n op.drop_constraint(None, 'recipe', type_='foreignkey')\n op.create_foreign_key(u'recipe_catrgory_id_fkey', 'recipe', 'category', ['catrgory_id'], ['category_id'])\n op.drop_column('recipe', 'category_id')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/43fd6857bf9a_.py","file_name":"43fd6857bf9a_.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"237966455","text":"class EstacionDeRadio:\n _frecuencia = None\n\n def __init__(self, frecuencia):\n self._frecuencia = frecuencia\n\n def getFrecuencia(self):\n return self._frecuencia\n\nclass Estaciones:\n _estaciones = []\n\n def agregarEstacion(self, estacion):\n self._estaciones.append(estacion)\n\n def getEstaciones(self):\n return [estacion.getFrecuencia() for estacion in self._estaciones]\n\nif __name__ == '__main__':\n contenedorEstaciones = Estaciones()\n\n estacion1 = EstacionDeRadio(89.9)\n estacion2 = EstacionDeRadio(101.2)\n estacion3 = EstacionDeRadio(102.3)\n estacion4 = EstacionDeRadio(100.4)\n\n contenedorEstaciones.agregarEstacion(estacion1)\n contenedorEstaciones.agregarEstacion(estacion2)\n contenedorEstaciones.agregarEstacion(estacion3)\n contenedorEstaciones.agregarEstacion(estacion4)\n\n print(contenedorEstaciones.getEstaciones())\n\n iterator = iter(contenedorEstaciones.getEstaciones())\n\n #Borra el siguiente elemento de la coleccion de objetos\n elementoABorrar = next(iterator)\n del elementoABorrar\n\n #Busca e imprime los objetos para visualizar que ya no esta el primer elemento\n for estacion in contenedorEstaciones.getEstaciones():\n \n print(next(iterator))\n\n\n","sub_path":"Ago-Dic-2018/Claudia 
Seca/PrimerParcial/MethodIterator.py","file_name":"MethodIterator.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"61451877","text":"from typing import Any, Callable, TypeVar\n\nfrom talon import Context, Module, actions, settings\n\nmod = Module()\n# rust specific grammar\nmod.list(\"code_type_modifier\", desc=\"List of type modifiers for active language\")\nmod.list(\"code_macros\", desc=\"List of macros for active language\")\nmod.list(\"code_trait\", desc=\"List of traits for active language\")\nmod.list(\"rust_crates\", desc=\"List of common rust crates\")\nmod.list(\"closed_format_strings\", desc=\"List of common closed rust format strings\")\nmod.list(\"inner_format_strings\", desc=\"List of common rust format strings\")\nmod.list(\"rust_types\", desc=\"List of common rust types\")\nmod.list(\"code_containing_types\", desc=\"List of common rust container types\")\nmod.list(\"formatted_functions\", desc=\"List of common rust formatted functions\")\nmod.list(\"rust_allocatable_types\", desc=\"List of common rust allocatable types\")\nmod.list(\"rust_std_modules\", desc=\"List of common rust std modules\")\nmod.list(\"rust_targets\", desc=\"List of common rust compile targets\")\nmod.list(\"rust_toolchains\", desc=\"List of common rust toolchains\")\n\n\n@mod.action_class\nclass Actions:\n def code_state_implements():\n \"\"\"Inserts implements block, positioning the cursor appropriately\"\"\"\n\n def code_insert_if_let_okay():\n \"\"\"Inserts if let ok block, positioning the cursor appropriately\"\"\"\n\n def code_insert_if_let_some():\n \"\"\"Inserts if let some block, positioning the cursor appropriately\"\"\"\n\n def code_insert_if_let_error():\n \"\"\"Inserts if let error block, positioning the cursor appropriately\"\"\"\n\n def code_insert_trait_annotation(type: str):\n \"\"\"Inserts type annotation for implementor of trait\"\"\"\n\n def code_insert_return_trait(type: str):\n \"\"\"Inserts a return type for implementor of trait\"\"\"\n\n def code_insert_macro(text: str, selection: str):\n \"\"\"Inserts a macro and positions the cursor appropriately\"\"\"\n\n def code_insert_macro_array(text: str, selection: str):\n \"\"\"Inserts a macro array and positions the cursor appropriately\"\"\"\n\n def code_insert_macro_block(text: str, selection: str):\n \"\"\"Inserts a macro block and positions the cursor appropriately\"\"\"\n\n def code_state_unsafe():\n \"\"\"Inserts an unsafe block and positions the cursor appropriately\"\"\"\n\n def code_comment_documentation_block():\n \"\"\"Inserts a block document comment and positions the cursor appropriately\"\"\"\n\n def code_comment_documentation_inner():\n \"\"\"Inserts an inner document comment and positions the cursor appropriately\"\"\"\n\n def code_comment_documentation_block_inner():\n \"\"\"Inserts an inner block document comment and positions the cursor appropriately\"\"\"\n\n\nctx = Context()\nctx.matches = r\"\"\"\ntag: user.rust\ntag: user.rust_apps\n\"\"\"\n\nctx.lists[\"user.rust_std_modules\"] = {\n \"compare\": \"cmp\",\n \"convert\": \"convert\",\n \"format\": \"fmt\",\n \"I O\": \"io\",\n}\n\nscalar_types = {\n \"eye eight\": \"i8\",\n \"you eight\": \"u8\",\n \"bytes\": \"u8\",\n \"eye sixteen\": \"i16\",\n \"you sixteen\": \"u16\",\n \"eye thirty two\": \"i32\",\n \"you thirty two\": \"u32\",\n \"eye sixty four\": \"i64\",\n \"you sixty four\": \"u64\",\n \"eye one hundred and twenty eight\": \"i128\",\n \"you one hundred 
and twenty eight\": \"u128\",\n \"eye size\": \"isize\",\n \"you size\": \"usize\",\n \"float thirty two\": \"f32\",\n \"float sixty four\": \"f64\",\n \"boolean\": \"bool\",\n \"bool\": \"bool\",\n \"character\": \"char\",\n}\n\ncompound_types = {\n \"tuple\": \"()\",\n \"array\": \"[]\",\n}\n\n\nstandard_library_types = {\n \"box\": \"Box\",\n \"vector\": \"Vec\",\n \"string\": \"String\",\n \"string slice\": \"&str\",\n \"os string\": \"OsString\",\n \"os string slice\": \"&OsStr\",\n \"see string\": \"CString\",\n \"see string slice\": \"&CStr\",\n \"option\": \"Option\",\n \"result\": \"Result\",\n \"okay\": \"Ok\",\n \"error\": \"Err\", # TODO: These aren't really types I guess\n \"big error\": \"Error\",\n \"hashmap\": \"HashMap\",\n \"hash set\": \"HashSet\",\n \"reference count\": \"Rc\",\n \"path\": \"Path\",\n \"path buf\": \"PathBuf\",\n}\n\n# TODO: This needs to get integrated into other lists rather than duplication\nallocatable_types = {\n \"vector\": \"Vec\",\n \"string\": \"String\",\n \"path\": \"Path\",\n}\nctx.lists[\"user.rust_allocatable_types\"] = allocatable_types\n\n# types that allow us say for example 'vector of you eight' to get Vec\ncontaining_types = {\n \"vector\": \"Vec\",\n \"veck\": \"Vec\",\n \"okay\": \"Ok\",\n \"result\": \"Result\",\n \"option\": \"Option\",\n \"reference count\": \"Rc\",\n \"arc\": \"Arc\",\n \"cell\": \"Cell\",\n \"ref cell\": \"RefCell\",\n \"mutex\": \"Mutex\",\n \"rw lock\": \"RwLock\",\n \"box\": \"Box\",\n}\n\n\nstandard_sync_types = {\n \"arc\": \"Arc\",\n \"barrier\": \"Barrier\",\n \"condition variable\": \"Condvar\",\n \"mutex\": \"Mutex\",\n \"once\": \"Once\",\n \"read write lock\": \"RwLock\",\n \"receiver\": \"Receiver\",\n \"sender\": \"Sender\",\n \"sink sender\": \"SyncSender\",\n}\n\nall_types = {\n **scalar_types,\n **compound_types,\n **standard_library_types,\n **standard_sync_types,\n}\n\nstandard_function_macros = {\n \"panic\": \"panic!\",\n \"concatenate\": \"concat!\",\n \"con cat\": \"concat!\",\n \"to do\": \"todo!\",\n \"debug\": \"dbg!\",\n \"sys call\": \"syscall!\",\n}\n\nstring_formatted_standard_function_macros = {\n \"format\": \"format!\",\n \"print\": \"print!\",\n \"print line\": \"println!\",\n \"error print line\": \"eprintln!\",\n \"write\": \"write!\",\n \"write line\": \"writeln!\",\n}\n\n\nstandard_array_macros = {\n \"vector\": \"vec!\",\n}\n\ncommon_implementations = {\n \"ok or\": \"ok_or\",\n \"ok or else\": \"ok_or_else\",\n \"unwrap\": \"unwrap\",\n \"await\": \"await\",\n \"some\": \"Some\",\n}\n\nstandard_block_macros = {\n \"macro rules\": \"macro_rules!\",\n}\n\nlogging_macros = {\n \"debug\": \"debug!\",\n \"info\": \"info!\",\n \"warning\": \"warn!\",\n \"warn\": \"warn!\",\n \"error\": \"error!\",\n}\n\ntesting_macros = {\n \"assert\": \"assert!\",\n \"assert equal\": \"assert_eq!\",\n \"assert not equal\": \"assert_ne!\",\n}\n\nerrno_values = {\n \"success\": \"ESUCCESS\", # 0\n \"permission denied\": \"EPERM\", # 1\n}\n\nerror_methods = {\"raw os error\": \"raw_os_error\"}\n\nall_string_formatted_functions_macros = {\n **string_formatted_standard_function_macros,\n **logging_macros,\n}\n\nall_function_macros = {\n **standard_function_macros,\n **testing_macros,\n}\n\nall_array_macros = {\n **standard_array_macros,\n}\n\nall_block_macros = {\n **standard_block_macros,\n}\n\nall_macros = {\n **all_function_macros,\n **all_array_macros,\n **all_block_macros,\n}\n\nall_function_macro_values = set(all_function_macros.values())\nall_array_macro_values = 
set(all_array_macros.values())\nall_block_macro_values = set(all_block_macros.values())\n\nclosure_traits = {\n \"closure\": \"Fn\",\n \"closure once\": \"FnOnce\",\n \"closure mutable\": \"FnMut\",\n}\n\nconversion_traits = {\n \"into\": \"Into\",\n \"from\": \"From\",\n}\n\niterator_traits = {\n \"iterator\": \"Iterator\",\n}\n\nall_traits = {\n **closure_traits,\n **conversion_traits,\n **iterator_traits,\n}\n\n\n# tag: libraries_gui\n\n# TODO: A lot of people refer to these as \"stood\" something, so we should possibly include a optional\n# \"stood\" prefix command disk for these\nstandard_imports = {\n \"atomic\": \"std::sync::atomic\",\n \"eye oh\": \"std::io\",\n \"file system\": \"std::fs\",\n \"F S\": \"std::fs\",\n \"path\": \"std::path\",\n \"envy\": \"std::env\",\n \"collections\": \"std::collections\",\n \"process\": \"std::process\",\n \"thread\": \"std::thread\",\n \"sync\": \"std::sync\",\n \"future\": \"std::future\",\n \"pin\": \"std::pin\",\n \"error\": \"std::error\",\n \"error error\": \"std::error::Error\",\n \"error kind\": \"std::io::ErrorKind\",\n \"from stir\": \"std::str::FromStr\",\n \"channel\": \"std::sync::mpsc\",\n}\ntokio_imports = {\"tracing\": \"tracing::{info};\"}\ncommon_imports = {\n \"glob\": \"glob::glob\",\n \"serde json\": \"serde_json::json\",\n \"serde\": \"serde::{Serialize, Deserialize}\",\n \"log\": \"log::{debug, error, info, warn}\",\n \"iterator tools\": \"itertools::Itertools\",\n \"iter tools\": \"itertools::Itertools\",\n \"lazy static\": \"lazy_static::lazy_static\",\n \"perfect hash map\": \"phf::phf_map\",\n \"follow redirects\": \"follow_redirects::ClientExt\",\n \"clap parser\": \"clap::{App, Arg, ArgMatches, Parser, SubCommand}\",\n}\n\nctx.lists[\"user.code_libraries\"] = {\n **standard_imports,\n **tokio_imports,\n **common_imports,\n}\n\n\n# tag: functions_common\nctx.lists[\"user.code_common_function\"] = {\n \"drop\": \"drop\",\n \"catch unwind\": \"catch_unwind\",\n \"iterator\": \"iter\",\n \"into iterator\": \"into_iter\",\n \"into iter\": \"into_iter\",\n \"from iterator\": \"from_iter\",\n \"from iter\": \"from_iter\",\n \"as stir\": \"as_str\",\n \"to string\": \"to_string\",\n \"to string lossy\": \"to_string_lossy\",\n \"to stir\": \"to_str\",\n \"as bytes\": \"as_bytes\",\n \"to bytes\": \"to_bytes\",\n \"as pointer\": \"as_ptr\",\n \"as mutable pointer\": \"as_mut_ptr\",\n \"as reference\": \"as_ref\",\n \"as ref\": \"as_ref\",\n \"as mute\": \"as_mut\",\n \"is some\": \"is_some\",\n \"is none\": \"is_none\",\n \"is ok\": \"is_ok\",\n \"is error\": \"is_err\",\n \"is empty\": \"is_empty\",\n \"to path buf\": \"to_path_buf\",\n \"unwrap\": \"unwrap\",\n \"unwrap or\": \"unwrap_or\",\n \"unwrap or else\": \"unwrap_or_else\",\n \"expect\": \"expect\",\n \"to vec\": \"to_vec\",\n \"to vector\": \"to_vec\",\n \"trim\": \"trim\",\n \"split white space\": \"split_whitespace\",\n \"display\": \"display\",\n \"or insert\": \"or_insert\",\n \"or insert with\": \"or_insert_with\",\n \"cloned\": \"cloned\",\n \"clone\": \"clone\",\n \"is digit\": \"is_digit\",\n \"is alphanum\": \"is_alphanumeric\",\n \"is ascii\": \"is_ascii\",\n \"is ascii hex digit\": \"is_ascii_hex_digit\",\n \"in to\": \"into\",\n **common_implementations,\n **all_macros,\n}\n\n# tag: functions\nctx.lists[\"user.code_type\"] = all_types\n\n# rust specific grammar\nctx.lists[\"user.code_type_modifier\"] = {\n \"mutable\": \"mut \",\n \"mute\": \"mut \",\n \"borrowed\": \"&\",\n \"borrowed mutable\": \"&mut \",\n \"borrowed mute\": \"&mut \",\n \"mutable 
borrowed\": \"&mut \",\n \"mute borrowed\": \"&mut \",\n \"dynamic\": \"dyn \",\n \"dine\": \"dyn \",\n}\n\nctx.lists[\"user.rust_crates\"] = {\n \"native T L S\": \"native_tls\",\n \"hyper\": \"hyper\",\n \"tokyo\": \"tokio\",\n \"futures\": \"futures\",\n \"async standard\": \"async_std\",\n \"follow redirects\": \"follow_redirects\",\n \"log\": \"log\",\n \"request\": \"reqwest\",\n \"clap\": \"clap\",\n \"cap stone\": \"capstone\",\n \"key stone\": \"keystone_engine\", # official crate is buggy\n \"goblin\": \"goblin\",\n \"random\": \"rand\",\n \"walk dir\": \"walkdir\",\n \"log\": \"log\",\n \"open S S L\": \"openssl\",\n \"serde\": \"serde\",\n \"serde JSON\": \"serde_json\",\n \"thirty four\": \"thirtyfour\",\n \"simple log\": \"simplelog\",\n \"async recursion\": \"async_recursion\",\n \"serde\": \"serde\",\n \"serde json\": \"serde_json\",\n \"ray on\": \"rayon\",\n \"shaw two\": \"sha2\",\n \"glob\": \"glob\",\n \"iter tools\": \"itertools\",\n \"lazy static\": \"lazy_static\",\n \"which\": \"which\",\n \"base sixty four\": \"base64\",\n \"regex\": \"regex\",\n}\n\nctx.lists[\"user.rust_toolchains\"] = {\n \"stable\": \"stable\",\n \"nightly\": \"nightly\",\n \"beta\": \"beta\",\n}\n\n# TODO: These are a little loose with the architecture's atm\nctx.lists[\"user.rust_targets\"] = {\n \"windows M S V C\": \"x86_64-pc-windows-msvc\",\n \"windows G N U\": \"x86_64-pc-windows-gnu\",\n \"mac O S\": \"x86_64-apple-darwin\",\n \"mac O S arm\": \"aarch64-apple-darwin\",\n \"linux\": \"x86_64-unknown-linux-gnu\",\n \"linux muscle\": \"x86_64-unknown-linux-musl\",\n \"linux arm sixty four\": \"aarch64-unknown-linux-gnu\",\n \"linux muscle arm sixty four\": \"aarch64-unknown-linux-musl\",\n \"linux arm\": \"armv7-unknown-linux-gnueabihf\",\n \"linux muscle arm\": \"armv7-unknown-linux-musleabihf\",\n}\n\nctx.lists[\"user.formatted_functions\"] = {**all_string_formatted_functions_macros}\n\n\nctx.lists[\"user.closed_format_strings\"] = {\n \"hex\": r\"{:#x}\",\n \"octal\": r\"{:#o}\",\n \"binary\": r\"{:#b}\",\n \"decimal\": r\"{:#}\",\n \"float\": r\"{:.2}\",\n \"debug\": r\"{:?}\",\n}\n\nctx.lists[\"user.inner_format_strings\"] = {\n \"hex\": r\":#x\",\n \"octal\": r\":#o\",\n \"binary\": r\":#b\",\n \"decimal\": r\":#\",\n \"float\": r\":.2\",\n \"debug\": r\":?\",\n}\n\n\nctx.lists[\"user.code_macros\"] = all_macros\n\nctx.lists[\"user.code_trait\"] = all_traits\n\nctx.lists[\"user.code_containing_types\"] = {**containing_types}\n\n\n@ctx.action_class(\"user\")\nclass UserActions:\n # tag: comment_line\n\n def code_comment_line_prefix():\n actions.auto_insert(\"// \")\n\n # tag: comment_documentation\n\n def code_comment_documentation():\n actions.auto_insert(\"/// \")\n\n # tag: imperative\n\n def code_block():\n actions.auto_insert(\"{}\")\n actions.edit.left()\n actions.key(\"enter\")\n\n def code_state_if():\n actions.auto_insert(\"if \")\n\n def code_state_else_if():\n actions.auto_insert(\" else if \")\n\n def code_state_else():\n actions.user.insert_between(\" else { \", \" }\")\n\n def code_state_switch():\n actions.auto_insert(\"match \")\n\n def code_state_for():\n actions.auto_insert(\"for in {}\")\n actions.edit.left()\n actions.key(\"enter\")\n actions.edit.up()\n actions.edit.line_end()\n repeat_call(6, actions.edit.left)\n\n def code_state_while():\n actions.auto_insert(\"while {}\")\n actions.edit.left()\n actions.key(\"enter\")\n actions.edit.up()\n actions.edit.line_end()\n repeat_call(2, actions.edit.left)\n\n def code_state_infinite_loop():\n 
actions.user.insert_between(\"loop {\", \"}\")\n actions.key(\"enter\")\n\n def code_state_return():\n actions.auto_insert(\"return \")\n\n def code_break():\n actions.auto_insert(\"break;\")\n\n def code_next():\n actions.auto_insert(\"continue;\")\n\n # tag: object_oriented\n\n def code_operator_object_accessor():\n actions.auto_insert(\".\")\n\n def code_self():\n actions.auto_insert(\"self\")\n\n def code_define_class():\n actions.auto_insert(\"struct \")\n\n # tag: data_bool\n\n def code_insert_true():\n actions.auto_insert(\"true\")\n\n def code_insert_false():\n actions.auto_insert(\"false\")\n\n # tag: data_null\n\n def code_insert_null():\n actions.auto_insert(\"None\")\n\n def code_insert_is_null():\n actions.auto_insert(\".is_none()\")\n\n def code_insert_is_not_null():\n actions.auto_insert(\".is_some()\")\n\n # tag: functions\n\n def code_default_function(text: str):\n actions.user.code_private_function(text)\n\n def code_private_function(text: str):\n actions.auto_insert(\"fn \")\n formatter = settings.get(\"user.code_private_function_formatter\")\n function_name = actions.user.formatted_text(text, formatter)\n actions.user.code_insert_function(function_name, None)\n\n def code_protected_function(text: str):\n actions.auto_insert(\"pub(crate) fn \")\n formatter = settings.get(\"user.code_protected_function_formatter\")\n function_name = actions.user.formatted_text(text, formatter)\n actions.user.code_insert_function(function_name, None)\n\n def code_public_function(text: str):\n actions.auto_insert(\"pub fn \")\n formatter = settings.get(\"user.code_public_function_formatter\")\n function_name = actions.user.formatted_text(text, formatter)\n actions.user.code_insert_function(function_name, None)\n\n def code_insert_type_annotation(type: str):\n actions.auto_insert(f\": {type}\")\n\n def code_insert_return_type(type: str):\n actions.auto_insert(f\" -> {type}\")\n\n # tag: functions_common\n\n def code_insert_function(text: str, selection: str):\n code_insert_function_or_macro(text, selection, \"(\", \")\")\n\n # tag: libraries\n\n def code_import():\n actions.auto_insert(\"use \")\n\n # tag: libraries_gui\n\n def code_insert_library(text: str, selection: str):\n actions.user.paste(f\"use {text}\")\n\n # tag: operators_array\n\n def code_operator_subscript():\n actions.auto_insert(\"[]\")\n actions.edit.left()\n\n # tag: code_operators_assignment\n\n def code_operator_assignment():\n actions.auto_insert(\" = \")\n\n def code_operator_subtraction_assignment():\n actions.auto_insert(\" -= \")\n\n def code_operator_addition_assignment():\n actions.auto_insert(\" += \")\n\n def code_operator_multiplication_assignment():\n actions.auto_insert(\" *= \")\n\n def code_operator_division_assignment():\n actions.auto_insert(\" /= \")\n\n def code_operator_modulo_assignment():\n actions.auto_insert(\" %= \")\n\n def code_operator_bitwise_and_assignment():\n actions.auto_insert(\" &= \")\n\n def code_operator_bitwise_or_assignment():\n actions.auto_insert(\" |= \")\n\n def code_operator_bitwise_exclusive_or_assignment():\n actions.auto_insert(\" ^= \")\n\n def code_operator_bitwise_left_shift_assignment():\n actions.auto_insert(\" <<= \")\n\n def code_operator_bitwise_right_shift_assignment():\n actions.auto_insert(\" >>= \")\n\n # tag: operators_bitwise\n\n def code_operator_bitwise_and():\n actions.auto_insert(\" & \")\n\n def code_operator_bitwise_or():\n actions.auto_insert(\" | \")\n\n def code_operator_bitwise_exclusive_or():\n actions.auto_insert(\" ^ \")\n\n def 
code_operator_bitwise_left_shift():\n actions.auto_insert(\" << \")\n\n def code_operator_bitwise_right_shift():\n actions.auto_insert(\" >> \")\n\n # tag: operators_math\n\n def code_operator_subtraction():\n actions.auto_insert(\" - \")\n\n def code_operator_addition():\n actions.auto_insert(\" + \")\n\n def code_operator_multiplication():\n actions.auto_insert(\" * \")\n\n def code_operator_exponent():\n actions.auto_insert(\".pow()\")\n actions.edit.left()\n\n def code_operator_division():\n actions.auto_insert(\" / \")\n\n def code_operator_modulo():\n actions.auto_insert(\" % \")\n\n def code_operator_equal():\n actions.auto_insert(\" == \")\n\n def code_operator_not_equal():\n actions.auto_insert(\" != \")\n\n def code_operator_greater_than():\n actions.auto_insert(\" > \")\n\n def code_operator_greater_than_or_equal_to():\n actions.auto_insert(\" >= \")\n\n def code_operator_less_than():\n actions.auto_insert(\" < \")\n\n def code_operator_less_than_or_equal_to():\n actions.auto_insert(\" <= \")\n\n def code_operator_and():\n actions.auto_insert(\" && \")\n\n def code_operator_or():\n actions.auto_insert(\" || \")\n\n def code_operator_increment():\n actions.auto_insert(\" += 1\")\n\n # rust specific grammar\n\n def code_operator_structure_dereference():\n actions.auto_insert(\"*\")\n\n def code_insert_if_let_some():\n actions.user.insert_between(\"if let Some(\", \")\")\n\n def code_insert_if_let_okay():\n actions.user.insert_between(\"if let Ok(\", \")\")\n\n def code_insert_if_let_error():\n actions.user.insert_between(\"if let Err(\", \")\")\n\n def code_state_implements():\n actions.auto_insert(\"impl {}\")\n actions.edit.left()\n actions.key(\"enter\")\n actions.edit.up()\n actions.edit.line_end()\n repeat_call(2, actions.edit.left)\n\n def code_insert_trait_annotation(type: str):\n actions.auto_insert(f\": impl {type}\")\n\n def code_insert_return_trait(type: str):\n actions.auto_insert(f\" -> impl {type}\")\n\n def code_insert_macro(text: str, selection: str):\n if text in all_array_macro_values:\n code_insert_function_or_macro(text, selection, \"[\", \"]\")\n elif text in all_block_macro_values:\n code_insert_function_or_macro(text, selection, \"{\", \"}\")\n else:\n code_insert_function_or_macro(text, selection, \"(\", \")\")\n\n def code_state_unsafe():\n actions.user.insert_between(\"unsafe {\", \"}\")\n actions.key(\"enter\")\n\n def code_comment_documentation_block():\n actions.user.insert_between(\"/**\", \"*/\")\n actions.key(\"enter\")\n\n def code_comment_documentation_inner():\n actions.auto_insert(\"//! 
\")\n\n def code_comment_documentation_block_inner():\n actions.user.insert_between(\"/*!\", \"*/\")\n actions.key(\"enter\")\n\n\ndef code_insert_function_or_macro(\n text: str,\n selection: str,\n left_delim: str,\n right_delim: str,\n):\n if selection:\n out_text = text + f\"{left_delim}{selection}{right_delim}\"\n else:\n out_text = text + f\"{left_delim}{right_delim}\"\n actions.user.paste(out_text)\n actions.edit.left()\n\n\nRT = TypeVar(\"RT\") # return type\n\n\ndef repeat_call(n: int, f: Callable[..., RT], *args: Any, **kwargs: Any):\n for i in range(n):\n f(*args, **kwargs)\n","sub_path":"lang/rust/rust.py","file_name":"rust.py","file_ext":"py","file_size_in_byte":20543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"81585961","text":"import random as r\nimport math\n\n# Sigmoid\ndef sigmoid(x):\n return 1/(1 + math.exp(-x))\n\n# softmax\ndef softmax(x):\n return x/(math.fabs(x) + 1)\n\n\n# return tag for low energy, neutral, upbeat, ambivalent\ndef mood_spectrum(jassObject):\n if jassObject['joy'] > 3 and jassObject['sorrow'] < 3:\n return 'upbeat'\n if jassObject['sorrow'] > 2 or jassObject['joy'] < 4:\n return 'neutral'\n if jassObject['anger'] > 3 or jassObject['joy'] > 3:\n return 'phase'\n else:\n return 'ambivalent'\n\n# mood factor -- multiplier for spotify levels (0 - 1); 1 = positive\ndef mood_factor(tag):\n if tag == 'upbeat':\n return 0.5\n if tag == 'neutral':\n return (.01 * r.randint(0,10))\n if tag == 'phase':\n return (.0069 * r.randint(1,5))\n if tag == 'ambivalent':\n return (.00042069 * r.randint(1,69))\n\n# direct multipliers\n#step 1\ndef spotifySliders(jassObject):\n \n joy, anger, sorrow, surprise = jassObject.values()\n trackParameters = { 'danceability' : 0,\n 'energy' : 0,\n 'valence' : 0,\n 'acoustic' : 0,\n 'liveness' : 0\n }\n\n mFactor = mood_factor(mood_spectrum(jassObject)) * r.randint(-1,1)\n print('mFactor : ', mFactor)\n trackParameters['danceability'] = r.uniform(sigmoid(joy/5 + softmax(anger * mFactor)), .9) + mFactor * 1/sorrow * 0.5\n \n trackParameters['energy'] = math.fabs(r.uniform((0.8 * (joy/5 + anger/5) + mFactor * (anger/joy)) * .7, .99))\n\n trackParameters['valence'] = math.fabs(((sorrow/5) * .5) + ((1/joy) * .3) + r.uniform(softmax(anger), 1) * mFactor)\n trackParameters['acoustic'] = r.uniform((math.fabs(sorrow/5 + .2 * joy/5)* 0.75), .9)\n\n trackParameters['liveness'] = r.uniform(math.fabs(softmax(sorrow)), .8) + mFactor * r.uniform(sigmoid(joy) * 0.4, .7)\n\n # goes into spotify's api call\n return trackParameters","sub_path":"number_maps.py","file_name":"number_maps.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"591299053","text":"from Bridge import Bridge\r\nfrom Conditions import Conditions\r\n\r\nclass Calculator():\r\n def __init__(self):\r\n self.bridge=Bridge()\r\n self.cond=Conditions()\r\n self.calc_winrate()\r\n self.dv=[]\r\n self.dp=[]\r\n self.dt=[]\r\n self.ds=[]\r\n self.dc=[]\r\n self.dcE=[]\r\n self.dte=[]\r\n def calc_winrate(self):\r\n self.win=Bridge().get_numberofwins()\r\n self.winrate=(self.win/(Bridge().rows-1)*100)\r\n return f\"Le pourcentage de victoire de la base de donnée est: {self.winrate}\"\r\n\r\n def valid_conditional_games(self,date,patch,time,side,champ,champE,team):\r\n for i in range (len(self.bridge.games)):\r\n if self.cond.Dateon==True:\r\n if self.bridge.games[i].date==date:\r\n self.dv.append(True)\r\n else: 
self.dv.append(False)\r\n else: self.dv.append(True)\r\n if self.cond.Patchon==True:\r\n if self.bridge.games[i].patch==patch:\r\n self.dp.append(True)\r\n else: self.dp.append(False)\r\n else: self.dp.append(True)\r\n if self.cond.Timeon==True:\r\n if self.bridge.games[i].time==time:\r\n self.dt.append(True)\r\n else: self.dt.append(False)\r\n else: self.dt.append(True)\r\n if self.cond.Sideon==True:\r\n if self.bridge.games[i].side==side:\r\n self.ds.append(True)\r\n else: self.ds.append(False)\r\n else: self.ds.append(True)\r\n if self.cond.Championon==True:\r\n if self.bridge.games[i].top==champ or self.bridge.games[i].jun==champ or self.bridge.games[i].mid==champ or self.bridge.games[i].adc==champ or self.bridge.games[i].supp==champ:\r\n self.dc.append(True)\r\n else: self.dc.append(False)\r\n else: self.dc.append(True)\r\n if self.cond.Ennemychampon==True:\r\n if self.bridge.games[i].e_top==champE or self.bridge.games[i].e_jun==champE or self.bridge.games[i].e_mid==champE or self.bridge.games[i].e_adc==champE or self.bridge.games[i].e_supp==champE:\r\n self.dcE.apppend(True)\r\n else: self.dcE.append(False)\r\n else: self.dcE.append(True)\r\n if self.cond.Ennemyteamon==True:\r\n if self.bridge.games[i].patch==team:\r\n self.dte.append(True)\r\n else: self.dte.append(False)\r\n else: self.dte.append(True)\r\n def valid_conditional_games_choice(self):\r\n self.valid_games=[]\r\n for i in range (len (self.bridge.games)):\r\n if all([self.dv[i],self.dp[i],self.dt[i],self.ds[i],self.dc[i],self.dcE[i],self.dte[i]]):\r\n self.valid_games.append(self.bridge.games[i])\r\n\r\n def calc_conditionnal_winrate(self):\r\n if self.valid_games==[]:\r\n return \"Aucune partie ne correspond au critère choisis\"\r\n else:\r\n self.cwin=0\r\n for i in range (len(self.valid_games)):\r\n if self.valid_games[i].win==\"Win\":\r\n self.cwin+=1\r\n self.cwinrate=(self.cwin/len(self.valid_games)*100)\r\n return f\"Le pourcentage de victoire avec ces conditions est {self.cwinrate}\"\r\n","sub_path":"ScrimDataAnalyse/Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"54312451","text":"import os\nimport re\nimport codecs\nfrom . 
import CommandLineHandler\n\n\ndef ManageWrapper(\n script_file_name_is,\n add_content_from_function,\n source_path_is='',\n dump_file_name_is='',\n content_file_name_is='',\n parent_manage_script_is='manage_main.py',\n base_file_name_is='base.html',\n isMain=False):\n\n keyword = os.path.basename(script_file_name_is).replace('update_', '').replace('.py', '')\n new_source_path = os.path.dirname(script_file_name_is) if source_path_is == '' else source_path_is\n new_dump_file_path = '../'+keyword+'.html' if dump_file_name_is == '' else dump_file_name_is\n new_content_path = 'contents_'+keyword+'.html' if content_file_name_is == '' else content_file_name_is\n\n page = Managing(\n source_path_is=new_source_path,\n content_replace_keywords_with=add_content_from_function,\n dump_file_name_is=new_dump_file_path,\n content_file_name_is=new_content_path,\n script_file_name_is=script_file_name_is,\n base_file_name_is=base_file_name_is,\n )\n\n if isMain:\n page.execute_as_main(parent_manage_script_is=parent_manage_script_is)\n else:\n page.execute_as_module()\n\n\nclass Managing:\n def __init__(self, source_path_is, content_replace_keywords_with, dump_file_name_is, content_file_name_is, script_file_name_is, base_file_name_is='base.html'):\n self._dump_file_name = dump_file_name_is\n self._content_file_name = content_file_name_is\n self._script_file_name = script_file_name_is\n self._base_file_name = base_file_name_is\n self._path = source_path_is\n self._content_populator = content_replace_keywords_with\n self._command_line_handler = CommandLineHandler.CommandLineHandler(self.merge_and_dump, self.get_dump_file_name())\n\n def get_dump_file_name(self):\n return self._dump_file_name\n\n def get_dump_file_path(self):\n return os.path.join(self._path, self._dump_file_name)\n\n def get_content_file_name(self):\n return self._content_file_name\n\n def get_content_file_path(self):\n return os.path.join(self._path, self._content_file_name)\n\n def get_script_file_name(self):\n return self._script_file_name\n\n def get_script_file_path(self):\n return os.path.join(self._path, self._script_file_name)\n\n def get_base_file_name(self):\n return self._base_file_name\n\n def get_base_file_path(self):\n return os.path.join(self._path, self._base_file_name)\n\n def get_project_path(self):\n return self._path\n\n def execute_as_main(self, parent_manage_script_is):\n self._command_line_handler.main_exec_interface(\n parent_manage_script=parent_manage_script_is,\n )\n\n def execute_as_module(self):\n self._command_line_handler.sub_exec_interface()\n\n def merge_and_dump(self):\n with codecs.open(self.get_content_file_path(), mode='r', encoding='utf-8') as content, codecs.open(self.get_base_file_path(), mode='r', encoding='utf-8') as base:\n base_structure = re.split('({{[\\s\\w]*}})', base.read())\n contents_structure = re.split('({{[\\s\\w]*}})', content.read())\n\n configurations = {}\n configurations['title'] = ''\n configurations['style'] = ''\n configurations['hasLeft'] = False\n configurations['hasBody'] = False\n configurations['hasRight'] = False\n configurations['left_content_left_tag'] = ''\n configurations['left_content_right_tag'] = ''\n configurations['right_content_left_tag'] = ''\n configurations['right_content_right_tag'] = ''\n\n self._content_populator(configurations, base_structure, contents_structure, self.get_project_path())\n self.replaceBaseWithContent(configurations, base_structure, contents_structure)\n with codecs.open(self.get_dump_file_path(), mode='w', encoding=\"utf-8\") as target:\n 
target.write(''.join(base_structure))\n\n\n def replaceBaseWithContent(self, configurations, base_structure, contents_structure):\n\n\n base_structure[base_structure.index('{{PAGE_TITLE}}')] = configurations['title']\n base_structure[base_structure.index('{{STYLE}}')] = configurations['style']\n\n left_content_marking = base_structure.index('{{LEFT_CONTENT}}')\n if configurations['hasLeft']:\n left_content_start_idx = contents_structure.index('{{LEFT_CONTENT_START}}')\n contents_structure[left_content_start_idx] = ''\n left_content_end_idx = contents_structure.index('{{LEFT_CONTENT_END}}')\n contents_structure[left_content_end_idx] = ''\n base_structure[left_content_marking:left_content_marking + 1] = [configurations['left_content_left_tag']] + contents_structure[left_content_start_idx:left_content_end_idx + 1] + [configurations['left_content_right_tag']]\n else:\n base_structure[left_content_marking] = ''\n\n body_content_marking = base_structure.index('{{BODY_CONTENT}}')\n if configurations['hasBody']:\n body_content_start_idx = contents_structure.index('{{BODY_CONTENT_START}}')\n contents_structure[body_content_start_idx] = ''\n body_content_end_idx = contents_structure.index('{{BODY_CONTENT_END}}')\n contents_structure[body_content_end_idx] = ''\n base_structure[body_content_marking:body_content_marking + 1] = contents_structure[body_content_start_idx:body_content_end_idx + 1]\n else:\n base_structure[body_content_marking] = ''\n\n right_content_marking = base_structure.index('{{RIGHT_CONTENT}}')\n if configurations['hasRight']:\n right_content_start_idx = contents_structure.index('{{RIGHT_CONTENT_START}}')\n contents_structure[right_content_start_idx] = ''\n right_content_end_idx = contents_structure.index('{{RIGHT_CONTENT_END}}')\n contents_structure[right_content_end_idx] = ''\n base_structure[right_content_marking:right_content_marking + 1] = [configurations['right_content_left_tag']] + contents_structure[right_content_start_idx:right_content_end_idx + 1] + [configurations['right_content_right_tag']]\n else:\n base_structure[right_content_marking] = ''\n","sub_path":"Sources/utils/Managing.py","file_name":"Managing.py","file_ext":"py","file_size_in_byte":6324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"623850114","text":"# Class that represent a queue data-structure using an array as implementation\nclass ArrayQueue(object):\n\n frontIndex = ... # type: int\n backIndex = ... # type: int\n capacity = ... # type: int\n array = ... 
# type: [object]\n\n    # Constructor\n    def __init__(self, capacity = 10):\n        self.frontIndex = 0\n        self.backIndex = 0\n\n        self.capacity = capacity\n        self.array = [None] * capacity\n\n    # Add a new element into the queue\n    def add(self, data):\n\n        # If the queue is full, then create a new array with double size\n        # Then copy each element from the old array to the new one\n        if self.getSize() == self.capacity:\n            self.capacity = 2 * self.capacity\n            newArray = [None] * self.capacity\n\n            for i in range(0, self.backIndex):\n                newArray[i] = self.array[i]\n            self.array = newArray\n\n        # Add the new array\n        self.array[self.backIndex] = data\n        self.backIndex +=1\n\n    # Removes the first element of the queue\n    def remove(self):\n        size = self.getSize()\n\n        # If the queue has no elements\n        if size == 0:\n            return\n\n        # If queue has one element\n        if size == 1:\n            self.frontIndex = 0\n            self.backIndex = 0\n            return\n\n        self.array[self.frontIndex] = None\n        self.frontIndex += 1\n\n    # Peek the element from the front of the queue\n    def peek(self):\n        if self.getSize() == 0:\n            return\n\n        return self.array[self.frontIndex]\n\n    # Return the size of the queue\n    def getSize(self):\n        return self.backIndex - self.frontIndex\n\n    # Return the capacity of the queue\n    def getCapacity(self):\n        return self.capacity\n\n    # Print the elements of the queue\n    def print(self):\n        for i in range(self.frontIndex, self.backIndex):\n            print(self.array[i])\n\n\narrayQueue = ArrayQueue()\n\n# Test 1\nprint(\"--- Test 1 ---\")\narrayQueue.add(11)\narrayQueue.add(13)\narrayQueue.add(15)\narrayQueue.print()\n\n# Test 2\nprint(\"--- Test 2 ---\")\narrayQueue.remove()\narrayQueue.remove()\narrayQueue.print()\n\n# Test 3\nprint(\"--- Test 3 ---\")\narrayQueue.remove()\nprint(\"Size of the queue: %s\" % arrayQueue.getSize())\narrayQueue.add(13)\narrayQueue.add(1302)\narrayQueue.print()\n\n# Test 4\nprint(\"--- Test 4 ---\")\nprint(\"Size of the queue: %s\" % arrayQueue.getSize())\nprint(\"First element of the queue: %s\" % arrayQueue.peek())\narrayQueue.print()\n","sub_path":"Exercises/Queues/ArrayQueue.py","file_name":"ArrayQueue.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"136660966","text":"from __future__ import print_function\n__docformat__ = 'restructedtext en'\nimport six.moves.cPickle as pickle\nimport os\nimport sys\nimport timeit\nimport json\nimport numpy\nimport theano\nimport theano.tensor as T\nfrom ann.lgrg.logistic_regression import LogisticRegression\nfrom ann.loader.mnist_loader import MnistLoader\nfrom projects.ktnc.ktnc_loader import Ktnc_Loader\nfrom ann.mlp.mlp import MLP \nimport app_global as ag\n\nclass Mlp_Ktnc_Engine(object):\n    def __init__(self):\n        print(\"create Kaggle Titanic Model\")\n        self.learning_rate = 0.0001\n        self.L1_reg= 0.0\n        self.L2_reg=0#0.0001\n        self.n_epochs=1000000\n        self.batch_size=16 # 20\n        layer_nums = 3\n        self.n_in = 7\n        self.n_hidden = 10\n        self.n_out = 2\n        self.model_file = 'ktnc_1.pkl'\n        self.dataset = 'mnist.pkl.gz'\n\n    def build_model(self):\n        loader = Ktnc_Loader()\n        train_set_x, train_set_y, valid_set_x, valid_set_y, \\\n            test_set_x, test_set_y = loader.load_data()\n        n_train_batches = train_set_x.get_value(borrow=True).shape[0] \\\n            // self.batch_size\n        n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] \\\n            // self.batch_size\n        n_test_batches = test_set_x.get_value(borrow=True).shape[0] \\\n            // self.batch_size\n        print('b=%d' % n_train_batches)\n        print('... 
building the model')\n index = T.lscalar() # index to a [mini]batch\n x = T.matrix('x') # the data is presented as rasterized images\n y = T.ivector('y') # the labels are presented as 1D vector of\n rng = numpy.random.RandomState(1234)\n '''\n # 全新运行时\n classifier0 = pickle.load(open('/home/osboxes/dev/wky/work/' + \\\n wky/repository/ktnc_5_1.pkl', 'rb'))\n print('读入已有模型')\n classifier = MLP(\n rng=rng,\n input=x,\n n_in=self.n_in,\n n_hidden=self.n_hidden,\n n_out=self.n_out,\n hW = classifier0.hiddenLayer.W,\n hb = classifier0.hiddenLayer.b,\n W = classifier0.logRegressionLayer.W.get_value(borrow=True),\n b = classifier0.logRegressionLayer.b.get_value(borrow=True)\n )\n '''\n print('使用全新模型')\n classifier = MLP(\n rng=rng,\n input=x,\n n_in=self.n_in,\n n_hidden=self.n_hidden,\n n_out=self.n_out\n )\n cost = (\n classifier.negative_log_likelihood(y)\n + self.L1_reg * classifier.L1\n + self.L2_reg * classifier.L2_sqr\n )\n test_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: test_set_x[index * self.batch_size:(index + 1) \\\n * self.batch_size],\n y: test_set_y[index * self.batch_size:(index + 1) \\\n * self.batch_size]\n }\n )\n tv_model = theano.function(\n inputs = [index],\n outputs = classifier.errors(y),\n givens = {\n x: train_set_x[index * self.batch_size:(index+1) \\\n *self.batch_size],\n y: train_set_y[index * self.batch_size:(index+1) \\\n *self.batch_size]\n }\n )\n validate_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: valid_set_x[index * self.batch_size:(index + 1) \\\n * self.batch_size],\n y: valid_set_y[index * self.batch_size:(index + 1) \\\n * self.batch_size]\n }\n )\n gparams = [T.grad(cost, param) for param in classifier.params]\n updates = [\n (param, param - self.learning_rate * gparam)\n for param, gparam in zip(classifier.params, gparams)\n ]\n train_model = theano.function(\n inputs=[index],\n outputs=cost,\n updates=updates,\n givens={\n x: train_set_x[index * self.batch_size: (index + 1) \\\n * self.batch_size],\n y: train_set_y[index * self.batch_size: (index + 1) \\\n * self.batch_size]\n }\n )\n return (classifier, n_train_batches, n_valid_batches, \n n_test_batches, train_model, validate_model, \n test_model, tv_model)\n\n def train(self):\n classifier, n_train_batches, n_valid_batches, n_test_batches, \\\n train_model, validate_model, test_model, tv_model \\\n = self.build_model()\n print('... 
training')\n patience = 5000000*500 \n patience_increase = 12 \n improvement_threshold = 0.995 \n validation_frequency = min(n_train_batches, patience // 2)\n best_validation_loss = numpy.inf\n best_iter = 0\n test_score = 0.\n start_time = timeit.default_timer()\n epoch = 0\n done_looping = False\n while (epoch < self.n_epochs) and (not done_looping):\n epoch = epoch + 1\n for minibatch_index in range(n_train_batches):\n minibatch_avg_cost = train_model(minibatch_index)\n iter = (epoch - 1) * n_train_batches + minibatch_index\n if (iter + 1) % validation_frequency == 0:\n validation_losses = [validate_model(i) for i\n in range(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n tv_losses = [tv_model(i) for i in range(\\\n n_train_batches)]\n m_tv_losses = numpy.mean(tv_losses)\n print(\n '%i, %i/%i, t %f %%(%f) : v: %f %%' \n %\n (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n minibatch_avg_cost * 100,\n m_tv_losses * 100.,\n this_validation_loss * 100.\n )\n )\n this_validation_loss = (this_validation_loss + \\\n m_tv_losses)/2.0\n if this_validation_loss < best_validation_loss:\n if (\n this_validation_loss < best_validation_loss *\n improvement_threshold\n ):\n patience = max(patience, \n iter * patience_increase)\n best_validation_loss = this_validation_loss\n best_iter = iter\n test_losses = [test_model(i) for i\n in range(n_test_batches)]\n test_score = numpy.mean(test_losses)\n with open(ag.ann_mf_dir + self.model_file, \\\n 'wb') as f:\n pickle.dump(classifier, f)\n print(('##epoch %i, minibatch %i/%i, test error of '\n 'best model %f %%') %\n (epoch, minibatch_index + 1, n_train_batches,\n this_validation_loss * 100.))\n self.learning_rate = float(input('learning_rate:'))\n if patience <= iter:\n done_looping = False\n break\n end_time = timeit.default_timer()\n print(('Optimization complete. 
Best validation score of %f %% '\n 'obtained at iteration %i, with test performance %f %%') %\n (best_validation_loss * 100., best_iter + 1, \n test_score * 100.))\n print(('The code for file ' +\n os.path.split(__file__)[1] +\n ' ran for %.2fm' % ((end_time - start_time) / 60.)), \n file=sys.stderr)\n\n def run(self):\n classifier = pickle.load(open(self.model_file, 'rb'))\n predict_model = theano.function(\n inputs=[classifier.input],\n outputs=classifier.logRegressionLayer.y_pred\n )\n dataset = self.dataset\n loader = MnistLoader()\n datasets = loader.load_data(dataset)\n test_set_x, test_set_y = datasets[2]\n test_set_x = test_set_x.get_value()\n predicted_values = predict_model(test_set_x[:10])\n print(\"Predicted values for the first 10 examples in test set:\")\n print(predicted_values)\n\n def predict(self, samples):\n model_file = '/home/osboxes/dev/wky/work/wky/repository/' +\\\n 'ktnc_1.pkl'\n classifier = pickle.load(open(model_file, 'rb'))\n predict_model = theano.function(\n inputs = [classifier.input],\n outputs = classifier.logRegressionLayer.y_pred\n )\n return predict_model(samples)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"projects/ktnc/mlp_ktnc_engine.py","file_name":"mlp_ktnc_engine.py","file_ext":"py","file_size_in_byte":9386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"33607018","text":"import numpy as np\nimport scipy.io as sio\nimport os\nimport re\nimport cPickle\n\ndef mat2array(male,female):\n # concatenate two gender information\n people = np.concatenate([male,female])\n # remove direct current\n people_mean = np.mean(people,axis=0)\n people_process = np.zeros(people.shape)\n for i in range(people.shape[0]):\n people_process[i,:] = people[i,:]-people_mean\n # set covariance of each element = 1\n people_cov=np.zeros((people.shape[1]))\n for i in range(people.shape[1]):\n people_cov[i] = np.cov(people[:,i])\n people_cov_sqrt = np.sqrt(people_cov)\n people_final = np.zeros(people.shape)\n for i in range(people.shape[0]):\n people_final[i,:] = np.divide(people_process[i,:],people_cov_sqrt)\n \n data = people_final\n label = np.concatenate([np.ones((male.shape[0])),np.zeros((female.shape[0]))]).astype('int')\n return data,label\n \nname = 'trainfull.mat'\nmat = sio.loadmat(name)\nmale = mat['male_train_full']\nfemale = mat['female_train_full']\ndata,label = mat2array(male,female)\nwith open('trainfull.array','wb') as f:\n cPickle.dump(data,f)\n cPickle.dump(label,f)\n\nname = 'trainpart.mat'\nmat = sio.loadmat(name)\nmale = mat['male_train_part']\nfemale = mat['female_train_part']\ndata,label = mat2array(male,female)\nwith open('trainpart.array','wb') as f:\n cPickle.dump(data,f)\n cPickle.dump(label,f)\n\nname = 'test.mat'\nmat = sio.loadmat(name)\nmale = mat['male_test']\nfemale = mat['female_test']\ndata,label = mat2array(male,female)\nwith open('test.array','wb') as f:\n cPickle.dump(data,f)\n cPickle.dump(label,f)\n","sub_path":"hw2/mlp/mat2array.py","file_name":"mat2array.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"546431524","text":"def ft_len(st):\r\n kol = 0\r\n for i in st:\r\n kol += 1\r\n return kol\r\n\r\n\r\ndef ft_join(x, se=\" \"):\r\n res = ''\r\n for i in range(ft_len(x)):\r\n if i == ft_len(x) - 1:\r\n res += x[i]\r\n else:\r\n res += x[i] + se\r\n return res\r\n\r\n# print(ft_join([\"a\", \"s\", \"d\"], \", 
\"))\r\n","sub_path":"ft_join.py","file_name":"ft_join.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"22312651","text":"import numpy as np\nimport utils\nimport graphics\nimport annealing\nimport geometry\n\nSimulation_rates = []\nSimulation_differences = []\n\n\ndef simulations(nodes, n, cooling_schedule, distance_matrix=np.array([]), relative=False):\n\n simulation_costs = []\n global Simulation_rates, Simulation_differences\n Simulation_rates = []\n Simulation_differences = []\n\n simulated_annealing_func = annealing.simulated_annealing\n if relative == True:\n simulated_annealing_func = annealing.simulated_annealing_relative\n\n for i in range(n):\n traveling_list = utils.initial_perm(nodes)\n optimized_list, costs = simulated_annealing_func(traveling_list, nodes, cooling_schedule, distance_matrix)\n simulation_costs.append(costs)\n Simulation_rates.append(annealing.Rates.copy())\n Simulation_differences.append(annealing.Cost_difference.copy())\n\n return simulation_costs\n\n\ndef distance_solutions(nodes, costs, solution):\n global_solution_cost = geometry.total_distance(nodes, solution)\n xs, ys = [], []\n for c in costs: # Number of loop can differ between each simulations\n xs.append(range(len(c)))\n ys.append(np.array(c) - global_solution_cost)\n\n graphics.simple_scatter(xs, ys, 'Convergence to optimal solution', 'Number of operations', 'Cost difference with optimal solution')\n\n\nif __name__ == '__main__':\n\n # Case eil51\n nodes = utils.tsp_reader('./tsp_data/eil51.tsp.txt')\n traveling_list = utils.initial_perm(nodes)\n solution = utils.tsp_solution_reader('./tsp_data/eil51.opt.tour.txt')\n cooling_schedule = annealing.CoolingSchedule(\n T=80,\n steps=100,\n lowering_method=(lambda t: .9 * t),\n annealing_condition=(lambda i: i < 25),\n )\n\n costs = simulations(nodes, 10, cooling_schedule)\n distance_solutions(nodes, costs, solution)\n","sub_path":"assignment_3/source_code/convergence.py","file_name":"convergence.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"601927057","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n# Load the training dataset\ntrain_features = np.load(\"train_features.npy\")\ntrain_labels = np.load(\"train_labels.npy\").astype(\"int8\")\n\nn_train = train_labels.shape[0]\n\ndef visualize_digit(features, label):\n # Digits are stored as a vector of 400 pixel values. 
Here we\n # reshape it to a 20x20 image so we can display it.\n \n plt.imshow(features.reshape(20, 20), cmap=\"binary\")\n plt.xlabel(\"Digit with label \" + str(label))\n plt.show()\n\nvisualize_digit(train_features[-2:],train_labels[1])","sub_path":"HW/HW1/classify-mnist/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"565820695","text":"from django.shortcuts import get_object_or_404\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom hellonewman.portfolio.models import *\nfrom taggit.models import Tag\n\n\ndef gallery_home(request):\n \"\"\"\n displays all published entries in the gallery\n \n \"\"\"\n works = PortfolioImage.objects.filter(published=True)\n \n return render_to_response(\"portfolio/index.html\", {\n \"works\": works,\n }, context_instance=RequestContext(request))\n\n\ndef gallery_detail(request, slug):\n \"\"\"\n Show detailed version of image\n\n \"\"\"\n work = get_object_or_404(PortfolioImage, slug=slug, published=True)\n work.increase_read_count()\n\n return render_to_response(\"portfolio/detail.html\", {\n \"work\": work,\n }, context_instance=RequestContext(request))\n\n\ndef category_list(request, slug):\n \"\"\"\n displays all images for the given category\n expects [slug]\n \"\"\"\n category = get_object_or_404(PortfolioCategory, slug=slug)\n tag = category.name\n works = PortfolioImage.objects.filter(published=True, category=category)\n\n return render_to_response(\"portfolio/index.html\", {\n 'tag': tag,\n \"category\": category,\n \"works\": works,\n }, context_instance=RequestContext(request))\n\n\ndef tag_list(request, tag):\n \"\"\"\n displays all images for the given tag\n expects [slug]\n \"\"\"\n\n tags = Tag.objects.get(slug=tag)\n works = PortfolioImage.objects.filter(published=True, tags__slug__in=[tag])\n\n return render_to_response(\"portfolio/index.html\", {\n 'tag': tag,\n \"category\": tags.name.title(),\n \"works\": works,\n }, context_instance=RequestContext(request))\n","sub_path":"hellonewman/portfolio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"570290781","text":"import falcon\nimport falcon_cors\n\nclass CorsMiddleware(falcon_cors.CORSMiddleware):\n ''' Handle CORS. 
'''\n    def __init__(self):\n        cors = falcon_cors.CORS(allow_all_origins=True,\n                                allow_credentials_all_origins=True,\n                                allow_all_headers=True,\n                                allow_all_methods=True)\n        super().__init__(cors)\n\n    def process_resource(self, request, response, resource, params):\n        super().process_resource(request, response, resource, params)\n\n        # Raise 200 for OPTIONS requests so middleware lower in the stack are skipped\n        if request.method == 'OPTIONS':\n            raise falcon.HTTPStatus(falcon.HTTP_OK)\n","sub_path":"webapi/webapi/middleware/cors.py","file_name":"cors.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"140597697","text":"#!/usr/bin/env python3\nimport argparse\nimport os\nimport subprocess as sp\nimport time\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-s\", \"--system\", default=\"dev\", help=\"Specify system\")\nargs = parser.parse_args()\n\nCWD = os.path.dirname(os.path.abspath(__file__))\nSYSTEM = args.system\nNETWORK = SYSTEM\nIMAGE = os.path.basename(os.path.dirname(CWD)) + \"_\" + os.path.basename(CWD) + \"_\" + SYSTEM\nVOLUMES = {}\nVARIABLES = {}\nif SYSTEM == \"prod\":\n    PORTS = {\"80\": \"80\"}\nelse:\n    PORTS = {}\nRESTART = True if SYSTEM == \"prod\" else False\n\nsp.run([\"docker\", \"stop\", IMAGE], stderr=open(os.devnull, 'w'), stdout=open(os.devnull, 'w'))\nsp.run([\"docker\", \"rm\", IMAGE], stdout=open(os.devnull, 'w'))\nsp.run([\"docker\", \"build\", \"-t\", IMAGE, CWD], stdout=open(os.devnull, 'w'))\nsp.run([\"docker\", \"network\", \"create\", \"--driver\", \"bridge\", NETWORK], stderr=open(os.devnull, 'w'))\n\nOPTIONS = []\n\nif RESTART:\n    OPTIONS += [\"--restart\", \"always\"]\n\nfor k, v in VARIABLES.items():\n    OPTIONS += [\"-e\", f\"{k}={v}\"]\n\nfor k, v in PORTS.items():\n    OPTIONS += [\"-p\", f\"{k}:{v}\"]\n\nfor k, v in VOLUMES.items():\n    if not os.path.exists(k):\n        os.makedirs(k)\n    OPTIONS += [\"-v\", f\"{k}:{v}\"]\n\nsp.run([\"docker\", \"run\", \"-dit\", \"--name\", IMAGE, f\"--network={NETWORK}\", \"-e\", f\"IMAGE={IMAGE}\", \"-e\", f\"SYSTEM={SYSTEM}\"] + OPTIONS + [IMAGE])\n\nsp.run([\"docker\", \"ps\"])\n\n","sub_path":"http/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"208935583","text":"# -*- coding: utf-8 -*-\n# Created by apple on 2017/1/30.\n\nimport re\nimport plistlib\nfrom ..log import log\nfrom .regex import Regex\nfrom zipfile import ZipFile\nfrom ..config import Config\n\n\nasync def parse(file_path, icon_save_name):\n    ipa_file = ZipFile(file_path)\n\n    # parse the Info.plist path\n    ns = [n for n in ipa_file.namelist() if Regex.IPAInfoPlistPath.match(n)]\n    if not ns:\n        log.warning('parse info.plist failure: {}'.format(file_path))\n        return\n    plist_path = ns[-1]\n\n    # parse the plist\n    plist_data = ipa_file.read(plist_path)\n    plist_file = plistlib.loads(plist_data)\n\n    # parse the icon entry (the fallback must check the ~ipad key, not the same key again)\n    if plist_file.get('CFBundleIcons'):\n        icon_dict = plist_file['CFBundleIcons']\n    elif plist_file.get('CFBundleIcons~ipad'):\n        icon_dict = plist_file['CFBundleIcons~ipad']\n    else:\n        log.warning('parse icon failure: {}'.format(file_path))\n        return\n    icon_name = icon_dict['CFBundlePrimaryIcon']['CFBundleIconFiles'][-1]\n    log.debug('parse icon name: {}'.format(icon_name))\n\n    # get the icon path\n    ns = [n for n in ipa_file.namelist() if re.match('([^/]+/){{2}}{}(@\\dx)\\.png'.format(icon_name), n)]\n    if not ns:\n        log.warning('read icon failure: {}'.format(file_path))\n        
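# no archive entry matched the expected icon path pattern, so give up on this IPA\n        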
return\n    icon_path = ns[-1]\n    log.debug('parse icon path: {}'.format(icon_path))\n\n    icon_save_path = '{}/{}.png'.format(Config.icon_dir, icon_save_name)\n    with open(icon_save_path, 'wb+') as f:\n        f.write(ipa_file.read(icon_path))\n    log.debug('save icon success: {}'.format(icon_save_path))\n\n    # version number\n    version_number = plist_file['CFBundleShortVersionString']\n    # build number\n    build_number = plist_file['CFBundleVersion']\n    # bundle identifier\n    package_name = plist_file['CFBundleIdentifier']\n    # app name\n    app_name = plist_file['CFBundleDisplayName'] if plist_file.get('CFBundleDisplayName') else plist_file[\n        'CFBundleName']\n    log.debug('app: {}, V{} build {}, package: {}'.format(app_name, version_number, build_number, package_name))\n\n    # PackageInfoModel()\n","sub_path":"app/utils/ipa_parse.py","file_name":"ipa_parse.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"541947261","text":"import os\r\nfrom optparse import OptionParser\r\n\r\nparser = OptionParser()\r\n(options, args) = parser.parse_args()\r\nfilename = os.path.splitext(args[0])[0]\r\n\r\nos.system(\"ipython nbconvert \" + filename + \" --to latex --template citations.tplx\")\r\n\r\nos.system(\"latex \" + filename + \".tex\")\r\nos.system(\"bibtex \" + filename + \".aux\")\r\nos.system(\"pdflatex \" + filename + \".tex\")\r\nos.system(\"pdflatex \" + filename + \".tex\")","sub_path":"Decision Trees Validation/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"440070867","text":"mynumber = int(input())\n\nmystack = [int(i) for i in input().strip().split(' ')]\nmaxarea = 0 \n\nfrom collections import namedtuple\n\nInfo = namedtuple('Info', 'start height')\n\ndef max_rectangle_area(mystack):\n    \"\"\"Find the area of the largest rectangle that fits entirely under\n    the histogram.\n    \n    This is an O(n) solution!\n    \"\"\"\n    stack = []\n    top = lambda: stack[-1]\n    max_area = 0\n    pos = 0 # current position in the histogram\n    for pos, height in enumerate(mystack):\n        start = pos #position where rectangle starts\n        while True: \n            #if there is nothing in stack or height is greater than top of stack, push this to the stack\n            if not stack or height > top().height:\n                stack.append(Info(start, height)) # push\n            #keep popping from the stack until the height from the stack is less than the current\n            elif stack and height < top().height:\n                max_area = max(max_area, top().height*(pos-top().start))\n                start = stack.pop().start\n                continue\n            break # height == top().height goes here \n    pos += 1\n    \n\n    \n    for start, height in stack:\n        max_area = max(max_area, height*(pos-start))\n\n    \n    return max_area\n    \nprint(max_rectangle_area(mystack))\n    \n","sub_path":"hackerrank/largest_rectangle.py","file_name":"largest_rectangle.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"139290191","text":"from django.shortcuts import render, redirect\nfrom django.shortcuts import get_object_or_404\nfrom django.db.models import Count, OuterRef, Subquery\nfrom django.views.generic.list import ListView\nfrom django.forms import inlineformset_factory, NumberInput, Select\nfrom django.db.models.functions import Coalesce\nfrom django.db.models import Case, IntegerField, Sum, When, F, Q\nfrom django.conf import settings\n\nfrom eve_sde.models import Region, Constellation, 
SolarSystem, Moon, Planet\nfrom moon_tracker.utils import user_can_view_scans, user_can_add_scans, user_can_delete_scans\nfrom moon_tracker.models import ScanResult, ScanResultOre\nfrom moon_tracker.forms import BatchMoonScanForm\n\nclass MoonContainerListView(ListView):\n template_name = 'moon_tracker/grid_list.html'\n\n def get_context_data(self, **kwargs):\n context = super(MoonContainerListView, self).get_context_data(**kwargs)\n context['parent'] = self.get_parent()\n context['type'] = self.container_type\n return context\n\n def get_parent(self):\n return None\n\n def get_queryset(self):\n entity_scanned_count = (\n self.model.objects.raw(self.sql_query, [settings.MOON_TRACKER_MINIMUM_SCANS])\n )\n\n print(entity_scanned_count)\n\n entity_scanned_map = {x.id: x.num_scanned for x in entity_scanned_count}\n\n entities = (\n self.get_entities()\n .filter(**{self.system_accessor + 'security__lt': 0.5})\n .annotate(num_moons=Count(self.system_accessor + 'planets__moons'))\n )\n\n for r in entities:\n r.num_scanned = entity_scanned_map.get(r.id, 0)\n r.fraction_scanned = float(r.num_scanned) / r.num_moons\n\n return sorted(list(entities), key=lambda r: (-r.fraction_scanned, r.name))\n\n\nclass RegionListView(MoonContainerListView):\n model = Region\n container_type = 'universe'\n id_accessor = 'planet__system__constellation__region__id'\n system_accessor = 'constellations__systems__'\n\n sql_query = '''\n SELECT r.id, r.name, COUNT(*) as num_scanned\n FROM (\n SELECT * FROM (\n SELECT moon_id AS id, COUNT(*) AS count\n FROM moon_tracker_scanresult\n GROUP BY moon_id\n ) AS moons\n INNER JOIN eve_sde_moon m\n ON (m.id = moons.id)\n WHERE moons.count >= %s\n ) m\n INNER JOIN \"eve_sde_planet\" p\n ON (m.\"planet_id\" = p.\"id\")\n INNER JOIN \"eve_sde_solarsystem\" s\n ON (p.\"system_id\" = s.\"id\")\n INNER JOIN \"eve_sde_constellation\" c\n ON (s.\"constellation_id\" = c.\"id\")\n INNER JOIN \"eve_sde_region\" r\n ON (c.\"region_id\" = r.\"id\")\n GROUP BY r.id\n '''\n\n def get_entities(self):\n return self.model.objects.filter(id__lt=11000000)\n\n\nclass ConstellationListView(MoonContainerListView):\n model = Constellation\n container_type = 'region'\n id_accessor = 'planet__system__constellation__id'\n system_accessor = 'systems__'\n\n sql_query = '''\n SELECT c.id, c.name, COUNT(*) as num_scanned\n FROM (\n SELECT * FROM (\n SELECT moon_id AS id, COUNT(*) AS count\n FROM moon_tracker_scanresult\n GROUP BY moon_id\n ) AS moons\n INNER JOIN eve_sde_moon m\n ON (m.id = moons.id)\n WHERE moons.count >= %s\n ) m\n INNER JOIN \"eve_sde_planet\" p\n ON (m.\"planet_id\" = p.\"id\")\n INNER JOIN \"eve_sde_solarsystem\" s\n ON (p.\"system_id\" = s.\"id\")\n INNER JOIN \"eve_sde_constellation\" c\n ON (s.\"constellation_id\" = c.\"id\")\n GROUP BY c.id\n '''\n\n def get_entities(self):\n return self.model.objects.filter(region=self.get_parent())\n\n def get_parent(self):\n return get_object_or_404(Region, name=self.kwargs['region'])\n\n\nclass SolarSystemListView(MoonContainerListView):\n model = SolarSystem\n container_type = 'constellation'\n id_accessor = 'planet__system__id'\n system_accessor = ''\n\n sql_query = '''\n SELECT s.id, s.name, COUNT(*) as num_scanned\n FROM (\n SELECT * FROM (\n SELECT moon_id AS id, COUNT(*) AS count\n FROM moon_tracker_scanresult\n GROUP BY moon_id\n ) AS moons\n INNER JOIN eve_sde_moon m\n ON (m.id = moons.id)\n WHERE moons.count >= %s\n ) m\n INNER JOIN \"eve_sde_planet\" p\n ON (m.\"planet_id\" = p.\"id\")\n INNER JOIN \"eve_sde_solarsystem\" s\n ON 
(p.\"system_id\" = s.\"id\")\n GROUP BY s.id\n '''\n\n def get_entities(self):\n return self.model.objects.filter(constellation=self.get_parent())\n\n def get_parent(self):\n return get_object_or_404(Constellation, name=self.kwargs['constellation'])\n\n\ndef list_system(request, system):\n system_obj = get_object_or_404(SolarSystem.objects.prefetch_related('planets', 'planets__moons'), name=system)\n\n moons = set()\n\n for p in (\n Moon.objects\n .filter(planet__system=system_obj)\n .annotate(scan_count=Count('scans'))\n .filter(scan_count__gte=settings.MOON_TRACKER_MINIMUM_SCANS)\n .values_list('id')\n ):\n moons.add(p[0])\n\n return render(\n request,\n 'moon_tracker/system_list.html',\n context={\n 'valid_moons': moons,\n 'parent': system_obj,\n 'type': 'system'\n }\n )\n\ndef moon_detail(request, system, planet, moon):\n moon_obj = get_object_or_404(Moon, number=moon, planet__number=planet, planet__system__name=system)\n\n form = ScanResultOreFormSet = inlineformset_factory(\n ScanResult,\n ScanResultOre,\n fields=('ore', 'quantity'),\n can_delete=False,\n widgets={\n 'ore': Select(attrs={'class': 'custom-select form-control ore-type-input'}),\n 'quantity': NumberInput(attrs={'value': 0, 'max': 100, 'class': 'form-control ore-percentage-input'})\n }\n )\n\n scans = ScanResult.objects.filter(moon=moon_obj)\n\n return render(\n request,\n 'moon_tracker/moon_detail.html',\n context={\n 'moon': moon_obj,\n 'scans': scans,\n 'can_view': user_can_view_scans(request.user, moon_obj),\n 'can_add': user_can_add_scans(request.user, moon_obj),\n 'can_delete': user_can_delete_scans(request.user, moon_obj),\n 'form': form\n }\n )\n\ndef batch_submit(request):\n if request.method == 'POST':\n form = BatchMoonScanForm(request.POST)\n\n if form.is_valid():\n for moon, materials in form.cleaned_data['data'].items():\n result = ScanResult.objects.create(\n moon_id=moon,\n owner=request.user\n )\n\n for ore, quantity in materials.items():\n result.constituents.create(\n ore=ore,\n quantity=quantity\n )\n\n return redirect('/')\n else:\n form = BatchMoonScanForm()\n\n return render(request, 'moon_tracker/batch_submit.html', {'form': form})\n","sub_path":"elmo/moon_tracker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"576107931","text":"#!/usr/bin/env python3\n\nimport sys\nimport csv\n\nfile_name1 = input(\"Insert LD FET file name:\\n\")\n\ncsv.register_dialect('Comma_Delin', delimiter=',', quoting=csv.QUOTE_NONE, escapechar='\\\\')\n\n#This will only work with SNP_LD_file_DOR0.001 files. Other files may contain columns in different orders.\n#Pulls Site position with corresponding FET value\n\nwith open(file_name1, 'r') as File1:\n\treader = csv.reader(File1, dialect='Comma_Delin')\n\tdict_FET = {}\n\tfor rows in reader:\n\t\tkey = rows[0]\n\t\tdict_FET[key] = rows[3]\t\n\tdel dict_FET['Pos1']\n\tprint(\"Fife position and FET value for\" + \" \" + str(file_name1) + \" \" + \"compiled\")\t\t\n\tFile1.close()\n\t\nprint(dict_FET)\n\t\nfile_name2 = input(\"Insert FST file name:\\n\")\n\n#This will only work for FST_sites output files. 
Other files may contain columns in different orders.\n#Pulls Site position with corresponding FST value \n\nwith open(file_name2, 'r') as File2:\n\treader = csv.reader(File2, dialect='Comma_Delin')\n\tdict_FST = {}\n\tfor rows in reader:\n\t\tkey = rows[1]\n\t\tdict_FST[key] = rows[2]\n\tdel dict_FST['Position']\n\tFile2.close()\n\t\nprint(\"FST values and SNP positions for\" + \" \" + str(file_name2) + \" \" + \"compiled\")\n\nprint(dict_FST)\n\n#Match site values and combine FET and FST values, None where value is not present\n\t\t\nkeys = dict_FET.keys()\ndict_combined = {k: [dict_FET.get(k), dict_FST.get(k)] for k in keys}\t\t\n\n\nprint(\"--------------------------------------------\")\nprint(dict_combined)\n\n\nnew_file = input(\"Insert a file name for your new file, include .csv:\\n\")\n\n#Write combined values to a new file\n\nwith open(new_file, 'w', newline='') as File3:\n\twriter = csv.writer(File3, dialect='Comma_Delin')\n\twriter.writerows([row[0]] + row[1] for row in dict_combined.items())\n\nprint(str(new_file) + ' ' + \"has been created in your current directory!\")\n\t\n","sub_path":"Combined_FST_FET.py","file_name":"Combined_FST_FET.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"548903500","text":"import commands\n\n\nclass HeavyProcessStat(object):\n\n def __init__(self, options):\n if \"proc_number\" in options:\n self.howMany = int(options[\"proc_number\"])\n else:\n self.howMany = 5\n\n self.prevCpuDict = {}\n self.prevMemDict = {}\n\n def report(self):\n howMany = self.howMany\n cpuDict = {}\n memDict = {}\n outCpu = commands.getoutput(\"ps xrco %cpu,command\")\n # -o for custom format, -r for cpu sorting, -c for just process name (not full path, -x for all processes)\n outMem = commands.getoutput(\"ps xmco %mem,command\")\n for line in outCpu.split(\"\\n\")[1:]: # ommiting shit\n s = line.split(None, 1)\n # btw how to not pass a separator (use default separator == any whitespace) and split with max split? Any ideas? \"\\s\" separator doesn't seem to work\n # this is nessesary since we can have processes with not unique\n # name like \"Google Chrome\"\n if s[1] in cpuDict:\n cpuDict[s[1]] += float(s[0])\n else:\n cpuDict[s[1]] = float(s[0])\n howMany -= 1\n\n # means we already gathered top 5 proceses with unique name!\n if howMany == 0:\n break\n\n howMany = self.howMany\n\n for line in outMem.split(\"\\n\")[1:]: # ommiting shit\n s = line.split(None, 1)\n # btw how to not pass a separator (use default separator == any whitespace) and split with max split? Any ideas? 
\"\\s\" separator doesn't seem to work\n # this is nessesary since we can have processes with not unique\n # name like \"Google Chrome\"\n if s[1] in memDict:\n memDict[s[1]] += float(s[0])\n else:\n memDict[s[1]] = float(s[0])\n howMany -= 1\n\n # means we already gathered top 5 proceses with unique name!\n if howMany == 0:\n break\n\n memList = []\n cpuList = []\n\n for process in cpuDict:\n if process in self.prevCpuDict:\n if cpuDict[process] > self.prevCpuDict[process]:\n tendency = 1\n elif cpuDict[process] == self.prevCpuDict[process]:\n tendency = 0\n elif cpuDict[process] < self.prevCpuDict[process]:\n tendency = -1\n else:\n tendency = 1\n\n cpuList.append({\n \"process\": process,\n \"value\": cpuDict[process],\n \"tendency\": tendency,\n })\n\n self.prevCpuDict = cpuDict\n\n for process in memDict:\n if process in self.prevMemDict:\n if memDict[process] > self.prevMemDict[process]:\n tendency = 1\n elif memDict[process] == self.prevMemDict[process]:\n tendency = 0\n elif memDict[process] < self.prevMemDict[process]:\n tendency = -1\n else:\n tendency = 1\n memList.append({\n \"process\": process,\n \"value\": memDict[process],\n \"tendency\": tendency,\n })\n self.prevMemDict = memDict\n\n return {\n \"cpuList\": cpuList,\n \"memList\": memList,\n }\n","sub_path":"lib/probes/osx/HeavyProcessStat.py","file_name":"HeavyProcessStat.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"427281414","text":"import json\nimport os\nfrom multiprocessing import Pool\n\nimport nltk\nfrom nltk.translate.bleu_score import SmoothingFunction\n\nfrom utils.metrics.Metrics import Metrics\nfrom utils.pycocotools.coco import COCO\n\n\nclass BleuCoco(Metrics):\n def __init__(self, test_text='', annotation_file='', gram=3):\n super().__init__()\n self.name = 'BleuCoco'\n self.coco = COCO(annotation_file)\n self.test_data = json.load(open(test_text, 'r'))\n self.gram = gram\n \n def get_score(self, is_fast=True, ignore=False):\n if ignore:\n return 0\n return self.get_bleu_parallel()\n\n def calc_bleu(self, reference, hypothesis, weight):\n return nltk.translate.bleu_score.sentence_bleu(reference, hypothesis, weight,\n smoothing_function=SmoothingFunction().method1)\n\n def get_bleu(self):\n ngram = self.gram\n bleu = list()\n weight = tuple((1. / ngram for _ in range(ngram)))\n for hypothesis in self.test_data:\n annIds = self.coco.getAnnIds(imgIds=hypothesis['id'])\n anns = self.coco.loadAnns(annIds)\n bleu.append(self.calc_bleu(anns, hypothesis['caption'], weight))\n return sum(bleu) / len(bleu)\n\n def get_bleu_parallel(self, reference=None):\n ngram = self.gram\n weight = tuple((1. 
/ ngram for _ in range(ngram)))\n        pool = Pool(os.cpu_count())\n        result = list()\n        # self.test_data is already a json-loaded list (see __init__), so iterate it directly\n        # instead of calling open() on it\n        for hypothesis in self.test_data:\n            annIds = self.coco.getAnnIds(imgIds=hypothesis['id'])\n            anns = self.coco.loadAnns(annIds)\n            result.append(pool.apply_async(self.calc_bleu, args=(anns, hypothesis['caption'], weight)))\n        score = 0.0\n        cnt = 0\n        for i in result:\n            score += i.get()\n            cnt += 1\n        pool.close()\n        pool.join()\n        return score / cnt\n","sub_path":"utils/metrics/BleuCoco.py","file_name":"BleuCoco.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"378168029","text":"import os\n\nfrom functools import wraps\nfrom qtpy.QtCore import QThread, Signal\nfrom qtpy.QtWidgets import QDialog, QMessageBox\nfrom qtpy.QtGui import QIntValidator\nfrom qtpy.uic import loadUi\nimport astropy.units as u\nfrom specutils import Spectrum1D\nfrom specutils.manipulation.smoothing import (box_smooth, gaussian_smooth,\n                                              median_smooth, trapezoid_smooth)\n\nfrom ...core.items import PlotDataItem\nfrom ...core.plugin import plugin\nfrom ...core.operations import FunctionalOperation\n\n# Dictionary to store available kernel options.\n#\n# KERNEL_REGISTRY:\n#     kernel_type: Type of kernel\n#         name: Display name\n#         unit_label: Display units of kernel size (singular)\n#         size_dimension: Dimension of kernel (width, radius, etc..)\n#         function: Smoothing function\nKERNEL_REGISTRY = {\n    \"box\": {\"name\": \"Box\",\n            \"unit_label\": \"Pixel\",\n            \"size_dimension\": \"Width\",\n            \"function\": box_smooth},\n    \"gaussian\": {\"name\": \"Gaussian\",\n                 \"unit_label\": \"Pixel\",\n                 \"size_dimension\": \"Std Dev\",\n                 \"function\": gaussian_smooth},\n    \"trapezoid\": {\"name\": \"Trapezoid\",\n                  \"unit_label\": \"Pixel\",\n                  \"size_dimension\": \"Width\",\n                  \"function\": trapezoid_smooth},\n    \"median\": {\"name\": \"Median\",\n               \"unit_label\": \"Pixel\",\n               \"size_dimension\": \"Width\",\n               \"function\": median_smooth}\n}\n\n\n@plugin(\"Smoothing\")\nclass SmoothingDialog(QDialog):\n    \"\"\"\n    Widget to handle user interactions with smoothing operations.\n    Allows the user to select spectra, kernel type and kernel size.\n    It utilizes smoothing functions in `~specutils.manipulation`.\n    Assigns the smoothing workload to a QThread instance.\n    \"\"\"\n    def __init__(self, parent=None, *args, **kwargs):\n        super().__init__(parent=parent, *args, **kwargs)\n\n        self.model_items = None\n\n        self._smoothing_thread = None  # Worker thread\n\n        self.kernel = None  # One of the sub-dicts in KERNEL_REGISTRY\n        self.function = None  # function from `~specutils.manipulation.smoothing`\n        self.data = None  # Current `~specviz.core.items.DataItem`\n        self.size = None  # Current kernel size\n        self._already_loaded = False\n\n        #\n        # Do the first-time loading and initialization of the GUI\n        #\n        loadUi(os.path.abspath(\n            os.path.join(os.path.dirname(__file__),\n                         \".\", \"smoothing.ui\")), self)\n\n        self.smooth_button.clicked.connect(self.accept)\n        self.cancel_button.clicked.connect(self.close)\n        self.data_combo.currentIndexChanged.connect(self._on_data_change)\n\n        for key in KERNEL_REGISTRY:\n            kernel = KERNEL_REGISTRY[key]\n            self.kernel_combo.addItem(kernel[\"name\"], key)\n        self.kernel_combo.currentIndexChanged.connect(self._on_kernel_change)\n\n        # Add integer validator to size input field\n        self.size_input.setValidator(QIntValidator())\n\n    @plugin.tool_bar(\"Smoothing\", location=\"Operations\")\n    def on_action_triggered(self):\n        \"\"\"\n        Triggers 
the display of the dialog where users may enter smoothing\n options.\n \"\"\"\n # Update the current list of available data items\n self.model_items = self.hub.data_items\n\n self._display_ui()\n self.exec_()\n\n def _display_ui(self):\n \"\"\"\n Things to do each time the Smoothing GUI is re-displayed.\n \"\"\"\n self.data_combo.clear()\n for index, data in enumerate(self.model_items):\n self.data_combo.addItem(data.name, index)\n\n self._on_data_change(0)\n self._on_kernel_change(0)\n\n self.set_to_current_selection()\n self.smooth_button.setEnabled(True)\n self.cancel_button.setEnabled(True)\n\n def set_to_current_selection(self):\n \"\"\"Sets Data selection to currently active data\"\"\"\n current_item = self.hub.workspace.current_item\n if current_item is not None:\n if isinstance(current_item, PlotDataItem):\n current_item = current_item.data_item\n if current_item is not None and current_item in self.model_items:\n index = self.model_items.index(current_item)\n self.data_combo.setCurrentIndex(index)\n\n def _on_kernel_change(self, index):\n \"\"\"Callback for kernel combo index change\"\"\"\n key = self.kernel_combo.currentData()\n kernel = KERNEL_REGISTRY[key] # Kernel type\n self.size_label.setText(kernel[\"size_dimension\"])\n self.unit_label.setText(kernel[\"unit_label\"]+\"s\")\n self.function = kernel[\"function\"]\n self.kernel = kernel\n\n def _on_data_change(self, index):\n \"\"\"Callback for data combo index change\"\"\"\n data_index = self.data_combo.currentData()\n\n if data_index is not None and len(self.model_items) > 0:\n self.data = self.model_items[data_index]\n\n def _generate_output_name(self):\n \"\"\"Generate a name for output spectra\"\"\"\n unit_label = self.kernel[\"unit_label\"].lower()\n unit_format = \"{0} {1}\" if self.size == 1. 
else \"{0} {1}s\"\n size_text = unit_format.format(self.size, unit_label)\n\n return \"{0} Smoothed({1}, {2})\".format(self.data.name, self.kernel[\"name\"], size_text)\n\n def is_size_valid(self):\n \"\"\"\n Check if size input is valid.\n Marks LineEdit red if input is invalid.\n\n returns\n -------\n bool: True if no errors\n \"\"\"\n success = True\n try:\n size = float(self.size_input.text())\n if size <= 0:\n success = False\n except ValueError:\n success = False\n\n if success:\n self.size_input.setStyleSheet(\"\")\n else:\n red = \"background-color: rgba(255, 0, 0, 128);\"\n self.size_input.setStyleSheet(red)\n\n return success\n\n def accept(self):\n \"\"\"Called when the user clicks the \"Smooth\" button of the dialog.\"\"\"\n if not self.is_size_valid():\n return\n\n self.smooth_button.setEnabled(False)\n self.cancel_button.setEnabled(False)\n\n self.size = int(self.size_input.text())\n\n if self.data is not None:\n # This wrapper function is necessary for cases where the specutils\n # functions expect a spectrum1d, but the data provided is a simple\n # array or quantity.\n def func_convert(func):\n @wraps(func)\n def wrapper(data, spectral_axis, *args, **kwargs):\n spec = Spectrum1D(flux=u.Quantity(data),\n spectral_axis=spectral_axis)\n return func(spec, *args, **kwargs).flux.value\n return wrapper\n\n # Generate a smoothing operation to place on the operation stack.\n # This allows for playback via stack singleton.\n smoothing_operation = FunctionalOperation(\n func_convert(self.function), self.size,\n name=\"Smoothing Operation ({}, size={})\".format(\n self.function.__name__, self.size))\n\n self._smoothing_thread = SmoothingThread(self.data.spectrum, self.size, self.function)\n self._smoothing_thread.finished.connect(self.on_finished)\n self._smoothing_thread.exception.connect(self.on_exception)\n\n self._smoothing_thread.start()\n\n def on_finished(self, spec):\n \"\"\"\n Called when the `QThread` has finished performing\n the smoothing operation.\n Parameters\n ----------\n spec : `~specutils.Spectrum1D`\n The result of the smoothing operation.\n \"\"\"\n name = self._generate_output_name()\n data_item = self.hub.workspace.model.add_data(spec=spec, name=name)\n self.hub.workspace.force_plot(data_item)\n self.close()\n\n def on_exception(self, exception):\n \"\"\"\n Called when the `QThread` runs into an exception.\n Parameters\n ----------\n exception : Exception\n The Exception that interrupted the `QThread`.\n \"\"\"\n self.smooth_button.setEnabled(True)\n self.cancel_button.setEnabled(True)\n\n info_box = QMessageBox(parent=self)\n info_box.setWindowTitle(\"Smoothing Error\")\n info_box.setIcon(QMessageBox.Critical)\n info_box.setText(str(exception))\n info_box.setStandardButtons(QMessageBox.Ok)\n info_box.show()\n\n\nclass SmoothingThread(QThread):\n \"\"\"\n Thread in which a single smoothing operation\n is performed to ensure that the UI does not\n freeze while the operation is running.\n\n Parameters\n ----------\n data : `~specutils.Spectrum1D`\n size : Number\n Smoothing kernel size.\n func : function\n Smoothing function from `~specutils.manipulation.smoothing`.\n parent : `~specviz.widgets.smoothing.SmoothingDialog`\n\n Signals\n -------\n finished : Signal\n Notifies parent UI that smoothing is complete and is used to\n communicate the resulting data.\n exception : Signal\n Sends exceptions to parent UI where they are raised.\n \"\"\"\n finished = Signal(object)\n exception = Signal(Exception)\n\n def __init__(self, data, size, func, parent=None):\n 
super(SmoothingThread, self).__init__(parent)\n self._data = data\n self._size = size\n self._function = func\n self._tracker = None\n\n def run(self):\n \"\"\"Run the thread.\"\"\"\n try:\n new_spec = self._function(self._data, self._size)\n self.finished.emit(new_spec)\n except Exception as e:\n self.exception.emit(e)\n","sub_path":"specviz/plugins/smoothing/smoothing_dialog.py","file_name":"smoothing_dialog.py","file_ext":"py","file_size_in_byte":9821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"526119960","text":"#!/usr/bin/python\n\nimport sys\nimport os\n\nhwufile=sys.argv[1]\n\nplots = []\n\npath = os.path.split(hwufile)[0]\n\n\nfor l in open(hwufile):\n if l.startswith('#') or not l.strip():\n continue\n if '' in l:\n tags = l.split('\"')[1].split('|')\n try:\n name = (tags[0].strip()+tags[3].strip()).replace(' ','_').replace('TYPE@#1','_').replace('/','o').replace('[','').replace(']','')\n #name = (tags[0].strip()+tags[-1].strip()).replace(' ','_').replace('TYPE@','').replace('T@','').replace('/','o').replace('[','').replace(']','')\n except IndexError:\n name = tags[0].strip().replace(' ','_').replace('TYPE@','').replace('/','o').replace('[','').replace(']','')\n name = name.strip()\n while name in plots:\n name=name+'_1'\n# print 'DOING', name\n thisplot = open(os.path.join(path, name+'.dat'), 'w')\n plots.append(name)\n elif '' in l:\n thisplot.close()\n else:\n thisplot.write(l)\n","sub_path":"HwUtodat.py","file_name":"HwUtodat.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"133988959","text":"import torch.nn as nn\nimport torch.nn.functional as F\n#from pytorch3d.ops import GraphConv\nfrom torch_geometric.nn import GCNConv, GraphConv, GatedGraphConv, GENConv, SGConv\nimport torch\nfrom tqdm import tqdm\nimport torch.nn.functional as F\nfrom torch.nn import Linear, LayerNorm, ReLU\n\nfrom torch_geometric.nn import GENConv, DeepGCNLayer\n\nclass GCN(nn.Module):\n def __init__(self):\n super(GCN, self).__init__()\n self.conv1 = GraphConv(3, 16)\n self.conv2 = GraphConv(16, 3)\n\n def forward(self, mesh):\n verts = mesh.verts_packed()\n #TODO faster with packed\n edges = mesh.edges_packed()\n out = F.relu(self.conv1(verts, edges))\n return self.conv2(out, edges)\n #x = F.relu(x)\n #x = F.dropout(x, training=self.training)\n #x = self.conv2(x, edge_index)\n\n #return F.log_softmax(x, dim=1)\n\n\n\nclass Net(nn.Module):\n def __init__(self, in_dim, out_dim):\n super(Net, self).__init__()\n self.conv_first = GraphConv(in_dim, 128)\n #self.conv1 = GraphConv(16, 16)\n self.conv1 = GraphConv(128, 128) #TODO ?\n self.conv2 = GraphConv(128, 128)\n self.conv3 = GraphConv(128, 128)\n self.conv4 = GraphConv(128, 128)\n self.conv4 = GraphConv(128, 128)\n self.conv5 = GraphConv(128, 128)\n self.conv6 = GraphConv(128, 128)\n self.conv7 = GraphConv(128, 128)\n self.conv8 = GraphConv(128, 128)\n self.conv9 = GraphConv(128, 128)\n self.conv10 = GraphConv(128, 128)\n self.conv11 = GraphConv(128, 128)\n self.conv_last = GraphConv(128, out_dim)\n self.conv_pos = GraphConv(out_dim, 3)\n #self.conv2 = GraphConv(32, 3)\n #self.conv_last =GraphConv(out_dim, 3)\n #self.conv2 = GCNConv(248, 124)\n #self.conv3 = GCNConv(124, 124)\n #self.conv4 = GCNConv(124, 3)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n x = self.conv_first(x, edge_index)\n x = F.relu(x)\n #print(x.shape)\n #print(self.conv1(x, edge_index).shape)\n x = 
F.relu(self.conv1(x, edge_index)) + x\n x = F.relu(self.conv2(x, edge_index)) + x\n x = F.relu(self.conv3(x, edge_index)) + x\n x = F.relu(self.conv4(x, edge_index)) + x\n x = F.relu(self.conv5(x, edge_index)) + x\n x = F.relu(self.conv6(x, edge_index)) + x\n x = F.relu(self.conv7(x, edge_index)) + x\n x = F.relu(self.conv8(x, edge_index)) + x\n x = F.relu(self.conv9(x, edge_index)) + x\n x = F.relu(self.conv10(x, edge_index)) + x\n x = F.relu(self.conv11(x, edge_index)) + x\n out_features = x\n #x = F.relu(x) #TODO ?\n out_pos = self.conv_pos(x, edge_index)\n #x = F.relu(x)\n #x = self.conv2(x, edge_index)\n #x = F.relu(x)\n #x = F.relu(x)\n #x = F.dropout(x, training=self.training)\n #x = self.conv2(x, edge_index)\n #x = F.relu(x)\n #x = self.conv3(x, edge_index)\n #x = F.relu(x)\n\n return out_features, out_pos#self.conv4(x, edge_index)#F.log_softmax(x, dim=1)\n\nclass DeeperGCN(nn.Module):\n def __init__(self, in_dim, hidden_channels, out_dim, num_layers):\n super(DeeperGCN, self).__init__()\n\n self.node_encoder = nn.Linear(in_dim, hidden_channels)\n #self.edge_encoder = nn.Linear(data.edge_attr.size(-1), hidden_channels)\n\n self.layers = nn.ModuleList()\n for i in range(1, num_layers + 1):\n conv = GENConv(hidden_channels, hidden_channels, aggr='softmax',\n t=1.0, learn_t=True, num_layers=2, norm='layer')\n #conv = GraphConv(hidden_channels, hidden_channels)\n #conv = SGConv(hidden_channels, hidden_channels)\n norm = LayerNorm(hidden_channels, elementwise_affine=True)\n act = ReLU(inplace=True)\n\n layer = DeepGCNLayer(conv, norm, act, block='res+', dropout=0.0, #TODO\n ckpt_grad=i % 3)\n self.layers.append(layer)\n\n self.lin = Linear(hidden_channels, out_dim)\n\n def forward(self, data):\n x , edge_index = data.x, data.edge_index\n x = self.node_encoder(x)\n #edge_attr = self.edge_encoder(edge_attr)\n x = self.layers[0].conv(x, edge_index)#, edge_attr)\n\n for layer in self.layers[1:]:\n x = layer(x, edge_index)#, edge_attr)\n\n x = self.layers[0].act(self.layers[0].norm(x))\n #x = F.dropout(x, p=0.05, training=self.training) #TODO\n\n return x, self.lin(x)","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"521296126","text":"class BagPolicy:\n def __init__(self, policy_dict):\n self.policy = policy_dict\n\n def find_possible_bags(self, bag_type):\n \"\"\"\n Finds all bags that can contain the given bag_type\n \"\"\"\n\n ## we use dynamic programming to store results of bags\n evaluated = dict()\n contain = list()\n for bag in self.policy:\n ## we don't want to process itself\n if bag == bag_type:\n continue\n\n if bag in evaluated:\n if evaluated[bag]:\n contain.append(bag)\n else:\n can_contain = self._evaluate_contain(bag, bag_type, evaluated)\n\n if can_contain:\n contain.append(bag)\n\n return contain\n\n def _evaluate_contain(self, bag, bag_type, evaluated):\n \"\"\"\n Recursive function to see if bag can contain bag_type\n \"\"\"\n ## if this bag can't contain any other bag\n if len(self.policy[bag]) == 0:\n return bag == bag_type\n\n results = list()\n for inner_bag in self.policy[bag]:\n if inner_bag == bag_type:\n return True\n \n elif inner_bag in evaluated:\n results.append(evaluated[inner_bag])\n\n else:\n r = self._evaluate_contain(inner_bag, bag_type, evaluated)\n evaluated[inner_bag] = r\n results.append(r)\n\n r = any(results)\n evaluated[bag] = r\n return r\n\n def find_total_bags(self, bag_type):\n \"\"\"\n Finds total bags 
that can be inside bag_type\n \"\"\"\n ## we use dynamic programming to store results of bags\n evaluated = dict()\n contain = 0\n\n inner_bags = self.policy[bag_type]\n for bag, quantity in inner_bags.items():\n if bag in evaluated:\n ## we need to count the bag itself as well as the inside bags\n contain += evaluated[bag] * quantity + quantity\n else:\n ## we need to count the bag itself as well as the inside bags\n contain += self._find_inner_total(bag, evaluated) * quantity + quantity\n\n return contain\n\n def _find_inner_total(self, bag, evaluated):\n \"\"\"\n Recursive function to find total bags a bag can fit\n \"\"\"\n if len(self.policy[bag]) == 0:\n return 0\n\n result = 0\n for inner, quantity in self.policy[bag].items():\n if inner in evaluated:\n result += evaluated[inner] * quantity + quantity\n\n else:\n inside_count = self._find_inner_total(inner, evaluated)\n evaluated[inner] = inside_count\n result += inside_count * quantity + quantity\n\n evaluated[bag] = result\n return result\n\nif __name__ == \"__main__\":\n rules = dict()\n\n with open(\"input/input.txt\", \"r\") as f:\n for line in f:\n inner = dict()\n\n line = line.strip()\n\n ## we do a series of string splits to extract information we need\n ## we also do some data normalization for easier processing\n line = line.replace(\"bags\", \"bag\").replace(\".\", \"\")\n\n ## we divide the information into two;\n ## bigger bag --> smaller bags\n lhs, rhs = [l.strip() for l in line.split(\"contain\")]\n\n ## process the left hand side first\n ## we really just need to extract the bag color\n lhs = lhs.replace(\"bag\", \"\").strip()\n\n if rhs.startswith(\"no other\"):\n rules[lhs] = inner\n continue\n\n ## process the right hand side\n ## this can be a list of bag colors with quantity so we need to store that\n rhs = [l.strip() for l in rhs.split(\",\")]\n \n for rule in rhs:\n ## throw away the phrase \"bag\" since it's not important\n rule = rule.split(\" \")[:-1]\n bag_type = f\"{rule[1]} {rule[2]}\"\n bag_quantity = int(rule[0])\n \n inner[bag_type] = bag_quantity\n\n rules[lhs] = inner\n \n bp = BagPolicy(rules)\n\n possible_bags = len(bp.find_possible_bags(\"shiny gold\"))\n\n print(f\"Part 1 solution is {possible_bags}\")\n\n total_bags = bp.find_total_bags(\"shiny gold\")\n\n print(f\"Part 2 solution is {total_bags}\")\n","sub_path":"day07/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"441276687","text":"import math\n\nclass FibonacciTree:\n def __init__(self, value):\n self.value = value\n self.child = []\n self.order = 0\n self.parent = None\n\n # ajouter l'arbre a la fin\n def add_tree(self, tree):\n self.child.append(tree)\n self.order = self.order + 1 \n\n\nclass FibonacciHeap:\n def __init__(self):\n self.trees = []\n self.least = None\n self.head = None\n self.count = 0\n\n # Ajoute une valeur dans l'arbre\n def insert(self, value):\n new_tree = FibonacciTree(value)\n self.trees.append(new_tree)\n if (self.least is None or value < self.least.value):\n self.least = new_tree \n if (self.head is None or value > self.head.value):\n self.head = new_tree \n self.count = self.count + 1\n\n # Retourne la valeur minimum dans l'arbre\n def find_min(self):\n if self.least is None:\n return None\n return self.least.value \n\n # Supprime et retourne la valeur minimum dans l'arbre\n def delete_min(self):\n small = self.least\n if small is not None:\n for child in small.child:\n 
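# promote every child of the removed minimum back into the root list\n                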
self.trees.append(child)\n self.trees.remove(small)\n if self.trees == []:\n self.least = None\n else:\n self.least = self.trees[0]\n self.consolidate()\n self.count = self.count - 1\n return small.value\n\n # Retourne la valeur maximum dans l'arbre\n def find_max(self):\n if self.head is None:\n return None\n return self.head.value \n\n def consolidate(self):\n aux = (floor(self.count) + 1) * [None]\n\n while self.trees != []:\n x = self.trees[0]\n order = x.order\n self.trees.remove(x)\n while aux[order] is not None:\n y = aux[order]\n if x.value > y.value:\n x, y = y, x\n x.add_tree(y)\n aux[order] = None\n order = order + 1\n aux[order] = x\n\n self.least = None\n for k in aux:\n if k is not None:\n self.trees.append(k)\n if self.least is None or k.value < self.least.value:\n self.least = k\n\n # Fusionne deux arbres\n def merge (self, heap_1, heap_2):\n if heap_1 is None: heap_1 = []\n if heap_2 is None: heap_2 = []\n if type(heap_1) is not type([]):\n heap_1 = [heap_1]\n if type(heap_2) is not type([]):\n heap_2 = [heap_2]\n return heap_1 + heap_2 \n\ndef floor(x):\n return math.frexp(x)[1] - 1 \n\n\nfibonacci_heap = FibonacciHeap()\n\nfibonacci_heap.insert(7)\nfibonacci_heap.insert(3)\nfibonacci_heap.insert(17)\nfibonacci_heap.insert(24)\nfibonacci_heap.insert(12)\nfibonacci_heap.insert(4)\n\nprint('Valeur minimum: ' + format(fibonacci_heap.find_min()))\n\nprint('Valeur maximum: ' + format(fibonacci_heap.find_max()))\n\nfibonacci_heap.delete_min()\n\nprint('Nouvelle valeur minimum: ' + format(fibonacci_heap.find_min()))\n\nprint(fibonacci_heap.merge(3,2))","sub_path":"fibonacci_heap.py","file_name":"fibonacci_heap.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"364297994","text":"\"\"\"DjangoLogin URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path,include,re_path\nfrom LoginUser.views import *\nfrom django.views.decorators.csrf import csrf_exempt\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('loginuser/',include(\"LoginUser.urls\")),\n    path(\"goods_list/\",goods_list),\n    re_path(\"goods_list/(?P\\d{0,1})/(?P\\d+)/\",goods_list),\n    re_path(\"goods_list_api/(?P\\d{0,1})/(?P\\d+)/\",goods_list_api),\n    re_path(\"goods_status/(?P\\w+)/(?P\\d+)/\",goods_status),\n    path(\"goodsview/\",csrf_exempt(GoodsView.as_view())),\n    path(\"personinfo/\",PersonInfo),\n\n]\n\nfrom rest_framework import routers\nrouter = routers.DefaultRouter() ## define a router collection\nrouter.register(\"goods\",GoodsViewsSet) ### register the restful viewset's routes on the router\nrouter.register(\"user\",UserViewsSet) ### register the restful viewset's routes on the router\n\nurlpatterns += [\n    re_path(\"^API/\", include(router.urls))\n]\n\n","sub_path":"DjangoLogin/DjangoLogin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"371150645","text":"#!/usr/bin/python3\n\"\"\"Module creates class Student\"\"\"\n\n\nclass Student:\n    \"\"\"Student class with public instance attributes\n\n    Instance Attributes:\n        first_name: first name of student\n        last_name: last name of student\n        age: age of student\n    \"\"\"\n\n    def __init__(self, first_name, last_name, age):\n        \"\"\"Instantiates public instance attributes\"\"\"\n        self.first_name = first_name\n        self.last_name = last_name\n        self.age = age\n\n    def to_json(self, attrs=None):\n        \"\"\"Returns dictionary representation of instance\n\n        Params:\n            attrs: attributes to retrieve.\n                if not a 
list of strings, retrieve all attributes\n \"\"\"\n if type(attrs) is not list:\n return self.__dict__\n filtered = {}\n for a in attrs:\n value = getattr(self, a, None)\n if value is None:\n continue\n filtered[a] = value\n return filtered\n","sub_path":"0x0B-python-input_output/12-student.py","file_name":"12-student.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"442479364","text":"# pylint: disable=protected-access, unused-argument, no-value-for-parameter\n\n__copyright__ = 'Copyright 2020-2022, The RADICAL-Cybertools Team'\n__license__ = 'MIT'\n\nimport glob\nimport os\nimport shutil\n\nimport radical.utils as ru\n\nfrom unittest import TestCase, mock\n\nfrom radical.pilot.session import Session\n\n\n# ------------------------------------------------------------------------------\n#\nclass TestSession(TestCase):\n\n _cleanup_files = []\n\n # --------------------------------------------------------------------------\n #\n @classmethod\n @mock.patch.object(Session, '_initialize_primary', return_value=None)\n @mock.patch.object(Session, '_get_logger')\n @mock.patch.object(Session, '_get_profiler')\n @mock.patch.object(Session, '_get_reporter')\n def setUpClass(cls, *args, **kwargs) -> None:\n\n cls._session = Session()\n cls._cleanup_files.append(cls._session.uid)\n\n # --------------------------------------------------------------------------\n #\n @classmethod\n def tearDownClass(cls) -> None:\n\n for p in cls._cleanup_files:\n for f in glob.glob(p):\n if os.path.isdir(f):\n try:\n shutil.rmtree(f)\n except OSError as e:\n print('[ERROR] %s - %s' % (e.filename, e.strerror))\n else:\n os.unlink(f)\n\n # --------------------------------------------------------------------------\n #\n def test_list_resources(self):\n\n listed_resources = self._session.list_resources()\n\n self.assertIsInstance(listed_resources, list)\n self.assertIn('local.localhost', listed_resources)\n\n # --------------------------------------------------------------------------\n #\n def test_get_resource_config(self):\n\n rcfg_label = 'access.bridges2'\n\n # schemas are [\"ssh\", \"gsissh\"]\n rcfg = self._session.get_resource_config(rcfg_label)\n self.assertEqual(rcfg.job_manager_endpoint,\n rcfg[rcfg.schemas[0]].job_manager_endpoint)\n new_schema = 'gsissh'\n rcfg = self._session.get_resource_config(rcfg_label, schema=new_schema)\n self.assertEqual(rcfg.job_manager_endpoint,\n rcfg[new_schema].job_manager_endpoint)\n\n # check exceptions\n\n with self.assertRaises(RuntimeError):\n self._session.get_resource_config(resource='wrong_domain.host')\n\n with self.assertRaises(RuntimeError):\n self._session.get_resource_config(resource='local.wrong_host')\n\n with self.assertRaises(RuntimeError):\n self._session.get_resource_config(\n resource='local.localhost', schema='wrong_schema')\n\n # --------------------------------------------------------------------------\n #\n @mock.patch.object(Session, '_initialize_primary', return_value=None)\n @mock.patch.object(Session, '_get_logger')\n @mock.patch.object(Session, '_get_profiler')\n @mock.patch.object(Session, '_get_reporter')\n @mock.patch('radical.pilot.session.ru.Config')\n def test_resource_schema_alias(self, mocked_config, *args, **kwargs):\n\n mocked_config.return_value = ru.TypedDict({\n 'local': {\n 'test': {\n 'schemas' : ['schema_origin',\n 'schema_alias',\n 'schema_alias_alias'],\n 'schema_origin' : {'param_0': 'value_0'},\n 'schema_alias' : 'schema_origin',\n 
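# the next entry chains two aliases (alias -> alias -> origin)\n                    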
'schema_alias_alias': 'schema_alias'\n }\n }\n })\n\n s_alias = Session()\n\n self.assertEqual(\n s_alias._rcfgs.local.test.schema_origin,\n s_alias._rcfgs.local.test.schema_alias)\n self.assertEqual(\n s_alias._rcfgs.local.test.schema_origin,\n s_alias._rcfgs.local.test.schema_alias_alias)\n self.assertEqual(\n s_alias.get_resource_config('local.test', 'schema_origin'),\n s_alias.get_resource_config('local.test', 'schema_alias_alias'))\n\n self._cleanup_files.append(s_alias.uid)\n\n with self.assertRaises(KeyError):\n # schema alias refers to unknown schema\n mocked_config.return_value = ru.TypedDict({\n 'local': {\n 'test': {\n 'schemas' : ['schema_alias_error'],\n 'schema_alias_error': 'unknown_schema'\n }\n }\n })\n Session()\n\n # --------------------------------------------------------------------------\n #\n @mock.patch.object(Session, 'created', return_value=0)\n @mock.patch.object(Session, 'closed', return_value=0)\n def test_close(self, mocked_closed, mocked_created):\n\n # check default values\n self.assertFalse(self._session._close_options.cleanup)\n self.assertFalse(self._session._close_options.download)\n self.assertTrue(self._session._close_options.terminate)\n\n # only `True` values are targeted\n\n self._session._closed = False\n self._session.close(cleanup=True)\n self.assertTrue(self._session._close_options.cleanup)\n\n self._session._closed = False\n self._session.fetch_json = mock.Mock()\n self._session.fetch_profiles = mock.Mock()\n self._session.fetch_logfiles = mock.Mock()\n self._session.close(download=True)\n self._session.fetch_json.assert_called()\n self._session.fetch_profiles.assert_called()\n self._session.fetch_logfiles.assert_called()\n\n self._session._closed = False\n self._session.close(cleanup=True, terminate=True)\n\n # --------------------------------------------------------------------------\n #\n def test_get_resource_sandbox(self):\n\n pilot = {'uid' : 'pilot.0000',\n 'description': {}}\n\n with self.assertRaises(ValueError):\n # `PilotDescription.resource` is not provided\n self._session._get_resource_sandbox(pilot=pilot)\n\n # check `default_remote_workdir` handling\n\n # ORNL: split `project` by \"_\"\n pilot['description'].update({'resource': 'ornl.summit',\n 'project' : 'PROJNAME_machine'})\n self.assertIn('/projname/',\n self._session._get_resource_sandbox(pilot).path)\n self._session._cache['resource_sandbox'] = {}\n\n # ORNL: no any splitting\n pilot['description'].update({'resource': 'ornl.summit',\n 'project' : 'PROJNAME'})\n self.assertIn('/projname/',\n self._session._get_resource_sandbox(pilot).path)\n self._session._cache['resource_sandbox'] = {}\n\n # NCSA: split `project` by \"-\"\n pilot['description'].update({'resource': 'ncsa.delta',\n 'project' : 'bbka-delta-cpu'})\n self.assertIn('/bbka/',\n self._session._get_resource_sandbox(pilot).path)\n self._session._cache['resource_sandbox'] = {}\n\n # NCSA: no splitting\n pilot['description'].update({'resource': 'ncsa.delta',\n 'project' : 'bbka_wrongsplitter'})\n self.assertNotIn('/bbka/',\n self._session._get_resource_sandbox(pilot).path)\n self._session._cache['resource_sandbox'] = {}\n\n\n# ------------------------------------------------------------------------------\n#\nif __name__ == '__main__':\n\n tc = TestSession()\n tc.test_list_resources()\n tc.test_get_resource_config()\n tc.test_resource_schema_alias()\n tc.test_get_resource_sandbox()\n\n# 
------------------------------------------------------------------------------\n\n","sub_path":"tests/component_tests/test_session.py","file_name":"test_session.py","file_ext":"py","file_size_in_byte":7880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"485741609","text":"import torch\r\nfrom ..losses import KL\r\n\r\n\r\nclass FGM(object):\r\n    '''\r\n    Example\r\n    # initialize\r\n    fgm = FGM(model,epsilon=1,emb_name='word_embeddings.')\r\n    for batch_input, batch_label in processor:\r\n        # normal training\r\n        loss = model(batch_input, batch_label)\r\n        loss.backward() # backward pass to get the normal grad\r\n        # adversarial training\r\n        fgm.attack() # add adversarial perturbation to the embedding\r\n        loss_adv = model(batch_input, batch_label)\r\n        loss_adv.backward() # backward pass, accumulating the adversarial gradient on top of the normal grad\r\n        fgm.restore() # restore the embedding parameters\r\n        # gradient descent, update the parameters\r\n        optimizer.step()\r\n        model.zero_grad()\r\n    '''\r\n\r\n    def __init__(self, model, emb_name, epsilon=1.0):\r\n        # emb_name should be the name of the embedding parameter in your model\r\n        self.model = model\r\n        self.epsilon = epsilon\r\n        self.emb_name = emb_name\r\n        self.backup = {}\r\n\r\n    def attack(self):\r\n        for name, param in self.model.named_parameters():\r\n            if param.requires_grad and self.emb_name in name:\r\n                self.backup[name] = param.data.clone()\r\n                norm = torch.norm(param.grad)\r\n                if norm != 0 and not torch.isnan(norm):\r\n                    r_at = self.epsilon * param.grad / norm\r\n                    param.data.add_(r_at)\r\n\r\n    def restore(self):\r\n        for name, param in self.model.named_parameters():\r\n            if param.requires_grad and self.emb_name in name:\r\n                assert name in self.backup\r\n                param.data = self.backup[name]\r\n        self.backup = {}\r\n\r\n\r\nclass PGD(object):\r\n    '''\r\n    Example\r\n    pgd = PGD(model,emb_name='word_embeddings.',epsilon=1.0,alpha=0.3)\r\n    K = 3\r\n    for batch_input, batch_label in processor:\r\n        # normal training\r\n        loss = model(batch_input, batch_label)\r\n        loss.backward() # backward pass to get the normal grad\r\n        pgd.backup_grad()\r\n        # adversarial training\r\n        for t in range(K):\r\n            pgd.attack(is_first_attack=(t==0)) # add adversarial perturbation to the embedding; back up param.processor on the first attack\r\n            if t != K-1:\r\n                model.zero_grad()\r\n            else:\r\n                pgd.restore_grad()\r\n            loss_adv = model(batch_input, batch_label)\r\n            loss_adv.backward() # backward pass, accumulating the adversarial gradient on top of the normal grad\r\n        pgd.restore() # restore the embedding parameters\r\n        # gradient descent, update the parameters\r\n        optimizer.step()\r\n        model.zero_grad()\r\n    '''\r\n\r\n    def __init__(self, model, emb_name, epsilon=1., alpha=0.3):\r\n        # emb_name should be the name of the embedding parameter in your model\r\n        self.model = model\r\n        self.emb_name = emb_name\r\n        self.epsilon = epsilon\r\n        self.alpha = alpha\r\n        self.emb_backup = {}\r\n        self.grad_backup = {}\r\n\r\n    def attack(self, is_first_attack=False):\r\n        for name, param in self.model.named_parameters():\r\n            if param.requires_grad and self.emb_name in name:\r\n                if is_first_attack:\r\n                    self.emb_backup[name] = param.data.clone()\r\n                norm = torch.norm(param.grad)\r\n                if norm != 0:\r\n                    r_at = self.alpha * param.grad / norm\r\n                    param.data.add_(r_at)\r\n                    param.data = self.project(name, param.data, self.epsilon)\r\n\r\n    def restore(self):\r\n        for name, param in self.model.named_parameters():\r\n            if param.requires_grad and self.emb_name in name:\r\n                assert name in self.emb_backup\r\n                param.data = self.emb_backup[name]\r\n        self.emb_backup = {}\r\n\r\n    def project(self, param_name, param_data, epsilon):\r\n        r = param_data - self.emb_backup[param_name]\r\n        if torch.norm(r) > epsilon:\r\n            r = epsilon * r / torch.norm(r)\r\n        return self.emb_backup[param_name] + r\r\n\r\n    def backup_grad(self):\r\n        for name, param in self.model.named_parameters():\r\n            if param.requires_grad:\r\n                
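# snapshot each gradient so it can be restored after the adversarial steps\r\n                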
self.grad_backup[name] = param.grad.clone()\r\n\r\n def restore_grad(self):\r\n for name, param in self.model.named_parameters():\r\n if param.requires_grad:\r\n param.grad = self.grad_backup[name]\r\n\r\n\r\nclass FreeLB(object):\r\n '''\r\n https://arxiv.org/pdf/1909.11764.pdf\r\n '''\r\n\r\n def __init__(self, adv_K, adv_lr, adv_init_mag, adv_max_norm=0., adv_norm_type='l2'):\r\n self.adv_K = adv_K\r\n self.adv_lr = adv_lr\r\n self.adv_max_norm = adv_max_norm\r\n self.adv_init_mag = adv_init_mag\r\n self.adv_norm_type = adv_norm_type\r\n\r\n def attack(self, model, inputs, gradient_accumulation_steps=1):\r\n input_ids = inputs['input_ids']\r\n if isinstance(model, torch.nn.DataParallel):\r\n embeds_init = model.module.bert.embeddings.word_embeddings(input_ids)\r\n else:\r\n embeds_init = model.bert.embeddings.word_embeddings(input_ids)\r\n if self.adv_init_mag > 0:\r\n input_mask = inputs['attention_mask'].to(embeds_init)\r\n input_lengths = torch.sum(input_mask, 1)\r\n if self.adv_norm_type == \"l2\":\r\n delta = torch.zeros_like(embeds_init).uniform_(-1, 1) * input_mask.unsqueeze(2)\r\n dims = input_lengths * embeds_init.size(-1)\r\n mag = self.adv_init_mag / torch.sqrt(dims)\r\n delta = (delta * mag.view(-1, 1, 1)).detach()\r\n elif self.adv_norm_type == \"linf\":\r\n delta = torch.zeros_like(embeds_init).uniform_(-self.adv_init_mag,\r\n self.adv_init_mag) * input_mask.unsqueeze(2)\r\n else:\r\n delta = torch.zeros_like(embeds_init)\r\n for astep in range(self.adv_K):\r\n delta.requires_grad_()\r\n inputs['inputs_embeds'] = delta + embeds_init\r\n inputs['input_ids'] = None\r\n outputs = model(**inputs)\r\n loss, logits = outputs[:2] # model outputs are always tuple in transformers (see doc)\r\n loss = loss.mean() # mean() to average on multi-gpu parallel training\r\n if gradient_accumulation_steps > 1:\r\n loss = loss / gradient_accumulation_steps\r\n loss.backward()\r\n delta_grad = delta.grad.clone().detach()\r\n if self.adv_norm_type == \"l2\":\r\n denorm = torch.norm(delta_grad.view(delta_grad.size(0), -1), dim=1).view(-1, 1, 1)\r\n denorm = torch.clamp(denorm, min=1e-8)\r\n delta = (delta + self.adv_lr * delta_grad / denorm).detach()\r\n if self.adv_max_norm > 0:\r\n delta_norm = torch.norm(delta.view(delta.size(0), -1).float(), p=2, dim=1).detach()\r\n exceed_mask = (delta_norm > self.adv_max_norm).to(embeds_init)\r\n reweights = (self.adv_max_norm / delta_norm * exceed_mask + (1 - exceed_mask)).view(-1, 1, 1)\r\n delta = (delta * reweights).detach()\r\n elif self.adv_norm_type == \"linf\":\r\n denorm = torch.norm(delta_grad.view(delta_grad.size(0), -1), dim=1, p=float(\"inf\")).view(-1, 1, 1)\r\n denorm = torch.clamp(denorm, min=1e-8)\r\n delta = (delta + self.adv_lr * delta_grad / denorm).detach()\r\n if self.adv_max_norm > 0:\r\n delta = torch.clamp(delta, -self.adv_max_norm, self.adv_max_norm).detach()\r\n else:\r\n raise ValueError(\"Norm type {} not specified.\".format(self.adv_norm_type))\r\n if isinstance(model, torch.nn.DataParallel):\r\n embeds_init = model.module.bert.embeddings.word_embeddings(input_ids)\r\n else:\r\n embeds_init = model.bert.embeddings.word_embeddings(input_ids)\r\n return loss\r\n\r\n\r\nclass ALUM(object):\r\n '''\r\n Adversarial Training for Large Neural Language Models\r\n '''\r\n\r\n def __init__(self, adv_lr, adv_K, adv_var=1e-5, adv_alpha=1.0, adv_gamma=1e-6, adv_norm_type='inf'):\r\n self.adv_var = adv_var\r\n self.adv_K = adv_K\r\n self.adv_lr = adv_lr\r\n self.adv_gamma = adv_gamma\r\n self.adv_alpha = adv_alpha\r\n self.adv_norm_type = 
adv_norm_type\r\n self.kl = KL()\r\n\r\n def adv_project(self, grad, eps=1e-6):\r\n if self.adv_norm_type == 'l2':\r\n direction = grad / (torch.norm(grad, dim=-1, keepdim=True) + eps)\r\n elif self.adv_norm_type == 'l1':\r\n direction = grad.sign()\r\n else:\r\n direction = grad / (grad.abs().max(-1, keepdim=True)[0] + eps)\r\n return direction\r\n\r\n def attack(self, model, inputs, gradient_accumulation_steps=1):\r\n input_ids = inputs['input_ids']\r\n outputs = model(**inputs)\r\n loss, logits = outputs[:2]\r\n if isinstance(model, torch.nn.DataParallel):\r\n embeds_init = model.module.bert.embeddings.word_embeddings(input_ids)\r\n else:\r\n embeds_init = model.bert.embeddings.word_embeddings(input_ids)\r\n input_mask = inputs['attention_mask'].to(embeds_init)\r\n delta = torch.zeros_like(embeds_init).normal_(0, 1) * self.adv_var * input_mask.unsqueeze(2)\r\n for astep in range(self.adv_K):\r\n delta.requires_grad_()\r\n inputs['inputs_embeds'] = delta + embeds_init\r\n inputs['input_ids'] = None\r\n adv_outputs = model(**inputs)\r\n adv_logits = adv_outputs[1] # model outputs are always tuple in transformers (see doc)\r\n\r\n adv_loss = self.kl(adv_logits, logits.detach())\r\n delta_grad, = torch.autograd.grad(adv_loss, delta, only_inputs=True)\r\n adv_direct = self.adv_project(delta_grad, eps=self.adv_gamma)\r\n\r\n inputs['inputs_embeds'] = embeds_init + adv_direct * self.adv_lr\r\n outputs = model(**inputs)\r\n adv_loss_f = self.kl(outputs[1], logits.detach())\r\n adv_loss_b = self.kl(logits, outputs[1].detach())\r\n adv_loss = (adv_loss_f + adv_loss_b) * self.adv_alpha\r\n loss = loss + adv_loss\r\n loss = loss.mean() # mean() to average on multi-gpu parallel training\r\n if gradient_accumulation_steps > 1:\r\n loss = loss / gradient_accumulation_steps\r\n loss.backward()\r\n if isinstance(model, torch.nn.DataParallel):\r\n embeds_init = model.module.bert.embeddings.word_embeddings(input_ids)\r\n else:\r\n embeds_init = model.bert.embeddings.word_embeddings(input_ids)\r\n return loss\r\n","sub_path":"torchblocks/callback/adversarial.py","file_name":"adversarial.py","file_ext":"py","file_size_in_byte":10673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"104250067","text":"class Solution:\n def calculate(self, s: str) -> int:\n \"\"\"\n Time Complexity #\n The time complexity of the above algorithm will be O(N) where ‘N’\n is the number of characters in the input string.\n\n Space Complexity #\n O(1): Since we are only using variables such as res which occupies\n constant space.\n \"\"\"\n i = 0\n cur = prev = res = 0\n cur_operation = '+'\n\n while i < len(s):\n cur_char = s[i]\n\n # if cur_char is a digit\n if cur_char.isdigit():\n while i < len(s) and s[i].isdigit():\n cur = cur * 10 + int(s[i])\n i += 1\n i -= 1\n\n if cur_operation == '+':\n res += cur\n prev = cur\n elif cur_operation == '-':\n res -= cur\n prev = -cur\n elif cur_operation == '*':\n res -= prev\n res += prev * cur\n prev = cur * prev\n else:\n res -= prev\n res += int(prev / cur)\n prev = int(prev/cur)\n\n cur = 0\n # If any operator\n elif cur_char != ' ':\n cur_operation = cur_char\n i += 1\n return res\n\n\nclass Solution:\n def calculate(self, s: str) -> int:\n \"\"\"\n Time Complexity #\n The time complexity of the above algorithm will be O(N) where ‘N’\n is the number of characters in the input string.\n\n Space Complexity #\n O(D): If D is the total number of digits in the string, then space\n complexity will be O(D). 
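Stepping back to the adversarial-training record above: FreeLB.attack runs adv_K inner ascent steps on the word embeddings and calls loss.backward() on each, so gradients accumulate inside the call and the optimizer step belongs outside it. A minimal sketch of wiring it into one outer training step — model, batch and optimizer are placeholder names, not part of the record, and a transformers-style model returning (loss, logits) when 'labels' is passed is assumed:

freelb = FreeLB(adv_K=3, adv_lr=1e-1, adv_init_mag=2e-2, adv_max_norm=1.0)

def training_step(model, batch, optimizer):
    # attack() reads 'input_ids' and 'attention_mask', then swaps
    # input_ids for inputs_embeds internally on each ascent step
    inputs = {'input_ids': batch['input_ids'],
              'attention_mask': batch['attention_mask'],
              'labels': batch['labels']}
    optimizer.zero_grad()
    loss = freelb.attack(model, inputs)   # backward() happens inside attack()
    optimizer.step()                      # apply the accumulated gradients
    return loss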
As stack should store all digits.\n \"\"\"\n\n if not s:\n return \"0\"\n num, stack, sign = 0, [], '+'\n operators = ['+', '-', '*', '/']\n for i in range(len(s)):\n if s[i].isdigit():\n num = num * 10 + int(s[i]) # Could be a number with more than 1 digit\n if s[i] in operators or i == len(s)-1:\n if sign == '+':\n stack.append(num)\n elif sign == '-':\n stack.append(-num)\n elif sign == '*':\n stack.append(stack.pop()*num)\n elif sign == '/':\n stack.append(int(stack.pop()/num))\n num = 0\n sign = s[i]\n return sum(stack)\n","sub_path":"Problems/Leetcode/227_BasicCalculatorII.py","file_name":"227_BasicCalculatorII.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"416213997","text":"# %load q05_replace_missing_values/build.py\nimport pandas as pd\nimport numpy as np\nimport sys\nimport os\n#sys.path.append(os.path.join(os.path.dirname(os.curdir)))\nfrom greyatomlib.pandas_guided_project.q04_mapping.build import q04_mapping\n\npath1 = 'data/excel-comp-data.xlsx'\npath2 = 'data/scraped.csv'\n# def q05_replace_missing_values(path1,path2):\ndef q01_load_data(path):\n data = pd.read_excel(path)\n\n data['state'] = data.state.str.lower()\n data['total'] = data[['Jan','Feb','Mar']].sum(axis=1)\n\n return data\n\ndef q02_append_row(path):\n\n df = q01_load_data(path)\n sum_row = df[['Jan', 'Feb', 'Mar', 'total']].sum()\n df_sum = pd.DataFrame(data=sum_row).T\n df_final = df.append(df_sum,ignore_index = True)\n return df_final\ndef q04_mapping(path1,path2):\n \n a = pd.read_excel(path1)\n b = pd.read_csv(path2)\n b['United States of America'] = b['United States of America'].astype(str).str.lower()\n b['US'] = b['US'].astype(str)\n mapping = b.set_index('United States of America').to_dict()['US']\n mapping['mississipi']=mapping.pop('mississippi')\n mapping['tenessee']=mapping.pop('tennessee')\n new_df = q02_append_row(path1)\n# new['abbr'] = np.nan\n new_df.insert(loc = 6,column = 'abbr', value = np.nan)\n new_df['state'] = new_df['state'].map(mapping)\n return new_df\ndef q05_replace_missing_values(path1,path2):\n df_replace = q04_mapping(path1,path2)\n df_mississippi = pd.DataFrame(df_replace.iloc[6,:]).T\n df_tenessee = pd.DataFrame(df_replace.iloc[10,:]).T\n df_tenessee = df_tenessee.replace(np.nan,'TN')\n df_mississippi = df_mississippi.replace(np.nan,'MS')\n df_replace.iloc[6,6] = 'MS'\n df_replace.iloc[10,6] = 'TN'\n return df_replace\n\nq05_replace_missing_values(path1,path2)\n#print(q05_replace_missing_values(path1,path2).shape)\n\n\n","sub_path":"q05_replace_missing_values/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"236757013","text":"# coding=utf8\n\"\"\"\nbuild linear regression model\n\"\"\"\nfrom torch.nn import Module\nfrom torch.nn import Linear\nfrom torch.utils.data import DataLoader\nfrom src.data_prepare.DataRegression import DataRegression\nimport torch\n\ntotal_epoch = 10\nbatch_size = 10\nlearn_rate = 0.05\nbatch_print = 2\n\n\nclass LinearRegression(Module):\n def __init__(self, in_features, out_features):\n super(LinearRegression, self).__init__()\n self.fc = Linear(in_features, out_features)\n\n def forward(self, x):\n x = self.fc(x)\n return x\n\n\nclass RawLinearRegression:\n def __init__(self, in_features, out_features):\n self.weights = torch.randn(in_features, dtype=torch.float) # 3*1\n self.bias = torch.randn(out_features, 
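Back in the calculator record above: both variants divide with int(prev / cur) rather than prev // cur because the problem requires truncation toward zero, while Python's floor division rounds toward negative infinity. A quick check:

# truncation toward zero vs. floor division for a negative intermediate value
assert int(-7 / 2) == -3     # what the calculator needs
assert -7 // 2 == -4         # floor division would be off by one here
# e.g. evaluating "0-7/2" must give 0 - 3 = -3, not -4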
dtype=torch.float) # 1*1\n\n def forward(self, x):\n # 记住一定要加偏置项\n pred_y = torch.matmul(x, self.weights) + self.bias # 100*3 3*1\n return pred_y\n\n @staticmethod\n def loss(pred_y, true_y):\n loss = torch.mean(torch.pow(pred_y - true_y, 2))\n return torch.sqrt(loss)\n\n def sgd(self, pred_y, true_y, true_x, lr=0.01):\n sample_size = true_y.size()[0]\n weights_grad = torch.matmul(true_y - pred_y, true_x)\n self.weights.add_(lr*weights_grad/sample_size)\n bias_grad = torch.sum(true_y - pred_y)\n self.bias.add_(lr*bias_grad/sample_size)\n\n\ndef train_raw():\n # prepare train data 准备数据集\n regression_helper = RawLinearRegression(3, 1)\n data_regression = DataRegression()\n train_data_loader = DataLoader(data_regression, batch_size)\n train_loss = []\n for i in range(1, total_epoch + 1):\n for data_x, data_y in train_data_loader:\n pred_y = regression_helper.forward(data_x)\n loss = RawLinearRegression.loss(pred_y, data_y)\n regression_helper.sgd(pred_y, data_y, data_x, learn_rate)\n train_loss.append(loss)\n if i % batch_print == 0:\n print(\"Loss = \", train_loss[-1])\n\n print(regression_helper.weights, regression_helper.bias)\n\n\ndef train_auto_grad():\n # prepare train data 准备数据集\n data_regression = DataRegression()\n train_data_loader = DataLoader(data_regression, batch_size)\n\n # prepare loss function 定义损失需要有括号\n loss_fn = torch.nn.MSELoss()\n\n # prepare model\n # linear_regression = LinearRegression(4, 1)\n linear_regression = LinearRegression(3, 1)\n\n # optimizer 优化器需要接受模型的参数\n optimizer = torch.optim.SGD(linear_regression.parameters(), lr=learn_rate)\n\n train_loss = []\n loss = None\n for i in range(1, total_epoch+1):\n for ind, batch_data in enumerate(train_data_loader):\n data_x = batch_data[0]\n data_y = batch_data[1].view(1, -1)\n # 注意顺序 优化器梯度归零 自动会将模型梯度归零\n optimizer.zero_grad()\n\n # 损失反向传播 用框架定义的损失函数\n predict_y = linear_regression(data_x).view(1, -1)\n loss = loss_fn(predict_y, data_y)\n loss.backward()\n\n # 优化器走一步 更新模型的参数\n optimizer.step()\n train_loss.append(loss)\n if i % batch_print == 0:\n print(\"Loss = \", loss)\n for f in linear_regression.parameters():\n print(f)\n\n # 在验证集的效果\n \n\nif __name__ == '__main__':\n train_raw()\n","sub_path":"src/algorithm/LinearRegression.py","file_name":"LinearRegression.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"646961332","text":"import os\nimport sys\nimport datafile \nimport nasm\nimport symboltable\nimport pickle\n\nimport removejumps\n\ndef checkbranching(name) :\n if name in ['jump','print','append', 'call', 'goto', 'label:', 'jg', 'jl', 'jle', 'jge' ,'je', 'jne','ret','ret1','ret2','printstr','pusharg2','pushaddr', 'fopen', 'fwrite', 'fread', 'fclose', 'deletetail']:\n return True\n else:\n return False\ndef default(name1,name2):\n if name1 == \"pusharg\" and name2 == \"DEFAULT\":\n return False\n else:\n return True\n\ndef Size(type1):\n if type1==\"INT\":\n return 4\n elif type1==\"CHAR\":\n return 2\n elif type1==\"BYTE\":\n return 1\n elif type1==\"SHORT\":\n return 2\n elif type1==\"LONG\":\n return 8\n elif type1==\"FLOAT\":\n return 4\n elif type1==\"DOUBLE\":\n return 8\n elif type1==\"POINTER\":\n return 4\n elif type1==\"BOOL\":\n return 4\n elif \"ARRAY\" in type1:\n return Size(type1.replace(\"ARRAY\", ''))\n elif \"LIST\" in type1:\n return Size(type1.replace(\"LIST@\", ''))\n else:\n return 10\n\ndef checkvariable(name):\n try:\n int(name)\n return False\n except:\n try:\n float(name)\n return False\n 
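The checkvariable helper that straddles this point distinguishes identifiers from numeric literals by attempted conversion; since float() also parses integer literals, a single try suffices. A compact equivalent with the same behaviour — the helper name here is hypothetical:

def is_identifier(token):
    # names -> True; int/float literals and the DEFAULT keyword -> False
    if token == "DEFAULT":
        return False
    try:
        float(token)        # float() accepts "42" as well as "3.14"
        return False
    except ValueError:
        return True

assert is_identifier("ebp") and not is_identifier("42") and not is_identifier("3.14")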
except:\n if name == \"DEFAULT\":\n return False\n else:\n return True\n\nif __name__ == \"__main__\" :\n filename = sys.argv[1]\n with open(filename) as f:\n # RESOLVES jumps to jumps\n data = removejumps.removejumps(f.readlines())\n index = 0\n flag = 0\n scopefunc = 0\n arglength = 8\n locallength = -4\n symbolTable = pickle.load(open(\"rootScope.p\", \"rb\"))\n mainScope = symbolTable.singletonObject.functions['main'][0]\n currentScope = mainScope\n for line in data:\n index = index +1\n listvar = line.split(' ')\n node = [index]+[None]*4\n node[1:len(listvar)+1] = listvar\n node[len(listvar)] = node[len(listvar)].replace('\\n','')\n if not checkbranching(node[1]):\n for i in range(2 , (len(listvar) + 1)):\n if(checkvariable(node[i]) and default(node[1],node[2]) ):\n if(flag):\n if node[i] not in datafile.memorymap[scopefunc].keys():\n if node[1] == 'arg':\n datafile.memorymap[scopefunc][node[2]] = '['+str(arglength) + ' + ebp]'\n try:\n node[3]\n datafile.meta[node[2]] = node[3] \n except:\n pass\n arglength = arglength + Size(currentScope.LookUpVarSize(node[2])[1])\n elif (node[i] not in datafile.globalsection and node[i] not in datafile.setofList and node[i] not in datafile.setofarray and node[i] not in datafile.setofString):\n datafile.memorymap[scopefunc][node[i]] = '['+ str(locallength) + ' + ebp]'\n if node[1] == \"ARRAY\":\n if node[4] != None:\n locallength = locallength - Size(currentScope.LookUpVarSize(node[2])[1])*int(node[3])*int(node[4])\n else:\n locallength = locallength - Size(currentScope.LookUpVarSize(node[2])[1])*int(node[3])\n elif node[1] == \"LIST\":\n if node[3] != None:\n datafile.Listoffset[node[2]] = {}\n for m in range(0,int(node[3])):\n datafile.Listoffset[node[2]][str(m)] = 0 \n else: \n locallength = locallength - Size(currentScope.LookUpVarSize(node[2])[1])*int(currentScope.listdict[node[2]])\n datafile.Listoffset[node[2]] = 0\n else:\n locallength = locallength - Size(currentScope.LookUpVarSize(node[i])[1])\n else:\n if node[1] == \"ARRAY\":\n try:\n node[4]\n size = Size(currentScope.LookUpVarSize(node[2])[1])*int(node[3])*int(node[4])\n datafile.setofarray[node[i]] = size\n except:\n size = Size(currentScope.LookUpVarSize(node[2])[1])*int(node[3])\n datafile.setofarray[node[i]] = size\n elif node[1] == \"LIST\":\n if node[3] != None:\n size = Size(currentScope.LookUpVarSize(node[2])[1])\n scope = currentScope.LookUpListScope(node[2])\n datafile.Listoffset[node[2]] = {}\n datafile.setofList[node[i]] = {}\n for m in range(0,int(node[3])):\n datafile.Listoffset[node[2]][str(m)] = 0\n if m == 0:\n datafile.setofList[node[i]][str(m)] = size * int(scope.listdict[node[2]][str(m)])\n else: \n datafile.setofList[node[i]][str(m)] = size * int(scope.listdict[node[2]][str(m)]) + datafile.setofList[node[i]][str(m-1)] \n else: \n size = Size(currentScope.LookUpVarSize(node[2])[1])\n scope = currentScope.LookUpListScope(node[2])\n datafile.Listoffset[node[2]] = 0\n datafile.setofList[node[i]] = size * int(scope.listdict[node[2]])\n else:\n datafile.globalsection.add(node[i])\n datafile.allvariables.add(node[i])\n\n if node[1] == 'label:' and node[2][0:4] == \"func\":\n flag = 1\n scopefunc = node[2]\n newScope = currentScope.parent.functions[node[2][7:]][0]\n newScope.returnScope = currentScope\n currentScope = newScope\n datafile.memorymap[scopefunc] = {}\n if (node[1] == 'printstr') :\n for i in range(0,5):\n if node[i]:\n pass\n else:\n node[i] = ''\n datafile.setofString['str'+str(node[0])] = ' '.join(node[2:]).strip()\n if node[1] == 'ret':\n flag = 0\n 
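A note on the frame layout being built above: arguments are addressed at positive offsets from ebp (the first at [8 + ebp], growing upward by each argument's size) and locals at negative offsets (the first at [-4 + ebp], growing downward). A tiny sketch of that convention — the function names are illustrative only:

# assumed cdecl-style frame: [ebp+8] = first arg, [ebp-4] = first local
def next_arg_offset(bytes_of_args_so_far):
    return 8 + bytes_of_args_so_far

def next_local_offset(bytes_of_locals_so_far):
    return -4 - bytes_of_locals_so_far

assert next_arg_offset(0) == 8 and next_arg_offset(4) == 12
assert next_local_offset(0) == -4 and next_local_offset(4) == -8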
datafile.numberofarguments[scopefunc] = -arglength\n datafile.numberofvariables[scopefunc] = -locallength\n scopefunc = 0\n arglength = 8\n locallength = -4\n currentScope = currentScope.returnScope\n if node[1] == 'cmp':\n datafile.instruction.append(datafile.a3acinst(int(node[0]),node[2],node[1],node[3],node[1],None))\n continue\n if node[3] == '`':\n datafile.instruction.append(datafile.a3acinst(int(node[0]),node[2],node[1],None,'Unary',node[4]))\n continue\n if node[1] in ['je','jne','jg','jge','jl','jle','goto','pusharg','pusharg2','pushaddr','call','label:','print','printstr','read','ret', 'get', 'ret1', 'ret2']:\n if node[1] == 'pusharg2':\n for i in range(0,5):\n if node[i]:\n pass\n else:\n node[i] = ''\n datafile.setofString['str'+str(node[0])] = ' '.join(node[2:]).strip()\n datafile.instruction.append(datafile.a3acinst(int(node[0]),node[2],node[1],node[3],node[1],node[2]))\n continue\n datafile.instruction.append(datafile.a3acinst(int(node[0]),node[2],node[1],node[3],node[1],node[4]))\n\n nasm.asm()\n\n # Useful Debug information that can be printed\n # print datafile.allvariables, \"all variables\"\n # print datafile.globalsection, 'globalsection'\n # print datafile.setofarray, 'setofarray'\n \n # for inst in datafile.instruction:\n # print inst.instnumber,inst.type,inst.op1,inst.op2,inst.operator, inst.out \n # print datafile.memorymap, 'memorymap'\n # print datafile.numberofarguments, 'numberofarguments'\n # print datafile.numberofvariables, 'numberofvariabels' \n\n\n\n\n\n\n\n","sub_path":"milestone4/irparser.py","file_name":"irparser.py","file_ext":"py","file_size_in_byte":8634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"71944118","text":"from SlackClient import SlackClient\n\nclass SlackGroups(SlackClient):\n\n def __init__(self):\n\n self.channel = None\n self.name = None\n self.team_id = None\n self.validate = None\n\n self.count = None\n self.inclusive = None\n self.latest = None\n self.oldest = None\n self.unreads = None\n self.user = None\n\n self.cursor = None\n self.exclude_archived = None\n self.exclude_members = None\n self.limit = None\n\n self.ts = None\n\n self.purpose = None\n\n self.topic = None\n\n\n def generate_queries(self):\n\n body = {}\n\n if self.topic != None:\n body['topic'] = self.topic\n if self.purpose != None:\n body['purpose'] = self.purpose\n if self.ts != None:\n body['ts'] = self.ts\n if self.cursor != None:\n body['cursor'] = self.cursor\n if self.exclude_archived != None:\n body['exclude_archived'] = self.exclude_archived\n if self.exclude_members != None:\n body['exclude_members'] = self.exclude_members\n if self.limit != None:\n body['limit'] = self.limit\n if self.user != None:\n body['user'] = self.user\n if self.count != None:\n body['count'] = self.count\n if self.inclusive != None:\n body['inclusive'] = self.inclusive\n if self.latest != None:\n body['latest'] = self.latest\n if self.oldest != None:\n body['oldest'] = self.oldest\n if self.unreads != None:\n body['unreads'] = self.unreads\n if self.name != None:\n body['name'] = self.name\n if self.team_id != None:\n body['team_id'] = self.team_id\n if self.validate != None:\n body['validate'] = self.validate\n if self.channel != None:\n body['channel'] = self.channel\n return body\n\n def clear_queries(self):\n\n self.count = None\n self.inclusive = None\n self.latest = None\n self.oldest = None\n self.unreads = None\n self.name = None\n self.team_id = None\n self.validate = None\n self.channel = None\n self.ts = None\n 
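The SlackGroups class ending here builds its query dict with one if-branch per field; the same behaviour falls out of a loop over the field names. A sketch, with the field list copied from the branches above:

def generate_queries(self):
    fields = ('topic', 'purpose', 'ts', 'cursor', 'exclude_archived',
              'exclude_members', 'limit', 'user', 'count', 'inclusive',
              'latest', 'oldest', 'unreads', 'name', 'team_id',
              'validate', 'channel')
    # include only the parameters that have been set on the instance
    return {f: getattr(self, f) for f in fields if getattr(self, f) is not None}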
self.purpose = None\n self.topic = None","sub_path":"SlackGroups.py","file_name":"SlackGroups.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"166931989","text":"import re\n\ndef comparewords(word1, word2):\n kof = 0\n if word1[0]==word2[0]:\n kof+=10\n if word1[-1]==word2[-1]:\n kof+=10\n L1 = len(word1)\n L2 = len(word2)\n kof += (L1/L2)*30 if L1<= L2 else (L2/L1)*30\n \n mn1 = set(word1)\n mn2 = set(word2)\n N1 = len(mn1|mn2)\n N2 = len(mn1&mn2)\n \n kof += (N2/N1)*50\n \n return kof\n\n\n\ndef find_word(message):\n words = re.findall(r\"[\\w]+\", message.lower())[::-1]\n \n arr = [[] for i in range(len(words)) ]\n for i in range(len(words)):\n for j in range(len(words)):\n if i!=j:\n arr[i] += [comparewords(words[i],words[j])]\n else:\n arr[i] += [0]\n \n means = [ sum(x)/len(x) for x in arr ]\n maxindex = means.index(max(means))\n return words[maxindex]\n\n\nif __name__ == '__main__':\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert find_word(\"Speak friend and enter.\") == \"friend\", \"Friend\"\n assert find_word(\"Beard and Bread\") == \"bread\", \"Bread is Beard\"\n assert find_word(\"The Doors of Durin, Lord of Moria. Speak friend and enter. \"\n \"I Narvi made them. Celebrimbor of Hollin drew these signs\") == \"durin\", \"Durin\"\n assert find_word(\"Aoccdrnig to a rscheearch at Cmabrigde Uinervtisy.\"\n \" According to a researcher at Cambridge University.\") == \"according\", \"Research\"\n assert find_word(\"One, two, two, three, three, three.\") == \"three\", \"Repeating\"\n","sub_path":"problems/Checkio/Electronic Station/MoriaDors.py","file_name":"MoriaDors.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"496819310","text":"from flask import Flask\napplication = Flask(__name__)\n\n@application.route(\"/\")\n#def hello():\n# return \"Hello World!123\"\ndef test(x): #program does nothing as written\n x = 5\n return x \n\nif __name__ == \"__main__\":\n application.run()\n","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"208069651","text":"\"\"\" messageme.web.lib.webHelpers\n\n This module provides various helper functions for the MessageMe system's\n \"web\" application.\n\"\"\"\nfrom messageme.core.api import users\n\nfrom messageme.core.api.exceptions import InvalidSessionException\n\n#############################################################################\n\ndef is_logged_in(request):\n \"\"\" Returns True if and only if the user is currently logged in.\n\n 'request' is the HttpRequest object passed to a view function. 
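Back in the Flask record above: the "/" rule is bound to test(x), but the rule defines no <x> converter, so Flask calls the view with no arguments and dispatch raises a TypeError — and a view returning the bare int 5 would fail as well. The presumed intent, made runnable:

from flask import Flask
application = Flask(__name__)

@application.route("/")
def test():
    # views must return a str/Response; the original returned an int
    return str(5)

if __name__ == "__main__":
    application.run()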
We\n return True if and only if the request includes a cookie which matches\n a current active session.\n\n Note that an ad-hoc user is considered not to be logged in.\n \"\"\"\n if \"mm_session\" not in request.COOKIES:\n return False\n\n try:\n session = request.COOKIES['mm_session']\n user = users.get(session)\n except InvalidSessionException:\n return False\n\n if user['ad_hoc']:\n return False\n else:\n return True\n\n","sub_path":"messageme/web/lib/webHelpers.py","file_name":"webHelpers.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"534897199","text":"# code to read simulated data from a run\nimport matplotlib as mpl\nmpl.use('Agg')\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport os\nimport subprocess\nos.chdir('/home/bepp/michaudp/rise/runtime')\n# scenario\nscenarios = ['copay95','copay75','copay50','copay25']\nscnnames = ['$\\overline{m}(95)$','$\\overline{m}(75)$','$\\overline{m}(50)$','$\\overline{m}(25)$']\ncopay = [0.95,0.75,0.5,0.25]\nenames = ['e(95)','e(75)','e(50)']\nelatex = ['$\\epsilon_p(95)$','$\\epsilon_p(75)$','$\\epsilon_p(50)$']\n# setting directory\n# variable for which plots are required by hlth\nvarnames = ['id','age','byear','wealth','work','claim','income','insurance','ame',\n 'cons', 'dead','health','base_health','oop','medexp','value','tr']\nagenames = ['30-34']\nfor i in range(35,85,5):\n\tlab = [str(i)+'-'+str(i+4)]\n\tagenames = agenames + lab\n\nirun = 0\nss = 0\nresult =pd.DataFrame(index=agenames,columns=scenarios)\n\nfor scn in scenarios :\n\t# running scenario\n\tif (irun==1) :\n\t\t# Calling executable\n\t\tfortran = '/opt/openmpi/intel/bin/mpirun --hostfile mpi_hosts.txt ./generate '+scn\n\t\tps = subprocess.Popen(fortran,shell=True,stdin=subprocess.PIPE)\n\t\tps.communicate() #now wait\n\t# load data\n\tdf = pd.read_csv('../output/simulation/simulated_'+scn+'.csv',header=None,names=varnames,sep=',',index_col=False)\n\tdf = df[(df['age']>=30) & (df['age']<85)]\n\tdf['age5'] = pd.cut(df['age'], 11,labels=agenames)\n\tdf['copay'] = copay[ss]\n\tresult[scn] = df.groupby(['age5'])['medexp'].mean()\n\tif (ss!=0):\n\t\tlastscn = scenarios[ss-1]\n\t\tc = copay[ss-1]*100\n\t\tvar = 'e('+str(int(c))+')'\n\t\tdp = (copay[ss] - copay[ss-1])/(copay[ss] + copay[ss-1])\n\t\tresult[var] = (result[scn] - result[lastscn])/(result[scn]+result[lastscn])\n\t\tresult[var] = result[var]/dp\n\tss +=1\n\n\ntex = open('../tables/table-price-elasticity.tex', \"w\")\ncol = 'l'\nfor r in range(0,len(scenarios)):\n\tcol = col + 'r'\nfor r in range(0,len(enames)):\n\tcol = col + 'r'\ntex.write('\\\\begin{tabular}{'+col+'} \\n')\ntex.write('\\hline\\hline \\n')\nbuff = 'Age & '\nii = 0\nfor i in scenarios :\n\tbuff = buff + scnnames[ii]\n\tbuff = buff + ' & '\n\tii +=1\nii = 0\nfor i in enames :\n\tbuff = buff + elatex[ii]\n\tif (ii < len(enames)-1):\n\t\tbuff = buff + ' & '\n\t\tii +=1\nbuff = buff + ' \\\\\\ \\n'\ntex.write(buff)\ntex.write('\\hline \\n')\njj = 0\nfor j in agenames :\n\tbuff = j + ' & '\n\tii = 0\n\tfor i in scenarios:\n\t\tbuff = buff + str(round(result[i].loc[j],1))\n\t\tbuff = buff + ' & '\n\t\tii += 1\n\tii = 0\n\tfor i in enames:\n\t\tbuff = buff + str(round(result[i].loc[j],3))\n\t\tif (ii < len(enames)-1): buff = buff + ' & '\n\t\tii += 1\n\tbuff = buff + ' \\\\\\ \\n'\n\ttex.write(buff)\ntex.write('\\hline\\hline \\n')\ntex.write('\\\\end{tabular} 
\\n')\ntex.close()\n","sub_path":"py/tab5-price-elasticity.py","file_name":"tab5-price-elasticity.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"615014237","text":"import enum\nimport apac\n\nSCORE = {\n 0: 30, # TODO: This is a temporary fix for to avoid implementing a cost function\n 1: 16,\n 2: 13,\n 3: 12,\n 4: 11,\n 5: 10,\n 6: 9,\n 7: 7,\n 8: 5,\n 9: 4,\n 10: 3,\n 11: 2,\n 12: 1,\n}\n\nSCORE_8LANE = {\n 0: 30, # TODO: This is a temporary fix for to avoid implementing a cost function\n 1: 20,\n 2: 17,\n 3: 16,\n 4: 15,\n 5: 14,\n 6: 13,\n 7: 12,\n 8: 11,\n 9: 9,\n 10: 7,\n 11: 6,\n 12: 5,\n 13: 4,\n 14: 3,\n 15: 2,\n 16: 1\n}\n\nSWIMMERS = ['Miles Huang', 'Curtis Wong', 'King Wah', 'Justin Choi', 'Aaron Wu', 'Frank Zhou',\n 'Alan Wang', 'Alan Sun', 'Bernard Ip', 'Kan KikuchiYuan', 'Jerry Zheng', 'Aaron Sun']\n\n\n@enum.unique\nclass SwimmingRace(enum.Enum):\n FR50m = 1\n FR100m = 2\n FR200m = 3\n FR400m = 4\n BR50m = 5\n BR100m = 6\n BA100m = 7\n BA50m = 8\n FLY50m = 9\n FLY100m = 10\n IM100m = 11\n IM200m = 12\n FRRelay4P50 = 13\n FRRelay4P100 = 14\n IMRelay4P50_FR = 15\n IMRelay4P50_BR = 16\n IMRelay4P50_BA = 17\n IMRelay4P50_FLY = 18\n\n\nclass Filter:\n ranksuite = apac.APAC(year=2017, prelims=0) # FIXME: This is not good architecture....\n skillFinder = {'FRRelay4P50': 'FR50m',\n 'FRRelay4P100': 'FR100m',\n 'IMRelay4P50_FR': 'FR50m',\n 'IMRelay4P50_BR': 'BR50m',\n 'IMRelay4P50_BA': 'BA50m',\n 'IMRelay4P50_FLY': 'FLY50m'}\n\n @staticmethod\n def rank(place):\n # this needs to be changed based on the number of lanes in the pool\n # as well as the configuration of the finals.\n if place <= 12:\n return SCORE[place]\n else:\n return 0\n\n @staticmethod\n def takeMax(df, name, event):\n \"\"\"\n df: DataFrame that contains all of the swimmer's data.\n name: The name of the swimmer that is being scored.\n event: The corresponding event taht is being score.\n This function compiles the data from the swimming table together and\n outputs a list with the name of the event and the swimmers score in that event.\n It calculates the score using the maximum function, considering all of\n the swims and taking the best one.\n :return: [event (str), score (int), time (float)]\n \"\"\"\n racesForTargetSwimmer = df[(df['Event'] == event) & (df['Name'] == name)]\n if len(racesForTargetSwimmer) == 0: # Relay event\n points, time = Filter.assessSkill(df, name, event)\n return [event, points, time]\n else:\n if len(racesForTargetSwimmer) == 1:\n time = racesForTargetSwimmer.iloc[0]['Time']\n place = Filter.ranksuite.compare(event, time)\n return [event, Filter.rank(place), time]\n else:\n minimum = racesForTargetSwimmer.loc[racesForTargetSwimmer['Time'].idxmin()]\n # COST FUNCTION RIGHT HERE\n time = minimum.loc['Time']\n place = Filter.ranksuite.compare(event, time)\n if place == 0: # Relay event if the swimmer has swam a relay event before\n p, time = Filter.assessSkill(df, name, event)\n return [event, p, time]\n return [event, Filter.rank(place), time]\n \n @staticmethod\n def takeAverage(df, name, event):\n pass\n \n @staticmethod\n def takeMin(df, name, event):\n pass\n \n @staticmethod\n def takeRecent(df, name, event):\n pass\n\n @staticmethod\n def assessSkill(df, name, event):\n \"\"\"\n event (str): the name of the event that does not have a rank.\n If you have an individual time for the FR50 or the FR100 and never \n swam in a relay before your point contribution to that relay would be \n 
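The table script that closes above reports midpoint (arc) price elasticities: the change in mean spending divided by the change in copay, each scaled by the sum of the two scenario values. A worked check with made-up numbers:

# arc elasticity, matching the (q2-q1)/(q2+q1) over (p2-p1)/(p2+p1) form above
def arc_elasticity(q1, q2, p1, p2):
    return ((q2 - q1) / (q2 + q1)) / ((p2 - p1) / (p2 + p1))

# hypothetical: mean spending falls 100 -> 90 as the copay rises 0.75 -> 0.95
print(round(arc_elasticity(100.0, 90.0, 0.75, 0.95), 3))   # -0.447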
you score in that event / 2. If you have a time in one of the 50IMs that could \n also be used when you are being seeded into the IMRelay.\n\n Rankings in relays cannot be converted and used as individual time. \n :returns: the amount of points that the swimmer would get in that event based on their skills.\n If they do not have that skill then a large negative number is used to denote the large risk / cost\n putting that swimmer in this event implies.\n \"\"\"\n if event in Filter.skillFinder.keys():\n skill = Filter.skillFinder[event]\n swimmerEvent = df[(df['Event'] == skill) & (df['Name'] == name)]\n time = swimmerEvent['Time'].iloc[0]\n if swimmerEvent.shape[0] == 0:\n # if the person does not possess this event and also does not have the skill\n return -1000, 'NT'\n place = Filter.ranksuite.compare(skill, time)\n return Filter.rank(place) / 4, time\n else:\n return -1000, 'NT'\n\n\nif __name__ == '__main__':\n print(Filter.assessSkill(0, 0, 0))\n","sub_path":"swimmer.py","file_name":"swimmer.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"163684153","text":"#!/usr/bin/python\n\n\n# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.\n#\n\n\"\"\"\nThis file contains utility functions for sending prouter and job objectlogs\nvia sandesh\n\"\"\"\n\nimport uuid\nimport time\nimport json\nimport logging\nfrom job_manager.job_log_utils import JobLogUtils\nfrom job_manager.sandesh_utils import SandeshUtils\n\n\nclass ObjectLogUtil(object):\n\n def __init__(self, job_ctx):\n self.job_ctx = job_ctx\n self.results = dict()\n self.results['failed'] = False\n logging.basicConfig(level=logging.INFO)\n self.validate_job_ctx()\n\n self.job_log_util = JobLogUtils(\n sandesh_instance_id=str(\n uuid.uuid4()), config_args=json.dumps(\n job_ctx['config_args']))\n\n def validate_job_ctx(self):\n required_job_ctx_keys = [\n 'job_template_fqname', 'job_execution_id', 'config_args',\n 'job_input']\n for key in required_job_ctx_keys:\n if key not in self.job_ctx or self.job_ctx.get(key) is None:\n raise ValueError(\"Missing job context param: %s\" % key)\n\n def send_prouter_object_log(self, prouter_fqname, onboarding_state,\n os_version, serial_num):\n self.job_log_util.send_prouter_object_log(\n prouter_fqname,\n self.job_ctx['job_execution_id'],\n json.dumps(self.job_ctx['job_input']),\n self.job_ctx['job_template_fqname'],\n onboarding_state,\n os_version,\n serial_num)\n\n def send_job_object_log(self, message, status, job_result):\n self.job_log_util.send_job_log(\n self.job_ctx['job_template_fqname'],\n self.job_ctx['job_execution_id'],\n message,\n status,\n job_result)\n\n def close_sandesh_conn(self):\n try:\n sandesh_util = SandeshUtils(self.job_log_util.get_config_logger())\n sandesh_util.close_sandesh_connection()\n except Exception as e:\n logging.error(\"Unable to close sandesh connection: %s\", str(e))\n\n","sub_path":"src/config/fabric-ansible/ansible-playbooks/module_utils/sandesh_log_utils.py","file_name":"sandesh_log_utils.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"343338025","text":"largest= None\r\nsmallest= None\r\n\r\nwhile True:\r\n numb = input(\"Enter a number: \")\r\n if numb == \"done\" :\r\n break\r\n try:\r\n num=int(numb)\r\n except:\r\n print('Invalid input')\r\n continue\r\n\r\n if largest is None:\r\n smallest=num\r\n largest=num\r\n elif numlargest:\r\n 
largest=num\r\n\r\n\r\nprint(\"Maximum is\", largest)\r\nprint('Minimum is', smallest)\r\n","sub_path":"Assignment_5.2.py","file_name":"Assignment_5.2.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"421057374","text":"import os.path as osp\nimport torch\nimport os\nimport cv2\nfrom collections import OrderedDict\nimport numpy as np\nimport PIL.Image as PImage\nfrom PIL import ImageFont, ImageDraw\nfrom matplotlib import pyplot as plt\n\nbase_dir = osp.join(osp.dirname(__file__), 'usedres')\nrect_label = OrderedDict()\nlabel_path = 'train2.txt'\nimg_path = '/data/DataSet/ocr_sfz/train'\n\n\ndef get_label(label_path='/media/pjq/新加卷/DataSets/IDcard/Train_Labels.csv'):\n label_dic = OrderedDict()\n with open(label_path) as f:\n labels = f.readlines()\n for label in labels:\n label = label.strip().split(',')\n dic = {label[0]: {\n \"姓名\": label[1],\n \"民族\": label[2],\n \"性别\": label[3],\n \"出生\": [label[4], label[5], label[6]],\n \"住址\": label[7],\n \"身份证号\": label[8],\n \"签发机关\": label[9],\n \"有效时间\": label[10]}}\n label_dic.update(dic)\n return label_dic\n\n\ndef generator(value):\n # 姓名,民族,性别,年,月,日,签发机关,有效日期,地址,号码\n im = PImage.open(os.path.join(base_dir, 'empty.png'))\n name, nation, sex, year, mon, day, org, life, addr, idn = value[\"姓名\"], value[\"民族\"], \\\n value[\"性别\"], value[\"出生\"][0], value[\"出生\"][1], value[\"出生\"][\n 2], \\\n value[\"签发机关\"], value[\"有效时间\"], \\\n value[\"住址\"], value[\"身份证号\"]\n im = PImage.fromarray(changBackGround(im))\n name_font = ImageFont.truetype(os.path.join(base_dir, 'hei.ttf'), 75)\n other_font = ImageFont.truetype(os.path.join(base_dir, 'hei.ttf'), 75)\n bdate_font = ImageFont.truetype(os.path.join(base_dir, 'fzhei.ttf'), 75)\n id_font = ImageFont.truetype(os.path.join(base_dir, 'ocrb10bt.ttf'), 75)\n\n draw = ImageDraw.Draw(im)\n draw.text((290, 500), \"仅限BDCI比赛使用\", fill=0, font=bdate_font)\n draw.text((655, 690), name, fill=0, font=name_font)\n draw.text((655, 825), sex, fill=0, font=other_font)\n draw.text((1105, 825), nation, fill=0, font=other_font)\n draw.text((655, 960), year, fill=0, font=bdate_font)\n draw.text((935, 960), mon, fill=0, font=bdate_font)\n draw.text((1135, 960), day, fill=0, font=bdate_font)\n start = 0\n loc = 1110\n while start + 12 < len(addr):\n draw.text((655, loc), addr[start:start + 12], fill=0, font=other_font)\n start += 12\n loc += 90\n draw.text((655, loc), addr[start:], fill=0, font=other_font)\n draw.text((855, 1475), idn, fill=0, font=id_font)\n\n draw.text((295, 1920), \"仅限BDCI比赛使用\", fill=0, font=bdate_font)\n start = 0\n loc = 2750\n while start + 12 < len(addr):\n draw.text((1010, loc), org[start:start + 12], fill=0, font=other_font)\n start += 12\n loc += 90\n draw.text((1010, loc), org[start:], fill=0, font=other_font)\n draw.text((1010, 2892), life, fill=0, font=other_font)\n\n im = np.asarray(im)\n img_zheng, img_fan = split(im)\n return img_zheng, img_fan\n\n\ndef split(img):\n img_zheng = img[491:1676, 287:2173]\n img_fan = img[1908:3094, 282:2169]\n img_zheng = cv2.resize(img_zheng, (445, 281))\n\n img_fan = cv2.resize(img_fan, (445, 281))\n return img_zheng, img_fan\n\n\ndef changBackGround(img):\n img = img.convert('L')\n img = np.copy(np.asarray(img))\n width = img.shape[1]\n height = img.shape[0]\n img[0:height, 0:380][img[0:height, 0:380] < 80] = 255\n img[0:height, 2060:width][img[0:height, 2060:width] < 80] = 255\n img[0:2000, 0:width][img[0:2000, 0:width] < 80] = 255\n img[3000:height, 
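A repair note for the min/max exercise above: the comparison operators were evidently eaten by HTML stripping (the surviving text reads `elif numlargest:`). Given the Maximum/Minimum prints, the intended branches were presumably:

if largest is None:
    smallest = num
    largest = num
elif num < smallest:      # '<' restored — stripped as a pseudo-HTML tag
    smallest = num
elif num > largest:       # '>' restored likewise
    largest = num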
0:width][img[3000:height, 0:width] < 80] = 255\n return img\n\n\ndef ada_sharp(img, kernel_size=3, K=10, alph=0.2):\n img = img.astype(np.float32)\n kernel = np.zeros([kernel_size, kernel_size])\n filter_out = []\n lc = np.ones_like(img)[:, :, np.newaxis]\n for i in range(kernel_size):\n\n for j in range(kernel_size):\n kernel[i, j] = 1\n out = cv2.filter2D(img, -1, kernel)\n kernel[i, j] = 0\n filter_out.append(out[:, :, np.newaxis])\n for i in range(len(filter_out) - 1):\n for j in range(i + 1, len(filter_out)):\n lc += ((filter_out[i] - filter_out[j]) != 0).astype(np.uint8)\n lc = np.squeeze(lc)\n filter_out = np.concatenate(filter_out, -1)\n mean = np.mean(filter_out, -1)\n std = np.std(filter_out, -1)\n lc = (lc - np.min(lc)) / (np.max(lc) - np.min(lc))\n lv = (std - np.min(std)) / (np.max(std) - np.min(std))\n lamda = alph * lc + (1 - alph) * lv\n new = img + K * lamda * (img - mean)\n new = np.minimum(np.maximum(new, 0), 255).astype(np.uint8)\n return new\n\n\ndef gen_zf(key, point):\n point = [int(t) for t in point.split(',')]\n flip = point[-1] == 1\n new_dst_rect = np.array(point[:8], dtype=np.int).reshape([4, 2])\n dst_rect = new_dst_rect.copy()\n if flip:\n dst_rect = dst_rect[[3, 2, 1, 0], :]\n img = cv2.imread(osp.join(img_path, '{}.jpg'.format(key)), 0)\n pts = np.reshape(dst_rect, [1, 4, 2]).astype(np.float32)\n h, w = 281, 445\n dst = np.float32([[[0, 0], [0, h], [w, 0], [w, h]]])\n M = cv2.getPerspectiveTransform(pts, dst)\n img_z = cv2.warpPerspective(img, M, (w, h))\n img_z = (img_z.astype(np.float) * 255 / img_z[0, 0])\n img_z = np.minimum(np.maximum(img_z, 0), 255).astype(np.uint8)\n return img_z\n\n\n# def surf(org,gen):\n# surf = cv2.xfeatures2d.SIFT_create()\n# mask = np.ones_like(org)\n# kp1, des1 = surf.detectAndCompute(org, mask) # 查找关键点和描述符\n# kp2, des2 = surf.detectAndCompute(gen, mask)\n# bf = cv2.BFMatcher()\n# matches = bf.knnMatch(des1, des2, k=2)\n# goodPoints = []\n# for m, n in matches:\n# if m.distance < 0.5 * n.distance: # 如果第一个邻近距离比第二个邻近距离的0.7倍小,则保留\n# goodPoints.append(m)\n# img3 = cv2.drawMatches(org,kp1,gen,kp2,goodPoints, flags=2,outImg=None )\n# cv2.imshow(\"3\",img3)\n# cv2.waitKey(0)\n# src_pts = np.array([kp1[m.queryIdx].pt for m in goodPoints]).reshape(-1, 1, 2)\n# dst_pts = np.array([kp2[m.trainIdx].pt for m in goodPoints]).reshape(-1, 1, 2)\n# M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 0.5)\n# print(M)\n#\n# h, w = org.shape[:2]\n# gen = cv2.warpPerspective(gen, M, (w, h))\n#\n# cv2.imshow('org', org -gen)\n# cv2.imshow(\"gen\",gen)\n# cv2.waitKey(0)\n#\n# def qusy(img,temp,mask):\n# res_z = cv2.matchTemplate(img, temp, cv2.TM_CCOEFF_NORMED)\n# _, max_val_z, _, max_point_z = cv2.minMaxLoc(res_z)\n# x,y = max_point_z\n# x1,y1 = max_point_z[0] + temp.shape[1], max_point_z[1] + temp.shape[0]\n# #cv2.rectangle(img, (x,y),(x1,y1) , 0,3)\n# sy = img[y:y1,x:x1].copy()\n# sy = cv2.inpaint(sy, mask, 4, cv2.INPAINT_TELEA)\n# img[y:y1, x:x1] = sy\n# return img\n#\n# def gen_label(label_path,label_dic):\n# shuiying = cv2.imread(\"shuiying.jpg\",0)\n# mask=cv2.Laplacian(shuiying, cv2.CV_8UC1, ksize=3)\n# mask[mask>20]=255\n# mask[mask <= 20] = 0\n# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))\n# mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)\n# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n# mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, kernel, iterations=1)\n# # cv2.imshow(' ', mask)\n# with open(label_path) as f:\n# lines=f.readlines()\n# for line in lines:\n# line = 
line.split()\n# key = line[0]\n# org_img_z=gen_zf(key,line[2])\n# org_img_f=gen_zf(key,line[1])\n# gen_img_z,gen_img_f=generator(label_dic[key],org_img_z[0,0])\n# org_img_z=qusy(org_img_z,shuiying,mask)\n# cv2.imshow('org_f',org_img_f)\n# # cv2.imshow(\"org_z\", org_img_z)\n# cv2.waitKey(0)\n#\n#\ndef add_sy(img):\n str = \"复印无效\"\n img = PImage.fromarray(img)\n sy_font = ImageFont.truetype('hwxw.ttf', np.random.randint(40, 45))\n draw = ImageDraw.Draw(img)\n color = np.random.randint(50, 140)\n h, w = 209 - 159, 244 - 66\n x, y = np.random.randint(0, img.size[0] - w), np.random.randint(0, img.size[1] - h)\n draw.text((x + np.random.randint(0, 8), y + np.random.randint(0, 8)), str, fill=(color,), font=sy_font)\n draw.rectangle((x, y, x + w, y + h), outline=(color,), width=np.random.randint(1, 4))\n img = np.asarray(img)\n return img\n\n\ndef gen_label_2(base_dir, label_dic):\n for key in label_dic.keys():\n gen_img_z, gen_img_f = generator(label_dic[key])\n gen_img_z_sy = add_sy(gen_img_z)\n gen_img_f_sy = add_sy(gen_img_f)\n cv2.imwrite(base_dir + key + '_zheng_clean.jpg', gen_img_z)\n cv2.imwrite(base_dir + key + '_fan_clean.jpg', gen_img_f)\n cv2.imwrite(base_dir + key + '_zheng_logo.jpg', gen_img_z_sy)\n cv2.imwrite(base_dir + key + '_fan_logo.jpg', gen_img_f_sy)\n\n\nlabel_dic = get_label()\ngen_label_2('/media/pjq/新加卷/DataSets/IDcard/gen_train/', label_dic)","sub_path":"id_card/find_jx.py","file_name":"find_jx.py","file_ext":"py","file_size_in_byte":9286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"541034316","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport urlparse\n\nimport raven\nimport requests\nimport BeautifulSoup\n\nimport post\nimport attachment\n\nUSER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 (KHTML, like Gecko) Version/8.0.8 Safari/600.8.9'\n\nclass PostListParser(object):\n\n def __init__(self, url=None):\n self.url = url\n \n self.hostname = urlparse.urlparse(url=self.url).hostname\n self.session = requests.Session()\n self.headers = {\n 'User-Agent': USER_AGENT,\n 'Referer': self.url\n }\n\n def parse(self):\n response = self._get_response()\n response.encoding = 'utf-8'\n\n doc = BeautifulSoup.BeautifulSoup(response.text)\n board_list = doc.find('div', attrs={'class': 'latest_module'})\n if not board_list:\n return []\n \n elements = list(reversed(board_list.findAll('div', attrs={'class': 'latest_title'})))\n\n posts = []\n\n for e in elements:\n c = e.find('td', attrs={'class': 'title'})\n subject_element = e.find('a')\n if not subject_element:\n continue\n\n title = subject_element.text.strip()\n url = subject_element.get('href', None)\n\n p = post.Post(title=title, url=url)\n posts.append(p)\n\n return posts\n\n def _get_response(self):\n return self.session.get(self.url, headers=self.headers)\n \nclass PostParser(object):\n\n def __init__(self, session=None, post=None):\n self.session = session\n self.post = post\n\n self.hostname = urlparse.urlparse(url=self.post.url).hostname\n self.headers = {\n 'User-Agent': USER_AGENT,\n 'Referer': self.post.url\n }\n\n def parse(self):\n response = self._get_response(self.post.url)\n response.encoding = 'utf-8'\n\n doc = BeautifulSoup.BeautifulSoup(response.text)\n board_view = doc.find('div', attrs={'class': 'rd rd_nav_style2 clear'})\n if not board_view:\n return self.post\n\n header_view = board_view.find('div', attrs={'class': 'rd_hd clear'})\n content_view = board_view.find('div', attrs={'class': 'rd_body 
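Returning to gen_zf above: it rectifies the photographed card with a four-point homography into a fixed 445x281 crop. A standalone version of the same warp on a synthetic image — the corner coordinates are invented, ordered to match the destination points (top-left, bottom-left, top-right, bottom-right):

import cv2
import numpy as np

src = np.float32([[[12, 30], [20, 300], [430, 18], [440, 310]]])  # made-up corners
h, w = 281, 445
dst = np.float32([[[0, 0], [0, h], [w, 0], [w, h]]])
M = cv2.getPerspectiveTransform(src, dst)
img = np.zeros((400, 500), dtype=np.uint8)        # synthetic stand-in image
card = cv2.warpPerspective(img, M, (w, h))        # rectified 445x281 card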
clear'})\n footer_view = board_view.find('div', attrs={'class': 'rd_ft'})\n \n title = header_view.find('h1').find('a').text.strip()\n elements = board_view.findAll('p')\n\n files = []\n images = []\n\n # images\n for e in content_view.findAll('img'):\n name = e['alt']\n url = e['src']\n if not 'files/attach/images' in url:\n continue\n response = self._get_response(url)\n if not response.status_code / 100 == 2:\n continue\n image = attachment.Image(name, response.content)\n images.append(image)\n\n # torrents\n for e in footer_view.findAll('a', attrs={'class': 'bubble'}):\n name = e.text.strip()\n url = e['href']\n if not '.torrent' in name:\n continue\n response = self._get_response(url)\n if not response.status_code / 100 == 2:\n continue\n f = attachment.File(name=name, content=response.content)\n files.append(f)\n\n self.post.title = title\n self.post.files = files\n self.post.images = images\n return self.post\n\n def _get_response(self, url=None):\n return self.session.get(url, headers=self.headers)\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"249721149","text":"# -*- coding: utf-8 -*-\n# @Author: NanoApe\n# @Date: 2018-09-14 01:28:54\n# @Last Modified by: NanoApe\n# @Last Modified time: 2018-09-14 02:56:43\n\nimport codecs\nimport json\nimport jieba\nimport jieba.analyse\n\nnews_start = 1\nnews_end = 10000\n\ndef relate(a, b):\n value = 0\n tag = list(set(map(lambda x:x[0], b)) & set(map(lambda x:x[0], a)))\n # print(a, b, tag)\n for word in tag:\n value += list(filter(lambda x:x[0]==word, a))[0][1] * list(filter(lambda x:x[0]==word, b))[0][1]\n # print(value)\n return value\n\nif __name__ == '__main__':\n tags = {}\n re = {}\n for news_id in range(news_start, news_end+1):\n if news_id % 100:\n print('Now', news_id)\n data_file = codecs.open('data/html_'+str(news_id),'r','utf-8')\n news = json.loads(data_file.read())\n data_file.close()\n tags[news_id] = jieba.analyse.extract_tags(news['text'], withWeight=True)\n\n print('Read OK!')\n for news_id in range(news_start, news_end+1):\n re = []\n print('Now', news_id)\n for _news_id in range(news_start, news_end+1):\n if _news_id != news_id:\n re.append((_news_id, relate(tags[news_id], tags[_news_id])))\n\n data_file = codecs.open('data/html_'+str(news_id),'r','utf-8')\n news = json.loads(data_file.read())\n data_file.close()\n news['relate'] = list(map(lambda x:x[0], sorted(re, key=lambda x:x[1], reverse=True)[:3]))\n data_file = codecs.open('data/html_'+str(news_id),'w','utf-8')\n data_file.write(json.dumps(news))\n data_file.close()\n","sub_path":"relate.py","file_name":"relate.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"550768309","text":"def update_seat(m, i, j):\n count_occupied = 0\n directions = [\n [-1, 0], # north \n [-1, 1], # north east\n [0, 1], # east\n [1, 1], # south east\n [1, 0], # south\n [1, -1], # south west\n [0, -1], # west\n [-1, -1] # north west \n ]\n for d in directions:\n v = d[0]\n h = d[1]\n found = False\n while i + v in range(len(m)) and j + h in range(len(m[0])) and m[i + v][j + h] != \"L\" and not found:\n if m[i + v][j + h] == \"#\":\n count_occupied += 1\n found = True\n v += d[0]\n h += d[1]\n if count_occupied == 0:\n return \"#\"\n elif count_occupied >= 5:\n return \"L\"\n else:\n return m[i][j]\n\n\ndef update_map(m):\n new_m = [[_ for _ in 
row] for row in m]\n for i, row in enumerate(m):\n for j, seat in enumerate(row):\n if seat in ['L', '#']:\n new_m[i][j] = update_seat(m, i, j)\n # for row in new_m:\n # print(\"\".join(row))\n return new_m\n\n\n\ndef main():\n with open(\"input.txt\") as f:\n rows = f.read().splitlines()\n seat_map = [[x for x in y] for y in rows]\n i = 0\n while True:\n i += 1\n print(i)\n new_seat_map = update_map(seat_map)\n a = set([tuple(x) for x in new_seat_map]) \n b = set([tuple(x) for x in seat_map])\n if a.intersection(b) == b:\n break\n seat_map = new_seat_map\n flat = \"\".join([\"\".join(x) for x in seat_map])\n print(\"Occupied seats: %s\" % flat.count(\"#\"))\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"day-11/script2.py","file_name":"script2.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"218426183","text":"import json\nimport requests\n\nfrom django.conf import settings\n\n\nclass Client:\n # Tells the RIS server to respond in JSON instead of key/value pairs\n # This cannot be overridden.\n RESPONSE_FORMAT = 'JSON'\n\n # RIS Version. Can be overridden my merchant if required.\n DEFAULT_VERSION = '0630'\n\n # Default endpoint for production. Used by the DEFAULT_OPTIONS\n ENDPOINT_PROD = 'https://risk.kount.net'\n\n # Default endpoint for test. Used by the TEST_DEFAULT_OPTIONS\n ENDPOINT_TEST = 'https://risk.test.kount.net'\n\n # Default params for production\n PROD_DEFAULT_OPTIONS = {\n 'endpoint': ENDPOINT_PROD,\n 'version': DEFAULT_VERSION,\n 'is_test': False,\n }\n\n # Default params for test if is_test is TRUE\n TEST_DEFAULT_OPTIONS = {\n 'endpoint': ENDPOINT_TEST,\n 'version': DEFAULT_VERSION,\n }\n\n def __init__(self, **kwargs):\n \"\"\"\n :param kwargs: (must contain merchant_id)\n \"\"\"\n self.options = {}\n if 'is_test' in kwargs and kwargs['is_test']:\n self.options.update(**self.TEST_DEFAULT_OPTIONS)\n else:\n self.options.update(**self.PROD_DEFAULT_OPTIONS)\n self.options.update(**kwargs)\n\n def get_response(self, request):\n \"\"\"\n Makes the call to the Kount RIS server\n\n :param request: [kount.kount_request.KountRequest] Kount inquiry or update object\n :return: RIS response formatted into a dictionary (or string if errors)\n \"\"\"\n params = self.prepare_request_params(request)\n headers = {\n 'X-Kount-Api-Key': settings.KOUNT_API_KEY if not self.test else settings.KOUNT_TEST_API_KEY,\n }\n response = requests.post(\n self.endpoint,\n data=params,\n headers=headers,\n )\n try:\n return response.json()\n except:\n # RIS errors do not come back as JSON, so just pass them along raw.\n return response.text\n\n # Give the request object what it needs to know to process the params\n # to send to RIS.\n def prepare_request_params(self, request):\n return request.prepare_params(self.version, self.merchant_id, self.RESPONSE_FORMAT)\n\n @property\n def merchant_id(self): # Kount Merchant ID\n return self.options['merchant_id']\n\n @property\n def version(self): # RIS Interface Version\n return self.options['version']\n\n @property\n def endpoint(self): # RIS Endpoint URL\n return self.options['endpoint']\n\n @property\n def test(self): # Is test or production setting\n return self.options['is_test']\n","sub_path":"kount/kount/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"581770113","text":"from model.country import Country\nfrom 
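One caution on the convergence test in the seating simulation above: it compares sets of row-tuples (`a.intersection(b) == b`), and sets drop duplicate and reordered rows, so two genuinely different grids can pass the test and stop the loop early. Direct list equality is the robust check:

old = [list("L#"), list("L#")]
new = [list("#L"), list("L#")]                   # the grid changed...
a = set(tuple(r) for r in new)
b = set(tuple(r) for r in old)
print(a.intersection(b) == b)                    # True  -- the set test is fooled
print(new == old)                                # False -- list equality is not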
model.state import State\nfrom model.city import City\nfrom model.address import Address\nfrom model.condominium import Condominium\nfrom model.tower import Tower\nfrom model.apartment import Apartment\nfrom sqlalchemy import exc, and_\nfrom setup import db\n\n\ndef parse_tower_list(tower_list_obj, condominium_id):\n if 'apartments' in tower_list_obj:\n tower_obj = {'apartments': tower_list_obj['apartments']}\n else:\n tower_obj = {\n 'floors': tower_list_obj['floors'],\n 'apartments_by_floor': tower_list_obj['apartments_by_floor'],\n 'start': tower_list_obj['start']\n }\n\n for tower_name in tower_list_obj[\"names\"]:\n parse_tower(tower_obj, tower_name, condominium_id)\n\n\ndef parse_tower(tower_obj, tower_name, condominium_id):\n tower = Tower(name=tower_name, condominium_id=condominium_id)\n db.session.add(tower)\n db.session.flush()\n\n parse_apartments(tower_obj, tower.id)\n\n\ndef parse_apartments(tower_obj, tower_id):\n if 'apartments' in tower_obj:\n for apt_number in tower_obj['apartments']:\n db.session.add(Apartment(apt_number=apt_number, tower_id=tower_id))\n\n else:\n for i in range(tower_obj['start'], (tower_obj['floors'] + 1) * tower_obj['start'], tower_obj['start']):\n for j in range(tower_obj['apartments_by_floor'] + 1):\n db.session.add(Apartment(apt_number=i+j, tower_id=tower_id))\n\n\ndef build_address(condominium_obj):\n try:\n country = Country(name=condominium_obj['CountryName'])\n db.session.add(country)\n db.session.flush()\n\n except exc.IntegrityError:\n db.session.rollback()\n country = Country.query.filter_by(name=condominium_obj['CountryName']).first()\n\n try:\n state = State(name=condominium_obj['StateName'], country_id=country.id)\n db.session.add(state)\n db.session.flush()\n\n except exc.IntegrityError:\n db.session.rollback()\n state = State.query.filter(and_(State.name == condominium_obj['StateName'], State.country_id == country.id)).first()\n\n try:\n city = City(name=condominium_obj['CityName'], state_id=state.id)\n db.session.add(city)\n db.session.flush()\n\n except exc.IntegrityError:\n db.session.rollback()\n city = City.query.filter(and_(City.name == condominium_obj['CityName'], City.state_id == state.id)).first()\n\n address = Address(street_name=condominium_obj['StreetName'], neighbourhood=condominium_obj['Neighbourhood'], city_id=city.id)\n db.session.add(address)\n db.session.flush()\n\n return address.id\n\n\ndef build(json_structure):\n try:\n for condominium_name in json_structure:\n condominium_obj = json_structure[condominium_name]\n\n address_id = build_address(condominium_obj)\n\n condominium = Condominium(name=condominium_name,\n street_number=condominium_obj['StreetNumber'],\n photo_location=condominium_obj.get('PhotoLocation'),\n address_id=address_id)\n db.session.add(condominium)\n db.session.flush()\n\n for key in condominium_obj['Condominium']:\n if key == 'Towers':\n parse_tower_list(condominium_obj['Condominium'][key], condominium.id)\n else:\n parse_tower(condominium_obj['Condominium'][key], key, condominium.id)\n\n db.session.commit()\n return True\n\n except exc.IntegrityError:\n db.session.rollback()\n return False\n","sub_path":"helpers/condominium_builder.py","file_name":"condominium_builder.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"217767709","text":"from django.conf.urls import patterns, url\r\n\r\n\r\n\r\nurlpatterns = patterns('',\r\n url(r'^add_like/(?P\\d+)/$', 'blog.views.addlike', name='addlike'),\r\n 
url(r'^add_comment/(?P\\d+)/$', 'blog.views.addcomment', name='addcomment'),\r\n url(r'^(?P[-\\w]+)/$', 'blog.views.getblog', name='getblog'),\r\n\r\n #url(r'^delete_comment/(?P\\d+)/$', 'blog.views.delete_comment', name='deleteComment'),\r\n \r\n)","sub_path":"social_auth/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"162580509","text":"import sqlite3\nfrom common.CommonDefs import db_loc, db_name\n\n\ndef drop_files():\n connection, cursor = None, None\n try:\n connection = sqlite3.connect(db_loc+db_name)\n cursor = connection.cursor()\n cursor.execute('''\n DROP TABLE files\n ''')\n connection.commit()\n print(\"Table files dropped successfully.\")\n connection.close()\n except Exception as e:\n print(\"Error occurred - \", e)\n connection.close()\n\n\nif __name__ == '__main__':\n drop_files()\n","sub_path":"db/scripts/drop_files.py","file_name":"drop_files.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"210228824","text":"#! /usr/bin/env python\n# coding: utf-8\n\nimport random\nimport numpy as np\nimport scipy.stats\n\n\ndef execute_something2():\n\n # we suppose 2<=symbols<=symbols_max\n e = [float(i) / symbols_max for i in range(1, symbols)]\n\n # number of packets transmitted\n global p_sum, p_node\n p_node = [0 for i in range(0, symbols)]\n\n # each node will keep sending its packet until it's received by all other nodes\n for j in range(0, symbols):\n e_m = 0\n while not e_m >= e[max([abs(0-j), abs(symbols-1-j)]) - 1]:\n e_m = random.random()\n p_node[j] += 1\n p_sum = sum(p_node)\n print(\"number of packet transmitted by each node: {}\\n\"\n \"total number of packet transmitted: {}\\n\".format(\n p_node,\n p_sum))\n\n\ndef mean_confidence_interval(data, confidence=0.95):\n a = 1.0 * np.array(data)\n m = np.mean(a)\n se = scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., len(a)-1)\n return m, h\n\n\ndef main():\n global symbols, symbols_max\n symbols = 1\n symbols_max = 10\n run_times_max = 100\n\n node_position = [i for i in range(0, symbols_max)]\n smybols_plot = [i for i in range(2, symbols_max + 1)]\n p_sum_m_plot = [0 for i in range(2, symbols_max+1)]\n p_sum_h_plot = [0 for i in range(2, symbols_max+1)]\n p_node_m_plot = [0 for i in range(2, symbols_max+1)]\n p_node_h_plot = [0 for i in range(2, symbols_max+1)]\n\n while symbols < symbols_max:\n symbols += 1\n run_times = 0\n p_node_list = [[0 for i in range(0, run_times_max)] for j in range(0, symbols)]\n p_node_m = [0 for i in range(0, symbols)]\n p_node_h = [0 for i in range(0, symbols)]\n p_sum_list = [0 for i in range(0, run_times_max)]\n\n while run_times < run_times_max:\n execute_something2()\n p_sum_list[run_times] = p_sum\n for k in range(0, symbols):\n p_node_list[k][run_times] = p_node[k]\n run_times += 1\n\n p_sum_m_plot[symbols - 2], p_sum_h_plot[symbols - 2] = mean_confidence_interval(p_sum_list)\n for l in range(0, symbols):\n p_node_m[l], p_node_h[l] = mean_confidence_interval(p_node_list[l])\n p_node_m_plot[symbols - 2] = p_node_m\n p_node_h_plot[symbols - 2] = p_node_h\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"SA/4enc_4dec(5)/withoutcoding.py","file_name":"withoutcoding.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} 
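The simulation record closing above wraps a Student-t interval in mean_confidence_interval; it returns the sample mean plus the half-width, so the interval is m ± h. A quick usage check on made-up data:

import numpy as np
import scipy.stats

def mean_confidence_interval(data, confidence=0.95):
    a = 1.0 * np.array(data)
    m = np.mean(a)
    se = scipy.stats.sem(a)                     # standard error of the mean
    h = se * scipy.stats.t.ppf((1 + confidence) / 2., len(a) - 1)
    return m, h

m, h = mean_confidence_interval([10, 12, 11, 13, 9])   # invented sample
print("%.2f +/- %.2f" % (m, h))                        # 11.00 +/- 1.96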
+{"seq_id":"139977761","text":"import os\r\n\r\nos.remove('sub.out')\r\n\r\n\r\nf = open('B-small-attempt0.in', 'r')\r\n\r\noutput = open('sub.out','a')\r\nData = f.read()\r\n\r\nData = Data.split('\\n')\r\n\r\nT = int(Data[0])\r\nCakeList = Data[1:]\r\n\r\nfor t in xrange(T):\r\n cakes = CakeList[t]\r\n # preprocessing\r\n newcakes = cakes[0]\r\n for temp in cakes:\r\n if temp != newcakes[-1]:\r\n newcakes += temp\r\n # counting times\r\n times = len(newcakes)-1\r\n if newcakes[-1] == '-':\r\n times += 1\r\n\r\n writeline = 'Case #'+ str(t+1) + ': '+ str(times)+'\\n'\r\n output.write(writeline)","sub_path":"solutions_5634697451274240_0/Python/OneStrugglingRookie/revenue.py","file_name":"revenue.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"612672188","text":"from ....tools.decorators import method\nfrom ....tools.normalize import log_cpm\nfrom ....tools.utils import check_version\n\n# import rctd, this is an R tool so need to have also R file\n\n\n@method(\n method_name=\"Rctd\",\n paper_name=\"Robust decomposition of cell type mixtures in spatial transcriptomics\",\n paper_url=\"https://www.nature.com/articles/s41587-021-00830-w\",\n paper_year=2020,\n code_url=\"https://github.com/dmcable/RCTD\",\n code_version=check_version(\"rctd\"),\n)\ndef stereoscope_log_cpm(adata):\n log_cpm(adata)\n # do something\n return\n","sub_path":"openproblems/tasks/spatial_decomposition/methods/rctd.py","file_name":"rctd.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"554974324","text":"\n\n\ndef computeNegativity(comments,community):\n totalComments = len(comments)\n totalNegComment = 0\n totalNegWord = 0\n for comment in comments:\n found = 0\n values = comment.split(\" \")\n for word in values:\n if word in negword:\n found = 1\n totalNegWord = totalNegWord + 1\n if found == 1:\n totalNegComment = totalNegComment + 1\n f = open(\"negativity_per_community_follower.txt\",\"a\")\n f.write(str(community)+\",\"+str(totalComments)+\",\"+str(totalNegComment)+\",\"+str(totalNegWord)+\"\\n\")\n f.close()\n \n\nnegword = []\nf = open(\"new_neg_list1.csv\",\"r\")\n\nfor line in f:\n line = line.strip()\n negword.append(line)\nf.close()\n\nf = open(\"commentsPerCommunityFollowing.txt\",\"r\")\n\ncomments = []\nline = f.readline()\nline = line.strip()\nindex = line.index(\":\")\ncommunity = line[index+1:]\n\nfor line in f:\n line = line.strip()\n if \"comments for community:\" in line:\n computeNegativity(comments,community)\n comments = []\n index = line.index(\":\")\n community = line[index+1:]\n else:\n comments.append(line)\nf.close()\n","sub_path":"NetworkAnalysisAndModeling/community following commenter/negativityPerCommunity.py","file_name":"negativityPerCommunity.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"23842949","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: train_refcoco_nornn.py\n# Author: Fan Wu \nfrom abc import ABCMeta\n\nimport numpy as np\nimport tensorflow as tf\nimport os, sys, re, time\nimport random\nimport uuid\nimport argparse\nimport multiprocessing, threading\nfrom collections import deque\nimport six\nfrom six.moves import queue\n\nfrom tensorpack import *\nfrom tensorpack.RL.simulator import SimulatorProcess, SimulatorProcessBase\nfrom tensorpack.utils.concurrency 
import *\nfrom tensorpack.utils.serialize import *\nfrom tensorpack.utils.timer import *\nfrom tensorpack.utils.stat import *\nfrom tensorpack.tfutils import symbolic_functions as symbf\n\nfrom tensorpack.RL import *\nimport utils.common as common\nfrom utils.common import (play_model, Evaluator, eval_model_multithread)\n\nfrom agent.refcocoenv_nornn import RefcocoEnv\nfrom config.config import cfg\n\nimport tensorflow.contrib.slim as slim\nfrom tensorpack.callbacks.base import Callback\nfrom tensorpack.tfutils.common import get_global_step\nfrom tensorpack.utils.serialize import *\nimport multiprocessing as mp\nimport zmq\n\nfrom utils.image import image_utils\nimport os\n\nGAMMA = cfg.GAMMA\n\nLOCAL_TIME_MAX = 5\nSTEP_PER_EPOCH = 20000\nEVAL_EPISODE = 50\nBATCH_SIZE = 128\nSIMULATOR_PROC = 50\nPREDICTOR_THREAD_PER_GPU = 2\nPREDICTOR_THREAD = None\nEVALUATE_PROC = min(multiprocessing.cpu_count() // 2, 20)\n\nNUM_ACTIONS = cfg.NUM_ACTIONS\nHISTORY_LENGTH = cfg.HISTORY_LENGTH\nENV_NAME = None\n\nglobal_step = None\n\nAPPRENTICESHIP_LR = False\n\nname_base = str(uuid.uuid1())[:6]\nPIPE_DIR = os.environ.get('TENSORPACK_PIPEDIR', '.').rstrip('/')\nnamec2s = 'ipc://{}/sim-c2s-{}'.format(PIPE_DIR, name_base)\nnames2c = 'ipc://{}/sim-s2c-{}'.format(PIPE_DIR, name_base)\n\nVISUAL_LEN = 2048\nSPATIAL_LEN = 5\nHISTORY_LEN = 450\nLANG_LEN = 4800\n\ndef softmax(logit):\n exp = np.exp(logit)\n return exp / np.sum(exp)\n\n\ndef get_player(viz=False, train=False, dumpdir=None):\n pl = RefcocoEnv(ENV_NAME, \"train\")\n\n global NUM_ACTIONS\n NUM_ACTIONS = pl.get_action_space().num_actions()\n\n return pl\n\nclass MySimulatorWorker(SimulatorProcessBase):\n\n def __init__(self, idx, pipe_c2s, pipe_s2c):\n super(MySimulatorWorker, self).__init__(idx)\n self.idx = idx\n self.pipe_c2s = pipe_c2s\n self.pipe_s2c = pipe_s2c\n\n def connect(self):\n #Set pipe to master\n context = zmq.Context()\n self.pipe_c2s_socket = context.socket(zmq.PUSH)\n self.pipe_c2s_socket.setsockopt(zmq.IDENTITY, self.identity)\n self.pipe_c2s_socket.set_hwm(2)\n self.pipe_c2s_socket.connect(self.pipe_c2s)\n\n self.pipe_s2c_socket = context.socket(zmq.DEALER)\n self.pipe_s2c_socket.setsockopt(zmq.IDENTITY, self.identity)\n #self.pipe_s2c_socket.set_hwm(5)\n self.pipe_s2c_socket.connect(self.pipe_s2c)\n\n def run(self):\n os.environ['CUDA_VISIBLE_DEVICES'] = \"\"\n np.random.seed(int(self.idx))\n self.connect()\n #Build player after connected\n player = self._build_player()\n\n state = player.current_state()\n reward, isOver = 0, False\n while True:\n self.pipe_c2s_socket.send(dumps(\n (self.identity, state, reward, isOver)),\n copy=False)\n action = loads(self.pipe_s2c_socket.recv(copy=False).bytes)[0]\n reward, isOver = player.action(action)\n state = player.current_state()\n\n def _build_player(self):\n return get_player(train=True)\n\nclass Model(ModelDesc):\n def _get_input_vars(self):\n assert NUM_ACTIONS is not None\n return [InputVar(tf.float32, (None, SPATIAL_LEN + VISUAL_LEN + LANG_LEN + HISTORY_LENGTH*NUM_ACTIONS), 'state'),\n InputVar(tf.int64, (None,), 'action'),\n InputVar(tf.float32, (None,), 'futurereward') ]\n\n def _get_NN_prediction(self, state):\n visual = state[:,:VISUAL_LEN]\n lang = state[:,VISUAL_LEN:VISUAL_LEN+LANG_LEN]\n lang = slim.fully_connected(lang, VISUAL_LEN, scope='fc/lang')\n other = state[:,VISUAL_LEN+LANG_LEN: SPATIAL_LEN+VISUAL_LEN+LANG_LEN+HISTORY_LENGTH*NUM_ACTIONS]\n\n context = tf.mul(visual, lang)\n context = tf.nn.l2_normalize(context, 1)\n\n l = tf.concat(1, [context, other])\n l = 
slim.fully_connected(l, 1024, scope='fc/fc1')\n l = slim.fully_connected(l, 1024, scope='fc/fc2')\n\n policy = slim.fully_connected(l, 9, activation_fn=None, scope='fc/fc-pi')\n value = slim.fully_connected(l, 1, activation_fn=None, scope='fc/fc-v')\n\n return policy, value\n\n def _build_graph(self, inputs):\n state, action, futurereward = inputs\n policy, self.value = self._get_NN_prediction(state)\n self.value = tf.squeeze(self.value, [1], name='pred_value') # (B,)\n self.logits = tf.nn.softmax(policy, name='logits')\n\n expf = tf.get_variable('explore_factor', shape=[],\n initializer=tf.constant_initializer(1), trainable=False)\n logitsT = tf.nn.softmax(policy * expf, name='logitsT')\n is_training = get_current_tower_context().is_training\n if not is_training:\n return\n log_probs = tf.log(self.logits + 1e-6)\n\n log_pi_a_given_s = tf.reduce_sum(\n log_probs * tf.one_hot(action, NUM_ACTIONS), 1)\n advantage = tf.sub(tf.stop_gradient(self.value), futurereward, name='advantage')\n policy_loss = tf.reduce_sum(log_pi_a_given_s * advantage, name='policy_loss')\n xentropy_loss = tf.reduce_sum(\n self.logits * log_probs, name='xentropy_loss')\n value_loss = tf.nn.l2_loss(self.value - futurereward, name='value_loss')\n\n pred_reward = tf.reduce_mean(self.value, name='predict_reward')\n advantage = symbf.rms(advantage, name='rms_advantage')\n summary.add_moving_summary(policy_loss, xentropy_loss, value_loss, pred_reward, advantage)\n entropy_beta = tf.get_variable('entropy_beta', shape=[],\n initializer=tf.constant_initializer(0.01), trainable=False)\n self.cost = tf.add_n([policy_loss, xentropy_loss * entropy_beta, value_loss])\n self.cost = tf.truediv(self.cost,\n tf.cast(tf.shape(futurereward)[0], tf.float32),\n name='cost')\n\n def get_gradient_processor(self):\n return [MapGradient(lambda grad: tf.clip_by_average_norm(grad, 0.1)),\n SummaryGradient()]\n\nclass MySimulatorMaster(SimulatorMaster, Callback):\n def __init__(self, pipe_c2s, pipe_s2c, model):\n super(MySimulatorMaster, self).__init__(pipe_c2s, pipe_s2c)\n self.M = model\n self.queue = queue.Queue(maxsize=BATCH_SIZE*8*2)\n\n def _setup_graph(self):\n self.sess = self.trainer.sess\n self.async_predictor = MultiThreadAsyncPredictor(\n self.trainer.get_predict_funcs(['state'], ['logitsT', 'pred_value',],\n PREDICTOR_THREAD), batch_size=15)\n self.async_predictor.run()\n\n def _on_state(self, state, ident):\n def cb(outputs):\n distrib, value = outputs.result()\n\n action = np.random.choice(len(distrib), p=distrib)\n\n client = self.clients[ident]\n client.memory.append(TransitionExperience(state, action, None, value=value))\n self.send_queue.put([ident, dumps([action,])])\n self.async_predictor.put_task([state], cb)\n\n def _on_episode_over(self, ident):\n self._parse_memory(0, ident, True)\n\n def _on_datapoint(self, ident):\n client = self.clients[ident]\n if len(client.memory) == LOCAL_TIME_MAX + 1:\n R = client.memory[-1].value\n self._parse_memory(R, ident, False)\n\n def _parse_memory(self, init_r, ident, isOver):\n client = self.clients[ident]\n mem = client.memory\n if not isOver:\n last = mem[-1]\n mem = mem[:-1]\n\n mem.reverse()\n R = float(init_r)\n for idx, k in enumerate(mem):\n R = np.clip(k.reward, -5, 5) + GAMMA * R\n self.queue.put([k.state, k.action, R])\n\n if not isOver:\n client.memory = [last]\n else:\n client.memory = []\n\nclass GlobalStepSetter(Callback):\n\n def trigger_step(self):\n global global_step\n global_step = get_global_step()\n\ndef get_config():\n logger.auto_set_dir()\n M = Model()\n\n master = 
MySimulatorMaster(namec2s, names2c, M)\n dataflow = BatchData(DataFromQueue(master.queue), BATCH_SIZE)\n\n lr = symbf.get_scalar_var('learning_rate', 0.0001, summary=True)\n return TrainConfig(\n dataset=dataflow,\n optimizer=tf.train.AdamOptimizer(lr, epsilon=1e-3),\n callbacks=Callbacks([\n StatPrinter(), ModelSaver(),\n HumanHyperParamSetter('learning_rate', 'hyper.txt'),\n HumanHyperParamSetter('entropy_beta', 'hyper.txt'),\n HumanHyperParamSetter('explore_factor', 'hyper.txt'),\n master,\n StartProcOrThread(master),\n# PeriodicCallback(Evaluator(EVAL_EPISODE, ['state'], ['logits']), 1),\n GlobalStepSetter(),\n ]),\n session_config=get_default_sess_config(0.5),\n model=M,\n step_per_epoch=STEP_PER_EPOCH,\n max_epoch=1000,\n )\n\nif __name__ == '__main__':\n global global_step\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')\n parser.add_argument('--load', help='load model')\n parser.add_argument('--env', help='env', required=True)\n parser.add_argument('--task', help='task to perform',\n choices=['play', 'eval', 'train'], default='train')\n args = parser.parse_args()\n\n ENV_NAME = args.env\n assert ENV_NAME\n\n procs = [MySimulatorWorker(k, namec2s, names2c) for k in range(SIMULATOR_PROC)]\n\n ensure_proc_terminate(procs)\n start_proc_mask_signal(procs)\n\n #p = get_player(); del p # set NUM_ACTIONS\n\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n if args.task != 'train':\n assert args.load is not None\n\n if args.task != 'train':\n cfg = PredictConfig(\n model=Model(),\n session_init=SaverRestore(args.load),\n input_names=['state'],\n output_names=['logits'])\n if args.task == 'play':\n play_model(cfg)\n elif args.task == 'eval':\n eval_model_multithread(cfg, EVAL_EPISODE)\n else:\n if args.gpu:\n nr_gpu = get_nr_gpu()\n if nr_gpu > 1:\n predict_tower = range(nr_gpu)[-nr_gpu/2:]\n else:\n predict_tower = [0]\n PREDICTOR_THREAD = len(predict_tower) * PREDICTOR_THREAD_PER_GPU\n train_tower = range(nr_gpu)[:-nr_gpu/2] or [0]\n logger.info(\"[BA3C] Train on gpu {} and infer on gpu {}\".format(\n ','.join(map(str, train_tower)), ','.join(map(str, predict_tower))))\n else:\n nr_gpu = 0\n PREDICTOR_THREAD = 1\n predict_tower = [0]\n train_tower = [0]\n config = get_config()\n if args.load:\n config.session_init = SaverRestore(args.load)\n global_step = int(args.load.split('-')[-1])\n else:\n #config.session_init = SaverRestore(cfg.CKPT_PATH)\n global_step = 0\n config.tower = train_tower\n AsyncMultiGPUTrainer(config, predict_tower=predict_tower).train()\n","sub_path":"train_refcoco_nornn.py","file_name":"train_refcoco_nornn.py","file_ext":"py","file_size_in_byte":10552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"491322434","text":"import os\nimport numpy as np\nimport cv2 as cv\nfrom matplotlib import pyplot as plt\n# import csv\n# import pickle\n# import unicodecsv as csv\n\ninputFolder = '/home/mainampati/speech_framework/newdb/imagedb/'\nsuffix = '.png'\n# for filename in os.listdir(inputFolder):\n# print(filename)\n\nfilename = os.listdir(inputFolder)\nfeatures = []\nfor i in range (0, len(filename)):\n base_filename = filename[i]\n name = os.path.join(inputFolder, base_filename)\n # print(name)\n img = cv.imread(name,0)\n # Initiate ORB detector\n orb = cv.ORB_create(nfeatures=500, WTA_K=3)\n # find the keypoints with ORB\n kp = orb.detect(img,None)\n # compute the descriptors with ORB\n kp, des = orb.compute(img, kp)\n #retval = 
cv.ORB.getMaxFeatures(orb)\n flat = des.flatten('C')\n flat1 = flat[:, np.newaxis] . T\n # print(des.shape, flat1.shape)\n features.append(flat1)\n print(des.shape, flat1.shape)\n \n '''\n with open('new_features1.txt', 'w') as f:\n csvwriter = csv.writer(f, lineterminator = '\\n')\n for val in flat1:\n csvwriter.writerow([val])\n #csvwriter.writerows(flat1)\n f.close()\n \n with open('new_features.txt', 'a') as f:\n np.savetxt(f, flat1, delimiter=',', newline='\\n')\n f.close()\n '''\n\n\n\n\nprint(len(features))\n\n","sub_path":"files1.py","file_name":"files1.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"229992414","text":"\n\nfrom acousticsim.representations.pitch import to_pitch_zcd, ACPitch, Harmonicity\n\nfrom numpy.testing import assert_array_almost_equal\n\n\n\ndef test_pitch_zcd(base_filenames):\n return\n for f in base_filenames:\n path = f+'.wav'\n gt, env = to_gammatone(path,num_bands = 128, freq_lims = (80,7800))\n to_pitch_zcd(gt)\n\ndef test_pitch_ac(base_filenames):\n for f in base_filenames:\n if f.startswith('silence'):\n continue\n wavpath = f+'.wav'\n print(f)\n pitch = ACPitch(wavpath, time_step = 0.01, freq_lims = (75,600))\n print(pitch.to_array())\n\ndef test_harmonics(base_filenames):\n for f in base_filenames:\n if f.startswith('silence'):\n continue\n wavpath = f+'.wav'\n print(f)\n harms = Harmonicity(wavpath, time_step = 0.01, min_pitch = 75)\n harms.process()\n print(harms.to_array())\n\n","sub_path":"tests/test_rep_pitch.py","file_name":"test_rep_pitch.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"514537354","text":"import argparse\nimport configparser\nimport os\nimport subprocess\nimport sys\nimport yaml\n\nfrom openlabcmd import exceptions\nfrom openlabcmd.plugins import base\nfrom openlabcmd import utils\nfrom openlabcmd.utils import _color\nfrom openlabcmd import zk\n\n\nclass OpenLabCmd(object):\n def __init__(self):\n self.parser = None\n self.args = None\n self.config = None\n self.zk = None\n\n @staticmethod\n def _str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n @staticmethod\n def _node_name_format(v):\n spl_v = v.split('-')\n if (len(spl_v) < 2 or spl_v[-2] != 'openlab' or\n spl_v[-1] not in ['nodepool', 'zuul', 'zookeeper']):\n raise argparse.ArgumentTypeError(\n 'Node name should be format like: '\n '{cloud_provider}-openlab-{type}')\n return v\n\n def _add_check_cmd(self, parser):\n # openlab check\n cmd_check = parser.add_parser('check',\n help='Check OpenLab environment.')\n cmd_check.set_defaults(func=self.check)\n cmd_check.add_argument('--type', default='all',\n help=\"Specify a plugin type, like 'nodepool', \"\n \"'jobs'. Default is 'all'.\")\n cmd_check.add_argument('--cloud', default='all',\n help=\"Specify a cloud provider, like 'otc', \"\n \"'vexxhost'. 
Default is 'all'.\")\n cmd_check.add_argument('--nocolor', action='store_true',\n help='Enable the no color mode.')\n cmd_check.add_argument('--recover', action='store_true',\n help='Enable the auto recover mode.')\n\n def _add_ha_node_cmd(self, parser):\n # openlab ha node\n cmd_ha_node = parser.add_parser('node', help='Manage HA node.')\n cmd_ha_node_subparsers = cmd_ha_node.add_subparsers(title='node',\n dest='node')\n # openlab ha node list\n cmd_ha_node_list = cmd_ha_node_subparsers.add_parser(\n 'list', help='List all nodes.')\n cmd_ha_node_list.set_defaults(func=self.ha_node_list)\n # openlab ha node get\n cmd_ha_node_get = cmd_ha_node_subparsers.add_parser(\n 'get', help='Get a node.')\n cmd_ha_node_get.set_defaults(func=self.ha_node_get)\n cmd_ha_node_get.add_argument('name', help='The node hostname.')\n # openlab ha node create\n cmd_ha_node_create = cmd_ha_node_subparsers.add_parser(\n 'init', help='Create a new node. This command usually should be '\n 'called by CI environment deploy tools when creating '\n 'a new system. Operators should be careful for this '\n 'command. One case for this command may like: the '\n 'data in zookeeper is broken or missing, but the '\n 'node works well, so that operators need to rebuild '\n 'the node info.')\n cmd_ha_node_create.set_defaults(func=self.ha_node_create)\n cmd_ha_node_create.add_argument(\n 'name', type=self._node_name_format,\n help='The new node hostname, it should be global unique. Format: '\n '{cloud-provider}-openlab-{type}.')\n cmd_ha_node_create.add_argument(\n '--type', required=True, choices=['nodepool', 'zuul', 'zookeeper'],\n help=\"The new node type. Choose from 'nodepool', 'zuul' and \"\n \"'zookeeper'\")\n cmd_ha_node_create.add_argument(\n '--role', required=True, choices=['master', 'slave', 'zookeeper'],\n help=\"The new node role. It should be 'master', 'slave' or \"\n \"'zookeeper'.\")\n cmd_ha_node_create.add_argument(\n '--ip', required=True, help=\"The new node's public IP.\")\n\n # openlab ha node set\n cmd_ha_node_set = cmd_ha_node_subparsers.add_parser(\n 'set', help='Update a node.')\n cmd_ha_node_set.set_defaults(func=self.ha_node_update)\n cmd_ha_node_set.add_argument('name', help='The node hostname.')\n cmd_ha_node_set.add_argument('--maintain', metavar='{yes, no}',\n type=self._str2bool,\n help='Set the node to maintained status.')\n cmd_ha_node_set.add_argument(\n '--role', choices=['master', 'slave'],\n help=\"Update node role. It should be either 'master' or 'slave'. 
\"\n \"Be careful to update the role, you should not update role \"\n \"except emergency situations, because it will impact \"\n \"checking scope of HA monitor , HA monitor will check and \"\n \"update it with built-in policy automatically.\")\n\n # openlab ha node delete\n cmd_ha_node_delete = cmd_ha_node_subparsers.add_parser(\n 'delete', help='Delete a node.')\n cmd_ha_node_delete.set_defaults(func=self.ha_node_delete)\n cmd_ha_node_delete.add_argument('name', help='The node hostname.')\n\n def _add_ha_service_cmd(self, parser):\n # openlab ha service\n cmd_ha_service = parser.add_parser('service',\n help='Manage HA service.')\n cmd_ha_service_subparsers = cmd_ha_service.add_subparsers(\n title='service', dest='service')\n # openlab ha service list\n cmd_ha_service_list = cmd_ha_service_subparsers.add_parser(\n 'list', help='List all services.')\n cmd_ha_service_list.set_defaults(func=self.ha_service_list)\n cmd_ha_service_list.add_argument(\n '--node', action='append',\n help='Filter the services with the specified node name.')\n cmd_ha_service_list.add_argument(\n '--role', action='append',\n choices=['master', 'slave', 'zookeeper'],\n help='Filter the services with the specified node role.')\n cmd_ha_service_list.add_argument(\n '--status', action='append',\n choices=['up', 'down', 'restarting'],\n help='Filter the services with the specified status.')\n # openlab ha service get\n cmd_ha_service_get = cmd_ha_service_subparsers.add_parser(\n 'get', help='Get a service.')\n cmd_ha_service_get.set_defaults(func=self.ha_service_get)\n cmd_ha_service_get.add_argument('name', help='service name.')\n cmd_ha_service_get.add_argument(\n '--node', required=True, help=\"The node where the service run.\")\n\n def _add_ha_cmd(self, parser):\n # openlab ha\n cmd_ha = parser.add_parser('ha',\n help='Manage OpenLab HA deployment.')\n cmd_ha_subparsers = cmd_ha.add_subparsers(title='ha', dest='ha')\n self._add_ha_node_cmd(cmd_ha_subparsers)\n self._add_ha_service_cmd(cmd_ha_subparsers)\n\n def create_parser(self):\n parser = argparse.ArgumentParser(\n description='The command line tool for OpenLab management',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('-c', dest='config',\n help='path to config file')\n parser.add_argument('-f', dest='format', choices=['raw', 'pretty'],\n default='pretty',\n help='output format')\n\n subparsers = parser.add_subparsers(title='commands',\n dest='command')\n self._add_check_cmd(subparsers)\n self._add_ha_cmd(subparsers)\n\n return parser\n\n def _get_cloud_list(self, cloud):\n cloud_conf_location = self.config.get(\n 'check', 'cloud_conf', fallback='/etc/openstack/clouds.yaml')\n with open(cloud_conf_location) as f:\n clouds = yaml.load(f, Loader=yaml.FullLoader)\n clouds_list = [c for c in clouds['clouds']]\n\n if cloud not in clouds_list + ['all']:\n raise exceptions.ClientError(\n \"Error: Cloud %(cloud)s is not found. 
Please use the cloud \"\n \"in %(clouds_list)s or just use 'all'.\" % {\n 'cloud': cloud, 'clouds_list':clouds_list})\n\n clouds_list = clouds_list if cloud == 'all' else [cloud]\n return clouds_list\n\n def _header_print(self, header):\n print(_color(header))\n print(_color(\"=\" * 48))\n\n def check(self):\n utils.NOCOLOR = self.args.nocolor\n\n cloud_list = self._get_cloud_list(self.args.cloud)\n\n if self.args.type != 'all':\n # Filter the plugins with specific ptype\n plugins = list(filter(lambda x: x.ptype == self.args.type,\n base.Plugin.plugins))\n else:\n plugins = base.Plugin.plugins\n\n cnt = len(cloud_list)\n for i, c in enumerate(cloud_list):\n header = \"%s/%s. %s cloud check\" % (i + 1, cnt, c)\n self._header_print(header)\n for plugin in plugins:\n plugin.cloud = c\n plugin.check_begin()\n plugin.check()\n plugin.check_end()\n # the failed flag would be record when do check()\n if self.args.recover and plugin.failed:\n plugin.recover()\n\n def _zk_wrapper(func):\n def wrapper(self, *args, **kwargs):\n if self.zk is None:\n self.zk = zk.ZooKeeper(config=self.config)\n try:\n self.zk.connect()\n func(self, *args, **kwargs)\n finally:\n self.zk.disconnect()\n return wrapper\n\n @_zk_wrapper\n def ha_node_list(self):\n result = self.zk.list_nodes()\n if self.args.format == 'pretty':\n print(utils.format_output('node', result))\n else:\n print(result.to_dict())\n\n @_zk_wrapper\n def ha_node_get(self):\n node_name = self.args.name\n result = self.zk.get_node(node_name)\n if self.args.format == 'pretty':\n print(utils.format_output('node', result))\n else:\n print(result.to_dict())\n\n @_zk_wrapper\n def ha_node_create(self):\n if self.args.type == 'zookeeper':\n if self.args.role != 'zookeeper':\n raise argparse.ArgumentTypeError(\n 'zookeeper node must be zookeeper type.')\n else:\n if self.args.role == 'zookeeper':\n raise argparse.ArgumentTypeError(\n 'zookeeper node must be zookeeper type.')\n\n result = self.zk.create_node(self.args.name, self.args.role,\n self.args.type, self.args.ip)\n\n if self.args.format == 'pretty':\n print(utils.format_output('node', result))\n else:\n print(result.to_dict())\n\n @_zk_wrapper\n def ha_node_update(self):\n node_name = self.args.name\n if self.args.maintain is None and not self.args.role:\n raise exceptions.ClientError(\"Too few arguments\")\n maintain = self.args.maintain\n role = self.args.role\n result = self.zk.update_node(node_name, maintain, role)\n if self.args.format == 'pretty':\n print(utils.format_output('node', result))\n else:\n print(result.to_dict())\n\n @_zk_wrapper\n def ha_node_delete(self):\n node_name = self.args.name\n self.zk.delete_node(node_name)\n\n @_zk_wrapper\n def ha_service_list(self):\n result = self.zk.list_services(self.args.node, self.args.role,\n self.args.status)\n if self.args.format == 'pretty':\n print(utils.format_output('service', result))\n else:\n print(result.to_dict())\n\n @_zk_wrapper\n def ha_service_get(self):\n result = self.zk.get_service(self.args.name.lower(), self.args.node)\n if self.args.format == 'pretty':\n print(utils.format_output('service', result))\n else:\n print(result.to_dict())\n\n def run(self):\n # no arguments, print help messaging, then exit with error(1)\n if not self.args.command:\n self.parser.print_help()\n return 1\n if not getattr(self.args, 'func', None):\n help_message = subprocess.getoutput(\"%s -h\" % ' '.join(sys.argv))\n print(help_message)\n return 1\n try:\n self.args.func()\n except exceptions.ClientError as e:\n print(e)\n return 1\n\n def 
_initConfig(self):\n self.config = configparser.ConfigParser()\n if self.args.config:\n locations = [self.args.config]\n else:\n locations = ['/etc/openlab/openlab.conf',\n '~/openlab.conf',\n '/usr/local/etc/openlab/openlab.conf']\n\n for fp in locations:\n if os.path.exists(os.path.expanduser(fp)):\n self.config.read(os.path.expanduser(fp))\n return\n raise exceptions.ClientError(\"Unable to locate config file in \"\n \"%s\" % locations)\n\n def _main(self):\n self.parser = self.create_parser()\n self.args = self.parser.parse_args()\n self._initConfig()\n self.run()\n\n @classmethod\n def main(cls):\n return cls()._main()\n\n\ndef main():\n return OpenLabCmd.main()\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"openlabcmd/openlabcmd/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":13805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"78729843","text":"import numpy as np\nimport json\nimport os\nimport cv2 as cv\nfrom tools import bruit_gauss, calc_erreur, peano_transform_img, transform_peano_in_img, line_transform_img, \\\n transform_line_in_img, peano_to_neighbours, sigmoid_np, heaviside_np\nfrom markov_chain_neigh import *\nfrom markov_chain import *\nfrom markov_field import *\nfrom sklearn.cluster import KMeans\n\nm1 = 0\nm2 = 0.6\nsig1 = 1\nsig2 = 1\n\nw = np.array([0, 1])\nmax_val = 255\nimage = ['./images/beee2.bmp', './images/cible2.bmp', './images/promenade2.bmp', './images/zebre2.bmp']\n\niter = 100\niter_gibbs = 100\nnb_simu = 10\nresults = []\nparams = []\nresfolder = './results_non_sup2'\nresolution = (256, 256)\nif not os.path.exists(resfolder):\n os.makedirs(resfolder)\n\nfor path in image:\n img = cv.imread(path, 0)\n img_name = (path.split('/')[-1]).split('.')[0]\n img = cv.resize(img, resolution)\n img = heaviside_np(img)\n signal = peano_transform_img(img)\n\n # bruite image\n signal_noisy = bruit_gauss(img, w, m1, sig1, m2, sig2)\n cv.imwrite(os.path.join(resfolder, img_name + '_noisy.bmp'), sigmoid_np(signal_noisy) * max_val)\n\n # récupère les voisins\n neighboursh, neighboursv = peano_to_neighbours(signal_noisy)\n\n # parcours de peano sur image\n peano_noisy = peano_transform_img(signal_noisy)\n\n # kmeans pour estimer les paramètres à priori\n Y = peano_noisy.reshape(-1, 1)\n\n kmeans = KMeans(n_clusters=w.shape[0], max_iter=100, n_init=100).fit(Y)\n hidden = kmeans.labels_\n labels_name = np.unique(hidden)\n p_init, A_init = calc_probaprio_mc(hidden, labels_name)\n\n hidden_2d = transform_peano_in_img(hidden, resolution[0])\n proba_init = estim_proba_apri(hidden_2d)\n\n m1_init = peano_noisy[hidden == labels_name[0]].sum() / hidden.shape[0]\n sig1_init = np.sqrt(((peano_noisy[hidden == labels_name[0]] - m1_init) ** 2).sum() / hidden.shape[0])\n\n m2_init = peano_noisy[hidden == labels_name[1]].sum() / hidden.shape[0]\n sig2_init = np.sqrt(((peano_noisy[hidden == labels_name[1]] - m2_init) ** 2).sum() / hidden.shape[0])\n\n p_est1, A_est1, m1_est1, sig1_est1, m2_est1, sig2_est1 = estim_param_EM_mc(iter, peano_noisy, p_init, A_init,\n m1_init,\n sig1_init, m2_init, sig2_init)\n\n p_est2, A_est2, m1_est2, sig1_est2, m2_est2, sig2_est2 = estim_param_EM_mc_neigh(iter, peano_noisy, neighboursh,\n neighboursv, p_init, A_init,\n m1_init, sig1_init, m2_init,\n sig2_init)\n\n\n params.append({'param_mc': {'p': p_est1.tolist(), 't': A_est1.tolist(), 'mu1': m1_est1.tolist(),\n 'sig1': sig1_est1.tolist(), 'mu2': m2_est1.tolist(),\n 'sig2': sig2_est1.tolist()},\n 'param_mc_neigh': 
{'p': p_est2.tolist(), 't': A_est2.tolist(), 'mu1': m1_est2.tolist(),\n 'sig1': sig1_est2.tolist(), 'mu2': m2_est2.tolist(),\n 'sig2': sig2_est2.tolist()}})\n\n segmentation_peano_mc = mpm_mc(peano_noisy, w, p_est1, A_est1, m1_est1, sig1_est1, m2_est1,\n sig2_est1)\n segmentation_peano_mc_neigh = mpm_mc_neigh(peano_noisy, neighboursh, neighboursv, w, p_est2, A_est2, m1_est2,\n sig1_est2, m2_est2,\n sig2_est2)\n cv.imwrite(os.path.join(resfolder, img_name + '_segmentation_peano_mc.bmp'),\n transform_peano_in_img(segmentation_peano_mc, resolution[0]) * max_val)\n\n\n cv.imwrite(os.path.join(resfolder, img_name + '_segmentation_peano_mc_neigh.bmp'),\n transform_peano_in_img(segmentation_peano_mc_neigh, resolution[0]) * max_val)\n\n\n\n results.append({'img': path, 'err_mc': calc_erreur(signal, segmentation_peano_mc),\n 'err_mc_neigh': calc_erreur(signal, segmentation_peano_mc_neigh)})\n\nwith open(os.path.join(resfolder, 'results_nonsup.txt'), 'w') as f:\n json.dump(results, f, ensure_ascii=False)\n\nwith open(os.path.join(resfolder, 'params_nonsup.txt'), 'w') as f:\n json.dump(params, f, ensure_ascii=False)\n","sub_path":"image_segmentation_nonsup_em2.py","file_name":"image_segmentation_nonsup_em2.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"246850460","text":"from dados import acessa_arquivo\n\nX,Y = acessa_arquivo()\n\ndados_treino = X[:90]\nmarcacoes_treino = Y[:90]\n\ndados_teste = X[-9:]\nmarcacoes_teste = Y[-9:]\n\nfrom sklearn.naive_bayes import MultinomialNB\n\nmodelo = MultinomialNB()\nmodelo.fit(dados_treino,marcacoes_treino)\n\nresultados = modelo.predict(dados_teste)\ndiferencas = resultados - marcacoes_teste\n\nacertos = [d for d in diferencas if d==0]\n\ntotal_acertos = len(acertos)\ntotal_dados = len(dados_teste)\n\ntaxa_acerto = 100.0*total_acertos/total_dados\n\nprint(taxa_acerto)\nprint(total_dados)\n","sub_path":"Alura/Machine Learning - Introdução a machine learning com classificação/Aula 02/classifica_acesso.py","file_name":"classifica_acesso.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"270719731","text":"print(\"---Nomor 4---\\n\")\nprint(\"Diketahui:\\n\")\njarakAB = 125\nkec_AB = 62\njarakBC =256\nkec_BC = 70\nprint(\"Jarak kota A dan B =\",jarakAB,\"km\")\nprint(\"Kecepatan rata-rata A -> B =\",kec_AB,\"km/jam\")\nprint(\"Jarak kota B dan C =\",jarakBC,\"km\")\nprint(\"Kecepatan rata rata B-> C =\", kec_BC,\"km/jam\")\njam_awal = 6\nmenit_awal = 0\ndetik_awal = 0\njam_istirahat = 0\nmenit_istirahat = 45\ndetik_istirahat = 0\nprint(\"Pak Amir berangkat pada pukul =\",jam_awal,\".\",menit_awal)\nprint(\"Istirahat di kota B selama =\", menit_istirahat,\"menit\")\nprint(\"\\nPertanyaan:\\nPukul berapa pak Amir sampai di kota C?\\nJawab:\\n\")\n\n#waktu untuk A ke B\nwaktuAB = ((jarakAB*1000) / (kec_AB*1000/3600))\nprint(\"waktu yang dibutuhkan dari A ke B dalam satuan detik\",int(waktuAB),\"detik\")\njam_AB = waktuAB // 3600\nsisa_detik = waktuAB % 3600\nmenit_AB = sisa_detik // 60\nsisa_detikAB = sisa_detik % 60\nprint(\"Dalam satuan jam adalah\",int(jam_AB),\"jam\",int(menit_AB),\"menit\",int(sisa_detikAB),\"detik\\n\")\n\n#waktu untuk B ke C\nwaktuBC = ((jarakBC*1000) / (kec_BC*1000/3600))\nprint(\"waktu yang dibutuhkan dari B ke C dalam satuan detik\",int(waktuBC),\"detik\")\njam_BC = waktuBC // 3600\nsisa_detik = waktuBC % 3600\nmenit_BC = sisa_detik // 
60\nsisa_detikBC = sisa_detik % 60\nprint(\"Dalam satuan jam adalah\",int(jam_BC),\"jam\",int(menit_BC),\"menit\",int(sisa_detikBC),\"detik\\n\")\n\n#penghitungan jam A ke B\njam_AkeB = jam_awal + jam_AB \nmenit_AkeB = menit_awal + menit_AB \ndetik_AkeB = detik_awal + sisa_detikAB\nif (detik_AkeB >= 60):\n menit_AkeB = menit_awal + menit_AB + 1\n detik_AkeB = (detik_awal + sisa_detikAB) % 60\nif (menit_AkeB >= 60):\n jam_AkeB = jam_awal + jam_AB + 1\n menit_AkeB = (menit_awal + menit_AB) % 60\nprint(\"Pak Amir sampai di kota B pada jam\",int(jam_AkeB),\"lebih\",int(menit_AkeB),\"menit\",int(detik_AkeB),\"detik \\n\") \n\n#waktu istirahat di B\njam_diB = jam_AkeB + jam_istirahat\nmenit_diB = menit_AkeB + menit_istirahat\ndetik_diB = detik_AkeB + detik_istirahat\nif (detik_diB >= 60):\n menit_diB = menit_AkeB + menit_istirahat + 1\n detik_diB = (detik_AkeB + detik_istirahat) % 60\nif (menit_diB >= 60):\n jam_diB = jam_AkeB + jam_istirahat + 1\n menit_diB = (menit_AkeB + menit_istirahat) % 60\nprint(\"Pak Amir istirahat di kota B pada jam\",int(jam_diB),\"lebih\",int(menit_diB),\"menit\",int(detik_diB),\"detik \\n\") \n\n#Pak Amir sampai di C\njam_BkeC = jam_diB + jam_BC\nmenit_BkeC = menit_diB + menit_BC\ndetik_BkeC = detik_diB + sisa_detikBC\nif (detik_BkeC >= 60):\n menit_BkeC = menit_diB + menit_BC + 1\n detik_BkeC = (detik_diB + sisa_detikBC) % 60\nif (menit_BkeC >= 60):\n jam_BkeC = jam_diB + jam_BC + 1\n menit_BkeC = (menit_diB + menit_BC) % 60\nprint(\"Pak Amir sampai di kota C pada jam\",int(jam_BkeC),\"lebih\",int(menit_BkeC),\"menit\",int(detik_BkeC),\"detik \\n\")\n\nprint(\"Jadi, Pak Amir sampai di kota C pada jam\",int(jam_BkeC),\"lebih\",int(menit_BkeC),\"menit\",int(detik_BkeC),\"detik \\n\")\n\n","sub_path":"Praktikum 04/latihan_4.py","file_name":"latihan_4.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"414111837","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 27 05:46:01 2019\n\n@author: David\n\"\"\"\n\nimport pytest\nimport tasks\nfrom tasks import Task\n\n@pytest.mark.skipif(tasks.__version__ < '0.2.0', reason='not supported until version 0.2.0')\ndef test_unique_id_1():\n \"\"\"Calling unique_id() twice should return different numbers.\"\"\"\n id_1 = tasks.unique_id()\n id_2 = tasks.unique_id()\n assert id_1 != id_2\n \ndef test_unique_id_2():\n \"\"\"unique_id() should return an unused id.\"\"\"\n ids = []\n ids.append(tasks.add(Task('one')))\n ids.append(tasks.add(Task('two')))\n ids.append(tasks.add(Task('three')))\n # grab a unique ID\n uid = tasks.unique_id()\n # make sure it isn't in the list of existing ids\n assert uid not in ids\n \n@pytest.mark.xfail()\ndef test_unique_id_is_a_duck():\n \"\"\"Demonstrate xfail.\"\"\"\n uid = tasks.unique_id()\n assert uid == 'a duck'\n \n@pytest.mark.xfail()\ndef test_unique_id_is_not_a_duck():\n \"\"\"Demonstrate xpass.\"\"\"\n uid = tasks.unique_id()\n assert uid != ' a duck'\n \n \n@pytest.fixture(autouse=True)\ndef initialized_tasks_db(tmpdir):\n \"\"\"Connect to db before testing, disconnect after.\"\"\"\n # setup: start db\n tasks.start_tasks_db(str(tmpdir), 'tiny')\n \n yield # this is where the testing happens\n \n # teardown: stop db\n tasks.stop_tasks_db() \n ","sub_path":"ch2/tasks_proj/tests/func/test_unique_id.py","file_name":"test_unique_id.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"445327924","text":"'''\nGiven a list of intervals, each with a pre-defined weights.\nThe intervals overlap, and of the overlapping intervals, only one can be used.\nFind the maximum possible total sum of the weights.\n\nHere is an example.\nIntervals - Weight\n 0- 5 - 15\n 4- 9 - 18\n 10-15 - 12\n 8-21 - 19\n 25-30 - 25\nHere, the intervals 0-5, 4-9 and 8-21 overlap.\nThe intervals 10-15 and 8-21 also overlap.\nThe maximum sum would be 55 (18+12+25).\n'''\n# time: O(nlogn)\nimport unittest\nimport bisect\ndef find_maximum(intervals, weights):\n def find_last_no_overlapping(x, hi):\n return bisect.bisect(ends, x, 0, hi) - 1\n\n weighted_intervals = [[start, end, weight] for [start, end], weight in zip(intervals, weights)]\n weighted_intervals.sort(key=lambda i: i[1])\n ends = [i[1] for i in weighted_intervals]\n dp = [0] * len(weighted_intervals)\n for i, (start, end, weight) in enumerate(weighted_intervals):\n if i == 0:\n dp[i] = weight\n else:\n last_index = find_last_no_overlapping(start, i)\n prev_weight = dp[last_index] if last_index >= 0 else 0\n dp[i] = max(dp[i-1], prev_weight + weight)\n return dp[len(weighted_intervals)-1]\n\nclass Test(unittest.TestCase):\n data = [\n ([[0,5], [4, 9], [10,15], [25, 30],[8, 21]], [15, 18, 12, 19, 25], 59) ]\n\n def test_method(self):\n for intervals, weights, expected in self.data:\n actual = find_maximum(intervals, weights)\n self.assertEqual(actual, expected)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"google/maximumSumInASequenceOfOverlappingIntervals.py","file_name":"maximumSumInASequenceOfOverlappingIntervals.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"135550720","text":"#!/usr/bin/env python3\n\"\"\"\nAuthors: Christoph Hafemeister, Patrick Roelli\n\"\"\"\nimport sys\nimport gzip\nimport csv\nimport warnings\nfrom collections import defaultdict\nfrom collections import OrderedDict\nfrom itertools import islice\nfrom itertools import combinations\nimport pandas as pd\nimport time\nimport locale\nimport Levenshtein\nimport regex\nimport argparse\nfrom argparse import RawTextHelpFormatter\nimport pkg_resources\nversion = pkg_resources.require(\"cite_seq_count\")[0].version\n\n\ndef get_args():\n \"\"\"\n Get args.\n \"\"\"\n desc = \"This script counts matching antobody tags from two fastq files. Version {}\".format(version)\n parser = argparse.ArgumentParser(prog='CITE Seq Count', description=desc,\n formatter_class=RawTextHelpFormatter)\n\n input_desc = \"Required input files.\"\n inputs = parser.add_argument_group('Inputs', description=input_desc)\n\n inputs.add_argument('-R1', '--read1', dest='read1_path', required=True,\n help=\"The path of read1 in gz format.\")\n inputs.add_argument('-R2', '--read2', dest='read2_path', required=True,\n help=\"The path of read2 in gz format.\")\n inputs.add_argument('-t', '--tags', dest='tags', required=True,\n help=(\"The path to the csv file containing the \"\n \"antibody\\nbarcodes as well as their respective \"\n \"names.\\n\\nExample of an antibody barcode file \"\n \"structure:\\n\\n\"\n \"ATGCGA,First_tag_name\\n\"\n \"GTCATG,Second_tag_name\"))\n\n bc_desc = (\"Positions of the cellular barcodes and UMI. 
If your cellular \"\n \"barcodes and UMI are positioned as follows:\\n\"\n \"\\tBarcodes from 1 to 16 and UMI from 17 to 26\\n\"\n \"then this is the input you need:\\n\"\n \"\\t-cbf 1 -cbl 16 -umif 17 -umil 26\")\n barcodes = parser.add_argument_group('Barcodes', description=bc_desc)\n\n barcodes.add_argument('-cbf', '--cell_barcode_first_base', dest='cb_first',\n required=True, type=int,\n help=(\"Postion of the first base of your cell \"\n \"barcodes.\"))\n barcodes.add_argument('-cbl', '--cell_barcode_last_base', dest='cb_last',\n required=True, type=int,\n help=(\"Postion of the last base of your cell \"\n \"barcodes.\"))\n barcodes.add_argument('-umif', '--umi_first_base', dest='umi_first',\n required=True, type=int,\n help=\"Postion of the first base of your UMI.\")\n barcodes.add_argument('-umil', '--umi_last_base', dest='umi_last',\n required=True, type=int,\n help=\"Postion of the last base of your UMI.\")\n\n barcodes_filtering = parser.add_mutually_exclusive_group(required=True)\n barcodes_filtering.add_argument('-cells', '--expected_cells', dest='cells',\n required=False, type=int,\n help=(\"Number of expected cells from your \"\n \"run.\"))\n whitelist_help = (\"A csv file containning a whitelist of barcodes produced\"\n \" by the mRNA data.\\n\\n\\tExample:\\n\\tATGCTAGTGCTA\\n\"\n \"\\tGCTAGTCAGGAT\\n\\tCGACTGCTAACG\\n\")\n barcodes_filtering.add_argument('-wl', '--whitelist', dest='whitelist',\n required=False, type=str,\n help=whitelist_help)\n\n filters_desc = (\"Filtering for structure of antibody barcodes as well as \"\n \"maximum hamming distance.\")\n filters = parser.add_argument_group('filters', description=filters_desc)\n filters.add_argument('-hd', '--hamming-distance', dest='hamming_thresh',\n required=False, type=int, default=2,\n help=(\"Maximum hamming distance allowed for antibody \"\n \"barcode.\"))\n parser.add_argument('-n', '--first_n', required=False, type=int,\n dest='first_n', default=None,\n help=\"Select n reads to run on instead of all.\")\n parser.add_argument('-o', '--output', required=True, type=str,\n dest='outfile', help=\"Write result to file.\")\n parser.add_argument('-u', '--unknown-tags', required=False, type=str,\n dest='unknowns_file', help=\"Write table of unknown tags to file.\")\n parser.add_argument('--debug', action='store_true',\n help=\"Print extra information for debugging.\")\n regex_pattern = parser.add_mutually_exclusive_group(required=False)\n regex_pattern.add_argument('-tr', '--TAG_regex',\n help=\"Only use if you know what you are doing.\"\n \"The regex that will be used to validate\\n\"\n \"an antibody barcode structure. 
Must be given in regex syntax.\"\n \"example:\"\n \"\\\"^[ATGC]{6}[TGC][A]{6,}\\\"\",\n dest='tag_regex',\n required=False,\n type=str)\n regex_pattern.add_argument('-l', '--legacy', required=False,\n dest='legacy', default=False, action='store_true',\n help=\"Use this option if you used an earlier versions\"\n \" of the kit that adds a T C or G at the end and you\"\n \" expect polyA tails in the data.\")\n return parser\n\n\ndef parse_whitelist_csv(args):\n file = open(args.whitelist, mode='r')\n csvReader = csv.reader(file)\n length_barcodes = args.cb_last - args.cb_first + 1\n whitelist = [row[0].strip() for row in csvReader\n if (len(row[0].strip()) == length_barcodes)]\n return set(whitelist)\n\n\ndef parse_tags_csv(filename):\n file = open(filename, mode='r')\n csvReader = csv.reader(file)\n odict = OrderedDict()\n for row in csvReader:\n odict[row[0].strip()] = row[1].strip()\n return odict\n\ndef check_tags(ab_map, maximum_dist):\n # Adding the barcode to the name of the TAG\n # This means we don't need to share the mapping of the antibody and the barcode.\n new_ab_map = {}\n for TAG in ab_map:\n new_ab_map[TAG] = ab_map[TAG] + '-' + TAG\n if(len(ab_map) == 1):\n return(new_ab_map)\n for a,b in combinations(new_ab_map.keys(),2):\n if(Levenshtein.distance(a,b)<= maximum_dist):\n sys.exit('Minimum hamming distance of TAGS barcode is less than given threshold\\nPlease use a smaller distance; exiting')\n return(new_ab_map)\n\ndef generate_regex(ab_map, args, R2_length, max_polyA):\n \"\"\"Generate regex based ont he provided TAGS\"\"\"\n lengths = OrderedDict()\n for TAG in ab_map:\n if (len(TAG) in lengths.keys()):\n lengths[len(TAG)]['mapping'][TAG]=ab_map[TAG]\n else:\n lengths[len(TAG)]=OrderedDict()\n lengths[len(TAG)]['mapping'] = OrderedDict()\n lengths[len(TAG)]['mapping'][TAG] = ab_map[TAG]\n #If there is only one length and the user provides a regex, us the users regex\n if ((len(lengths)==1) & (args.tag_regex is not None)):\n for length in lengths.keys():\n lengths[length]['regex'] = args.tag_regex\n return(lengths)\n if((len(lengths) != 1) & (args.tag_regex is not None)):\n exit('You cannot use your own regex with tag barcodes of different lengths')\n for length in lengths.keys():\n pattern = [''] * length\n for TAG in lengths[length]['mapping'].keys():\n for position in range(0,length):\n if (TAG[position] in pattern[position]):\n continue\n else:\n pattern[position] += TAG[position]\n if(args.legacy):\n lengths[length]['regex'] = '^([{}])[TGC][A]{{{},}}'.format(']['.join(pattern), min(max_polyA,(R2_length-length-1)))\n else:\n lengths[length]['regex'] = '^([{}])'.format(']['.join(pattern))\n return(lengths)\n\ndef get_read_length(file_path):\n with gzip.open(file_path, 'r') as fastq_file:\n secondlines = islice(fastq_file, 1, 1000, 4)\n temp_length = len(next(secondlines).rstrip())\n for sequence in secondlines:\n read_length = len(sequence.rstrip())\n if(temp_length != read_length):\n sys.exit('Reads length is not consistent in {}, please trim all reads at the same length before rerunning'.format(file_path))\n temp_length = read_length\n return(read_length)\n\n\ndef check_read_lengths(R1_length, R2_length, args):\n barcode_length = args.cb_last - args.cb_first + 1\n umi_length = args.umi_last - args.umi_first + 1\n barcode_umi_length = barcode_length + umi_length\n barcode_slice = slice(args.cb_first - 1, args.cb_last)\n umi_slice = slice(args.umi_first - 1, args.umi_last)\n\n if(barcode_umi_length) > R1_length:\n sys.exit('Read 1 length is shorter than the option you 
are using for cell and UMI barcodes length. Please check your options and rerun.')\n elif(barcode_umi_length) < R1_length:\n print(\"**WARNING**\\nRead 1 length is {}bp but you are using {}bp for cell and UMI barcodes combined.\\nPlease be sure you are using the correct positions for the cell barcode and UMI\\n\".format(R1_length, barcode_umi_length))\n return(barcode_slice, umi_slice, barcode_umi_length)\n\ndef main():\n parser = get_args()\n if not sys.argv[1:]:\n parser.print_help(file=sys.stderr)\n sys.exit(2)\n\n # Load args\n args = parser.parse_args()\n if args.whitelist:\n whitelist = parse_whitelist_csv(args)\n\n # Load TAGS barcodes\n ab_map = parse_tags_csv(args.tags)\n ab_map = check_tags(ab_map, args.hamming_thresh)\n \n #Get read lengths\n R1_length = get_read_length(args.read1_path)\n R2_length = get_read_length(args.read2_path)\n\n #Generate regex patterns automatically\n regex_patterns = generate_regex(ab_map=ab_map, args=args, R2_length=R2_length, max_polyA=6)\n if(args.debug):\n print(regex_patterns)\n \n # Create a set for UMI reduction. Fast way to check if it already exists\n UMI_reduce = set()\n # Create result table\n res_table = defaultdict(lambda: defaultdict(int))\n no_match_table = defaultdict(int)\n\n # Set counter\n n = 0\n \n # Check that read 1 and options match and define slices\n (barcode_slice, umi_slice, barcode_umi_length) = check_read_lengths(R1_length, R2_length, args)\n \n unique_lines = set()\n with gzip.open(args.read1_path, 'rt') as textfile1, \\\n gzip.open(args.read2_path, 'rt') as textfile2:\n # Read all 2nd lines from 4 line chunks. If first_n not None read only 4 times the given amount.\n secondlines = islice(zip(textfile1, textfile2), 1, (args.first_n * 4 if args.first_n is not None else args.first_n), 4)\n print('loading')\n\n t = time.time()\n for x, y in secondlines:\n x = x.strip()\n y = y.strip()\n line = x[barcode_slice] + x[umi_slice] + y\n unique_lines.add(line)\n\n n += 1\n if n % 1000000 == 0:\n print(\"Loaded last 1,000,000 lines in {:.3} seconds. Total \"\n \"lines loaded {:,} \".format(time.time()-t, n))\n t = time.time()\n\n print('{:,} reads loaded'.format(n))\n print('{:,} uniques reads loaded'.format(len(unique_lines)))\n\n n = 1\n for line in unique_lines:\n if n % 1000000 == 0:\n print(\"Processed 1,000,000 lines in {:.4} secondes. 
Total \"\n \"lines processed: {:,}\".format(time.time()-t, n))\n t = time.time()\n\n cell_barcode = line[barcode_slice]\n if args.whitelist:\n if cell_barcode not in whitelist:\n n += 1\n continue\n\n\n UMI = line[umi_slice]\n TAG_seq = line[barcode_umi_length:]\n BC_UMI_TAG = cell_barcode + UMI + TAG_seq\n if args.debug:\n print(\"\\nline:{0}\\ncell_barcode:{1}\\tUMI:{2}\\tTAG_seq:{3}\\nline length:{4}\\tcell barcode length:{5}\\tUMI length:{6}\\tTAG sequence length:{7}\".format(line, cell_barcode,\n UMI, TAG_seq, len(line), len(cell_barcode), len(UMI), len(TAG_seq)))\n\n # Check if UMI + TAG already in the set\n if BC_UMI_TAG not in UMI_reduce:\n # Check structure of the TAG\n no_structure_match=True\n for length in regex_patterns.keys():\n match = regex.search(r'(?:({})){{i<={}}}'.format(regex_patterns[length]['regex'],args.hamming_thresh), TAG_seq)\n if args.debug:\n print(\"{0}\\t{1}\".format(regex_patterns[length]['regex'], TAG_seq))\n print(match)\n\n if match:\n no_structure_match=False\n TAG_seq = match.group(0)[0:length]\n\n # Increment read count\n res_table[cell_barcode]['total_reads'] += 1\n\n # Get distance from all barcodes\n temp_res = defaultdict()\n for key, value in regex_patterns[length]['mapping'].items():\n temp_res[value] = Levenshtein.hamming(TAG_seq, key)\n # Get smallest value and get respective tag_name\n min_value = min(temp_res.values())\n min_index = list(temp_res.values()).index(min_value)\n best = list(temp_res.keys())[min_index]\n\n # ambiguous\n if not isinstance(min_value, int):\n if args.debug:\n print(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\".format(\n cell_barcode, UMI, x, y, TAG_seq))\n\n res_table[cell_barcode]['ambiguous'] += 1\n continue\n\n # If over threshold\n if min_value >= args.hamming_thresh:\n res_table[cell_barcode]['no_match'] += 1\n no_match_table[TAG_seq] += 1\n continue\n\n res_table[cell_barcode][best] += 1\n\n # Increment bad structure\n if(no_structure_match):\n res_table[cell_barcode]['bad_struct'] += 1\n\n # Add BC_UMI_TAG to set\n UMI_reduce.add(BC_UMI_TAG)\n\n n += 1\n \n print(\"Done counting\")\n res_matrix = pd.DataFrame(res_table)\n if ('total_reads' not in res_matrix.index):\n exit('No match found. 
Please check your regex or tags file')\n #Add potential missing cells if whitelist is used\n if args.whitelist:\n res_matrix = res_matrix.reindex(whitelist, axis=1,fill_value=0)\n res_matrix.fillna(0, inplace=True)\n if args.cells:\n most_reads_ordered = res_matrix.sort_values(by='total_reads',\n ascending=False,\n axis=1).columns\n n_top_cells = int(args.cells + args.cells/100 * 30)\n top_Cells = most_reads_ordered[0:n_top_cells]\n res_matrix = res_matrix.loc[:, res_matrix.columns.isin(top_Cells)]\n\n res_matrix.to_csv(args.outfile, float_format='%.f')\n \n if args.unknowns_file:\n keys = list(no_match_table.keys())\n vals = list(no_match_table.values())\n no_match_matrix = pd.DataFrame({\"tag\": keys, \"total\": vals})\n no_match_matrix = no_match_matrix.sort_values(by='total', ascending=False) \n no_match_matrix.to_csv(args.unknowns_file, float_format='%.f', index=False)\n \n\nif __name__ == '__main__':\n main()\n","sub_path":"cite_seq_count/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":16164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"410628999","text":"import os\nimport sys\nimport cv2\nimport pickle\nimport face_recognition\nfrom time import sleep\n\nvsrc = 'rtsp://admin:spspsp01!!:@192.168.1.108:554/cam/realmonitor?channel=1&subtype=0'\nvcap = cv2.VideoCapture(vsrc)\n\ndownsample_scale = 0.5\nassert 0. < downsample_scale < 1.\n\nsampler = 0\nsampling_cycle = 75\nlocs = []\n\nret, frame = vcap.read()\nprint (frame.shape)\n\nperson_name = input('What is your name? ')\nimg_dir = str(os.path.join(os.getcwd(), 'img'))\nimg_dir = str(os.path.join(img_dir, person_name))\npkl_dir = str(os.path.join(os.getcwd(), 'encodings'))\n\nif not os.path.exists(img_dir):\n os.mkdir(img_dir)\nif not os.path.exists(pkl_dir):\n os.mkdir(pkl_dir)\n\nprint ('Saving photos at {}'.format(img_dir))\n\nframe_num = 0\nmax_frame_num = 20\n\nsmall_frames = []\nface_encodings_tosave = []\nwhile frame_num < max_frame_num:\n \n ret, frame = vcap.read()\n if frame is None:\n print('Error: Reading a frame from camera failed.')\n sys.exit()\n \n sampler = (sampler + 1) % sampling_cycle\n if sampler != 1:\n continue\n\n frame = cv2.flip(frame, 0)\n frame_small = cv2.resize(frame, None, fx=downsample_scale, fy=downsample_scale)\n # Run detection model\n locs = face_recognition.face_locations(frame_small)\n if len(locs) != 1:\n print ('Failed to detect a face in the photo; trying again...')\n continue\n small_frames.append(frame_small)\n \n frame_num += 1\n print('Took photo #{}'.format(frame_num))\n\nfor sf in small_frames:\n new_face_encodings = face_recognition.face_encodings(sf)\n if len(new_face_encodings) != 1:\n print('photo #{} has bad encodings, discarded.')\n face_encodings_tosave.append(new_face_encodings[0])\n\nencodings_path = os.path.join(pkl_dir, person_name+'.encodings')\nprint ('Writing pickled encodings at {}'.format(encodings_path))\nwith open(encodings_path, 'wb') as f:\n pickle.dump(face_encodings_tosave, f)\n\n","sub_path":"take_pics.py","file_name":"take_pics.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"652836776","text":"from collections.abc import MutableSequence\nfrom typing import Any\n\nimport proto\n\nclass TopicConstant(proto.Message):\n resource_name: str\n id: int\n topic_constant_parent: str\n path: MutableSequence[str]\n def __init__(\n self,\n mapping: Any | None = ...,\n *,\n 
ignore_unknown_fields: bool = ...,\n resource_name: str = ...,\n id: int = ...,\n topic_constant_parent: str = ...,\n path: MutableSequence[str] = ...\n ) -> None: ...\n","sub_path":"google-stubs/ads/googleads/v14/resources/types/topic_constant.pyi","file_name":"topic_constant.pyi","file_ext":"pyi","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"416624236","text":"'''\nCore Function \n'''\ndef searchCore(text):\n vocaDic = ['','','','','','','','','','','','','','','','','','','','','','','','','','']\n textString = 'abcdefghijklmnopqrstuvwxyz'\n result = ''\n for character in text:\n for index in range(0,26):\n if (character == textString[index]):\n vocaDic[index] += textString[index]\n for elem in vocaDic:\n result += elem\n result = result.strip()\n return result\n\n'''\nInterface \n'''\ndef handle(text):\n #Part one text handle \n text = text.lower()\n searchText = searchCore(text)\n #Part two text search\n fin = open(\"words.txt\")\n for line in fin:\n line = line.strip()\n if(searchText == searchCore(line)):\n print(line,end=\" \")\n\n\ntext = input(\"Enter a jumbled word of any length: \")\nhandle(text)\n\n'''\nExample Output\n\nEnter a jumbled word of any length: aa\naa \n\nEnter a jumbled word of any length: UPsYr\npursy syrup \n\n'''\n\n'''\nWrite a program to help one cheat at the online and/or hardcopy Jumble word game. Its input should be a single user-entered word (uppercase, lowercase, or a mixture of both). Its output should be every word in words.txt that can be derived by reordering the letters in the input word. The input word and the output word(s) thus have to be the exact same length with the exact same set of letters, other than your program should ignore case when determining matches. Sample input and output is given below. Note that in most cases, your program will only find one output word. 
And of course, if the input word cannot be unjumbled to match a word in the words.txt file, your output line will be empty.\n\nSubmit your program via uploading a file named jumble.py, which I will execute myself.\n\nAs usual, do not use any features we have not studied (recursion + everything after Chapter 10 in the book).\n'''","sub_path":"CIS 40/Lab6.py","file_name":"Lab6.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"615002752","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport argparse\n\ntry:\n import argparse_parents\nexcept ImportError:\n sys.path.append(os.path.dirname(__file__))\n try:\n import argparse_parents\n finally:\n sys.path.remove(os.path.dirname(__file__))\n\n\ndef load_unique_common(unique_common_file_path):\n genome_to_unique_common = {}\n with open(unique_common_file_path) as read_handler:\n for line in read_handler:\n genome_to_unique_common[line.split(\"\\t\")[0]] = line.split(\"\\t\")[1].strip('\\n')\n return genome_to_unique_common\n\n\ndef print_filtered_data(stream, unique_common_file_path, keyword):\n genome_to_unique_common = load_unique_common(unique_common_file_path)\n for line in stream:\n line = line.strip()\n if len(line) == 0 or line.startswith(\"@\"):\n print(line)\n continue\n bin = line.split('\\t')[0]\n if bin in genome_to_unique_common and (keyword is None or genome_to_unique_common[bin] == keyword):\n continue\n print(line)\n\n\ndef filter_data(bin_metrics, unique_common_file_path, keyword):\n genome_to_unique_common = load_unique_common(unique_common_file_path)\n filtered_bin_metrics = []\n for bin in bin_metrics:\n bin_id = bin['mapped_genome']\n if bin_id not in genome_to_unique_common or genome_to_unique_common[bin_id] != keyword:\n filtered_bin_metrics.append(bin)\n return filtered_bin_metrics\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Exclude genome bins from table of precision and recall per genome. 
The table can be provided as file or via the standard input\")\n parser.add_argument('file', nargs='?', type=argparse.FileType('r'), help=argparse_parents.HELP_FILE)\n parser.add_argument('-r', '--genomes_file', help=argparse_parents.HELP_GENOMES_FILE, required=True)\n parser.add_argument('-k', '--keyword', help=argparse_parents.HELP_KEYWORD, required=False)\n args = parser.parse_args()\n if not args.file and sys.stdin.isatty():\n parser.print_help()\n parser.exit(1)\n print_filtered_data(sys.stdin if not sys.stdin.isatty() else args.file,\n args.genomes_file,\n args.keyword)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"utils/exclude_genomes.py","file_name":"exclude_genomes.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"204358956","text":"\r\ndef avtor(tvit):\r\n t2 = []\r\n for črka in tvit:\r\n if črka != \":\":\r\n t2.append(črka)\r\n\r\n else:\r\n return \"\".join (t2)\r\n\r\n return t2\r\n\r\ndef unikati (s):\r\n se= []\r\n for stvar in s:\r\n if stvar not in se:\r\n se.append (stvar)\r\n return se\r\n\r\ndef se_zacne_z(tvit, c):\r\n s=[]\r\n tviti=tvit.split()\r\n\r\n for beseda in tviti:\r\n if beseda.startswith(c):\r\n s.append( izloci_besedo(beseda))\r\n return s\r\n\r\ndef izloci_besedo(beseda):\r\n s=[]\r\n\r\n for letter in beseda:\r\n if letter.isalnum () or letter== \"-\":\r\n s.append(letter)\r\n\r\n def custva(tviti, hashtagi):\r\n s = []\r\n for tvit in tviti:\r\n avtor1 = avtor(tvit)\r\n hash = se_zacne_z(tvit, \"#\")\r\n for tag in hashtagi:\r\n if (tag in hash):\r\n s.append(avtor1)\r\n\r\n s.sort()\r\n\r\n return unikati(s)\r\n\r\n return \"\".join(s)\r\n\r\n\r\ndef besedilo(tvit):\r\n this= []\r\n notime = False\r\n for črka in tvit:\r\n if notime == True :\r\n this.append(črka)\r\n if črka== \":\":\r\n notime = True\r\n for all in this:\r\n if all == \" \":\r\n this.remove(all)\r\n break\r\n\r\n return \"\".join(this)\r\n\r\ndef zadnji_tvit(tviti):\r\n tweets= {}\r\n for tvit in tviti:\r\n tweets[avtor(tvit)] = besedilo(tvit)\r\n\r\n return tweets\r\n\r\ndef prvi_tvit(tviti):\r\n tweets= {}\r\n for tvit in tviti:\r\n if avtor(tvit) not in tweets:\r\n tweets[avtor(tvit)] = besedilo(tvit)\r\n\r\n return tweets\r\n\r\ndef prestej_tvite(tviti):\r\n counter= {}\r\n for tvit in tviti:\r\n if avtor(tvit) not in counter:\r\n i= 0\r\n counter[avtor(tvit)]= i+1\r\n else:\r\n counter[avtor(tvit)]= int(counter[avtor(tvit)])+ 1\r\n\r\n return counter\r\n\r\ndef omembe(tviti):\r\n mention= {}\r\n for tvit in tviti:\r\n if avtor(tvit) in mention.keys () :\r\n mention[avtor(tvit)] += (se_zacne_z(tvit, \"@\" ))\r\n else:\r\n mention[avtor(tvit)]=(se_zacne_z(tvit , \"@\" ))\r\n\r\n return mention\r\n\r\n\r\ndef neomembe(ime, omembe):\r\n\r\n s=[]\r\n z=[]\r\n\r\n for key , imena in omembe.items():\r\n if key != ime and key not in s:\r\n s.append(key)\r\n\r\n else:\r\n z+=(imena)\r\n for all in z:\r\n for to in s:\r\n if to in z:\r\n s.remove(to)\r\n\r\n\r\n\r\n\r\n\r\n\r\n return s\r\n\r\n\r\ndef se_poznata(ime1, ime2, omembe):\r\n\r\n for key, drugo in omembe.items():\r\n if key == ime1 and ime2 in omembe[key]:\r\n return True\r\n break\r\n for key, drugo in omembe.items():\r\n if key == ime2 and ime1 in omembe[key]:\r\n return True\r\n break\r\n\r\n return False\r\n\r\n\r\ndef hashtagi(tviti):\r\n s={}\r\n h=[]\r\n for tvit in tviti:\r\n tole = se_zacne_z(tvit, \"#\")\r\n h.extend(tole)\r\n h= unikati(h)\r\n\r\n for tag in h:\r\n for tvit in tviti:\r\n if tag in 
tvit:\r\n if tag in s.keys():\r\n s[tag].append(avtor(tvit))\r\n else:\r\n s.update({tag:[avtor(tvit)]})\r\n for tag in h:\r\n s[tag].sort()\r\n\r\n\r\n\r\n return s\r\n\r\n\r\n\r\n\r\n","sub_path":"code/batch-2/vse-naloge-brez-testov/DN6-M-038.py","file_name":"DN6-M-038.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"439809514","text":"from django.shortcuts import render\nfrom teachers.models import Teacher\n\n\ndef index(request):\n teachers = Teacher.objects.order_by('techer_id') # '-techer_id' for ascending order\n context = {\n 'teachers': teachers,\n }\n return render(request, 'teachers/teacher_index.html', context)\n\n\ndef teacher_detail(request, teacher_id):\n instructor = Teacher.objects.get(pk=teacher_id)\n context = {\n 'instructor': instructor,\n }\n return render(request, 'teachers/detail.html', context)","sub_path":"teachers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"489519554","text":"import sys\nimport os\nimport logging.handlers\n\nsys.path.insert(0, os.path.join(sys.path[0], '..', '..', '..', 'Python Projects'))\n\nfrom core.python_projects_logging_setup import set_up_python_projects_logging\n\nfrom algorithm_run.algorithm_execution.algorithm_execution_controller import AlgorithmExecutionController\n\n# read in all of the expected communication values\npipe_name = str(sys.argv[1])\npipe_code = str(sys.argv[2])\nvalidation_string = str(sys.argv[3])\nbegin_comm_string = str(sys.argv[4])\nreceived_token = str(sys.argv[5])\nend_comm_string = str(sys.argv[6])\n\nlog_path = str(sys.argv[7]) if len(sys.argv) == 8 else 'execution_log.log'\n\n# SETTING UP LOGGER\nlog_handler = logging.handlers.TimedRotatingFileHandler(log_path, 'H', interval=24)\nlogger = set_up_python_projects_logging('Algorithm Execution Script', logging_handler=log_handler)\n\ntry:\n logger.info('Beginning Execution Controller')\n logger.info('Path: ' + str(os.path.abspath(sys.path[0])))\n # create the execution controller\n my_executor = AlgorithmExecutionController(pipe_name, pipe_code, validation_string, begin_comm_string, received_token, end_comm_string)\n\n my_executor.send_mics() # call the MICS\n my_executor.finish_executing() # close the connection to C# once execution ends\nexcept SystemExit as ex:\n logger.error('Exit with code: ' + str(ex.code))\n sys.exit(ex.code)\nexcept Exception as e:\n logger.exception('Ran into some other unhandled exception during execution!')\n sys.exit(55)\n","sub_path":"Algorithm Core/Algorithm Execution/Execution Scripts/algorithm_execution_script.py","file_name":"algorithm_execution_script.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"584867235","text":"__author__ = 'Mackenzie Larson'\n\"\"\"\n\nProgram: CS 115 Lab 08 (lab08a_1.py)\nAuthor: Mackenzie Larson\nDescription: This program uses the graphics package to create a number of circles.\n\n\"\"\"\nfrom graphics import *\n\nnum_circles = 5\nwin = GraphWin(\"Circles\", 400, 600)\nradius = 30\ncolors = ['red', 'blue', 'green', 'yellow', 'orange']\n\nfor i in range(num_circles):\n click_point = win.getMouse() # Get the point on the window where the mouse was clicked\n circle = Circle(click_point, radius) # Create a circle centered at the point of the click\n circle.setFill(colors[i]) # 
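algorithm_execution_script.py in the record above rotates its log every 24 hours via logging.handlers.TimedRotatingFileHandler(log_path, 'H', interval=24). A self-contained sketch of that stdlib handler (file name, logger name, and backupCount are my illustrative choices):

import logging
import logging.handlers

handler = logging.handlers.TimedRotatingFileHandler(
    "execution_log.log",  # rotated copies get a timestamp suffix
    when="H",             # rotation unit: hours
    interval=24,          # rotate once per 24 hours
    backupCount=7,        # keep at most 7 rotated files
)
handler.setFormatter(logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s"))

logger = logging.getLogger("demo")
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info("handler configured")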
Each circle gets a different color from list, colors\n circle.draw(win)\n\n# wait for the user to click the mouse one more time before we close the window.\nwin.getMouse()\nwin.close()","sub_path":"CS115/Lab08/lab08a_1.py","file_name":"lab08a_1.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"301985134","text":"\"\"\"\nWSGI config for ctdatacms project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/\n\"\"\"\n\nimport os,site\nconfig = os.environ.get(\"DJANGO_CONFIGURATION\",\"\")\nif (config==\"Dev\"):\n site.addsitedir('/var/www/cms/lib/python2.7/site-packages')\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"openatbocms.settings\")\nos.environ.setdefault(\"DJANGO_CONFIGURATION\", config)\n\nfrom configurations.wsgi import get_wsgi_application\n_application = get_wsgi_application()\n\n\ndef application(environ, start_response):\n os.environ['DJANGO_DATABASE_URL'] = environ.get('DJANGO_DATABASE_URL','')\n os.environ['DJANGO_CONFIGURATION'] = environ.get('DJANGO_CONFIGURATION','')\n os.environ.setdefault('DJANGO_SECRET_KEY', environ.get('DJANGO_SECRET_KEY',''))\n return _application(environ, start_response)\n","sub_path":"openatbocms/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"327248164","text":"# root file in the module/file hierarchy. Calls the load homepage function.\n# created to enable 'back' button after selecting mcq vs true or false in the homepage\n\nfrom tkinter import *\nfrom homepage import open_homepage\n\nroot= Tk()\nroot.title(\"Quizardry\")\nroot.geometry(\"700x600\")\nroot.config(background=\"#ffffff\")\nroot.resizable(0, 0)\n\nopen_homepage(root)\n\n\nroot.mainloop()","sub_path":"Quizardry.py","file_name":"Quizardry.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"491759402","text":"\n#4. Median of Two Sorted Arrays\n\ndef findMedian(sort1, sort2):\n count1 = len(sort1)\n count2 = len(sort2)\n max1 = sort1[count1-1]\n max2 = sort2[count2-1]\n max = max1 if (max1 > max2) else max2\n sort1.append(max+1)\n sort2.append(max+1)\n \n i = j = 0\n m1 = (count1+count2)//2\n m0 = m1-1 if (((count1+count2) & 1) == 0) else m1\n #print((m0,m1))\n for k in range(m1+1):\n left = sort1[i]\n right = sort2[j]\n if (left < right):\n i += 1\n v = left\n else:\n j += 1\n v = right\n \n if (k == m0): \n v0 = v \n \n if (k == m1): \n v1 = v\n break\n \n if (m1 == m0): return v1 \n else: return (v0, v1)\n \n \ndata1 = [2,4,6]\ndata2 = [1,2,3,5,7] \nprint(findMedian(data1, data2)) \n \n ","sub_path":"Leetcode/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"552089183","text":"# !/usr/bin/python3\n# coding: utf-8\n\nimport os\n\nfrom core.cmd import detex\n\n\ndef find_files(root_folder, extension=None):\n \"\"\"\n :param root_folder: str\n Folder to scan\n :param extension: str or None\n Finds files ending with just this extension. 
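The findMedian record above walks two sorted lists up to the middle index. Assuming sorted inputs, a quick cross-check is the median of the lazily merged sequence via heapq.merge (this check is my addition, not part of the record):

import heapq
import statistics

def median_by_merge(a, b):
    return statistics.median(heapq.merge(a, b))

# Same inputs as the record's demo; the merged list is [1,2,2,3,4,5,6,7]
print(median_by_merge([2, 4, 6], [1, 2, 3, 5, 7]))  # -> 3.5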
None means any extension\n :return: [] of str\n List of files found in folder\n \"\"\"\n\n lst = []\n for fil in os.listdir(root_folder):\n if os.path.isdir(os.path.join(root_folder, fil)):\n lst += find_files(\n os.path.join(root_folder, fil), extension\n ) # get list of files in directory\n else: # this is a file\n if extension is not None:\n if fil.endswith(extension):\n lst.append(os.path.join(root_folder, fil))\n return lst\n\n\ndef get_file(path):\n \"\"\"\n :param path: str\n Path of file to read\n :return: str\n Content of file\n \"\"\"\n\n with open(path, \"r\") as reader:\n return reader.read()\n\n\ndef get_lines_of_file(path, encoding=\"utf-8\"):\n \"\"\"\n :param path: str\n Path of file to read\n :param encoding: str\n Which encoding to use when parsing file\n :return: [] of str\n Lines in file\n \"\"\"\n\n raw = get_file(path)\n lines = raw.split(\"\\n\")\n return [\n bytes(line, encoding).decode(encoding, \"ignore\")\n for line in lines\n ]\n\n\ndef get_content_of_file(path, encoding=\"utf-8\"):\n \"\"\"\n :param path: str\n Path of file to read\n :param encoding: str\n Which encoding to use when parsing file\n :return: str\n Content of file\n \"\"\"\n\n lines = get_lines_of_file(path, encoding=encoding)\n return \"\\n\".join(lines)\n\n\ndef get_content_of_text_file(path):\n \"\"\"\n :param path: str\n Path of file to read\n :return: str\n Content of file\n \"\"\"\n\n with open(path, \"r\") as reader:\n return reader.read()\n\n\ndef get_content_of_latex_file(path):\n \"\"\"\n :param path: str\n Path of .tex file to read\n :return: str\n Content of file\n \"\"\"\n\n return detex(path)\n","sub_path":"scripts/py/core/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"649747462","text":"from __future__ import division\nimport numpy\nimport matplotlib.pyplot as plt\n\n\n# Setup\nnx, ny = 240, 200\ntmax, tgap = 100, 10\nc = numpy.ones((nx,ny))*0.25\nf = numpy.zeros_like(c)\ng = numpy.zeros_like(c)\n\n\n# Plot using the matplotlib\nplt.ion()\nimag = plt.imshow(c.T, origin='lower', vmin=-0.2, vmax=0.2)\nplt.colorbar()\n\n\n# Main loop for the time evolution\nfor tn in xrange(1,tmax+1):\n g[nx//3,ny//2] += numpy.sin(0.1*tn) \n\n for i in xrange(1,nx-1):\n for j in xrange(1,ny-1):\n f[i,j] = 0.001*(i + j)\n\n for i in xrange(1,nx-1):\n for j in xrange(1,ny-1):\n g[i,j] = 0\n\n\n if tn%tgap == 0:\n print(\"%d (%d %%)\" % (tn, tn/tmax*100))\n imag.set_array(f.T)\n plt.draw()\n #plt.savefig('./png/%.5d.png' % tn) \n","sub_path":"code_examples.bak/wave2d_numpy_f90_cuda/wave2d_naive_00.py","file_name":"wave2d_naive_00.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"481662596","text":"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\nfrom tests.common.tensorio import compare_tensor\nfrom akg.utils 
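find_files in the files.py record recurses by hand with os.listdir; the stdlib's os.walk yields the same file set in a flat loop. A hedged equivalent (the helper name is mine):

import os

def find_files_walk(root_folder, extension=None):
    """Collect files under root_folder; extension=None matches everything."""
    found = []
    for dirpath, _dirnames, filenames in os.walk(root_folder):
        for name in filenames:
            if extension is None or name.endswith(extension):
                found.append(os.path.join(dirpath, name))
    return found

print(find_files_walk(".", ".py")[:5])  # first few matches; order is OS-dependent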
import kernel_exec as utils\nfrom tests.common.test_op.ascend.hpl_lu import hpl_lu\n\n\ndef gen_data(shape, dtype):\n num = shape[0]\n support_list = {\"float16\": np.float16, \"float32\": np.float32}\n\n one_tensor = np.zeros((num, num))\n for i in range(num):\n for j in range(num):\n one_tensor[i, j] = min(min(i, j), 4) + 1\n upper_matrix = np.triu(one_tensor).astype(support_list[dtype])\n lower_matrix = np.tril(one_tensor).astype(support_list[dtype])\n for i in range(num):\n lower_matrix[i, i] = 1.0\n input1 = np.dot(lower_matrix, upper_matrix)\n expect = upper_matrix + lower_matrix - np.eye(num)\n output = np.full(expect.shape, np.nan, expect.dtype)\n return input1, output, expect\n\n\ndef hpl_lu_run(shape, dtype, poly_sch=True, attrs=None):\n\n attrs = {\n \"enable_double_buffer\": False,\n \"enable_pre_poly_loop_partition\": False,\n \"enable_post_poly_loop_partition\": False,\n }\n\n mod = utils.op_build_test(hpl_lu, [shape, ], [dtype, ], kernel_name=\"hpl_lu\",\n polyhedral=poly_sch, attrs=attrs)\n input1, output, expect = gen_data(shape, dtype)\n output = utils.mod_launch(mod, (input1, output), expect=expect, outputs=(0,))\n rtol = atol = 1e-04\n res = compare_tensor(output, expect, rtol=rtol, atol=atol)\n print(\"Test {}\".format(\"Pass\" if res else \"Failed\"))\n return (input1,), output, expect, res\n","sub_path":"tests/common/test_run/ascend/hpl_lu_run.py","file_name":"hpl_lu_run.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"195366381","text":"\noutFile = open( 'noID.csv', 'w+' )\nfor line in open( 'myanime.csv', 'r' ):\n items = line.split( ',' )\n outFile.write( ','.join(items[ 1: ] ) )\noutFile.close()\n\ninFile = open('noID.csv','r')\n\noutFile = open('nodup_myanime.csv','w+')\n\nlistLines = []\n\nfor line in inFile:\n\n if line in listLines:\n continue\n\n else:\n outFile.write(line)\n listLines.append(line)\n\noutFile.close()\n\ninFile.close()","sub_path":"myanimelist/remove_dups.py","file_name":"remove_dups.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"124100150","text":"\"\"\"\n@author: Mahmoud I.Zidan\n\"\"\"\n'''\nits purpose is to get the ground truth detection positions per frame.\nspecifically for Oxford TownCentre dataset\n(http://www.robots.ox.ac.uk/~lav/Research/Projects/2009bbenfold_headpose/project.html)\n\nData format:\npersonNumber, frameNumber, headValid, bodyValid, headLeft, headTop, headRight, headBottom, bodyLeft, bodyTop, bodyRight, bodyBottom\n\nNote: we ignore using/tracking head detection data\n'''\n\nimport numpy as np\nimport sys\nimport tensorflow as tf\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util\nimport cv2\n\nclass GroundTruthDetections:\n\n def __init__(self, fname= '.\\dataset\\TownCentre-groundtruth.top'):\n self.all_dets = np.loadtxt(fname ,delimiter=',') #load detections\n self._frames = int(self.all_dets[:, 1].max())+1 #0 to 4500 inclusive\n\n '''as in practical realtime MOT, the detector doesn't run on every single frame'''\n def _do_detection(self, detect_prob = .1):\n\n return int(np.random.choice(2, 1, p=[1 - detect_prob, detect_prob]))\n\n '''returns the detected items positions or [] if no detection'''\n def get_detected_items(self,frame, detect_prob=0.4):\n\n if self._do_detection(detect_prob=detect_prob) or frame == 0:\n return 
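gen_data in the hpl_lu record multiplies a unit-lower and an upper triangular factor and expects their sum minus the identity back. A small NumPy round-trip check of that construction (this verification is my sketch, not part of the record's test):

import numpy as np

num = 4
base = np.minimum(np.minimum.outer(np.arange(num), np.arange(num)), 4) + 1.0
upper = np.triu(base)
lower = np.tril(base)
np.fill_diagonal(lower, 1.0)

input1 = lower @ upper
expect = upper + lower - np.eye(num)

# Split `expect` back into its factors and reconstruct the input.
l_re = np.tril(expect, k=-1) + np.eye(num)
u_re = np.triu(expect)
assert np.allclose(l_re @ u_re, input1)
print("LU round-trip OK")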
self.all_dets[self.all_dets[:, 1] == frame, 8:]\n\n else:\n return []\n\n def get_total_frames(self):\n return self._frames\n\n\nclass ObjectDetectionAPI:\n def __init__(self):\n print('Object Detection API initialization!')\n\n sys.path.append(\"..\")\n\n MODEL_NAME = 'D:/tensorflow_models_checkpoints/checkpoints/faster_rcnn_resnet50_lowproposals_coco_2018_01_28'\n\n PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\n\n PATH_TO_LABELS = 'C:/development/tensorflow_models/research/object_detection/data/mscoco_label_map.pbtxt'\n\n NUM_CLASSES = 90\n\n self.detection_graph = tf.Graph()\n\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n self.label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n\n categories = label_map_util.convert_label_map_to_categories(self.label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\n self.category_index = label_map_util.create_category_index(categories)\n\n self.sess = tf.Session(graph=self.detection_graph)\n\n def do_detection(self, frame, frame_no):\n with self.detection_graph.as_default():\n image_np_expanded = np.expand_dims(frame, axis=0)\n image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n\n (boxes, scores, classes, num_detections) = self.sess.run(\n [boxes, scores, classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n vis_util.visualize_boxes_and_labels_on_image_array(\n frame,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n self.category_index,\n use_normalized_coordinates=True,\n line_thickness=8)\n\n boxes = np.squeeze(boxes)\n id = 0\n dets = []\n\n for i in range(int(num_detections)):\n id += 1\n # bbox = (boxes[i,1]*frame.shape[1], boxes[i,0]*frame.shape[0], (boxes[i,3]-boxes[i,1])*frame.shape[1], (boxes[i,2]-boxes[i,0])*frame.shape[0])\n # print(bbox)\n bbox = (boxes[i,0]*frame.shape[0], boxes[i,1]*frame.shape[1], boxes[i,2]*frame.shape[0], boxes[i,3]*frame.shape[1])\n\n det = [id, frame_no, 1, 1, 0, 0, 0, 0, 1, 1, bbox[1], bbox[0], bbox[3], bbox[2]]\n dets.append(det)\n # print(dets)\n\n # cv2.imshow('object detection', cv2.resize(frame, (640, 480)))\n #\n # if (cv2.waitKey(1) & 0xff) == 27:\n # cv2.destroyAllWindows()\n\n return np.asarray(dets)[:, 10:]\n\n","sub_path":"testing/utilities/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":4596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"572529586","text":"import numpy as np \nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport sys\nimport os\n\n\nKLD = np.load('./curve/KLD.npy')\nMSE = np.load('./curve/MSE.npy')\naa = KLD\nplt.figure(figsize=(20,10))\nplt.subplot(1,2,1)\nfor i in range(2,len(KLD)-2):\n aa[i] = (KLD[i-2]+ KLD[i-1] + KLD[i] + KLD[i+1]+ KLD[i+2])/5.0\nplt.title('training KLD with lambda:3e-06')\nplt.xlabel('epoch')\nplt.plot(range(len(KLD)),aa[:,0])\n\nplt.subplot(1,2,2)\nbb = MSE\nfor i in range(2,len(MSE)-2):\n bb[i] = (MSE[i-2]+MSE[i-1] + MSE[i] + 
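GroundTruthDetections above selects per-frame rows with a single boolean-mask expression, all_dets[all_dets[:, 1] == frame, 8:]. The same indexing idiom on tiny made-up data (the column layout here is invented, not the TownCentre format):

import numpy as np

# Columns: person_id, frame, x, y  (toy layout)
dets = np.array([
    [1, 0, 10.0, 20.0],
    [2, 0, 30.0, 40.0],
    [1, 1, 11.0, 21.0],
])

frame = 0
rows = dets[dets[:, 1] == frame, 2:]  # mask picks rows, slice picks columns
print(rows)  # [[10. 20.] [30. 40.]]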
MSE[i+1]+MSE[i+2])/5.0\nplt.title('training MSE')\nplt.xlabel('epoch')\nplt.plot(range(len(MSE)),bb[:,0])\n\nplt.savefig(os.path.join(sys.argv[2],'fig1_2.jpg'))\nplt.clf()\n\nGAN_d = np.load('./curve/gan_d.npy')\nGAN_g = np.load('./curve/gan_g.npy')\n\nplt.figure(figsize=(20,10))\nplt.subplot(1,2,1)\nplt.title('discriminator loss binary_entropy')\nplt.xlabel('step')\nplt.plot(range(len(GAN_d[:20000])),GAN_d[:20000,0])\n\nplt.subplot(1,2,2)\nplt.title('generator loss binary_entropy')\nplt.xlabel('step')\nplt.plot(range(len(GAN_g[:20000])),GAN_g[:20000])\n\nplt.savefig(os.path.join(sys.argv[2],'fig2_2.jpg'))\nplt.clf()\n\nACGAN_d = np.load('./curve/acgan_d.npy')\nACGAN_g = np.load('./curve/acgan_g.npy')\n\n\nplt.figure(figsize=(20,10))\nplt.subplot(1,2,1)\n\nplt.title('discriminator loss binary_entropy')\nplt.xlabel('step')\nplt.plot(range(len(ACGAN_d)),ACGAN_d[:,4])\nplt.subplot(1,2,2)\nplt.title('generator loss binary_entropy')\nplt.xlabel('step')\nplt.plot(range(len(ACGAN_g)),ACGAN_g[:,0])\n\nplt.savefig(os.path.join(sys.argv[2],'fig3_2.jpg'))\n","sub_path":"hw4/fig.py","file_name":"fig.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"243976585","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import UserRegisterForm\n\ndef register(request):\n ''' თუ ჩვენ ვიღებთ პოსტ მოთხოვნას,\n მაშინ ის ინახავს მომხმარებლის შექმნის ფორმას ამ პოსტის მონაცემებით,\n მაგრამ ნებისმიერ სხვა მოთხოვნით, უბრალოდ ცარიელი ფორმა იქნება და პოსტი '''\n\n if request.method == 'POST': # ეს ვერ გავიგე რას აკეთებს\n form = UserRegisterForm(request.POST) # \n if form.is_valid(): # თუ არის True\n form.save() # ბაზაში ამახსოვრებ რეგისტრირებულ მომხმარებელს\n username = form.cleaned_data.get('username') \n messages.success(request, f'Your Account has been created! 
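fig.py above smooths each curve with a hand-rolled five-point average that leaves the first and last two samples untouched. np.convolve expresses the same centered mean in one call; a sketch (note the edge handling differs slightly from the loop version):

import numpy as np

curve = np.random.rand(50)
window = np.ones(5) / 5.0
smoothed = np.convolve(curve, window, mode="same")         # zero-padded edges
smoothed_valid = np.convolve(curve, window, mode="valid")  # no padding, 4 fewer points
print(len(smoothed), len(smoothed_valid))  # 50 46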
you are able to Log In !') # გამოაქვს შეტყობინება რეგისტრაციის შესახებ\n return redirect('login') # აბრუნებს მთავარ გვერდძე\n else:\n form = UserRegisterForm()\n return render(request, 'users/register.html', {'form':form})\n\n@login_required\ndef profile(request):\n return render(request, 'users/profile.html')","sub_path":"blog_django/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"569842451","text":"# Short-term Cepstrum Distance\n# pr6_5_2\n\nfrom Noisy import *\nfrom Universal import *\nfrom VAD import *\n\n\ndef rcceps(x):\n \"\"\"\n 计算实倒谱\n \"\"\"\n y = np.fft.fft(x)\n\n return np.fft.ifft(np.log(np.abs(y))).real\n\n\n\nif __name__ == '__main__':\n\t# Set_I\n\tIS = 0.25 # unvoice segemnt length\n\twlen = 200 # frame length 25ms\n\tinc = 80 # frame shift\n\tfilename = 'bluesky1.wav'\n\tSNR = 10\n\t\n\t# PART_I\n\tspeech = Speech()\n\txx, fs = speech.audioread(filename, 8000)\n\txx = xx - np.mean(xx) # DC\n\tx = xx / np.max(xx) # normalized\n\tN = len(x)\n\ttime = np.arange(N) / fs\n\tnoisy = Noisy()\n\tsignal, _ = noisy.Gnoisegen(x, SNR) # add noise\n\twnd = np.hamming(wlen) # window function\n\toverlap = wlen - inc\n\tNIS = int((IS * fs - wlen) / inc + 1) # unvoice segment frame number\n\ty = speech.enframe(signal, list(wnd), inc).T\n\tfn = y.shape[1] # frame number\n\tframeTime = speech.FrameTime(fn, wlen, inc, fs) # frame to time\n\t\n\tU = np.zeros((wlen, fn))\n\tfor i in range(fn):\n\t\tu = y[:, i] # one frame\n\t\tU[:, i] = rcceps(u) # real cepstrum\n\t\n\tC0 = np.mean(U[:, 0 : 4], axis=1) # first 5th frame cepstrum coefficient average as background noise cepstrum coefficient\n\t\n\tDcep = np.zeros(fn)\n\tfor i in range(5, fn):\n\t\tCn = U[:, i]\n\t\tDst0 = (Cn[0] - C0[0]) ** 2\n\t\tDstm= 0\n\t\tfor k in range(1, 12):\n\t\t\tDstm += (Cn[k] - C0[k]) ** 2\n\t\tDcep[i] = 4.3429 * np.sqrt(Dst0 + Dstm) # cepstrum distance\n\t\t\n\tDcep[0:4] = Dcep[5]\n\t\n\t\n\tVad = VAD()\n\tDstm = Vad.multimidfilter(Dcep, 10) # smoothing\n\tdth = np.max(Dstm[0 : NIS])\n\tT1 = dth\n\tT2 = 1.5 * dth\n\t[voiceseg, vsl, SF, NF] = Vad.vad_param1D(Dstm, T1, T2)\n\t\n\t# figure\n\tplt.figure(figsize=(9, 16))\n\tplt.subplot(3, 1, 1)\n\tplt.plot(time, x)\n\tfor k in range(vsl):\n\t\tnx1 = voiceseg['begin'][k]\n\t\tnx2 = voiceseg['end'][k]\n\t\tprint('{}, begin = {}, end = {}'.format(k + 1, nx1, nx2))\n\t\tplt.plot(np.array([frameTime[nx1], frameTime[nx1]]), np.array([-1, 1]), 'k', linewidth=1)\n\t\tplt.plot(np.array([frameTime[nx2], frameTime[nx2]]), np.array([-1, 1]), 'k--', linewidth=1)\n\tplt.axis([0, np.max(time), -1, 1])\n\tplt.xlabel('Time [s]')\n\tplt.ylabel('Amplitude')\n\tplt.title('Clean Speech Signal')\n\tplt.subplot(3, 1, 2)\n\tplt.plot(time, signal)\n\tplt.axis([0, np.max(time), np.min(signal), np.max(signal)])\n\tplt.xlabel('Time [s]')\n\tplt.ylabel('Amplitude')\n\tplt.title('Noisy Speech Signal SNR = {}dB'.format(SNR))\n\tplt.subplot(3, 1, 3)\n\tplt.plot(frameTime, Dstm)\n\tplt.axis([0, np.max(time), 0, 1.2 * np.max(Dstm)])\n\tplt.xlabel('Time [s]')\n\tplt.ylabel('Amplitude')\n\tplt.title('Short-term Cepstrum Distance')\n\tfor k in range(vsl):\n\t\tnx1 = voiceseg['begin'][k]\n\t\tnx2 = voiceseg['end'][k]\n\t\tplt.plot(np.array([frameTime[nx1], frameTime[nx1]]), np.array([0, 1.2 * np.max(Dstm)]), 'k', linewidth=1)\n\t\tplt.plot(np.array([frameTime[nx2], frameTime[nx2]]), np.array([0, 1.2 * np.max(Dstm)]), 'k--', linewidth=1)\n\t\tplt.plot(np.array([0, 
np.max(time)]), np.array([T1, T1]), 'b', linewidth=1)\n\t\tplt.plot(np.array([0, np.max(time)]), np.array([T2, T2]), 'r--', linewidth=1)\n\tplt.savefig('images/vad_cepstrum_distance.png', bbox_inches='tight', dpi=600)\n\tplt.show()\n","sub_path":"Chapter6_VoiceActivityDetection/pr6_5_2.py","file_name":"pr6_5_2.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"528321537","text":"# import mimetypes\nimport magic\nimport os\nimport setting\nfrom datetime import date, datetime, timedelta\nimport re\n\nimport discord\nfrom discord.ext import tasks\n## testroleiギルドの[テストBOT007]にて起動\n#TOKEN = setting.tToken\n#CHANNEL = setting.tChannel\n#SERVER = setting.tServer\n\nTOKEN = setting.dToken\nCHANNEL = setting.wChannel\nSERVER = setting.dServer\nLOG_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"timelog\")\nMAX_SEND_MESSAGE_LENGTH = 2000\n\n\ndef minutes2time(m):\n hour = m // 60\n minute = m % 60\n result_study_time = str(hour) + \"時間\" + str(minute) + \"分\"\n return result_study_time\n\n##[検討]ここをいつ起動しても先週の月〜日を指す方法に変更するのもあり\n## 現在は、1日前から遡って7日分取得する方法\ndef arr_days(today):\n days = []\n for i in reversed(range(1, 8)):\n# for i in reversed(range(2, 9)): # 火曜日用\n day = today - timedelta(days=i)\n days.append(datetime.strftime(day, '%Y-%m-%d'))\n return days\n\ndef serialize_log(*args, end=\"\\n\"):\n context = \"\".join(map(str, args)) + end\n return context\n\n\ndef construct_user_record(user_name, studyWeekday, sum_study_time):\n userWeekResult = serialize_log(\"Name:\", user_name)\n #ex) 配列内の[04-20]月-日の文字列を[20]0埋めしない日に変換\n studyDay = []\n for item in studyWeekday:\n item_mod = re.sub(r'(^[0-9]{2})-0?([1-9]?[0-9]$)',r'\\2',item)\n studyDay.append(item_mod)\n #ex) [04-20]\n #userWeekResult += serialize_log(\" 勉強した日付:\", str(studyWeekday))\n userWeekResult += serialize_log(\" 勉強した日付:\", str(studyDay))\n userWeekResult += serialize_log(\" 合計勉強時間:\", str(minutes2time(sum_study_time)))\n return userWeekResult\n\n\ndef compose_user_records(strtoday, days, users_log):\n code_block = \"```\"\n separate = \"====================\\n\"\n start_message = serialize_log(\"@everyone \")\n start_message += code_block + \"\\n\"\n start_message += serialize_log(\"今日の日付:\", strtoday)\n start_message += serialize_log(\"先週の日付:\", days[0], \"~\", days[-1])\n week_result = [start_message]\n for user_log in users_log:\n if len(week_result[-1] + (separate + user_log)) >= MAX_SEND_MESSAGE_LENGTH - len(code_block):\n week_result[-1] += code_block # end code_block\n week_result.append(code_block) # start code_block\n week_result[-1] += separate + user_log\n week_result[-1] += code_block # end code_block\n return week_result\n\n\ndef read_file(file_path):\n with open(file_path, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n lines_strip = [line.strip() for line in lines]\n return lines_strip\n\n\ndef exclude_non_txt(file_list):\n file_list_result = list(file_list)\n print('対象ファイル数 : ',len(file_list))\n print('--- 対象ファイルの[名前/ファイルタイプ]と[対象から除外か否か]の処理結果を出力 ---')\n for file in file_list:\n file_type = magic.from_file(file, mime=True)\n print(f'\\n python-magic: {file_type} --> [file]: {file}',end='') # 確認用\n if file_type != 'text/plain':\n print('--> [ remove ]',end='') # 確認用\n file_list_result.remove(file)\n print('\\n--- (除外対象)ファイルタイプが[text/plain]でない対象 --- ')\n result = list(set(file_list) - set(file_list_result))\n for x in result:\n print(x)\n print('--- end --- ')\n return file_list_result\n\n\n\ndef 
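rcceps in the pr6_5_2 record computes the real cepstrum as ifft(log|fft(x)|). The same transform on a windowed test tone, with a small epsilon added so log never sees a zero magnitude (the epsilon guard is my addition, not in the record):

import numpy as np

def real_cepstrum(x, eps=1e-12):
    spectrum = np.fft.fft(x)
    return np.fft.ifft(np.log(np.abs(spectrum) + eps)).real

fs = 8000
t = np.arange(200) / fs
frame = np.sin(2 * np.pi * 440 * t) * np.hamming(200)
print(real_cepstrum(frame)[:5])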
aggregate_users_record(days):\n \"\"\"\n 各ユーザーの1週間の学習時間と日数を集計する\n \"\"\"\n user_list = [os.path.join(LOG_DIR, txt) for txt in os.listdir(LOG_DIR)]\n user_list = exclude_non_txt(user_list)\n memberStudytime = []\n users_record = []\n obj = {}\n for user_log in user_list:\n # ログファイル読み込み\n lines_strip = read_file(user_log)\n # 1週間以内に勉強した日の学習ログのみ抜き出す\n study_logs = []\n for line in lines_strip:\n if \"Study time\" in line:\n study_logs += [line for day in days if day in line]\n # 勉強した日がないユーザーは処理をスキップする\n if study_logs == []:\n print(f'{user_log}: 学習記録がありません')\n continue\n # 学習ログから勉強した日付を抜き出す\n study_days = []\n for log in study_logs:\n study_days += [day[-5:] for day in days if day in log]\n study_days = sorted(set(study_days), key=study_days.index)\n # 学習ログから合計勉強時間を算出する\n sum_study_time = 0\n for log in study_logs:\n sum_study_time += int(log.split(\",\")[-1])\n user_name = os.path.splitext(os.path.basename(user_log))[0]\n memberStudytime.append({\"username\": user_name, \"studydays\": study_days, \"sumstudytime\": sum_study_time})\n memberStudytime.sort(key=lambda x: x[\"sumstudytime\"], reverse=True)\n for studytime in memberStudytime :\n user_record = construct_user_record(studytime[\"username\"], studytime[\"studydays\"], studytime[\"sumstudytime\"])\n users_record.append(user_record)\n print(\"~< ソート済整形データ >~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(memberStudytime)\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n return users_record\n\n\ndef create_week_result():\n today = datetime.today()\n strtoday = datetime.strftime(today, '%Y-%m-%d')\n days = arr_days(today)\n user_records = aggregate_users_record(days)\n print(user_records[0])\n week_result = compose_user_records(strtoday, days, user_records)\n return week_result\n\n# (確認用)実行された時に出力されるデータの想定\nstr_weekResult = create_week_result()\nprint(str_weekResult)\nprint(len(str_weekResult))\nfor strR in str_weekResult:\n print(strR)\n print(\"文字数: \",len(strR))\n\n\nclient = discord.Client()\n\n@client.event\nasync def on_message(message):\n if message.content.startswith(\"¥Week_Result\"):\n print('Command ¥Week_Result : ', message.author.name)\n if message.author.id != 603567991132782592:\n print('管理者(SuPleiades)以外のメンバーが実行しました')\n return\n print(f'手動週間集計実行日: {datetime.now().strftime(\"%Y-%m-%d %H:%M\")}')\n channel = client.get_channel(CHANNEL)\n week_results = create_week_result()\n for week_result in week_results:\n await channel.send(week_result)\n\n\n@tasks.loop(seconds=60)\nasync def post_week_result():\n if datetime.now().strftime('%H:%M') == \"07:30\":\n if date.today().weekday() == 0:\n print(f'週間集計実行日: {datetime.now().strftime(\"%Y-%m-%d %H:%M\")}')\n channel = client.get_channel(CHANNEL)\n week_results = create_week_result()\n for week_result in week_results:\n await channel.send(week_result)\n\npost_week_result.start()\n\nclient.run(TOKEN)\n","sub_path":"entry_exit/post_week_result.py","file_name":"post_week_result.py","file_ext":"py","file_size_in_byte":7075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"574080565","text":"import os\nimport datetime\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSECRET_KEY = 'fake-key'\n\nINSTALLED_APPS = [\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'rest_framework_jwt',\n 
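post_week_result.py formats minutes as hours-plus-minutes and builds the seven preceding dates by stepping timedelta backwards. Both helpers compress nicely with divmod and a list comprehension (English labels here; the record prints Japanese ones):

from datetime import datetime, timedelta

def minutes_to_hm(total_minutes):
    hours, minutes = divmod(total_minutes, 60)
    return f"{hours}h {minutes}m"

def last_seven_days(today):
    return [(today - timedelta(days=i)).strftime("%Y-%m-%d")
            for i in range(7, 0, -1)]

now = datetime(2024, 1, 8)
print(minutes_to_hm(135))       # 2h 15m
print(last_seven_days(now)[0])  # 2024-01-01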
'corsheaders',\n 'django_filters',\n 'agape.authentication',\n 'agape.contacts',\n 'agape.people',\n 'agape.organizations',\n 'agape.groups',\n 'agape.members',\n 'agape.events'\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\n\nROOT_URLCONF = 'tests.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'APP_DIRS': True,\n },\n]\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST_NAME': os.path.join(os.path.dirname(__file__), 'test.db'),\n }\n}\n\nAUTH_USER_MODEL = 'authentication.User'\n\nREST_FRAMEWORK = {\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json'\n # Uncomment to force all pages to require authentication\n # 'DEFAULT_AUTHENTICATION_CLASSES': (\n # 'rest_framework_jwt.authentication.JSONWebTokenAuthentication',\n # ),\n}\n\nJWT_AUTH = {\n 'JWT_SECRET_KEY': SECRET_KEY,\n 'JWT_AUTH_HEADER_PREFIX': 'Bearer',\n 'JWT_EXPIRATION_DELTA': datetime.timedelta(days=7),\n # 'JWT_ALLOW_REFRESH': False,\n # 'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=7)\n}\n\nCSRF_COOKIE_NAME=\"XSRF-TOKEN\"\nCSRF_HEADER_NAME=\"HTTP_X_XSRF_TOKEN\"\nCORS_ALLOW_HEADERS = (\n 'accept',\n 'accept-encoding',\n 'authorization',\n 'content-type',\n 'dnt',\n 'origin',\n 'user-agent',\n 'x-xsrf-token',\n 'x-requested-with',\n)\nCORS_EXPOSE_HEADERS = (\n 'Set-Cookie',\n)","sub_path":"agape-core/tests/test_settings.py","file_name":"test_settings.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"643853928","text":"'''\nCreated on May 25, 2015\n\n@author: hsd\n'''\n\nfrom resourceline import ResourceLine\nfrom memlinegrammar import MemLineGrammar\n\nclass MemLine(ResourceLine):\n\n MEM_TOTAL_KB = \"mem_total_kb\"\n MEM_USED_KB = \"mem_used_kb\"\n MEM_USED_PERCENTAGE = \"mem_used_percentage\"\n\n _grammar = MemLineGrammar().get_grammar()\n\n def __init__(self, line):\n super(MemLine, self).__init__()\n\n self._total_kb = None\n self._used_kb = None\n self._used_percentage = None\n\n parse_results = self._grammar.parseString(line)\n self._store_parse_results(parse_results)\n\n page_size = int(parse_results.get(MemLineGrammar.MEM_PAGE_SIZE))\n total = int(parse_results.get(MemLineGrammar.MEM_TOTAL))\n\n self._total_kb = self._calculate_total_kb(total, page_size)\n self._used_kb = self._calculate_used_kb(parse_results, total, page_size)\n self._used_percentage = self._calcuate_used_percentage(parse_results)\n\n def addFields(self, fields):\n fields[self.MEM_TOTAL_KB] = self._total_kb\n fields[self.MEM_USED_KB] = self._used_kb\n fields[self.MEM_USED_PERCENTAGE] = self._used_percentage\n\n def _calculate_total_kb(self, total, page_size):\n return int((total * page_size) / 1000)\n\n def _calculate_used_kb(self, parse_results, total, page_size):\n free = parse_results.get(MemLineGrammar.MEM_FREE)\n page_cache = parse_results.get(MemLineGrammar.MEM_PAGE_CACHE)\n buffer_cache = parse_results.get(MemLineGrammar.MEM_BUFFER_CACHE)\n available = int(free) + int(page_cache) + int(buffer_cache)\n return int(((total - available) * 
page_size) / 1000)\n\n def _calcuate_used_percentage(self, parse_results):\n return int((self._used_kb / self._total_kb) * 100)\n","sub_path":"memline.py","file_name":"memline.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"260893680","text":"import math\r\nimport random\r\nimport pygame\r\nimport math3d\r\n\r\nclass physicsObject(object):\r\n\tdef __init__(self, pos, vel):\r\n\t\tself.mPos = math3d.VectorN(pos)\r\n\t\tself.mVel = math3d.VectorN(vel)\r\n\t\t\r\n\tdef update(self, dt):\r\n\t\tself.mPos += self.mVel*dt\r\n\t\r\n\tdef accel(self, a, dt):\r\n\t\taV = math3d.VectorN(a)\r\n\t\tself.mVel += aV*dt\r\n\t\t\r\n\tdef friction(self, f, dt):\r\n\t\told = self.mVel.copy()\r\n\t\tfrict = -(self.mVel.normalized_copy()*f)\r\n\t\tself.mVel += frict*dt\r\n\t\tif self.mVel.dot(old) < 0:\r\n\t\t\tself.mVel = math3d.VectorN(2)\r\n\t\t\t\r\n\tdef getCenteredImgPos(self, img, imgRect):\r\n\t\ttmp = math3d.VectorN((self.mPos[0]-imgRect[2], self.mPos[1]-imgRect[3])).iTuple()\r\n\t\treturn tmp\r\n\r\nclass objectManager(object):\r\n\tdef __init__(self):\r\n\t\tself.screen = pygame.display.get_surface()\r\n\t\tself.screenWidth = pygame.display.Info().current_w\r\n\t\tself.screenHeight = pygame.display.Info().current_h\r\n\t\tself.boulderList = []\r\n\t\tself.playerImage = self.loadSprites(\"images/indiana_jones.bmp\",(29, 34), 12, (65,136,164))\r\n\t\tself.player = player(self.getRandPos(29,34).iTuple(),(0,0),self.playerImage)\r\n\t\tself.boulderImage = self.loadSprites(\"images/boulder.bmp\", (64,64),100,(64,64,64))\r\n\r\n\t\tfor i in range(10):\r\n\t\t\ttmpV = self.getRandPos(64, 64)\r\n\t\t\ttmpS = random.uniform(.5,2)\r\n\t\t\tif self.boulderList:\r\n\t\t\t\toverlap = True\r\n\t\t\t\twhile overlap:\r\n\t\t\t\t\tfor b in self.boulderList:\r\n\t\t\t\t\t\tnewV = b.mPos-tmpV\r\n\t\t\t\t\t\tif newV.dot(newV) > pow(b.radius+(32*tmpS), 2):\r\n\t\t\t\t\t\t\toverlap = False\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\toverlap = True\r\n\t\t\t\t\t\t\ttmpV = self.getRandPos(64, 64)\r\n\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\r\n\t\t\tvX = random.randint(-250,250)\r\n\t\t\tvY = random.randint(-250,250)\r\n\t\t\tself.boulderList.append(boulder(tmpV.iTuple(),(vX,vY),self.boulderImage,tmpS))\r\n\t\t\t\r\n\t\trad = 20\r\n\t\tself.laser = laser(self.getRandPos(20,20).iTuple(), self.screenWidth, \r\n\t\t\t\t\t\t\trad, self.boulderList, self.player)\r\n\t\t\r\n\tdef update(self, eList, dt):\r\n\t\tself.laser.update(dt)\r\n\t\tself.player.update(eList, dt)\r\n\t\r\n\t\tif self.boulderList:\r\n\t\t\tfor b in self.boulderList:\r\n\t\t\t\tb.update(dt)\r\n\t\t\t\t\r\n\t\t\t#For bouncing with other boulders, look in the link in the assignment pdf\r\n\t\t\tif len(self.boulderList) >= 2:\r\n\t\t\t\tfor i in range(len(self.boulderList)):\r\n\t\t\t\t\tfor j in range(i+1, len(self.boulderList)):\r\n\t\t\t\t\t\tB1 = self.boulderList[i]\r\n\t\t\t\t\t\tB2 = self.boulderList[j]\r\n\t\t\t\t\t\tn = B2.mPos-B1.mPos\r\n\t\t\t\t\t\tun = n.normalized_copy()\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif n.dot(n) <= pow(B1.radius+B2.radius,2):\r\n\t\t\t\t\t\t\tV1 = B1.mVel\r\n\t\t\t\t\t\t\tV2 = B2.mVel\r\n\t\t\t\t\t\t\tnV = V2-V1\r\n\t\t\t\t\t\t\tunV = nV.normalized_copy()\r\n\t\t\t\t\t\t\tutV = math3d.VectorN((-unV[1],unV[0]))\r\n\t\t\t\t\t\t\tdotV = unV.dot(utV)\t\t\t\t\t\t\t#?\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tnV1 = unV.dot(V1)\r\n\t\t\t\t\t\t\ttV1 = utV.dot(V1)\r\n\t\t\t\t\t\t\tnV2 = unV.dot(V2)\r\n\t\t\t\t\t\t\ttV2 = 
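memline.py converts page counts to kilobytes with (pages * page_size) / 1000 and then derives a used percentage. One detail worth flagging: under Python 2, used_kb / total_kb truncates to 0 before the multiply by 100, so multiplying first is safer. A sketch with invented values:

page_size = 4096          # bytes per page
total_pages = 1_000_000
free_pages = 250_000

total_kb = total_pages * page_size // 1000
used_kb = (total_pages - free_pages) * page_size // 1000
used_pct = used_kb * 100 // total_kb   # multiply before dividing
print(total_kb, used_kb, used_pct)     # 4096000 3072000 75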
utV.dot(V2)\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tnew_nV1 = (nV1*(B1.radius - B2.radius)+2*B2.radius*nV2)/(B1.radius+B2.radius)\r\n\t\t\t\t\t\t\tnew_nV2 = (nV2*(B2.radius - B1.radius)+2*B1.radius*nV1)/(B1.radius+B2.radius)\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tnew_nV1 *= unV\r\n\t\t\t\t\t\t\tnew_tV1 = tV1*utV\r\n\t\t\t\t\t\t\tnew_nV2 *= unV\r\n\t\t\t\t\t\t\tnew_tV2 = tV1*utV\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tnew_V1 = new_nV1 + new_tV1\r\n\t\t\t\t\t\t\tnew_V2 = new_nV2 + new_tV2\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tB1.mVel = new_V1\r\n\t\t\t\t\t\t\tB2.mVel = new_V2\r\n\t\t\t#===============End of boulder collision===============\r\n\t\t\t\t\r\n\tdef render(self, dt):\r\n\t\tif self.boulderList:\r\n\t\t\tfor b in self.boulderList:\r\n\t\t\t\tb.render(self.screen, dt)\r\n\t\t\r\n\t\tself.laser.render(self.screen, dt)\r\n\t\t\r\n\t\tself.player.render(self.screen, dt)\r\n\t\t\r\n\tdef loadSprites(self, imagePath, dimension, numImages, colorkey):\r\n\t\timageSurface = pygame.image.load(imagePath).convert()\r\n\t\timageSurface.set_colorkey(colorkey)\r\n\t\tsurfaces = []\r\n\t\tfor i in range(numImages):\r\n\t\t\trect = pygame.Rect(i*dimension[0], 0, dimension[0], dimension[1])\r\n\t\t\tsurfaces.append(imageSurface.subsurface(rect))\r\n\t\treturn surfaces\r\n\t\r\n\tdef getRandPos(self, w, h):\r\n\t\tx = random.randint(w,self.screenWidth-w)\r\n\t\ty = random.randint(h,self.screenHeight-h)\r\n\t\treturn math3d.VectorN((x,y))\r\n\t\t\r\nclass boulder(physicsObject):\r\n\tdef __init__(self, pos, vel, images, scale):\r\n\t\tsuper().__init__(pos,vel)\r\n\t\tself.img = images\r\n\t\tself.scaleMultiplier = scale\r\n\t\tself.radius = 64/2 * self.scaleMultiplier\r\n\t\tself.rotation = 0\r\n\t\tself.rotIncr = 50\r\n\t\tself.indexIncr = 100\r\n\t\tself.indexFloat = 0\r\n\t\tself.imgIndex = 0\r\n\t\tself.imgRect = self.img[self.imgIndex].get_rect()\r\n\t\t\r\n\tdef update(self, dt):\r\n\t\tsuper().update(dt)\r\n\t\tself.rotation += self.rotIncr * dt\r\n\t\tself.indexFloat += (self.indexIncr*dt)\r\n\t\tif self.indexFloat >= 100:\r\n\t\t\tself.indexFloat = 0\r\n\t\tself.imgIndex = round(self.indexFloat)\r\n\t\tif self.imgIndex >= len(self.img):\r\n\t\t\tself.imgIndex = 0\r\n\t\tself.imgRect = self.img[self.imgIndex].get_rect()\r\n\t\t\r\n\t\t#Bounce when hit screen edges\r\n\t\tif self.mPos[0] < round(self.radius):\r\n\t\t\tself.mPos[0] = round(self.radius)\r\n\t\t\tself.mVel[0] *= -1\r\n\t\telif self.mPos[0] > pygame.display.Info().current_w - round(self.radius):\r\n\t\t\tself.mPos[0] = pygame.display.Info().current_w - round(self.radius)\r\n\t\t\tself.mVel[0] *= -1\r\n\t\telif self.mPos[1] < round(self.radius):\r\n\t\t\tself.mPos[1] = round(self.radius)\r\n\t\t\tself.mVel[1] *= -1\r\n\t\telif self.mPos[1] > pygame.display.Info().current_h - round(self.radius):\r\n\t\t\tself.mPos[1] = pygame.display.Info().current_h - round(self.radius)\r\n\t\t\tself.mVel[1] *= -1\r\n\r\n\tdef render(self, surf, dt):\r\n\t\ttmpV = math3d.VectorN((self.imgRect[2],self.imgRect[3]))\r\n\t\ttmpV *= self.scaleMultiplier\r\n\t\tscaleImg = pygame.transform.scale(self.img[self.imgIndex], tmpV.iTuple())\r\n\t\trotImg = pygame.transform.rotate(scaleImg, self.rotation)\r\n\t\trotRect = rotImg.get_rect()\r\n\t\tscreen.blit(rotImg, self.mPos-math3d.VectorN((rotRect[2]/2,rotRect[3]/2)))\r\n\t\r\n\tdef accel(self, a, dt):\r\n\t\tsuper().accel(a, dt)\r\n\t\t\r\n\tdef friction(self, f, dt):\r\n\t\tsuper().friction(f,dt)\r\n\t\t\r\nclass player(physicsObject):\r\n\tdef __init__(self,pos, vel, images):\r\n\t\tsuper().__init__(pos, 
vel)\r\n\t\tself.velMax = 700\r\n\t\tself.velInc = 7000\r\n\t\tself.images = images\r\n\t\tself.rightImgs = [0,1,2]\r\n\t\tself.upImgs = [3,4,5]\r\n\t\tself.leftImgs = [6,7,8]\r\n\t\tself.downImgs = [9,10,11]\r\n\t\tself.imgRect = self.images[0].get_rect()\r\n\t\tself.isMoving = False\t\t\t#For mouse\r\n\t\tself.stillMoving = False\t\t#For walking animation\r\n\t\tself.lastDirection = \"down\"\r\n\t\tself.frameFloat = 0\r\n\t\tself.currFrame = 1\t\t\t\t#Keeps track of walking animation frame\r\n\t\tself.currImg = self.images[self.downImgs[self.currFrame]]\r\n\t\t\r\n\tdef update(self, eList, dt):\r\n\t\tif not self.mVel.isZero():\r\n\t\t\tself.stillMoving = True\r\n\t\t\tself.friction(self.velInc*.8, dt)\r\n\t\tif self.mVel.isZero():\r\n\t\t\tself.stillMoving = False\r\n\t\tsuper().update(dt)\r\n\t\tself.lastDirection = self.getLastDirection()\r\n\t\t\r\n\t\tfor e in eList:\r\n\t\t\tif e.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\t\tif e.button == 1:\r\n\t\t\t\t\tself.isMoving = True\r\n\t\t\t\t\t\r\n\t\t\tif e.type == pygame.MOUSEBUTTONUP:\r\n\t\t\t\tif e.button == 1:\r\n\t\t\t\t\tself.isMoving = False\r\n\t\t\t\t\t\r\n\t\tif self.isMoving:\r\n\t\t\tself.mouseMove(dt)\r\n\t\t\t#print(self.getLastDirection())\r\n\t\t\t\t\t\r\n\t\tkeys = pygame.key.get_pressed()\r\n\t\tif (keys[pygame.K_UP] or keys[pygame.K_w]):\r\n\t\t\tself.accel((0, -self.velInc), dt)\r\n\t\t\tself.lastDirection = \"up\"\r\n\t\tif (keys[pygame.K_DOWN] or keys[pygame.K_s]):\r\n\t\t\tself.accel((0, self.velInc), dt)\r\n\t\t\tself.lastDirection = \"down\"\r\n\t\tif (keys[pygame.K_RIGHT] or keys[pygame.K_d]):\r\n\t\t\tself.accel((self.velInc,0), dt)\r\n\t\t\tself.lastDirection = \"right\"\r\n\t\tif (keys[pygame.K_LEFT] or keys[pygame.K_a]):\r\n\t\t\tself.accel((-self.velInc,0), dt)\r\n\t\t\tself.lastDirection = \"left\"\r\n\t\t\t\t\t\r\n\tdef render(self, surf, dt):\r\n\t\tif self.stillMoving:\r\n\t\t\tself.frameFloat += 10*dt\r\n\t\t\tif self.frameFloat >= 2:\r\n\t\t\t\tself.frameFloat = 0\r\n\r\n\t\tif not self.stillMoving:\r\n\t\t\tself.currFrame == 1\r\n\t\t\t\r\n\t\tself.currFrame = round(self.frameFloat)\r\n\t\t#print(self.currFrame)\r\n\t\tif self.lastDirection == \"up\":\r\n\t\t\tself.currImg = self.images[self.upImgs[self.currFrame]]\r\n\t\telif self.lastDirection == \"down\":\r\n\t\t\tself.currImg = self.images[self.downImgs[self.currFrame]]\r\n\t\telif self.lastDirection == \"right\":\r\n\t\t\tself.currImg = self.images[self.rightImgs[self.currFrame]]\r\n\t\telif self.lastDirection == \"left\":\r\n\t\t\tself.currImg = self.images[self.leftImgs[self.currFrame]]\r\n\t\tif self.currFrame >= 2:\r\n\t\t\tself.currFrame = 0\r\n\t\tsurf.blit(self.currImg, self.mPos.iTuple())\r\n\t\t\r\n\tdef accel(self, a, dt):\r\n\t\tsuper().accel(a, dt)\r\n\t\t\r\n\t\tif self.mVel.dot(self.mVel) > pow(self.velMax,2):\r\n\t\t\tself.mVel = self.mVel.normalized_copy()*self.velMax\r\n\t\t\r\n\tdef friction(self, f, dt):\r\n\t\tsuper().friction(f, dt)\r\n\t\t\r\n\tdef mouseMove(self, dt):\r\n\t\ttmp = (math3d.VectorN(pygame.mouse.get_pos()) - self.mPos).normalized_copy()\r\n\t\ttmp = tmp*self.velInc\r\n\t\tself.accel(tmp.iTuple(), dt)\r\n\t\t\r\n\tdef getLastDirection(self):\r\n\t\ttmp = \"down\"\r\n\t\tmousePos = math3d.VectorN(pygame.mouse.get_pos())\r\n\t\ttmpV = (mousePos - self.mPos).iTuple()\r\n\t\tangle = math.atan2(-tmpV[1], tmpV[0])*180/math.pi\r\n\t\tif angle >= -45 and angle <= 45:\r\n\t\t\ttmp = \"right\"\r\n\t\telif angle > 45 and angle <= 135:\r\n\t\t\ttmp = \"up\"\r\n\t\telif angle > 135 and angle <= 180 or angle >= -180 and 
angle <= -135:\r\n\t\t\ttmp = \"left\"\r\n\t\telif angle < -45 and angle > -135:\r\n\t\t\ttmp = \"down\"\r\n\t\treturn tmp\r\n\t\t\r\nclass laser(object):\r\n\tdef __init__(self, pos, screenWidth, rad, boulderList, player):\r\n\t\tself.mPos = math3d.VectorN(pos)\r\n\t\tself.objectList = boulderList[:]\r\n\t\tself.objectList.append(player)\r\n\t\tself.angleMax = 360\r\n\t\tself.angleMin = 0\r\n\t\t#self.angle = 180\r\n\t\ttmp = math3d.VectorN((pygame.display.Info().current_w/2,pygame.display.Info().current_h/2)) - self.mPos\r\n\t\tself.angle = math.atan2(-tmp[1], tmp[0])*180/math.pi + 180\r\n\t\tself.angleInc = 50\r\n\t\t\r\n\t\tself.laserLength = screenWidth*.6\r\n\t\tself.endPos = self.mPos-self.mPos.normalized_copy()*self.laserLength\r\n\t\tself.color = (255,255,255)\r\n\t\tself.mRad = rad\r\n\t\tself.beamColor = (255,0,0)\r\n\t\t\r\n\tdef update(self, dt):\r\n\t\t#print(self.angle)\r\n\t\tself.angle += self.angleInc*dt\r\n\t\t#if self.angle >= self.angleMax or self.angle <= self.angleMin:\r\n\t\t#\tself.angleInc *= -1\r\n\t\tx = math.cos(math.radians(self.angle))\r\n\t\ty = math.sin(math.radians(self.angle))\r\n\t\ttmp = math3d.VectorN((x,y))*self.laserLength\r\n\t\tself.endPos = tmp + self.mPos\r\n\t\t\r\n\t\t#Hitting object\r\n\t\t#Get perpendicular line with object and check if that line is less than radius of object\r\n\t\t#Best way is to check all objects\r\n\t\tfor obj in self.objectList:\r\n\t\t\tpass\r\n\t\t\r\n\t\t\t\r\n\tdef render(self, surf, dt):\r\n\t\tpygame.draw.line(surf, self.beamColor, self.mPos.iTuple(), self.endPos.iTuple())\r\n\t\tpygame.draw.circle(surf, self.color, self.mPos.iTuple(), self.mRad)\r\n\t\t\r\n#====================Game==================\r\n\r\npygame.display.init()\r\n\r\nsize = width, height = 1280,720\r\nclock = pygame.time.Clock()\r\ndone = False\r\n\r\nscreen = pygame.display.set_mode(size)\r\n\r\nM = objectManager()\r\n\r\nwhile not done:\r\n\teList = pygame.event.get()\r\n\t#Update\r\n\tdt = clock.tick()/1000\r\n\tM.update(eList, dt)\r\n\t\r\n\tfor e in eList:\r\n\t\tif e.type == pygame.QUIT:\r\n\t\t\tdone = True\r\n\t\r\n\tkeys = pygame.key.get_pressed()\r\n\tif keys[pygame.K_ESCAPE]:\r\n\t\tdone = True\r\n\t\t\r\n\t#Draw\r\n\tbgColor = 0, 0, 0\r\n\tscreen.fill(bgColor)\r\n\tM.render(dt)\r\n\t\t\r\n\tpygame.display.flip()\r\n\t\r\npygame.display.quit()","sub_path":"Lab5 - Boulder/boulder.py","file_name":"boulder.py","file_ext":"py","file_size_in_byte":10920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"327659392","text":"#!/usr/bin/env python\n\"\"\"Data for testing various DIS widgets\n\nHistory:\n2005-07-21 ROwen Bug fix: was not dispatching MainDataList in order\n (because it was specified as a normal non-ordered dict).\n2008-04-24 ROwen Bug fix: had too few filter names.\n2014-02-03 ROwen Updated to use TUI.Base.TestDispatcher\n\"\"\"\nimport TUI.Base.TestDispatcher\n\ntestDispatcher = TUI.Base.TestDispatcher.TestDispatcher(actor=\"spicam\", delay=0.5)\ntuiModel = testDispatcher.tuiModel\n\nMainDataList = (\n 'filterNames=\"SDSS u\\'\", \"SDSS g\\'\", \"SDSS r\\'\", \"SDSS i\\'\", \"SDSS z\\'\", \"Hodge 6629\"',\n 'filterID=1',\n 'filterName=\"Hodge 6629\"',\n 'shutter=\"closed\"',\n 'ccdState=\"ok\"',\n 'ccdBin=2,2',\n 'ccdWindow=1,1,1024,514',\n 'ccdUBWindow=1,1,2048,1028',\n 'ccdOverscan=50,50',\n 'name=\"dtest030319.\"',\n 'number=1',\n 'places=4',\n 'path=\"/export/images\"',\n 'basename=\"/export/images/dtest030319.0001\"',\n 'ccdTemps=-113.8,-106.7',\n 
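The objectManager.update collision code above is the standard two-disc elastic response: project each velocity onto the unit normal and tangent, exchange the normal components with the mass-weighted 1-D formula, keep the tangential parts, and recombine. (The record appears to reuse tV1 for both tangential components, which looks like a typo.) A self-contained NumPy sketch, with masses standing in for the record's radii:

import numpy as np

def elastic_bounce(p1, v1, m1, p2, v2, m2):
    """Post-collision velocities for two colliding discs."""
    n = (p2 - p1) / np.linalg.norm(p2 - p1)  # unit normal
    t = np.array([-n[1], n[0]])              # unit tangent
    v1n, v1t = n @ v1, t @ v1
    v2n, v2t = n @ v2, t @ v2
    # 1-D elastic collision along the normal; tangential parts unchanged.
    new_v1n = (v1n * (m1 - m2) + 2 * m2 * v2n) / (m1 + m2)
    new_v2n = (v2n * (m2 - m1) + 2 * m1 * v1n) / (m1 + m2)
    return new_v1n * n + v1t * t, new_v2n * n + v2t * t

v1, v2 = elastic_bounce(np.array([0.0, 0.0]), np.array([1.0, 0.0]), 1.0,
                        np.array([2.0, 0.0]), np.array([0.0, 0.0]), 1.0)
print(v1, v2)  # equal-mass head-on hit: velocities swap -> [0. 0.] [1. 0.]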
'ccdHeaters=0.0,0.0',\n)\n\n# Each element of animDataSet is list of keywords\nAnimDataSet = (\n)\n\ndef start():\n testDispatcher.dispatch(MainDataList)\n\ndef animate():\n testDispatcher.runDataSet(AnimDataSet)\n","sub_path":"TUI/Inst/SPIcam/TestData.py","file_name":"TestData.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"462331407","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import svm, cross_validation, metrics\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.externals import joblib\n\n\nactivity_label = {'1': 'backhand',\n '2': 'forehand',\n }\n\n### Open data set\nX = []\ny = []\nX_val = [] # validation set features\ny_val = [] # validation set target\n\nprint(\"Opening dataset...\")\ntry:\n with open(\"X_train.txt\", 'rU') as f:\n res = list(f)\n for line in res: # each line is one sample, or row (can be viewed as 1*561 vector)\n line.strip(\"\\n\")\n features = line.split(\" \")\n while features.__contains__(\"\"):\n features.remove(\"\")\n # print(len(features)) is 561, applied for each line in the file --> 10000*561 feature matrix!!\n for i in range(len(features)):\n features[i] = float(features[i])\n X.append(features)\n\n # read the classes from file and put them in list.\n with open(\"y_train.txt\", 'rU') as f:\n res = list(f)\n for line in res:\n y.append(int(line.strip(\"\\n\")[0]))\n\nexcept:\n print(\"Error in reading the train set file.\")\n exit()\ntry:\n # do the same for test sets.\n with open(\"X_test.txt\", 'rU') as f:\n res = list(f)\n for line in res:\n line.strip(\"\\n\")\n features = line.split(\" \")\n while features.__contains__(\"\"):\n features.remove(\"\")\n for i in range(len(features)):\n features[i] = float(features[i])\n X_val.append(features)\n\n with open(\"y_test.txt\", 'rU') as f:\n res = list(f)\n for line in res:\n y_val.append(int(line.strip(\"\\n\")[0]))\n f.close()\nexcept:\n print(\"Error in reading the train set file.\")\n exit()\nprint(\"Dataset opened.\")\n\nX = np.array(X) # change to matrix\ny = np.array(y) # change to matrix (sklearn models only accept matrices)\n\nprint(\"Separating data into 67% training set & 33% test set...\")\nX_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.33,\n random_state=33) # random split.\nprint(\"Dataset separated.\\n\")\n\nprint(\"---------------Random Forest---------------\")\nn_estimators_list = range(1, 6) # try from one to 21 estimators.\nresult_random_forests = [] # to be used later for comparing rf with different estimators.\nmax_score_rf = float(\"-inf\") # just in case we get NaN\nbest_param_rf = None\nfor trees in n_estimators_list:\n print(\"Testing %d trees\" % trees)\n rf_clf = RandomForestClassifier(n_estimators=trees, max_depth=None, min_samples_split=1, random_state=0)\n scores = cross_validation.cross_val_score(rf_clf, X_train, y_train, scoring=\"accuracy\", cv=6)\n result_random_forests.append(scores.mean())\n if scores.mean() > max_score_rf:\n max_score_rf = scores.mean()\n best_param_rf = {\"n_estimators\": trees}\n\nrf_clf_test_score = RandomForestClassifier(n_estimators=best_param_rf.get(\"n_estimators\"), max_depth=None,\n min_samples_split=1, random_state=0).fit(X_test, y_test).score(X_test,\n y_test)\n# print(\"Test set accuracy: \", rf_clf_test_score)\n\nrf_clf = RandomForestClassifier(n_estimators=best_param_rf.get(\"n_estimators\"), max_depth=None, min_samples_split=1,\n random_state=0).fit(X, 
y)\n# save trained model for future use.\njoblib.dump(rf_clf, 'rf_clf.pkl', compress=9)\n\ncount1 = 0\ncount2 = 0\nactualist = []\npredlist = []\n\nfor i in range(len(X_val)):\n count2 += 1\n classinrow = X_val[i]\n classinrow = np.array(X_val[i]).reshape(1, -1) # Need to do this so we can do predictions in sklearn\n # yval ma3roof, a is our prediction of yval. each xval is a set of features la one sample. 561. based on these feature it predicts activity. cool. check into validation sets..\n predicted = rf_clf.predict(classinrow) # predict class for each row.. each i is a row.\n actual = y_val[i]\n actualist.append(actual)\n predlist.append(predicted[0])\n if predicted == actual:\n count1 += 1\nprint()\nprint(\"Number of trees in forest: \", len(n_estimators_list))\nprint(\"Results: \", result_random_forests)\nprint(\"Best accuracy: \", max_score_rf)\nprint(\"Best parameter: \", best_param_rf)\nprint(\"Test set accuracy: \", rf_clf_test_score)\n\nprint(\"Total cases: \", count2)\nprint(\"Correct Prediction: \", count1)\nprint(\"Correct prediction rate: \", float(count1) / count2)\n","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"631181024","text":"taban = \"\"\nwhile True:\n yapi_malzemesi = input(\"Piramit malzemesini giriniz: \")\n taban_uznlk = len(yapi_malzemesi)*(int(input(\"Oluşturulmasını istediğiniz piramitin taban uzunluğunu giriniz (tek sayı): \"))+2)\n m = taban_uznlk // 2 # m=ortanca terim (0 dan saymaya başlayınca)\n for item in range(taban_uznlk):\n taban += \" \"\n xler = \"\"\n taban_default = taban\n\n for sirasayisi in range(int((taban_uznlk+len(yapi_malzemesi))/2)):\n for items in range(2*sirasayisi-len(yapi_malzemesi)):\n xler= xler + yapi_malzemesi\n taban= taban[:m-sirasayisi]+xler+taban[(m+sirasayisi+len(yapi_malzemesi)):]\n print(taban)\n taban=taban_default\n xler = \"\"\n tekrar = input(\"\\nÇıkmak için 0, bir piramit daha yapmak için 1 yazınız.\")\n if tekrar == \"0\":\n break\n\n\n#piramit tabanının yüksekliğine eşit olacağını düşündüğüm için hataya düştüm en başta.\n#bir de sıra olayında kafam karıştı +1 mi -1 mi derken.\n#sonunda kağıda çizdim piramidi, sayıları öyle tutturabildim.\n#bunun gibi projelerde sayıları önce kağıda çizerek saptamak daha iyi bir fikir.","sub_path":"piramit_deneysel.py","file_name":"piramit_deneysel.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"498132272","text":"\n\"\"\"\nCreated on Mon Dec 26 09:10:20 2019\n\n@author: Ybenson Augustave\n\"\"\"\n### IMPORTACAO DAS BIBLIOTECA \"BeautifulSoup\" PARA PROCURAR AS TAGS NA PAGINA HTML E JSON\nfrom bs4 import BeautifulSoup\nimport json\n\n\n#### CODIGO PARA LER O ARQUIVO HTMTL ######\npath = '/home/aluno/Desktop/archive/' # O DIRETORIO DO ARQUIVO\nfile_read = 'processo3.html' # O NOME DO ARQUIVO\narquivo = open(path + file_read, 'r')\ndocumento = arquivo.read()\n\ndef processo3(documento):\n #def processo1(documento):\n ##### Codigo para pegar o numero do processo na pagina HTML ######\n soup = BeautifulSoup(documento, \"lxml\")\n selector = 'html > body > div > table > tbody > tr > td > div > table > tbody > tr > td > table > tbody > tr > td > span'\n numero_de_processo = (soup.select(selector))\n soup = BeautifulSoup(str(numero_de_processo[0]))\n selector = 'html > body > span'\n found = soup.select(selector)\n numero_de_processo = 
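Test.py above sweeps tree counts with cross_val_score from sklearn.cross_validation, a module that later scikit-learn releases renamed to sklearn.model_selection; note also that the record's rf_clf_test_score fits and scores on the same X_test, which inflates that number. A minimal modern sketch of the sweep on a bundled toy dataset:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

X, y = load_iris(return_X_y=True)

best_score, best_trees = -np.inf, None
for trees in (1, 5, 10, 25):
    clf = RandomForestClassifier(n_estimators=trees, random_state=0)
    score = cross_val_score(clf, X, y, scoring="accuracy", cv=5).mean()
    if score > best_score:
        best_score, best_trees = score, trees

print(best_trees, round(best_score, 3))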
soup.find('span').contents\n \n numero_de_processo = numero_de_processo[0].strip()\n \n \n #### CODIGO PARA PEGAR O VALOR DA CAUSA #####\n soup = BeautifulSoup(documento, \"lxml\")\n selector = 'html > body > div > table > tbody > tr > td > div > table > tbody > tr > td > span'\n valor_da_causa = (soup.select(selector))\n soup = BeautifulSoup(str(valor_da_causa[7]))\n selector = 'span'\n found = soup.select(selector)\n valor_da_causa = soup.find('span').contents\n \n valor_da_causa = valor_da_causa[0].strip()\n valor_da_causa = valor_da_causa.replace(\" \", \"\")\n \n \n \n ##### CODIGO PARA PEAGAR A CLASSE DO PROCESSO NA PAGINA HTML #########\n soup = BeautifulSoup(documento, \"lxml\")\n selector = 'html > body > div > table > tbody > tr > td > div > table > tbody > tr > td > table > tbody > tr > td > span > span'\n classe = (soup.select(selector))\n soup = BeautifulSoup(str(classe[0]))\n selector = 'span'\n found = soup.select(selector)\n classe = soup.find('span').contents\n classe = classe[0].strip()\n \n \n ####### CODIGO PARA PEGAR O NOME DO JUIZ DO PROCESSO #######\n soup = BeautifulSoup(documento, \"lxml\")\n selector = 'html > body > div > table > tbody > tr > td > div > table > tbody > tr > td > span'\n nome_do_juiz = (soup.select(selector))\n soup = BeautifulSoup(str(nome_do_juiz[5]))\n selector = 'span'\n found = soup.select(selector)\n nome_do_juiz = soup.find('span').contents\n nome_do_juiz = nome_do_juiz[0].strip()\n \n \n #### CODIGO PARA PEGAR AS PARTES DO PROCESSO ####\n soup = BeautifulSoup(documento, \"lxml\")\n selector = 'html > body > div > table > tbody > tr > td > div > span'\n partes_do_processo = (soup.select(selector))\n soup = BeautifulSoup(str(partes_do_processo[0]))\n \n selector = 'span'\n found = soup.select(selector)\n found = str(found[0])\n partes_do_processo = soup.find('span').contents\n partes_do_processo = partes_do_processo[0].strip()\n \n advogado_processo = BeautifulSoup(documento, 'html.parser')\n\n for s in advogado_processo.findAll('span', attrs = {'class':'mensagemExibindo'}): \n \n #TRATAMENTO DE ERROR\n try:\n \n advogado_processo = (s.next_sibling.strip())\n if advogado_processo == '':\n pass\n else:\n advogado_processo = 'Advogado(a)', advogado_processo\n partes_do_processo = (partes_do_processo, advogado_processo)\n \n \n except NoneType:\n pass \n \n result=({'Número do processo': numero_de_processo, 'Valor da causa': valor_da_causa, 'Classe': classe, 'Nome do Juiz':nome_do_juiz, 'Partes do Processo': partes_do_processo})\n return str(result)\n \n \nprint(processo3(documento))\n\n\n\n\n\n\n\n","sub_path":"API_processo3.py","file_name":"API_processo3.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"26511362","text":"from subprocess import call\nfrom shutil import copy\nimport os\nimport sys\nimport filecmp\n\ndef gradeHW():\n\twrong = 0\n\tprint(\"copying files into \" + sys.argv[1])\n\tcopy(\"check_attack.txt\", sys.argv[1])\n\tcopy(\"check_attack_solution.txt\", sys.argv[1])\n\tcopy(\"check_king_validity.txt\", sys.argv[1])\n\tcopy(\"check_king_validity_solution.txt\", sys.argv[1])\n\tcopy(\"check_piece_position.txt\", sys.argv[1])\n\tcopy(\"check_piece_position_solution.txt\", sys.argv[1])\n\tcopy(\"test-input.txt\", sys.argv[1])\n\tcopy(\"test-output.txt\", sys.argv[1])\n\tcopy(\"more-input.txt\", sys.argv[1])\n\tcopy(\"more-output.txt\", sys.argv[1])\n\tprint(\"---\")\n\tprint(\"navigating to \" + 
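API_processo3.py above pulls each field by chaining long CSS selectors through soup.select and then soup.find(...).contents. The two BeautifulSoup calls on an inline document (the HTML string is invented for illustration):

from bs4 import BeautifulSoup

html = "<html><body><span class='numero'> 0001234-56.2019 </span></body></html>"
soup = BeautifulSoup(html, "html.parser")

matches = soup.select("body > span.numero")  # CSS selector, returns a list of tags
first = soup.find("span")                    # first matching tag
print(matches[0].get_text(strip=True))       # 0001234-56.2019
print(first.contents[0].strip())             # same text via .contents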
sys.argv[1])\n\tos.chdir(sys.argv[1])\n\tprint(\"running make\")\n\tprint(\"---\")\n\tcall([\"make\"])\n\n\tprint(\"---\")\n\tif(os.path.exists(\"ChessBoard.jar\")):\n\t\tprint(\"✓ [ChessBoard.jar properly created]\")\n\telse:\n\t\tprint(\"✗ [ChessBoard.jar not properly created]\")\n\t\twrong += 1;\n\n\tprint(\"checking attacks\")\n\tcall([\"java\", \"-jar\", \"ChessBoard.jar\", \"check_attack.txt\", \"out.txt\"])\n\tprint(\"---\")\n\tif(os.path.exists(\"out.txt\")):\n\t\tprint(\"✓ [out.txt properly created]\")\n\t\tcall([\"cat\", \"out.txt\"])\n\telse:\n\t\tprint(\"✗ [out.txt not properly created]\")\n\t\twrong += 1;\n\tprint(\"---\")\n\tcall([\"cat\", \"check_attack_solution.txt\"])\n\tif(filecmp.cmp(\"out.txt\", \"check_attack_solution.txt\")):\n\t\tprint(\"✓ [check_attack_solution.txt solutions match]\")\n\telse:\n\t\tprint(\"✗ [check_attack_solution.txt solutions do not match]\")\n\t\twrong += 1;\n\tcall([\"rm\", \"out.txt\"])\n\n\tprint(\"checking king validity\")\n\tcall([\"java\", \"-jar\", \"ChessBoard.jar\", \"check_king_validity.txt\", \"out.txt\"])\n\tprint(\"---\")\n\tif(os.path.exists(\"out.txt\")):\n\t\tprint(\"✓ [out.txt properly created]\")\n\t\tcall([\"cat\", \"out.txt\"])\n\telse:\n\t\tprint(\"✗ [out.txt not properly created]\")\n\t\twrong += 1;\n\tprint(\"---\")\n\tcall([\"cat\", \"check_king_validity_solution.txt\"])\n\tif(filecmp.cmp(\"out.txt\", \"check_king_validity_solution.txt\")):\n\t\tprint(\"✓ [check_king_validity_solution.txt solutions match]\")\n\telse:\n\t\tprint(\"✗ [check_king_validity_solution.txt solutions do not match]\")\n\t\twrong += 1;\n\tcall([\"rm\", \"out.txt\"])\n\n\tprint(\"checking piece positions\")\n\tcall([\"java\", \"-jar\", \"ChessBoard.jar\", \"check_piece_position.txt\", \"out.txt\"])\n\tprint(\"---\")\n\tif(os.path.exists(\"out.txt\")):\n\t\tprint(\"✓ [out.txt properly created]\")\n\t\tcall([\"cat\", \"out.txt\"])\n\telse:\n\t\tprint(\"✗ [out.txt not properly created]\")\n\t\twrong += 1;\n\tprint(\"---\")\n\tcall([\"cat\", \"check_piece_position_solution.txt\"])\n\tif(filecmp.cmp(\"out.txt\", \"check_piece_position_solution.txt\")):\n\t\tprint(\"✓ [check_piece_position_solution.txt solutions match]\")\n\telse:\n\t\tprint(\"✗ [check_piece_position_solution.txt solutions do not match]\")\n\t\twrong += 1;\n\tcall([\"rm\", \"out.txt\"])\n\n\tcall([\"java\", \"-jar\", \"ChessBoard.jar\", \"test-input.txt\", \"test-out.txt\"])\n\tprint(\"---\")\n\tif(os.path.exists(\"test-out.txt\")):\n\t\tprint(\"✓ [test-out.txt properly created]\")\n\t\tcall([\"cat\", \"test-out.txt\"])\n\telse:\n\t\tprint(\"✗ [test-out.txt not properly created]\")\n\t\twrong += 1;\n\tprint(\"---\")\n\tcall([\"cat\", \"test-output.txt\"])\n\tif(filecmp.cmp(\"test-out.txt\", \"test-output.txt\")):\n\t\tprint(\"✓ [test-output.txt solutions match]\")\n\telse:\n\t\tprint(\"✗ [test-output.txt solutions do not match]\")\n\t\twrong += 1;\n\tcall([\"rm\", \"test-out.txt\"])\n\n\tcall([\"java\", \"-jar\", \"ChessBoard.jar\", \"more-input.txt\", \"more-out.txt\"])\n\tprint(\"---\")\n\tif(os.path.exists(\"more-out.txt\")):\n\t\tprint(\"✓ [more-out.txt properly created]\")\n\t\tcall([\"cat\", \"more-out.txt\"])\n\telse:\n\t\tprint(\"✗ [more-out.txt not properly created]\")\n\t\twrong += 1\n\tprint(\"---\")\n\tcall([\"cat\", \"more-output.txt\"])\n\tif(filecmp.cmp(\"more-out.txt\", \"more-output.txt\")):\n\t\tprint(\"✓ [more-output.txt solutions match]\")\n\telse:\n\t\tprint(\"✗ [more-output.txt solutions do not match]\")\n\t\twrong += 1;\n\tcall([\"rm\", 
\"more-out.txt\"])\n\tprint(\"---\")\n\n\tif(os.path.exists(\"README\")):\n\t\tprint(\"✓ [README exists]\")\n\t\tcall([\"cat\", \"README\"])\n\telse:\n\t\tprint(\"✗ [README does not exist]\")\n\t\twrong += 1\n\n\tprint(\"\\n---\")\n\tprint(\"cleaning out test files\")\n\tcall([\"rm\", \"check_attack.txt\"])\n\tcall([\"rm\", \"check_attack_solution.txt\"])\n\tcall([\"rm\", \"check_king_validity.txt\"])\n\tcall([\"rm\", \"check_king_validity_solution.txt\"])\n\tcall([\"rm\", \"check_piece_position.txt\"])\n\tcall([\"rm\", \"check_piece_position_solution.txt\"])\n\tcall([\"rm\", \"test-input.txt\"])\n\tcall([\"rm\", \"test-output.txt\"])\n\tcall([\"rm\", \"more-input.txt\"])\n\tcall([\"rm\", \"more-output.txt\"])\n\tprint(\"---\")\n\tprint(\"cleaning out with make\")\n\tcall([\"make\", \"clean\"])\n\tprint(\"---\")\n\tif(wrong > 0):\n\t\tprint(str(wrong) + \" problems found in your code\")\n\telse:\n\t\tprint(\"No problems found. You're literally my favorite person right now\")\n\ngradeHW()\n","sub_path":"checkerhw2.py","file_name":"checkerhw2.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"80446537","text":"# -*- coding: utf-8 -*-\r\n# @Time:2018.11.14 11:25\r\n# @Author:Zhang\r\n# @Desc :\r\n\r\n# lis = [11,22]\r\n# num1=5\r\n# def test1():\r\n# global lis\r\n# #lis.append(33)\r\n# lis=lis+[44]\r\n# global num1\r\n# num1=4\r\n#\r\n# test1()\r\n# print(lis)\r\n# print(num1)\r\n\r\n\r\nimport threading\r\nimport time\r\n\r\nnum = 100\r\n\r\n\r\ndef test1():\r\n global num\r\n num += 1\r\n print(\"test1---num=%d\" % num)\r\n\r\n\r\ndef test2():\r\n print(\"test2---num=%d\" % num)\r\n\r\n\r\ndef main():\r\n t1 = threading.Thread(target=test1)\r\n t2 = threading.Thread(target=test2)\r\n\r\n t1.start()\r\n time.sleep(1)\r\n t2.start()\r\n time.sleep(1)\r\n\r\n print(\"main-----num=%d\" % num)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"myClass/myT_class/t1114_thread/thread_global.py","file_name":"thread_global.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"599853178","text":"import os\nimport sys\nimport collections\nimport logging\n\nimport six\nimport pandas as pd\nfrom lxml import etree\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef get_sumo_dir():\n return os.path.join(os.environ['SUMO_HOME'])\n\n\ndef get_sumo_tools_dir():\n tools = os.path.join(get_sumo_dir(), 'tools')\n return tools\n\n\ndef append_sumo_tools_dir():\n if 'SUMO_HOME' in os.environ:\n tools_dir = get_sumo_tools_dir()\n if tools_dir not in sys.path:\n sys.path.append(tools_dir)\n else:\n sys.exit(\"Please declare environment variable 'SUMO_HOME'\")\n\n\ndef get_edge_neighbors(edge):\n return edge.getIncoming(), edge.getOutgoing()\n\n\ndef get_net_name(netfile):\n return os.path.basename(\n os.path.splitext(os.path.splitext(netfile)[0])[0])\n\n\ndef get_net_dir(netfile):\n return os.path.dirname(os.path.realpath(netfile))\n\n\ndef load_data(network_name, output_tag):\n pass\n\n\nclass IterParseWrapper(object):\n _tag = None\n _schema_file = None\n def __init__(self, xml_file, validate=False):\n if validate:\n try:\n schema_file = self._schema_file\n schema = etree.XMLSchema(file=schema_file)\n tree = etree.iterparse(xml_file, schema=schema, tag=self._tag)\n except etree.XMLSchemaParseError:\n _logger.warning(\n 'Error in xml validation of %s, skipping validation.',\n xml_file,\n exc_info=True)\n tree = 
etree.iterparse(xml_file, tag=self._tag)\n        else:\n            tree = etree.iterparse(xml_file, tag=self._tag)\n        self.tree = tree\n        self.get_next()\n\n    def get_next(self):\n        _, self.item = six.next(self.tree)\n\n    def iterate_until(self, stop_time):\n        while self.interval_end() <= stop_time:\n            yield self.item\n            self.item.clear()\n            try:\n                self.get_next()\n            except StopIteration:\n                return\n\n    def interval_end(self):\n        return float(self.item.attrib.get('end'))\n\n    def interval_begin(self):\n        return float(self.item.attrib.get('begin'))\n\n\nclass E1IterParseWrapper(IterParseWrapper):\n    _tag = 'interval'\n    _schema_file = os.path.join(get_sumo_dir(), 'data', 'xsd', 'det_e1_file.xsd')\n\n\nclass E2IterParseWrapper(IterParseWrapper):\n    _tag = 'interval'\n    _schema_file = os.path.join(get_sumo_dir(), 'data', 'xsd', 'det_e2_file.xsd')\n\n\ndef xml_to_list_of_dicts(\n    xml_file, tags_to_filter=None, attributes_to_get=None\n):\n    if attributes_to_get is None:\n        get_all = True\n    else:\n        get_all = False\n\n    data = etree.parse(xml_file)\n    all_records = []\n    for child in data.iter(tags_to_filter):\n        if get_all:\n            record = dict(child.items())\n        else:\n            record = {}\n            for attr in attributes_to_get:\n                if attr in child.keys():\n                    record[attr] = child.get(attr)\n        all_records.append(record)\n\n    return all_records\n\n\ndef parse_detector_output_xml(data_file, ids=None, fields=None):\n    parsed = etree.iterparse(data_file, tag='interval')\n\n    records = {}\n\n    for _, element in parsed:\n        det_id = element.attrib['id']\n        if ids is None or det_id in ids:\n            if fields is None:\n                record = {col: element.attrib[col]\n                          for col in element.keys()\n                          if col not in ['begin', 'id']}\n            else:\n                record = {col: element.attrib[col]\n                          for col in fields\n                          if col in element.keys()}\n\n            records[(int(round(float(element.attrib['begin']))), det_id,\n                     )] = record\n\n    df = pd.DataFrame.from_dict(records, orient='index', dtype=float)\n    df.index.set_names(['time', 'det_id'], inplace=True)\n\n    return df\n\n\ndef parse_tls_output_xml(data_file):\n    parsed = etree.iterparse(data_file, tag='tlsSwitch')\n\n    records = []\n\n    for _, element in parsed:\n        records.append(\n            (element.attrib['id'],\n             element.attrib['fromLane'],\n             element.attrib['toLane'],\n             element.attrib['programID'],\n             float(element.attrib['begin']),\n             float(element.attrib['end']),\n             float(element.attrib['duration']))\n        )\n\n    df = pd.DataFrame.from_records(\n        records,\n        columns=[\n            'tls_id', 'fromLane', 'toLane', 'programID',\n            'begin', 'end', 'duration'])\n    df.set_index(['tls_id', 'fromLane', 'toLane'], inplace=True)\n\n    return df\n\n\ndef verify_xml_schema(xml_file):\n    tools_dir = get_sumo_tools_dir()\n    schemacheck_py_file = os.path.join(\n        tools_dir, 'xml', 'schemaCheck.py')\n\n    # schemaCheck.py errors when you send it a valid tls output file?\n    raise NotImplementedError\n\n\ndef in_interval(value, interval):\n    assert(len(interval)) == 2\n    return interval[0] <= value <= interval[1]\n\n\ndef iterfy(x):\n    if isinstance(x, collections.Iterable) and type(x) not in six.string_types:\n        return x\n    else:\n        return (x,)\n","sub_path":"trafficgraphnn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"327082128","text":"import os\nimport json\nimport requests\nimport sys\nimport datetime\nimport math\nimport pandas as pd\n\n\ndef get_historical_asset_price_data(assets, metric_id, start_date, end_date):  # parameter renamed from misspelled 'mertic_id'; the body already uses 'metric_id'\n    # we can sanitize the inputs to this function if we are taking them from the command line 
and keep filters based on whether\n # the values or start_date and end_date are in correct format or not\n # we can also check if the assets or the metric_id belong to our set of values that are available\n asset_price_dictionary = {}\n date_values = []\n price_list = []\n i = 0\n\n for asset in assets:\n url = \"\"\n url = \"https://data.messari.io/api/v1/assets/\" + asset + \"/metrics/\" + metric_id + \"/time-series?start=\" + start_date + \"&end=\" + \\\n end_date + \"&interval=1d\" # the value of the interval can be taken as an argument as well for free tier we can support 1 day and 1 week\n response = requests.get(url)\n # also we are assuming that the api will always return the response in the same format(/with same structure)\n data_value = response.json()['data']\n price_list = []\n for data in data_value[\"values\"]:\n\n if i == 0:\n epoch = math.trunc(int(data[0])/1000)\n date_utc = datetime.datetime.utcfromtimestamp(\n epoch).strftime('%Y-%m-%d')\n date_values.append(date_utc)\n\n price_list.append(data[1])\n\n i = i+1\n asset_price_dictionary[asset.upper()] = price_list\n\n # print(date_values)\n # print(asset_price_dictionary)\n\n # reference https://www.javatpoint.com/how-to-create-a-dataframes-in-python\n # NOTE: currently the data frame created is sorted alphabetically and that can be changed if needed\n df = pd.DataFrame(asset_price_dictionary, index=date_values)\n # sorting the data frame by index(/date) inplace\n df.sort_index(inplace=True)\n # here we can append start / end date and the assets in the filename to make it easier to deal with on subsequent calls\n df.to_csv('asset_price_data.csv')\n return df\n\n\ndef get_assets():\n # changing this to api version v2 returns the same result\n url = \"https://data.messari.io/api/v1/assets\"\n assets_symbol_set = set()\n response = requests.get(url)\n data_value = response.json()['data']\n i = 0\n for data in data_value:\n # we can replace this with slug or name which are unique\n assets_symbol_set.add(data[\"symbol\"]) # symbol is non unique\n # assets_symbol_set.add(data[\"name\"])\n # assets_symbol_set.add(data[\"slug\"])\n i = i+1\n print(\"total assets returned by this endpoint: \" + str(i))\n print(assets_symbol_set)\n\n\ndef get_available_metrics():\n url = \"https://data.messari.io/api/v1/assets/metrics\"\n metric_id_set = set()\n response = requests.get(url)\n data_value = response.json()['data']\n i = 0\n metric_values = data_value[\"metrics\"]\n for data in metric_values:\n metric_id_set.add(data[\"metric_id\"])\n i = i+1\n print(\"total metrics returned by this endpoint: \" + str(i))\n print(metric_id_set)\n\n\nif __name__ == \"__main__\":\n # get_assets()\n # get_available_metrics()\n # if we do not want to pass in the arguments and want to use variables we can set these values\n \"\"\"\n start_date = \"2021-10-10\"\n end_date = \"2021-10-13\"\n assets = [\"uni\", \"luna\", \"mkr\"]\n metric_id = \"price\"\n dataframe = get_historical_asset_price_data(\n assets, metric_id, start_date, end_date)\n print(dataframe)\n \"\"\"\n assets = []\n if len(sys.argv) < 5:\n print(\"please enter appropriate arguments start date, end date, metric and the symbol of the assets \\\n you would like to get the data for\")\n else:\n start_date = sys.argv[1]\n end_date = sys.argv[2]\n metric_id = sys.argv[3]\n for i in range(4, len(sys.argv)):\n assets.append(sys.argv[i])\n\n dataframe = get_historical_asset_price_data(\n assets, metric_id, start_date, end_date)\n 
print(dataframe)\n","sub_path":"get_price_data.py","file_name":"get_price_data.py","file_ext":"py","file_size_in_byte":4056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"540045140","text":"import requests\nimport time\nimport csv\n\n\ndef take_1000_posts():\n    token = '2e096dae2e096dae2e096dae2c2e79433c22e092e096dae706f136bed140c624f325219'\n    version = 5.103\n    domain = 'habr'\n    count = 100\n    offset = 0\n    all_posts = []\n    while offset < 1000:\n        response = requests.get('https://api.vk.com/method/wall.get',\n                                params=\n                                {\n                                    'access_token': token,\n                                    'v' : version,\n                                    'domain' : domain,\n                                    'count' : count,\n                                    'offset' : offset\n                                })\n        data = response.json()['response']['items']\n        offset += 100\n        all_posts.extend(data)\n        time.sleep(0.5)\n    return all_posts\n\n\n\ndef file_writer(all_posts):\n    with open( 'ebanphysics.csv','w') as file:\n        a_pen = csv.writer(file)\n        a_pen.writerow(('likes','body','url'))\n        for post in all_posts:\n            if post['attachments'][0]['type']:\n                img_url = 123\n\n\n\nall_posts = take_1000_posts()\nfile_writer(all_posts)\nprint(1)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"400896195","text":"import datetime\nfrom datetime import date\nfrom statistics import median\nfrom typing import Optional\n\nfrom api import get_friends\nfrom api_models import User\n\n\ndef age_predict(user_id: int) -> Optional[float]:\n    \"\"\" Naive age prediction based on the ages of friends\n\n    The age is computed as the median of the ages of all the user's friends\n\n    :param user_id: user identifier\n    :return: median age of the user\n    \"\"\"\n    assert isinstance(user_id, int), \"user_id must be positive integer\"\n    assert user_id > 0, \"user_id must be positive integer\"\n    #curdate = datetime.date.today()\n    friends = [User(**i) for i in get_friends(user_id, 'bdate')]\n    # bdates = get_friends(user_id, 'bdate')\n    bdates = []\n    # the try block runs the statement, except handles the exception\n    for friend in friends:\n        birthday = friend.bdate\n        try:\n            age = calculate_age(datetime.datetime.strptime(birthday, \"%d.%m.%Y\"))\n            bdates.append(age)\n        except (ValueError, TypeError):\n            pass\n\n    bd = None # avoid UnboundLocalError when no friend has a parseable birth date\n    if bdates:\n        bd = float(median(bdates))\n        print(\"\\nNaive age prediction based on friends' ages: \")\n        print(bd)\n\n    return bd\n\n\n\ndef calculate_age(born):\n    today = date.today()\n    return today.year - born.year - ((today.month, today.day) < (born.month, born.day))\n\nif __name__ == '__main__':\n    age_predict(948354)\n\n#main()\n","sub_path":"homework04/age.py","file_name":"age.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"19419652","text":"from flask import Flask, render_template, request\n\nfrom utils import typograph_text\n\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef typograph():\n    context = {}\n    context['source_text'] = request.form.get('text')\n    if context['source_text']:\n        typographed_text = typograph_text(context['source_text'])\n        context['typographed_text'] = typographed_text\n    return render_template('form.html', **context)\n\n\nif __name__ == \"__main__\":\n    app.run()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"482740434","text":"from bs4 import BeautifulSoup\nimport requests\nfrom concurrent.futures import ThreadPoolExecutor\n\n\ndef load_url(url):\n    try:\n        html = requests.get(url, stream=True)\n        amount = len(url.split(\"/\"))\n        filename = '{}/{}'.format(filepath, url.split(\"/\")[amount - 1])\n        try:\n            f = open(filename, 'wb')\n        except:\n            print(\"Incorrect path\")\n        try:\n            f.write(bytearray(html.text.encode('utf-8')))\n            f.close()\n        except:\n            f.close()\n    except requests.exceptions.RequestException as e:\n        return e\n\n\ndef start_requests(url):\n    req = requests.get(url)\n    html_text = req.text\n    B_soup = BeautifulSoup(html_text, 'lxml')\n    if url == 'https://tsn.ua/news':\n        soup = B_soup.find_all('a', {'class': 'c-card__link'})\n    if url == 'https://www.unian.ua/detail/main_news':\n        soup = B_soup.select('div.list-thumbs__info > a')\n    link_list = []\n    for temp in soup:\n        link_list.append(temp.get('href'))\n    threads = []\n    with ThreadPoolExecutor(max_workers=20) as executor:\n        for url in link_list:\n            threads.append(executor.submit(load_url, url))\n\n\nwhile True:\n    print('Choose what webpage do you want to parse:')\n    print('1)tsn.ua\\n2)unian.ua')\n    options = input()\n    if options.isdigit():\n        if int(options) == 1:  # '==' not 'is': never compare to int literals by identity\n            url = 'https://tsn.ua/news'\n        elif int(options) == 2:\n            url = 'https://www.unian.ua/detail/main_news'\n        else:\n            print('Incorrect option')\n            continue\n        print('Input relative path from project:')\n        data = input()\n        filepath = data\n        start_requests(url)\n        print('Data generated')\n        break\n    else:\n        print('Incorrect option')\n        continue\n","sub_path":"generator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"588770412","text":"from distutils.core import setup\nfrom setuptools import find_packages\nimport subprocess\n\nmacos_ver = tuple(int(i) for i in subprocess.getoutput('sw_vers -productVersion').split('.'))\n\n\nwith open('README.md') as fh:\n    long_description = fh.read()\n\nsetup(\n    name='macuitest',\n    packages=find_packages(),\n    
version='0.7.41',\n license='Apache-2.0 License',\n description='A simple UI testing framework for macOS',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='Andrii Kislitsyn',\n author_email='andriikislitsyn@gmail.com',\n url='https://github.com/andriykislitsyn',\n download_url='https://github.com/andriykislitsyn/macuitest/archive/v0.7.41-alpha.tar.gz',\n keywords=['Testing', 'UI', 'Functional', 'macOS'],\n install_requires=[\n 'biplist',\n 'opencv-python == 3.4.8.29' if macos_ver < (10, 13) else 'opencv-python',\n 'pyobjc-framework-ApplicationServices',\n 'pyobjc-framework-AVFoundation',\n 'pyobjc-framework-Cocoa',\n 'pyobjc-framework-CoreText',\n 'pyobjc-framework-Quartz',\n 'pytesseract',\n ],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Education :: Testing',\n 'Operating System :: MacOS',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"324548694","text":"#Write a function called after_second that accepts two\n#arguments: a target string to search, and string to search\n#for. The function should return everything in the first\n#string *after* the *second* occurrence of the search term.\n#You can assume there will always be at least two\n#occurrences of the search term in the first string.\n#\n#For example:\n# after_second(\"11223344554321\", \"3\") -> 44554321\n#\n#The search term \"3\" appears at indices 4 and 5. So, this\n#returns everything from the index 6 to the end.\n#\n# after_second(\"heyyoheyhi!\", \"hey\") -> hi!\n#\n#The search term \"hey\" appears at indices 0 and 5. The\n#search term itself is three characters. So, this returns\n#everything from the index 8 to the end.\n#\n#Hint: This may be more complicated than it looks! 
You'll\n#have to look at the length of the search string and\n#either modify the target string or take advantage of the\n#extra arguments you can pass to find().\n\n\n#Write your function here!\ndef after_second(target_string, search_string):\n    search_string_length = len(search_string)\n    #print(\"Search string length: \" + str(search_string_length))\n    first_occurence = target_string.find(search_string)\n    next_target_string = target_string[(first_occurence + 1):]\n    second_occurence = next_target_string.find(search_string)\n    start_index_of_result_string = second_occurence + search_string_length\n    result = next_target_string[start_index_of_result_string:]\n    return result\n\n#Below are some lines of code that will test your function.\n#You can change the value of the variable(s) to test your\n#function with different inputs.\n#\n#If your function works correctly, this will originally\n#print 44554321 and hi!, each on their own line.\nprint(after_second(\"11223344554321\", \"3\"))\nprint(after_second(\"heyyoheyhi!\", \"hey\"))\n","sub_path":"looking_for_substrings.py","file_name":"looking_for_substrings.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"616759517","text":"from sqlalchemy.dialects.mysql import BIGINT, VARCHAR, DATETIME, ENUM\nfrom app import db\n\nstatus_enums = [\"admin\", \"employee\"]\n\nclass User(db.Model):\n    __tablename__ = 'User'\n    __table_args__ = {\n        'mysql_engine': 'InnoDB',\n        'mysql_charset': 'utf8',\n        'extend_existing': True\n    }\n\n    id = db.Column(\n        BIGINT(20, unsigned=True),\n        primary_key=True,\n        index=True\n    )\n\n    name = db.Column(\n        VARCHAR(128)\n    )\n\n\n    username = db.Column(\n        VARCHAR(128),\n        unique=True\n    )\n\n    email = db.Column(\n        VARCHAR(128)\n    )\n\n\n    password = db.Column(\n        VARCHAR(128)\n    )\n\n    birthday = db.Column(\n        DATETIME()\n    )\n\n    joinDate = db.Column(\n        DATETIME()\n    )\n\n    status = db.Column(\n        ENUM(*status_enums),\n        default=status_enums[0],\n        server_default=status_enums[0]\n    )","sub_path":"app/model/User.py","file_name":"User.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"513207071","text":"from baza import dodaj\n\nprint(\"Welcome to my database.\")\n\nopcje = \"\"\"Enter your choice:\n1 - add a person\n2 - remove a person\nQ - quit the program\"\"\"\n\nwybor = None\n\nwhile wybor != 'Q':\n    print(opcje)\n    wybor = input('Your choice: ').upper()\n    if wybor == '1':\n        osoba = input(\"Enter a name: \")\n        dodaj(osoba)\n","sub_path":"dzien4_5_6/dzien6/szkola.py","file_name":"szkola.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"446837996","text":"## Binary search + two pointers (start from the middle, walk outward in opposite directions)\n# Idea: 1. walk from the middle out to both sides,\n#2019/9/3\n\nclass Solution:\n\n    def kClosestNumbers(self,A,target,k):\n        if not A:\n            return []\n        index = self.findFirstIndex(A,target)\n        left,right = index-1,index\n        result = []\n\n        for i in range(k):\n            if left<0:\n                result.append(A[right])\n                right+=1\n            elif right>=len(A):\n                result.append(A[left])\n                left-=1\n            else:\n                if target-A[left]<=A[right]-target:\n                    result.append(A[left])\n                    left-=1\n                else:\n                    result.append(A[right])\n                    right+=1\n        return result\n    # the first number greater than or equal to target\n    def findFirstIndex(self,A,target):\n        start,end=0,len(A)-1\n        while start+1<end:\n            mid=(start+end)//2\n            if A[mid]>=target:\n                end=mid\n            else:\n                start=mid\n\n        if A[start]>=target:\n            return start\n        if A[end]>=target:\n            return end\n\n        return len(A)-1\n\na= Solution()\narr = [1, 
3, 5, 7]\ntar = 2\nk=2\nprint(a.kClosestNumbers(arr, tar,k))\n","sub_path":"lintcode/第一层/460_在排序数组中找最接近的K个数.py","file_name":"460_在排序数组中找最接近的K个数.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"368118687","text":"# from gm_main import *\r\nimport os\r\nimport sys\r\ndef main():\r\n    inputcontent = sys.stdin.read().splitlines()\r\n    for line in inputcontent:\r\n        argv = line.split()[0]\r\n        inputfilename = argv\r\n        outputfilename = inputfilename[:-3] + \"output\" + \".txt\"\r\n        inputfile = open(inputfilename, 'r')\r\n        outputfile = open(outputfilename, 'w')\r\n        lines = inputfile.read().splitlines()\r\n        for line in lines:\r\n            if \"#\" not in (line):\r\n                src_id = line.split()[0]\r\n                dst_id = line.split()[1]\r\n                outputfile.write(src_id+\", \"+dst_id+\"\\n\")\r\n        inputfile.close()\r\n        outputfile.close()\r\n        os.system(\"python gm_main.py --file `pwd`/\" + outputfilename +\r\n                  \" --dest_dir `pwd`/output --belief_file `pwd`/priorsbelief.txt --unweighted --undirected\")\r\n        os.system(\"cp -rf `pwd`/output `pwd`/\" + outputfilename[:-4])\r\nmain()\r\n","sub_path":"phase2/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"613347302","text":"import os\nimport sys\nimport matplotlib.pyplot as plt\n\ndef plot(fname_, fname2_=None):\n    \"\"\"Visualizes the data given in input string fname.\n\n    :param fname: string containing the path to the file\n    \"\"\"\n    #module global vars\n    global fname, fname2, data1, data2, states\n    fname = fname_\n    fname2 = fname2_\n    \n    # States for line deletion and boundary\n    states = {}\n    states['delete'] = False\n    states['boundary'] = True\n    states['xup'] = 0\n    states['xlo'] = 0\n    states['yup'] = 0\n    states['ylo'] = 0\n    states['num'] = 0\n    states['dL'] = 1\n    # Override standard settings\n    plt.rcParams['keymap.save'] = ['ctrl+s']\n    plt.rcParams['keymap.all_axes'] = []\n    plt.rcParams['keymap.home'] = ['h', 'home']\n    plt.rcParams['keymap.quit'] = ['q']\n    plt.rcParams['keymap.save'] = ['ctrl+s']\n    plt.rcParams['keymap.yscale'] = []\n    # Start event handler for key press\n    \n    # Load Surfaces\n    data1 = loadFile(fname)\n    data2 = None\n    if fname2 is not None:\n        data2 = loadFile(fname2)\n    \n    fig = plt.figure(1)\n    cid = fig.canvas.mpl_connect('key_press_event', keyPressEventFunction)\n    # Start with first surface only\n    plotRewind()\n    plt.show()\n    # Close event handling\n    fig.canvas.mpl_disconnect(cid)\n    \ndef loadFile(fname):\n    \"\"\"Read topography information from file and store them for later usage.\n\n    :param fname: string containing the path to the file\n    \n    :return\n    returns a list of surfaces\n    with every surface being a dictionary with\n    time: simulation Time\n    numel: number of elements of the Surface\n    \"xVals\": x Values of the Surface as list\n    \"yVals\": y Values of the Surface as list\n    \"\"\"\n    # Load surfaces\n    fHandle = open(fname, \"r\")\n    # Place holders for surfaces, times and number of points per surface\n    surfaces = []\n    times = []\n    numels = []\n    # Create variables to store one surface\n    xValues = []\n    yValues = []\n    for line in fHandle:\n        # Check where new surface begins and get time and number of elements\n        if 'surface:' in line:\n            splittedLine = line.split(' ')\n            time = splittedLine[1]\n            numel = splittedLine[2]\n            # Store extracted surface, time and number of elements\n            if xValues: #do not append if list is empty; needed because of first line\n            surfaces.append( (xValues, yValues) )\n            \n            times.append(float(time))\n            numels.append(float(numel))\n            # Reset lists to be empty\n            xValues = []\n            yValues = []\n        else:\n            # Extract xy pair from line\n            xyValuePair = line.rstrip('\\n').split(' ')\n            xValue = xyValuePair[0]\n            yValue = xyValuePair[1]\n            xValues.append(float(xValue))\n            yValues.append(float(yValue))\n    # Store last line\n    surfaces.append( (xValues, yValues) )\n    # Close file and return extracted surfaces, times and number of points \n    fHandle.close()\n    \n    merged_list = []\n    for i in range(len(surfaces)):\n        merged_list.append({\n            \"time\": times[i],\n            \"numel\": numels[i],\n            \"xVals\": surfaces[i][0],\n            \"yVals\": surfaces[i][1],\n        })\n    \n    return merged_list\n\ndef keyPressEventFunction(event):\n    \"\"\"Uses the surfaces structure and shows them to the selected settings.\n    :param event: event handle\n    \"\"\"\n    global data1, data2, states, fname\n    \n    # Change boundaries\n    if event.key == 'b':\n        states['boundary'] = not states['boundary']\n        plt.draw()\n\n    # Step 'steps' forward\n    if event.key == ' ':\n        if states['num'] + 1 < len(data1):\n            if states['num'] + states['dL'] < len(data1):\n                states['num'] += states['dL']\n            \n            else: #only advance as far as possible\n                states['num'] = len(data1) - 1\n            \n            surface1 = data1[states['num']]\n            xValues1 = surface1[\"xVals\"]\n            yValues1 = surface1[\"yVals\"]\n            \n            if states['delete']:\n                plt.cla()\n                plt.grid(True,'major')\n                plt.xlabel('x-values in nm')\n                plt.ylabel('y-values in nm')\n            \n            if states['boundary']:\n                adoptBoundaries(xValues1, yValues1, states)\n\n            line, = plt.plot(xValues1, yValues1)\n            if data2 is not None:\n                surface2 = get_best_surface(data2, data1[states['num']]['time'])\n                xValues2 = surface2[\"xVals\"]\n                yValues2 = surface2[\"yVals\"]\n                plt.plot(xValues2, yValues2, \"--\", color=line.get_color())\n            \n            plt.xlim(states['xlo'] - 1, states['xup'] + 1)\n            plt.ylim(states['ylo'] - 1, states['yup'] + 1)\n            plt.draw()\n\n\n    # Change between delete state\n    if event.key == 'd':\n        states['delete'] = not states['delete']\n\n    # Numbers pressed\n    if str.isnumeric(event.key):\n        states['dL'] = 2**int(event.key)\n\n    # clear the figure and reset it\n    if event.key == 'r':\n        plotRewind()\n\n    # change aspect ratio\n    if event.key == 'a':\n        if plt.axes().get_aspect() == 'equal':  # '==' not 'is': string identity is not guaranteed\n            plt.axes().set_aspect( 'auto' )\n        else:\n            plt.axes().set_aspect( 'equal' )\n        plt.draw()\n\n    # show last line\n    if event.key == 'l':\n        surface = data1[-1]\n        xValues = surface[\"xVals\"]\n        yValues = surface[\"yVals\"]\n        if states['delete']:\n            plt.cla()\n        if states['boundary']:\n            adoptBoundaries(xValues, yValues, states)\n        plt.xlim(states['xlo'] - 1, states['xup'] + 1)\n        plt.ylim(states['ylo'] - 1, states['yup'] + 1)\n        plt.plot(xValues, yValues)\n        plt.draw()\n\n    # Saving current figure\n    if event.key == 's':\n        plt.savefig(fname[:-4] + '.png', format='png')\n\ndef plotRewind():\n    \"\"\"Resets the plot to only show the first line.\n\n    :param event: event handler variable\n    :param times: time variable for each line\n    :param numels: number of points for the lines\n    :param surface: coordinate values of the surface\n    :param states: variable storing the delete and boundary state for\n                   the plot and the current x and y limits of the plot\n    \"\"\"\n    global data1, states\n    states['num'] = 0\n    \n    # Read values from structure\n    surface1 = data1[0]\n    xValues1 = surface1[\"xVals\"]\n    yValues1 = surface1[\"yVals\"]\n    \n    # check if plot limits have to be adopted and do so if needed\n    if states['boundary']:\n        adoptBoundaries(xValues1, yValues1, states)\n    # Plot 
the selected surface\n    plt.cla()\n    plt.xlim(states['xlo'] - 1, states['xup'] + 1)\n    plt.ylim(states['ylo'] - 1, states['yup'] + 1)\n    line, = plt.plot(xValues1, yValues1)\n    if data2 is not None:\n        surface2 = get_best_surface(data2, data1[0]['time'])\n        xValues2 = surface2[\"xVals\"]\n        yValues2 = surface2[\"yVals\"]\n        plt.plot(xValues2, yValues2, \"--\", color=line.get_color())\n    \n    plt.grid(True,'major')\n    plt.xlabel('x-values in nm')\n    plt.ylabel('y-values in nm')\n    plt.draw()\n\ndef adoptBoundaries(xValues, yValues, states):\n    \"\"\"Set the limits according to the given data and store them.\n\n    :param xValues: x coordinates of the points of the surface\n    :param yValues: y coordinates of the points of the surface\n    :param states: variable to store the current limits\n    \"\"\"\n    states['xlo'] = xValues[0]\n    states['xup'] = xValues[-1]\n    states['ylo'] = min(yValues)\n    states['yup'] = max(yValues)\n\ndef get_best_surface(data, time):\n    \"\"\"Searches for the nearest dataset for a given time\"\"\"\n    best_surface = None\n    smallest_diff = None\n    \n    for surface in data:\n        diff = abs(surface['time'] - time)\n        if smallest_diff is None or diff < smallest_diff:\n            #we found a better one\n            smallest_diff = diff\n            best_surface = surface\n    \n    return best_surface\n    \n\nif __name__ == \"__main__\":\n    if len(sys.argv) > 1:\n        dataFile = sys.argv[1]\n        if os.path.isfile(dataFile):\n            plot(dataFile)\n        else:\n            print('No such file: ' + dataFile)\n    else:\n        print('No filename given. Aborted!')\n","sub_path":"work/Aufgabe6_testen/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":8408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"541204056","text":"#-*- coding: utf-8 -*-\n\n'''\n    Copyright (c) 2016 NSR (National Security Research Institute)\n    \n    Permission is hereby granted, free of charge, to any person obtaining a copy \n    of this software and associated documentation files (the \"Software\"), to deal \n    in the Software without restriction, including without limitation the rights \n    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell \n    copies of the Software, and to permit persons to whom the Software is \n    furnished to do so, subject to the following conditions:\n    \n    The above copyright notice and this permission notice shall be included in \n    all copies or substantial portions of the Software.\n    \n    THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR \n    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \n    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE \n    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \n    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \n    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN \n    THE SOFTWARE.\n'''\n\nfrom .lsh256 import LSH256\nfrom .lsh512 import LSH512\n\n## Hash function wrapper class\nclass LSHDigest:\n\n    ## Create an LSH algorithm object matching the given parameters\n    # @param [in] wordlenbits word length (bits); only 256 and 512 are supported\n    # @param [in] outlenbits output length (bits); 1 ~ 256 (LSH-256) or 1 ~ 512 (LSH-512)\n    # @return LSH object\n    @staticmethod\n    def getInstance(wordlenbits, outlenbits = None):\n        if outlenbits is None:\n            outlenbits = wordlenbits\n        \n        if wordlenbits == 256: \n            return LSH256(outlenbits)\n        \n        elif wordlenbits == 512: \n            return LSH512(outlenbits)\n        \n        else:\n            raise ValueError(\"Unsupported algorithm parameter\")\n\n\n    ## digest function - computes and returns the final hash value.\n    # @param [in] wordlenbits word length; must be 256 or 512\n    # @param [in] outlenbits output hash length; must be between 1 and wordlenbits\n    # @param [in] data input data\n    # @param [in] offset data start offset (bytes)\n    # @param [in] length data length (bits)\n    # @return the computed hash value\n    @staticmethod\n    def digest(wordlenbits, outlenbits = None, data = None, offset = 0, length = -1):\n        if outlenbits is None:\n            outlenbits = wordlenbits\n        \n        lsh = LSHDigest.getInstance(wordlenbits, outlenbits)\n        return lsh.final(data, offset, length)","sub_path":"2021/3번/lsh/lsh_digest.py","file_name":"lsh_digest.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"19964197","text":"from flask import Flask, render_template, request, redirect, session\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom forex_python.converter import CurrencyRates, CurrencyCodes, RatesNotAvailableError\n\napp = Flask(__name__)\n\n# the toolbar is only enabled in debug mode:\napp.debug = True\n\n# set a 'SECRET_KEY' to enable the Flask session cookies\napp.config['SECRET_KEY'] = 'hush-little-one'\n\n# redirect debugging false\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n\ntoolbar = DebugToolbarExtension(app)\n\n# Setup forex python\nrates = CurrencyRates()\ncodes = CurrencyCodes()\n\n\n@app.route('/')\ndef get_home():\n    \"\"\" Display the home page with the currency form \"\"\"\n\n    return render_template('index.html')\n\n\n@app.route('/error')\ndef get_error():\n    \"\"\" Display the home page with error message \"\"\"\n\n    msg = session['msg']\n    return render_template('error.html', msg=msg)\n\n\n@app.route('/convert', methods=['POST'])\ndef convert_currency():\n    \"\"\" Convert the currencies from the currency form \"\"\"\n\n    from_curr = request.form['from-curr'].upper()\n    to_curr = request.form['to-curr'].upper()\n\n    # Error handling if the amount is not a number\n    try:\n        amount = float(request.form['amount'])\n    except ValueError:\n        session['msg'] = 'Not a valid amount.'\n        return redirect('/error')\n\n    # Checking to see which currency is the one with the issue and updating the message\n    try:\n        result = rates.convert(from_curr, to_curr, amount)\n    except RatesNotAvailableError:\n\n        if codes.get_symbol(from_curr) is None:\n            session['msg'] = f'Not a valid code: {from_curr}'\n        else:\n            session['msg'] = f'Not a valid code: {to_curr}'\n        return redirect('/error')\n\n    symbol = codes.get_symbol(to_curr)\n    session['result'] = result\n    session['symbol'] = symbol\n    return redirect('/result')\n\n\n@app.route('/result')\ndef get_result():\n    \"\"\" Display the result of the conversion \"\"\"\n\n    
result = session['result']\n formatted_float = \"{:.2f}\".format(result)\n symbol = session['symbol']\n\n return render_template('result.html', result=formatted_float, symbol=symbol)\n\n\n# Needed for Replit flask server\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5500)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"555790234","text":"import sys\n\nsrc, tgt, i = sys.argv[1], sys.argv[2], sys.argv[3]\n\nif len(i) == 1:\n i = '0' + i\n \nsrc_name = f'tmp/subs.{src}-{tgt}.{src}{i}'\ntgt_name = f'tmp/subs.{src}-{tgt}.{tgt}{i}'\nsrc_res_name = f'tmp/subs.{src}-{tgt}.noneq.{src}{i}'\ntgt_res_name = f'tmp/subs.{src}-{tgt}.noneq.{tgt}{i}'\n\nwith open(src_name) as s, open(tgt_name) as t:\n src_lines = s.readlines()\n tgt_lines = t.readlines()\n\nwith open(src_res_name, 'w') as src_res, open(tgt_res_name, 'w') as tgt_res:\n for i in range(len(src_lines)):\n if src_lines[i] != tgt_lines[i]:\n src_res.writelines(src_lines[i])\n tgt_res.writelines(tgt_lines[i])\n","sub_path":"preprocess_data/.ipynb_checkpoints/find-equal-lines-checkpoint.py","file_name":"find-equal-lines-checkpoint.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"449803528","text":"from flask_debugtoolbar.panels import DebugPanel\n\n_ = lambda x: x\n\nclass RequestVarsDebugPanel(DebugPanel):\n \"\"\"\n A panel to display request variables (POST/GET, session, cookies).\n \"\"\"\n name = 'RequestVars'\n has_content = True\n\n def nav_title(self):\n return _('Request Vars')\n\n def title(self):\n return _('Request Vars')\n\n def url(self):\n return ''\n\n def process_request(self, request):\n self.request = request\n self.view_func = None\n self.view_args = []\n self.view_kwargs = {}\n\n def process_view(self, request, view_func, view_kwargs):\n self.view_func = view_func\n self.view_kwargs = view_kwargs\n\n def content(self):\n context = self.context.copy()\n context.update({\n 'get': [(k, self.request.args.getlist(k)) for k in self.request.args],\n 'post': [(k, self.request.form.getlist(k)) for k in self.request.form],\n 'cookies': [(k, self.request.cookies.get(k)) for k in self.request.cookies],\n 'view_func': '%s.%s' % (self.view_func.__module__, self.view_func.__name__) if self.view_func else '[unknown]',\n 'view_args': self.view_args,\n 'view_kwargs': self.view_kwargs or {}\n })\n if hasattr(self.request, 'session'):\n context.update({\n 'session': [(k, self.request.session.get(k)) for k in self.request.session.iterkeys()]\n })\n\n return self.render('panels/request_vars.html', context)\n\n","sub_path":"flask_debugtoolbar/panels/request_vars.py","file_name":"request_vars.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"224330172","text":"import re, time\n\n\n\nflagtypes = {\n'team_CTF_redflag':1,\n'team_CTF_blueflag':2,\n'RED':1,\n'BLUE':2,\n}\n\nflagactions = {\n0:'drop',\n1:'return',\n2:'score' \n \n}\n\ngametypes = {\n0: 'ffa', #free for all\n1: None,\n2: None,\n3: 'tdm', #Team death match\n4: 'ts', #Team survivor\n5: 'ftl', #Follow the leader\n6: 'cah', #Capture and hold\n7: 'ctf', #Capture the flag\n8: 'bm' #Bomb\n}\n\n# Method Of Death (MODs)\nMOD_WATER = {'id':'1'}\nMOD_SLIME = {'id': '2'}\nMOD_LAVA= {'id':'3'}\nMOD_CRUSH= {'id':'4'}\nMOD_TELEFRAG = {'id':'5'}\nMOD_FALLING= 
{'id':'6'}\nMOD_SUICIDE= {'id':'7'} # Dupe with 11\nMOD_TARGET_LASER= {'id':'8'}\nMOD_TRIGGER_HURT= {'id':'9'}\nMOD_CHANGE_TEAM= {'id':'10'}\n#MOD_SUICIDE={'id':'11'} # Dupe\nUT_MOD_KNIFE= {'id':'12'}\nUT_MOD_KNIFE_THROWN= {'id':'13'}\nUT_MOD_BERETTA= {'id':'14'}\nUT_MOD_DEAGLE= {'id':'15'}\nUT_MOD_SPAS= {'id':'16'}\nUT_MOD_UMP45= {'id':'17'}\nUT_MOD_MP5K= {'id':'18'}\nUT_MOD_LR300= {'id':'19'}\nUT_MOD_G36= {'id':'20'}\nUT_MOD_PSG1= {'id':'21'}\nUT_MOD_HK69= {'id':'22'}\nUT_MOD_BLED= {'id':'23'}\nUT_MOD_KICKED= {'id':'24'}\nUT_MOD_HEGRENADE= {'id':'25'}\nUT_MOD_FLASHGRENADE= {'id':'26'} #@DEV It is technically possible to die from this, I guarantee it!\nUT_MOD_SMOKEGRENADE= {'id':'27'}\nUT_MOD_SR8= {'id':'28'}\nUT_MOD_SACRIFICE= {'id':'29'}\nUT_MOD_AK103= {'id':'30'}\nUT_MOD_SPLODED= {'id':'31'}\nUT_MOD_SLAPPED= {'id':'32'}\nUT_MOD_BOMBED= {'id':'33'}\nUT_MOD_NUKED= {'id':'34'}\nUT_MOD_NEGEV= {'id':'35'}\nUT_MOD_HK69_HIT= {'id':'37'}\nUT_MOD_M4= {'id':'38'}\nUT_MOD_FLAG= {'id':'39'}\nUT_MOD_GOOMBA= {'id':'40'}\n\n# Hits (yes these differ from the kill ones, go fig)\n\"\"\"\n1 \n UT_MOD_KNIFE\n2 \n UT_MOD_BERETTA\n3 \n UT_MOD_DEAGLE\n4 \n UT_MOD_SPAS\n5 \n UT_MOD_MP5K\n6 \n UT_MOD_UMP45\n8 \n UT_MOD_LR300\n9 \n UT_MOD_G36\n10 \n UT_MOD_PSG1\n14 \n UT_MOD_SR8\n15 \n UT_MOD_AK103\n17 \n UT_MOD_NEGEV\n19 \n UT_MOD_M4\n21 \n UT_MOD_HEGRENADE\n\"\"\"\n\n# Hitzones\n\"\"\" verbatim:\n13:57 Hit: 2 1 5 9: chouille hit killgirl in the Legs\nRead the hit sequence as follows\nChouille is client 1\nKillGirl is client 2\n5 is the hit area (here the legs)\n9 is the weapon ID\n\nHit areas are:\n0: Head\n1: Helmet\n2: Torso\n3: Kevlar\n4: Arms\n5: Legs\n6: Body\n\"\"\"\n\n# Items (pickupable)\n# These don't have an integer attached to them, they just 'are'\n\"\"\"\nUT_WEAPON_M4\nUT_WEAPON_GRENADE_FRAG\nUT_WEAPON_NEGEV\nUT_WEAPON_BOMB\nUT_WEAPON_AK103\nUT_WEAPON_SR8\nUT_WEAPON_GRENADE_SMOKE\nUT_WEAPON_GRENADE_FLASH\nUT_WEAPON_GRENADE_HE\nUT_WEAPON_PSG1\nUT_WEAPON_G36\nUT_WEAPON_LR\nUT_WEAPON_HK69\nUT_WEAPON_UMP45\nUT_WEAPON_MP5K\nUT_WEAPON_SPAS12\nUT_WEAPON_DEAGLE\nUT_WEAPON_BERETTA\nUT_WEAPON_KNIFE\nUT_WEAPON_BOMB\nUT_ITEM_APR\nUT_ITEM_EXTRAAMMO\nUT_ITEM_HELMET\nUT_ITEM_LASER\nUT_ITEM_SILENCER\nUT_ITEM_MEDKIT\nUT_ITEM_NVG\nUT_ITEM_VEST\nUT_ITEM_BOMB\n\"\"\"\n\ndamage = {\n \n 'PASSIVE': [0, 0, 0, 0, 0, 0, 0, 0],\n MOD_TELEFRAG['id']: [0, 0, 0, 0, 0, 0, 0, 0],\n UT_MOD_KNIFE['id']: [100, 60, 44, 35, 20, 20, 44, 100],\n UT_MOD_KNIFE_THROWN['id']: [100, 60, 44, 35, 20, 20, 44, 100],\n UT_MOD_BERETTA['id']: [100, 34, 30, 20, 11, 11, 30, 100],\n UT_MOD_DEAGLE['id']: [100, 66, 57, 38, 22, 22, 57, 100],\n UT_MOD_SPAS['id']: [25, 25, 25, 25, 25, 25, 25, 100],\n UT_MOD_UMP45['id']: [100, 51, 44, 29, 17, 17, 44, 100],\n UT_MOD_MP5K['id']: [50, 34, 30, 20, 11, 11, 30, 100],\n UT_MOD_LR300['id']: [100, 51, 44, 29, 17, 17, 44, 100],\n UT_MOD_G36['id']: [100, 51, 44, 29, 17, 17, 44, 100],\n UT_MOD_PSG1['id']: [100, 63, 97, 63, 36, 36, 97, 100],\n UT_MOD_HK69['id']: [50, 50, 50, 50, 50, 50, 50, 100],\n UT_MOD_BLED['id']: [15, 15, 15, 15, 15, 15, 15, 15],\n UT_MOD_KICKED['id']: [20, 20, 20, 20, 20, 20, 20, 100],\n UT_MOD_HEGRENADE['id']: [50, 50, 50, 50, 50, 50, 50, 100],\n UT_MOD_SR8['id']: [100, 100, 100, 100, 50, 50, 100, 100],\n UT_MOD_AK103['id']: [100, 58, 51, 34, 19, 19, 51, 100],\n UT_MOD_NEGEV['id']: [50, 34, 30, 20, 11, 11, 30, 100],\n UT_MOD_HK69_HIT['id']: [20, 20, 20, 20, 20, 20, 20, 100],\n UT_MOD_M4['id']: [100, 51, 44, 29, 17, 17, 44, 100],\n UT_MOD_GOOMBA['id']: [100, 100, 100, 100, 100, 100, 100, 100],\n 
}\n\ngearInfo = {\n    \n    #Sidearms\n    \n    'F':{'id':UT_MOD_BERETTA['id'], 'name':'Beretta', 'damage':damage[UT_MOD_BERETTA['id']]},\n    \n    'G':{'id':UT_MOD_DEAGLE['id'],'name':'Desert Eagle', 'damage':damage[UT_MOD_DEAGLE['id']]},\n\n    \n    #Primary\n    \n    'K':{'id':UT_MOD_HK69['id'], 'name':'HK69', 'damage': damage[UT_MOD_HK69['id']]},\n    \n    'L':{'id':UT_MOD_LR300['id'], 'name':'LR300', 'damage': damage[UT_MOD_LR300['id']]},\n    \n    'M':{'id':UT_MOD_G36['id'], 'name':'G36', 'damage': damage[UT_MOD_G36['id']]},\n    \n    'N':{'id':UT_MOD_PSG1['id'], 'name':'PSG1','damage': damage[UT_MOD_PSG1['id']]},\n    \n    'Z':{'id':UT_MOD_SR8['id'],'name':'SR8','damage': damage[UT_MOD_SR8['id']]},\n    \n    'a':{'id':UT_MOD_AK103['id'],'name':'AK103','damage': damage[UT_MOD_AK103['id']]},\n    \n    'c':{'id':UT_MOD_NEGEV['id'],'name':'Negev','damage': damage[UT_MOD_NEGEV['id']]},\n    \n    'e':{'id':UT_MOD_M4['id'],'name':'M4','damage': damage[UT_MOD_M4['id']]},\n\n    \n    #Primary and Secondary\n    \n    'H':{'id':UT_MOD_SPAS['id'],'name':'Spas', 'damage': damage[UT_MOD_SPAS['id']]},\n    \n    'I':{'id':UT_MOD_MP5K['id'],'name':'MP5k', 'damage': damage[UT_MOD_MP5K['id']]},\n    \n    'J':{'id':UT_MOD_UMP45['id'],'name':'UMP45', 'damage': damage[UT_MOD_UMP45['id']]},\n\n    \n    #Grenades\n    \n    'O':{'id':UT_MOD_HEGRENADE['id'], 'name':'HE Grenade', 'damage': damage[UT_MOD_HEGRENADE['id']]},\n    \n    'Q':{'name':'Smoke Grenade'}, #{'id':None,'name':'smoke grenade','damage': damage['PASSIVE']}, #Smoke nade\n\n    \n    #Items\n    \n    'R':{'name':'Kevlar Vest'}, #Kevlar vest\n    \n    'S':{'name':'Tac Goggles'}, #TAC Goggles\n    \n    'T':{'name':'Medkit'}, #Medkit\n    \n    'Y':{'name':'Silencer'}, #Silencer\n    \n    'V':{'name':'Laser Sight'}, #laser sight\n    \n    'W':{'name':'Kevlar Helmet'}, #kevlar helmet\n    \n    'X':{'name':'Extra Ammo'}, #extra ammo\n\n    'U':None, #unknown\n    \n    'A':None #None :D\n}\n\ndef timeparse(timeStr): #CREDIT TO B3\n    if not timeStr:\n        return 0\n    elif type(timeStr) is int:\n        return timeStr\n\n    timeStr = str(timeStr)\n    if not timeStr:\n        return 0\n    elif timeStr[-1:] == 'h':\n        return minutes2int(timeStr[:-1]) * 60\n    elif timeStr[-1:] == 'm':\n        return minutes2int(timeStr[:-1])\n    elif timeStr[-1:] == 's':\n        return minutes2int(timeStr[:-1]) / 60\n    elif timeStr[-1:] == 'd':\n        return minutes2int(timeStr[:-1]) * 60 * 24\n    elif timeStr[-1:] == 'w':\n        return minutes2int(timeStr[:-1]) * 60 * 24 * 7\n    else:\n        return minutes2int(timeStr)\n\ndef minutes2int(mins): #CREDIT TO B3\n    if re.match('^[0-9.]+$', mins):\n        return round(float(mins), 2)\n    else:\n        return 0\n\nclass UrTBotError(Exception): pass\nclass ConfigError(Exception): pass\nclass UrbanTerrorError(Exception): pass\n\ndef switchTeam(team):\n    x = {'blue':RED_TEAM, 'red':BLUE_TEAM}\n    if team.shortName in x: return x[team.shortName]\n    else: return UNK_TEAM\n\ndef getItemID(item): return globals()[item.upper()]['id']\nGlob = lambda: globals()\n\nrconGameType = '.*?(\\\\d+).*?\\\\d+.*?(\\\\d+)'\nrconCurrentMap = re.compile(r'.*?(?:[a-z][a-z0-9_]*).*?(?:[a-z][a-z0-9_]*).*?(?:[a-z][a-z0-9_]*).*?((?:[a-z][a-z0-9_]*))', re.IGNORECASE|re.DOTALL)\nrconStatus = re.compile(r'^(?P<slot>[0-9]+)\\s+(?P<score>[0-9-]+)\\s+(?P<ping>[0-9]+)\\s+(?P<guid>[0-9a-zA-Z]+)\\s+(?P<name>.*?)\\s+(?P<last>[0-9]+)\\s+(?P<ip>[0-9.]+):(?P<port>[0-9-]+)\\s+(?P<qport>[0-9]+)\\s+(?P<rate>[0-9]+)$', re.I)  # named groups restored (names follow B3's q3a status parser, which this file credits)\n\n\"\"\"\nSome content in this file has been found in the B3 source. \nWe thank the entire BigBrotherBot team for their contributions \nto the Urban Terror community, and credit much of the data/content \nin this file to them.\n\nAnd credit to utstatsbot for sorting most of this out and collecting it together.\nhttp://utstatsbot.googlecode.com/svn-history/r2/trunk/ut.log.format.txt\n\"\"\"\n\nclass Team():\n    def __init__(self, nice, code, longn):\n        self.shortName = nice\n        self.abbrv = self.shortName[0]\n        self.id = code\n        self.longName = longn\n\n    def __eq__(self, other):\n        if isinstance(other, int):\n            if other == self.id: return True\n        elif isinstance(other, Team):\n            if other.id == self.id: return True\n        elif isinstance(other, str):\n            if len(other) == 1 and self.abbrv == other: return True\n            elif other == self.longName or other == self.shortName: return True\n        return False\n\nRED_TEAM = Team('red', 1, 'Red Team')\nBLUE_TEAM = Team('blue', 2, 'Blue Team')\nSPEC_TEAM = Team('spec', 3, 'Spectator')\nUNK_TEAM = Team('unk', -1, 'Unknown')\n\nteams = {\n1:RED_TEAM,\n2:BLUE_TEAM,\n3:SPEC_TEAM,\n-1:UNK_TEAM\n}","sub_path":"const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":8571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"214013952","text":"# -*- coding: utf-8 -*-\n'''\nServer worker process\n'''\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\n# Import python stdlib\nimport os\nimport logging\nimport threading\n\n# Import napalm-logs pkgs\nfrom napalm_logs.proc import NapalmLogsProc\n\nlog = logging.getLogger(__name__)\n\n\nclass NapalmLogsServerProc(NapalmLogsProc):\n    '''\n    Server sub-process class.\n    '''\n    def __init__(self,\n                 pipe,\n                 os_pipe_map,\n                 config):\n        self.config = config\n        self.__pipe = pipe\n        self.__os_pipe_map = os_pipe_map\n        self.__up = False\n\n    def _identify_os(self, msg):\n        '''\n        Using the prefix of the syslog message,\n        we are able to identify the operating system and then continue parsing.\n        '''\n        return ('junos', 'crap')\n\n    def start(self):\n        '''\n        Take the messages from the queue,\n        inspect and identify the operating system,\n        then queue the message correspondingly.\n        '''\n        # Start suicide polling thread\n        thread = threading.Thread(target=self._suicide_when_without_parent, args=(os.getppid(),))\n        thread.start()\n        self.__up = True\n        while self.__up:\n            # Take messages from the main queue\n            msg = self.__pipe.recv()\n            id_os = self._identify_os(msg)\n            if not id_os or not isinstance(id_os, tuple):\n                # _identify_os should return a non-empty tuple\n                # providing the info for the device OS\n                # and the core message\n                continue\n            dev_os, core_msg = id_os\n            # Then send the message in the right queue\n            self.__os_pipe_map[dev_os].send(core_msg)\n\n    def stop(self):\n        self.__up = False\n","sub_path":"napalm_logs/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"47874860","text":"import RoadModel\nfrom random import choices\n\nclass Intersection:\n    def __init__(self, incoming_roads, outgoing_roads, probability_matrix, lights=True):\n        # list of roads incoming to that intersection\n        self.inRoads = incoming_roads\n        # list of roads outgoing from that intersection\n        self.outRoads = outgoing_roads\n        # probabilities of choosing specific outgoing road when coming from specific incoming road.\n        # Those are the parameters specifying the traffic distribution over simulation area\n        self.probabilities = probability_matrix\n        
self.posibilities = list(range(0, len(self.outRoads)))\n # counter of cars passing through that intersection\n self.counter = 0\n\n # variables for traffic lights handling\n self.lights = lights\n if self.lights:\n self.greenLight = 0\n for road in self.inRoads:\n road.toggleLights()\n self.inRoads[self.greenLight].toggleLights()\n\n # method handling road changes for cars arriving at the intersection\n def changeRoad(self):\n for road in range(len(self.inRoads)):\n cars = self.inRoads[road].removeCar()\n if not cars:\n continue\n else:\n for car in cars:\n choice = choices(self.posibilities, self.probabilities[road])[0]\n self.counter = self.counter + 1\n\n if road%2 == 0:\n if choice == road+1:\n changeDirection = False\n else:\n changeDirection = True\n else:\n if choice == road-1:\n changeDirection = False\n else:\n changeDirection = True\n\n self.outRoads[choice].addCar(car, changeDirection)\n for oroad in self.outRoads:\n oroad.order()\n\n # method for toggling lights on the intersection\n def toggleLights(self):\n if self.lights:\n self.inRoads[self.greenLight].toggleLights()\n self.greenLight = (self.greenLight+1) % len(self.inRoads)\n self.inRoads[self.greenLight].toggleLights()\n","sub_path":"src/IntersectionModel.py","file_name":"IntersectionModel.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"287712621","text":"from unittest import TestCase\n\nfrom musicscore.musicxml.elements.note import Notations\nfrom musicscore.musicxml.types.complextypes.notations import Fermata\n\n\nclass Test(TestCase):\n def test_1(self):\n fermata = Fermata()\n notations = Notations()\n notations.add_child(fermata)\n # print(notations.to_string())\n result = '''\n normal\n\n'''\n self.assertEqual(notations.to_string(), result)\n","sub_path":"tests/musicxml/elements/test_fermata.py","file_name":"test_fermata.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"148262922","text":"import FWCore.ParameterSet.Config as cms\n\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nfrom Configuration.StandardSequences.Eras import eras\n\nprocess = cms.Process('PLOT', eras.Run2_2017)\n\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff\")\nprocess.GlobalTag = GlobalTag(process.GlobalTag, '92X_dataRun2_HLT_v3', '')\n\nprocess.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames=cms.untracked.vstring())\nprocess.source.fileNames.extend([\n\n'/store/data/Run2017B/ZeroBias/RAW/v1/000/297/469/00000/02E754DD-2459-E711-BDA8-02163E01A583.root'\n])\n\nprocess.load('Configuration.StandardSequences.GeometryRecoDB_cff')\nprocess.load(\"Configuration.StandardSequences.RawToDigi_Data_cff\")\nprocess.load(\"SimCalorimetry.Configuration.hcalDigiSequence_cff\")\nprocess.load('SimCalorimetry.HcalTrigPrimProducers.hcaltpdigi_cff')\nprocess.load(\"EventFilter.L1TRawToDigi.caloStage2Digis_cfi\")\nprocess.load(\"EventFilter.L1TXRawToDigi.caloLayer1Stage2Digis_cfi\")\n\nprocess.TFileService = cms.Service(\"TFileService\",\n closeFileFasO=cms.untracked.bool(True),\n fileName=cms.string('analyze_tps.root'))\n\nprocess.emulTP = process.simHcalTriggerPrimitiveDigis.clone()\nprocess.emulTP.upgradeHF = cms.bool(True)\nprocess.emulTP.upgradeHE = cms.bool(True)\nprocess.emulTP.inputLabel = cms.VInputTag(\"hcalDigis\", 
\"hcalDigis\")\nprocess.emulTP.inputUpgradeLabel = cms.VInputTag(\"hcalDigis\", \"hcalDigis\")\n\nprocess.emulTP.numberOfSamples = cms.int32(4)\nprocess.emulTP.numberOfPresamples = cms.int32(2)\nprocess.emulTP.numberOfSamplesHF = cms.int32(2)\nprocess.emulTP.numberOfPresamplesHF = cms.int32(1)\n\nprocess.GlobalTag.toGet = cms.VPSet(\n cms.PSet(record = cms.string(\"HcalElectronicsMapRcd\"),\n tag = cms.string(\"HcalElectronicsMap_2017plan1_v3.0_data\"),\n connect = cms.string(\"frontier://FrontierProd/CMS_CONDITIONS\")\n )\n)\n\n# process.hcalDigis.InputLabel = cms.InputTag(\"source\")\nprocess.analyzeRAW = cms.EDAnalyzer(\"AnalyzeTP\",\n triggerPrimitives=cms.InputTag(\"hcalDigis\", \"\", \"\"))\nprocess.analyzeSIM = cms.EDAnalyzer(\"AnalyzeTP\",\n triggerPrimitives=cms.InputTag(\"emulTP\", \"\", \"\"))\nprocess.compare = cms.EDAnalyzer(\"CompareTP\",\n triggerPrimitives=cms.InputTag(\"hcalDigis\"),\n emulTriggerPrimitives=cms.InputTag(\"emulTP\"),\n swapIphi=cms.bool(False))\n\nprocess.analyzeL1T = cms.EDAnalyzer(\"AnalyzeTP\",\n triggerPrimitives = cms.InputTag(\"l1tCaloLayer1Digis\", \"\" , \"\")\n)\n\n\nprocess.emulTP2016 = process.simHcalTriggerPrimitiveDigis.clone()\nprocess.emulTP2016.upgradeHF = cms.bool(False)\nprocess.emulTP2016.upgradeHE = cms.bool(True)\nprocess.emulTP2016.inputLabel = cms.VInputTag(\"hcalDigis\", \"hcalDigis\")\nprocess.emulTP2016.inputUpgradeLabel = cms.VInputTag(\"hcalDigis\", \"hcalDigis\")\nprocess.emulTP2016.numberOfSamples = cms.int32(3)\nprocess.emulTP2016.numberOfPresamples = cms.int32(1)\n\nprocess.compare2016 = cms.EDAnalyzer(\"CompareTP\",\n triggerPrimitives=cms.InputTag(\"hcalDigis\"),\n emulTriggerPrimitives=cms.InputTag(\"emulTP2016\"),\n swapIphi=cms.bool(False))\n\nprocess.dump = cms.EDAnalyzer(\"EventContentAnalyzer\")\n\nprocess.p = cms.Path(\n process.hcalDigis *\n # process.dump *\n process.l1tCaloLayer1Digis *\n process.emulTP *\n process.analyzeRAW *\n process.analyzeSIM *\n process.compare \n# process.emulTP2016 *\n# process.compare2016\n)\n\n# print process.dumpPython()\n","sub_path":"test/analyze_tp_2017.py","file_name":"analyze_tp_2017.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"127338382","text":"#!python\nfrom more_or_less import PageOfHeight\nfrom more_or_less.fixed_size_screen import FixedSizeScreen\nfrom more_or_less.input import Input\nfrom more_or_less.more_page_builder import MorePageBuilder\nfrom more_or_less.output import Output\nfrom more_or_less.page_builder import StopOutput\nfrom more_or_less.wrapped_page import WrappedPage\nfrom unittest.mock import Mock\nimport unittest\n\n\nclass TestUtil(unittest.TestCase):\n\n def assertIsPageOfType(self, page, page_type):\n ''' assertIsInstance, but will first strip page-wrappers '''\n page = _skip_page_wrappers(page)\n self.assertIsInstance(page, page_type)\n\n def assertIsPageOfHeight(self, page, height):\n self.assertIsPageOfType(page, PageOfHeight)\n self.assertEqual(height, page.height)\n\n def assertIsFullscreenPage(self, page, screen_height=1000):\n self.assertIsPageOfHeight(page, _page_height_for_screen(screen_height))\n\n def get_more_page_builder(self, output=None, input=None, plugins=None, screen_height=1000):\n return MorePageBuilder(\n input=input or Mock(Input),\n output=output or Mock(Output),\n screen_dimensions=FixedSizeScreen(height=screen_height),\n plugins=plugins,\n )\n\n\nclass TestMorePageBuilder(TestUtil):\n\n def 
test_build_first_page_returns_page_of_screen_height_minus_one(self):\n screen_height = 10\n builder = self.get_more_page_builder(screen_height=screen_height)\n\n page = builder.build_first_page()\n\n self.assertIsPageOfHeight(page, screen_height - 1)\n\n def test_build_next_page_prompts_user_for_action(self):\n input = Mock(Input)\n input.get_character.return_value = ' '\n builder = self.get_more_page_builder(input=input)\n\n builder.build_next_page()\n\n input.get_character.assert_called_once_with('--More--')\n\n def test_returns_full_screen_page_if_user_presses_space(self):\n screen_height = 10\n input = Mock(Input)\n builder = self.get_more_page_builder(input=input, screen_height=10)\n\n input.get_character.return_value = ' '\n page = builder.build_next_page()\n\n self.assertIsFullscreenPage(page, screen_height)\n\n def test_returns_one_line_page_if_user_presses_enter(self):\n input = Mock(Input)\n builder = self.get_more_page_builder(input=input)\n\n input.get_character.return_value = '\\r'\n page = builder.build_next_page()\n\n self.assertIsPageOfHeight(page, 1)\n\n def test_enter_works_both_on_newline_and_carriage_return(self):\n input = Mock(Input)\n builder = self.get_more_page_builder(input=input)\n\n input.get_character.return_value = '\\n'\n page = builder.build_next_page()\n\n self.assertIsPageOfHeight(page, 1)\n\n def test_stops_output_if_user_presses_q(self):\n input = Mock(Input)\n builder = self.get_more_page_builder(input=input)\n\n input.get_character.return_value = 'q'\n with self.assertRaises(StopOutput):\n builder.build_next_page()\n\n def test_stops_output_if_user_presses_Q(self):\n input = Mock(Input)\n builder = self.get_more_page_builder(input=input)\n\n input.get_character.return_value = 'Q'\n with self.assertRaises(StopOutput):\n builder.build_next_page()\n\n def test_stops_output_on_ctrl_c(self):\n input = Mock(Input)\n builder = self.get_more_page_builder(input=input)\n\n input.get_character.side_effect = KeyboardInterrupt\n\n with self.assertRaises(StopOutput):\n builder.build_next_page()\n\n def test_ignores_unexpected_user_input(self):\n input = Mock(Input)\n builder = self.get_more_page_builder(input=input)\n\n input.get_character.side_effect = ['a', 'b', 'c', '\\r']\n\n builder.build_next_page()\n\n self.assertEqual(4, input.get_character.call_count)\n\n def test_user_can_enter_count_before_enter(self):\n input = Mock(Input)\n builder = self.get_more_page_builder(input=input)\n\n input.get_character.side_effect = ['5', '\\n']\n page = builder.build_next_page()\n\n self.assertIsPageOfHeight(page, 5)\n\n def test_count_becomes_the_new_default_for_enter(self):\n input = Mock(Input)\n builder = self.get_more_page_builder(input=input)\n\n input.get_character.side_effect = ['5', '\\n']\n builder.build_next_page()\n\n input.get_character.side_effect = ['\\n']\n second_page = builder.build_next_page()\n\n self.assertIsPageOfHeight(second_page, 5)\n\n def test_can_specify_count_bigger_than_10(self):\n input = Mock(Input)\n builder = self.get_more_page_builder(input=input)\n\n input.get_character.side_effect = ['5', '0', '0', '\\n']\n page = builder.build_next_page()\n\n self.assertIsPageOfHeight(page, 500)\n\n def test_user_can_enter_count_before_space(self):\n input = Mock(Input)\n builder = self.get_more_page_builder(input=input)\n\n input.get_character.side_effect = ['5', ' ']\n page = builder.build_next_page()\n\n self.assertIsPageOfHeight(page, 5)\n\n def test_count_does_not_become_the_new_default_for_space(self):\n input = Mock(Input)\n screen_height = 666\n 
builder = self.get_more_page_builder(input=input, screen_height=screen_height)\n\n input.get_character.side_effect = ['5', ' ']\n builder.build_next_page()\n\n input.get_character.side_effect = [' ']\n second_page = builder.build_next_page()\n\n self.assertIsFullscreenPage(second_page, screen_height)\n\n\ndef _page_height_for_screen(screen_height):\n height_reserved_for_more_prompt = 1\n return screen_height - height_reserved_for_more_prompt\n\n\ndef _skip_page_wrappers(page):\n while isinstance(page, WrappedPage):\n page = page.wrapped_page\n return page","sub_path":"tests/test_more_page_builder.py","file_name":"test_more_page_builder.py","file_ext":"py","file_size_in_byte":5857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"271766014","text":"from __future__ import division\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport matplotlib as plt\nimport stock_func as sf\nimport copy, os\nimport variables as v\nimport stock_func as sf\n\ndef init_model(data_dir):\n global stock_df\n stock_df = pd.read_csv(data_dir + v.stock_dataset, header=0)\n global df_full\n global df_input\n global df_target\n df_full, df_input, df_target = sf.get_data(data_dir=data_dir)\n \n global dayofweek \n dayofweek = int(pd.to_datetime(df_full['Date'].get_values()[0]).strftime('%w'))-1\n \n global trnstartpoint\n trnstartpoint = 90\n global trnendpoint\n trnendpoint = len(df_full)-0\n \n global df_val_input\n df_val_input = df_input[trnendpoint:len(df_full)]\n global df_val_target\n df_val_target = df_target[trnendpoint:len(df_full)]\n \n df_input = df_input[trnstartpoint:trnendpoint]\n df_target = df_target[trnstartpoint:trnendpoint]\n \n # Define variables for the number of predictors and targets\n num_predictors = len(df_input.columns)\n num_classes = len(df_target.columns)\n \n # Feed forward neural network with two hidden layers\n #sess = tf.Session()\n tf.reset_default_graph()\n \n # define placeholders forhe data we feed into the process - feature data and actual classes\n global feature_data\n feature_data = tf.placeholder(\"float\", [None, num_predictors], name=\"feature_data\")\n global actual_classes\n actual_classes = tf.placeholder(\"float\", [None, num_classes], name=\"actual_classes\")\n \n # define a matrix of weights and biases and initiale it with some small random values\n weights1 = tf.Variable(tf.truncated_normal([num_predictors,100],stddev=0.0001),name=\"weights1\")\n biases1 = tf.Variable(tf.zeros([100]),name=\"biases1\")\n \n weights2 = tf.Variable(tf.truncated_normal([100,50],stddev=0.0001),name=\"weights2\")\n biases2 = tf.Variable(tf.zeros([50]),name=\"biases2\")\n \n weights3 = tf.Variable(tf.truncated_normal([50,25],stddev=0.0001),name=\"weights3\")\n biases3 = tf.Variable(tf.zeros([25]),name=\"biases3\")\n \n weights4 = tf.Variable(tf.truncated_normal([25,num_classes],stddev=0.0001),name=\"weights4\")\n biases4 = tf.Variable(tf.zeros([num_classes]),name=\"biases4\")\n \n hidden_layer1 = tf.nn.relu(tf.matmul(feature_data,weights1) + biases1, name=\"hidden_layer1\")\n hidden_layer2 = tf.nn.relu(tf.matmul(hidden_layer1,weights2) + biases2, name=\"hidden_layer2\")\n hidden_layer3 = tf.nn.relu(tf.matmul(hidden_layer2,weights3) + biases3, name=\"hidden_layer3\")\n \n # Define our model\n global model\n model = tf.nn.softmax(tf.matmul(hidden_layer3, weights4) + biases4, name=\"model\")\n \n correct_prediction = tf.equal(tf.argmax(model,1), tf.argmax(actual_classes,1),name=\"correct_prediction\")\n global 
accuracy\n accuracy = tf.reduce_mean(tf.cast(correct_prediction,\"float\"),name=\"accuracy\")\n \n # Define a cost function using cross-entropy\n cost = -tf.reduce_sum(actual_classes*tf.log(model),name=\"cost\")\n \n # Define a training step\n global training_step\n training_step = tf.train.AdamOptimizer(learning_rate=0.0001, name=\"training_step\").minimize(cost)\n \n global saver\n saver = tf.train.Saver(tf.all_variables())","sub_path":"model_tts.py","file_name":"model_tts.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"497599843","text":"# Metadata\n__author__ = 'Andrey Popov'\n__email__ = 'Andrey.Popov@cern.ch'\n\n\n#Taken from https://svnweb.cern.ch/trac/singletop/browser/branches/8TeV/CMSSW/SingleTop/python/EventFilters_cff.py\n\nimport FWCore.ParameterSet.Config as cms\n\n# Function to set the filters up in accordance with the process\ndef ApplyEventFilters(process, goodVertices = 'goodOfflinePrimaryVertices', runOnFastSim = False,\n run53XFilters = True):\n \"\"\" The function initialises a number of filters to reject anomalous events. It packs them into\n sequence 'eventFilterSequence' which is added to the process (but the user has to insert it\n into the appropriate paths). Some of the filters depend on the PAT collections, therefore\n the prepared sequence must be added after the PAT one. All the filters are applied to both\n real data and MC simulation on identical basis.\n\n The arguments:\n\n process: An instance of cms.Process which the filters are added to.\n\n goodVertices: The collection of the selected (\"good\") primary vertices.\n\n runOnFastSim: Indicates whether the code processes a dataset produced with FastSimulation.\n Some of the filters cannot be evaluated in this case and are switched off.\n\n run53XFilters: Indicated whether to run the filters which require information stored in 53X\n only but not in 52X. Since the RelVals are reconstructed in 52X, these filters ought to\n be turned off in test runs.\n \"\"\"\n eventFiltersSequence = cms.Sequence()\n\n\n # Scraping events\n # https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookCollisionsDataAnalysis#Recipes_to_get_started\n process.scrapingFilter = cms.EDFilter('FilterOutScraping',\n applyfilter = cms.untracked.bool(True),\n debugOn = cms.untracked.bool(False),\n numtrack = cms.untracked.uint32(10),\n thresh = cms.untracked.double(0.25))\n\n eventFiltersSequence += process.scrapingFilter\n\n\n # CSC beam halo\n # https://twiki.cern.ch/twiki/bin/view/CMS/MissingETOptionalFilters#CSC_Beam_Halo_Filter\n if not runOnFastSim:\n process.load('RecoMET.METAnalyzers.CSCHaloFilter_cfi')\n eventFiltersSequence += process.CSCTightHaloFilter\n\n\n # HBHE noise filter. 
MET TWiki states (*) that requirements for noise isolation are needed despite\n # HBHE noise TWiki switches them off (**)\n # (*) https://twiki.cern.ch/twiki/bin/view/CMS/MissingETOptionalFilters#HBHE_Noise_Filter\n # (**) https://twiki.cern.ch/twiki/bin/view/CMS/HBHEAnomalousSignals2011\n if not runOnFastSim:\n process.load('CommonTools.RecoAlgos.HBHENoiseFilter_cfi')\n eventFiltersSequence += process.HBHENoiseFilter\n\n\n # HCAL laser events\n # https://twiki.cern.ch/twiki/bin/view/CMS/MissingETOptionalFilters#HCAL_laser_events\n # https://twiki.cern.ch/twiki/bin/view/CMS/PdmVKnowFeatures#HCAL_laser_events_in_prompt_2012\n process.load(\"EventFilter.HcalRawToDigi.hcallasereventfilter2012_cff\")\n\n eventFiltersSequence += process.hcallLaserEvent2012Filter\n\n\n # ECAL dead cell filter\n # https://twiki.cern.ch/twiki/bin/view/CMS/MissingETOptionalFilters#ECAL_dead_cell_filter\n process.load('RecoMET.METFilters.EcalDeadCellTriggerPrimitiveFilter_cfi')\n process.EcalDeadCellTriggerPrimitiveFilter.tpDigiCollection = cms.InputTag('ecalTPSkimNA')\n\n process.load('RecoMET.METFilters.EcalDeadCellBoundaryEnergyFilter_cfi')\n process.EcalDeadCellBoundaryEnergyFilter.taggingMode = cms.bool(False)\n process.EcalDeadCellBoundaryEnergyFilter.cutBoundEnergyDeadCellsEB=cms.untracked.double(10)\n process.EcalDeadCellBoundaryEnergyFilter.cutBoundEnergyDeadCellsEE=cms.untracked.double(10)\n process.EcalDeadCellBoundaryEnergyFilter.cutBoundEnergyGapEB=cms.untracked.double(100)\n process.EcalDeadCellBoundaryEnergyFilter.cutBoundEnergyGapEE=cms.untracked.double(100)\n process.EcalDeadCellBoundaryEnergyFilter.enableGap=cms.untracked.bool(False)\n process.EcalDeadCellBoundaryEnergyFilter.limitDeadCellToChannelStatusEB = cms.vint32(12,14)\n process.EcalDeadCellBoundaryEnergyFilter.limitDeadCellToChannelStatusEE = cms.vint32(12,14)\n\n eventFiltersSequence += process.EcalDeadCellTriggerPrimitiveFilter\n # EcalDeadCellBoundaryEnergyFilter is configured but not actually used yet\n\n\n # Tracking failure filter\n # https://twiki.cern.ch/twiki/bin/view/CMS/MissingETOptionalFilters#Tracking_failure_filter\n process.load('RecoMET.METFilters.trackingFailureFilter_cfi')\n process.trackingFailureFilter.VertexSource = goodVertices\n eventFiltersSequence += process.trackingFailureFilter\n\n\n # Bad EE supercrystal filter\n # https://twiki.cern.ch/twiki/bin/view/CMS/MissingETOptionalFilters#Bad_EE_Supercrystal_filter_added\n process.load('RecoMET.METFilters.eeBadScFilter_cfi')\n eventFiltersSequence += process.eeBadScFilter\n\n\n # Large laser correction calibration filter\n # https://twiki.cern.ch/twiki/bin/view/CMS/MissingETOptionalFilters#EB_or_EE_Xtals_with_large_laser\n process.load('RecoMET.METFilters.ecalLaserCorrFilter_cfi')\n eventFiltersSequence += process.ecalLaserCorrFilter\n\n\n # Tracking POG filters\n # https://twiki.cern.ch/twiki/bin/view/CMS/MissingETOptionalFilters#Tracking_odd_events_filters_trac\n # https://twiki.cern.ch/twiki/bin/view/CMS/TrackingPOGFilters#Filters\n if not runOnFastSim and run53XFilters:\n process.load('RecoMET.METFilters.trackingPOGFilters_cff')\n eventFiltersSequence += process.trkPOGFilters\n\n\n\n # Add the sequence containing all the filters to the process\n process.eventFiltersSequence = eventFiltersSequence\n","sub_path":"CMSSW/src/SingleTopPolarization/Analysis/python/EventFilters_cff.py","file_name":"EventFilters_cff.py","file_ext":"py","file_size_in_byte":5626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} 
+{"seq_id":"205453277","text":"from node import Node\n\nclass Queue(object):\n\n def __init__(self):\n self.first = None\n self.last = None\n\n\n def add(self, value):\n if self.first == None:\n self.first = Node(value)\n self.last = self.first\n\n else:\n self.last.next = Node(value)\n self.last = self.last.next\n\n\n def remove(self):\n if self.is_empty():\n return \"empty queue\"\n\n if self.first == self.last:\n data = self.first.value\n self.first = None\n self.last = None\n return data\n\n else:\n data = self.first.value\n self.first = self.first.next\n return data\n\n\n def is_empty(self):\n return self.first == None\n\n\n def peek(self):\n if self.is_empty():\n return \"empty queue\"\n\n return self.last.value\n\n\n def iterate_queue(self, node):\n if node == None:\n return 0\n\n else:\n print(node.value)\n return self.iterate_queue(node.next)\n","sub_path":"CCI_problems/trees_and_graphs/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"472173739","text":"\"\"\"Test the regex module.\"\"\"\nimport pytest\n\nfrom scrapd.core import parsing\nfrom scrapd.core import regex\nfrom tests.core.test_apd import load_test_page\n\n\n@pytest.mark.parametrize('input_,expected', (\n (\"traffic-fatality-50-3\", 2),\n (\"traffic-fatality-73-2\", 1),\n))\ndef test_parse_twitter_description_number_deceased(input_, expected):\n \"\"\"\n Test that the parser finds the right number of deceased people.\n \"\"\"\n page_text = load_test_page(input_)\n twitter_description = regex.match_twitter_description_meta(page_text)\n d = parsing.parse_twitter_description(twitter_description)\n actual = len(d[\"Deceased\"])\n assert actual == expected\n\n\n@pytest.mark.parametrize('input_,expected', (\n ('
Case: 19-0881844', '19-0881844'),\n ('Case: 18-3640187', '18-3640187'),\n ('Case: 19-0161105', '19-0161105'),\n ('Case: 18-1591949', '18-1591949'),\n ('Case: 18-590287', '18-590287'),\n))\ndef test_parse_case_field_00(input_, expected):\n \"\"\"Ensure a case field gets parsed correctly.\"\"\"\n actual = regex.match_case_field(input_)\n assert actual == expected\n\n\n@pytest.mark.parametrize(\n 'input_, expected',\n (('', '12'), ))\ndef test_parse_crashes_field_00(input_, expected):\n \"\"\"Ensure the crashes field gets parsed correctly.\"\"\"\n actual = regex.match_crashes_field(input_)\n assert actual == expected\n\n\n@pytest.mark.parametrize('input_,expected',\n (('', 'Traffic Fatality #2'), ))\ndef test_extract_twitter_title_meta_00(input_, expected):\n \"\"\"Ensure we can extract the twitter title from the meta tag.\"\"\"\n actual = regex.match_twitter_title_meta(input_)\n assert actual == expected\n\n\n@pytest.mark.parametrize('input_,expected', (\n ('',\n 'Case: 18-3551763 Date: December 21, 2018 Time: 8:20 p.m. '\n 'Location: 9500 N Mopac SB'),\n ('', 'Case: 19-0161105'),\n))\ndef test_extract_twitter_description_meta_00(input_, expected):\n \"\"\"Ensure we can extract the twitter description from the meta tag.\"\"\"\n actual = regex.match_twitter_description_meta(input_)\n assert actual == expected\n\n\n@pytest.mark.parametrize('input_,expected', (\n (\n '>Location: West William Cannon Drive and Ridge Oak Road',\n 'West William Cannon Drive and Ridge Oak Road',\n ),\n (\n '>Location: 183 service road westbound and Payton Gin Rd.',\n '183 service road westbound and Payton Gin Rd.',\n ),\n (\n '\tLocation: 8900 block of N Capital of Texas Highway
',\n '8900 block of N Capital of Texas Highway ',\n ),\n))\ndef test_parse_location_field_00(input_, expected):\n \"\"\"Ensure.\"\"\"\n actual = regex.match_location_field(input_)\n assert actual == expected\n","sub_path":"tests/core/test_regex.py","file_name":"test_regex.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"557622108","text":"import csv\nimport json\nimport os\nimport re\nimport shutil\n\nnameList = []\npartList = []\nfileName = \"\"\ncatList = dict()\n\nif os.path.exists(\"../assets2\"):\n shutil.rmtree(\"../assets2\");\n\t\nif os.path.exists(\"../json_set1\"):\n shutil.rmtree(\"../json_set1\")\n\t\nif os.path.exists(\"../json_set2\"):\n shutil.rmtree(\"../json_set2\")\n\t\nif os.path.exists(\"../output\"):\n shutil.rmtree(\"../output\")\n\t\nos.mkdir(\"../assets2\")\nos.mkdir(\"../output\")\n\nwith open(\"../assets/catalog_data.csv\", \"r\", encoding=\"utf-8-sig\") as infile:\n reader = csv.DictReader(infile)\n for row in reader:\n if(row['PARTNUMBER'] != ''):\n partList.append(row['PARTNUMBER'])\n if(row['CATENTRY_ID'] != ''):\n catList.update({row['PARTNUMBER'] : row['CATENTRY_ID']})\n if(row['NAME'] != ''):\n nameList.append(row['NAME'])\n\npList = partList\npartList = list(set(partList))\n\nwith open (\"../assets/catalog_data.csv\", \"r\", encoding = \"utf-8-sig\") as infile:\n for row in infile:\n if os.path.exists(\"../output/\"+row.partition(\",\")[0]+\".txt\"):\n mode = 'a'\n else:\n mode = 'w'\n with open (\"../output/\"+row.partition(\",\")[0]+\".txt\", mode) as outfile:\n if (mode == 'w'):\n outfile.write(\"PARTNUMBER,CATENTRY_ID,BRAND,CATEGORY,NAME,ATTRIBUTENAME,ATTRIBUTE,SHORTDESCRIPTION,LONGDESCRIPTION,OFFERPRICE\")\n outfile.write(\"\\n\")\n outfile.write(str(row))\nwith open (\"../assets/2.txt\", \"r\", encoding = \"utf-8-sig\") as infile:\n for row in infile:\n for i in range(len(nameList)):\n if nameList[i] in row:\n fileName = nameList[i]\n if os.path.exists(\"../assets2/\"+fileName+\".txt\"):\n mode = 'a'\n else:\n mode = 'w'\n with open (\"../assets2/\"+fileName+\".txt\", mode) as outfile:\n outfile.write(str(row))\n\npartNamePair = dict()\nfor i in range (len(partList)):\n try:\n with open (\"../output/\"+partList[i]+\".txt\", \"r\", encoding = \"utf-8-sig\") as infile:\n reader = csv.DictReader(infile)\n for row in reader:\n partNamePair.update({row['PARTNUMBER'] : row['NAME']})\n except OSError:\n pass\n#partNamePairUnique = {}\n\n#for key,value in partNamePair.items():\n# if value not in partNamePairUnique.values():\n# partNamePairUnique[key] = value\n\nparentCatentryList = {}\n\nfor i in range (len(partList)):\n with open (\"../output/\"+partList[i]+\".txt\", \"a\") as outfile:\n try:\n #print(partNamePairUnique.get(partList[i], \"duck\"))\n with open (\"../assets2/\"+partNamePair.get(partList[i], \"duck\")+\".txt\", \"r\", encoding = \"utf-8-sig\") as infile:\n for row in infile:\n parentCatentryList.update({partList[i] : row.partition(\",\")[0]})\n row = row.replace(row.partition(\",\")[0], partList[i])\n row = row.replace(row.split(\",\",2)[1], catList.get(partList[i]))\n outfile.write(row)\n except OSError:\n pass\n\njsonNest = {}\n\nfor i in range (len(partList)):\n with open (\"../output/\"+partList[i]+\".txt\", \"r\", encoding = \"utf-8-sig\") as infile:\n reader = csv.DictReader(infile)\n jsonNest = {}\n for row in reader:\n count = pList.count(partList[i])\n if jsonNest is not None:\n jsonNest[row['ATTRIBUTENAME']] = row['ATTRIBUTE']\n count 
= count-1\n if (count == 1): \n del row['ATTRIBUTE']\n del row['ATTRIBUTENAME']\n #row['MRP'] = float(row['MRP'])\n row['OFFERPRICE'] = float(row['OFFERPRICE'])\n row['ATTRIBUTES'] = jsonNest\n row['PARENTPARTNUMBER'] = parentCatentryList.get(partList[i],\"duck\")\n if row['PARENTPARTNUMBER'] == \"duck\":\n if len(row['PARTNUMBER'].rsplit(\"_\", 1)[-1]) == 4:\n row['PARENTPARTNUMBER'] = row['PARTNUMBER']\n else:\n row['PARENTPARTNUMBER'] = row['PARTNUMBER'][:-2]\n try:\n os.makedirs(\"../json_set1/\" + row['CATEGORY'])\n except OSError:\n pass\n with open (\"../json_set1/\" + row['CATEGORY']+\"/\" + row['PARTNUMBER'] + \".json\", \"w\") as outfile:\n json.dump(row, outfile, sort_keys=True, indent=4, ensure_ascii=False)\n\njsonNest = {}\n\nfor i in range (len(partList)):\n with open (\"../output/\"+partList[i]+\".txt\", \"r\", encoding = \"utf-8-sig\") as infile:\n reader = csv.DictReader(infile)\n jsonNest = {}\n strMod = []\n for row in reader:\n count = pList.count(partList[i])\n if jsonNest is not None:\n jsonNest[row['ATTRIBUTENAME']] = row['ATTRIBUTE']\n count = count-1\n if (count == 1): \n del row['ATTRIBUTE']\n del row['ATTRIBUTENAME']\n #row['MRP'] = float(row['MRP'])\n row['OFFERPRICE'] = float(row['OFFERPRICE'])\n row['ATTRIBUTES'] = jsonNest\n row['PARENTPARTNUMBER'] = parentCatentryList.get(partList[i],\"duck\")\n jsonMod = jsonNest.copy()\n s = \"\"\n for k,v in jsonMod.items():\n try:\n if re.search('color', k, re.IGNORECASE):\n jsonMod['color'] = jsonMod.pop(k)\n if re.search('size', k, re.IGNORECASE):\n jsonMod['size'] = jsonMod.pop(k)\n if re.search('brand', k, re.IGNORECASE):\n jsonMod['brand'] = jsonMod.pop(k)\n if re.search('type', k, re.IGNORECASE):\n jsonMod['type'] = jsonMod.pop(k)\n if re.search('style', k, re.IGNORECASE):\n jsonMod['style'] = jsonMod.pop(k)\n if re.search('volts', k, re.IGNORECASE):\n jsonMod['volts'] = jsonMod.pop(k)\n if re.search('watts', k, re.IGNORECASE):\n jsonMod['watts'] = jsonMod.pop(k)\n if re.search('category', k, re.IGNORECASE):\n jsonMod['category'] = jsonMod.pop(k)\n if re.search('speed', k, re.IGNORECASE):\n jsonMod['speed'] = jsonMod.pop(k)\n k.replace(\"Kitchenware\", \"\")\n except KeyError:\n pass\n stratt = \"\"\n for k,v in jsonMod.items():\n stratt = v + \" \" + k\n strMod.append(stratt)\n row['MODATTRIBUTES'] = str(set(strMod)).replace(\"{\",\"\").replace(\"}\",\"\")\n if row['PARENTPARTNUMBER'] == \"duck\":\n if len(row['PARTNUMBER'].rsplit(\"_\", 1)[-1]) == 4:\n row['PARENTPARTNUMBER'] = row['PARTNUMBER']\n else:\n row['PARENTPARTNUMBER'] = row['PARTNUMBER'][:-2]\n try:\n os.makedirs(\"../json_set2/\" + row['CATEGORY'])\n except OSError:\n pass\n with open (\"../json_set2/\" + row['CATEGORY']+\"/\" + row['PARTNUMBER'] + \".json\", \"w\") as outfile:\n json.dump(row, outfile, sort_keys=True, indent=4, ensure_ascii=False)\n \n","sub_path":"CSVGen_NRF/src/src.py","file_name":"src.py","file_ext":"py","file_size_in_byte":7571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"29065774","text":"import unittest\nimport json\nimport server\n\n# Fake\nauthHeaders = [(\n 'cookie',\n '_shibsession_64656661756c7468747470733a2f2f706f6f7272696368617264736c69737448c36f6d2f73686962695c6c657468=_ddb1128649n08aa8e7a462de9970df3e'\n)]\nAUTH_TOKEN = b'5e625cf41e3b7838c79b49d890a203c568a44c3b27362b0a06ab6f08bec8f677'\n\n\nclass AuthApiTests(unittest.TestCase):\n def setUp(self):\n server.app.config['TESTING'] = True\n\n def testAuth(self):\n with 
server.app.test_request_context(headers=authHeaders):\n authToken = server.auth.auth()\n self.assertEquals(AUTH_TOKEN, authToken)\n\n def testTokenValidation(self):\n with server.app.test_request_context(headers=authHeaders):\n server.auth.auth()\n res = json.loads(\n server.auth.validate(AUTH_TOKEN).data.decode('utf8'))\n self.assertEquals(res['status'], 'valid')\n\n def testInvalidTokenValidation(self):\n with server.app.test_request_context(headers=authHeaders):\n server.auth.auth()\n res = json.loads(\n server.auth.validate(\"badtoken\")[0].data.decode('utf8'))\n self.assertEquals(res['status'], 'invalid')\n\n def testTokenValidationNoHttps(self):\n with server.app.test_request_context(headers=authHeaders):\n server.app.config['TESTING'] = False\n server.auth.auth()\n res = json.loads(\n server.auth.validate(AUTH_TOKEN)[0].data.decode('utf8'))\n self.assertEquals(res['status'], 'insecure access over http')\n","sub_path":"tests/auth_tests.py","file_name":"auth_tests.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"621531312","text":"import json\nfrom src.vppNode.Optimizer import Optimizer\nfrom .test_VppInterface import *\nimport pprint\nimport os\nfrom src.Utils.utils import NodeResults\n# import pyperclip\n# from src.Models.Models import VppNodeModel\n\n# init\noptimizer = Optimizer(interf)\noutputFileName = 'output/test_Optimizer.json'\n\n#test\nif __name__ == '__main__':\n optimizer.optimize()\n optimizer.distribute_results()\n # data = interf.get_graph_asJson()\n if input('dump data in output/test_Optimizer.json (yes/no): ').lower()[0] == 'y':\n os.makedirs(os.path.dirname(outputFileName), exist_ok=True)\n with open(outputFileName, 'w') as fp:\n json.dump(vppNode.to_dict(), fp=fp, indent=4, sort_keys=True)\n node_results = NodeResults(vppNode)\n node_results.to_excel(buy=True, sell=True)\n else:\n print('ok then')\n # print(data)\n # pyperclip.copy(data)\n \n # model = VppNodeModel()\n # model.insert_vppnode(vppNode)\n\nprint('\\nOFV: ', vppNode.OFV)\n","sub_path":"tests/VPP/test_Optimizer.py","file_name":"test_Optimizer.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"32205838","text":"# https://www.hackerrank.com/challenges/climbing-the-leaderboard/problem\n\ndef bSearch(l,s,e,v):#l is in reverse order\n if e>=s:\n mid = s+int((e-s)/2)\n if l[mid]==v:\n return mid\n elif l[mid]>v:\n # looking into right sub array\n return bSearch(l,mid+1,e,v)\n elif v>l[mid]:\n # left sub array\n return bSearch(l,s,mid-1,v)\n else:\n if l[s]=scr[0]:\n pos.append(0)\n elif i-1>=0 and ascr[i]==ascr[i-1]:\n pos.append(pos[-1])\n else:\n if len(pos)!=0:\n p= bSearch(scr,0,pos[-1]-1,ascr[i])\n else:\n p= bSearch(scr,0,len(scr)-1,ascr[i])\n pos.append(p)\n\n \nfor i in pos:\n print(i+1)","sub_path":"Random/climbLeaderboard.py","file_name":"climbLeaderboard.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"68426941","text":"import configparser\n\n\nclass Config:\n def __init__(self, config_file):\n self.config_file = config_file\n config = configparser.ConfigParser()\n\n if not config.read(config_file, encoding='utf-8'):\n print('[Console] Config file not found')\n\n config = configparser.ConfigParser(interpolation=None)\n config.read(config_file, encoding='utf-8')\n\n self._token = config.get('BOT', 
'token', fallback=DefaultConfigs.token)\n\n self.auth = None\n\n self.server = config.get('MUSIC', 'server_id', fallback=DefaultConfigs.server_id)\n self.channel = config.get('MUSIC', 'channel_id', fallback=DefaultConfigs.channel_id)\n self.chat = config.get('MUSIC', 'music_chat', fallback=DefaultConfigs.music_chat)\n\n self.top_role = config.get('ROLE', 'top_role', fallback=DefaultConfigs.top_role)\n\n self.run_checks()\n\n def run_checks(self):\n if not self._token:\n print('[Console] Token is not found')\n else:\n self.auth = self._token\n\n\nclass DefaultConfigs:\n token = None\n\n server_id = None\n channel_id = None\n music_chat = None\n\n top_role = 'Admin'\n\n options_file = 'config/option.ini'\n","sub_path":"lib/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"200293634","text":"from math import sqrt,exp,cos,sin,radians\nimport matplotlib.pyplot as plt\n\ndef mph_to_mps(x):\n return x*0.447\n\ndef drag_force(velocity):\n #Returns the force on a baseball due to air drag at a particular velocity\n return (0.0039+0.0058/(1.0 + exp((velocity-35.0)/5.0))) * velocity\n\nv = mph_to_mps(110.0)\ny=1\nx=0\ndt=0.1\ntheta = radians(35)\n\ndef solve(x,y,vel,v_wind,launch_angle):\n xs=[]\n ys=[]\n v_x = vel*cos(launch_angle)\n v_y = vel*sin(launch_angle)\n\n while (y>=0):\n #Euler equations\n x += v_x*dt\n y += v_y*dt\n\n #Force due to air drag\n velocity = sqrt((v_x - v_wind)**2 + v_y**2)\n F = drag_force(velocity)\n\n #Euler equations for vx and vy (drag opposes the relative motion)\n v_x = v_x - F*(v_x - v_wind)*dt\n v_y = v_y - 9.8*dt - F*v_y*dt\n xs.append(x)\n ys.append(y)\n return xs,ys\n\nx,y = solve(x=0,y=1,vel=v,v_wind=0,launch_angle=theta)\nplt.scatter(x,y)\n","sub_path":"ballwithdrag.py","file_name":"ballwithdrag.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"499540899","text":"import requests\nfrom bs4 import BeautifulSoup\n\nresp = requests.get('http://jwlin.github.io/py-scraping-analysis-book/ch2/table/table.html')\nsoup = BeautifulSoup(resp.text, 'html.parser')\n\n# list for storing course prices\n# get all course prices: method 1, using an index\n\n# rows = soup.find('table', 'table').tbody.find_all('tr')\n# for row in rows:\n # price = row.find_all('td')[2].text # the 3rd one (index 2)\n # prices.append(int(price))\n # print(sum(prices)/len(prices)) # compute the average course price\n\n# get every column of each row: find_all('td') or row.children\nrows = soup.find('table', 'table').tbody.find_all('tr')\nfor row in rows:\n # method.1 find_all('td')\n all_tds = row.find_all('td')\n # method.2 find all of row()'s direct (next-level) children\n all_tds = [td for td in row.children]\n # get the href attribute after checking it is present\n if 'href' in all_tds[3].a.attrs:\n href = all_tds[3].a['href']\n else:\n href = None\n print(all_tds[0].text, all_tds[1].text, all_tds[2].text, href, all_tds[3].a.img['src'])","sub_path":"navigation.py","file_name":"navigation.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"151654843","text":"\"\"\"\nVelbus sensors.\n\nFor more details about this platform, please refer to the documentation\nhttps://home-assistant.io/components/sensor.velbus/\n\"\"\"\nimport logging\n\nfrom homeassistant.components.velbus import (\n DOMAIN as VELBUS_DOMAIN, VelbusEntity)\n\n_LOGGER = logging.getLogger(__name__)\n\nDEPENDENCIES = ['velbus']\n\n\nasync def async_setup_platform(hass, config, async_add_entities,\n 
discovery_info=None):\n \"\"\"Set up the Velbus temp sensor platform.\"\"\"\n if discovery_info is None:\n return\n sensors = []\n for sensor in discovery_info:\n module = hass.data[VELBUS_DOMAIN].get_module(sensor[0])\n channel = sensor[1]\n sensors.append(VelbusSensor(module, channel))\n async_add_entities(sensors)\n\n\nclass VelbusSensor(VelbusEntity):\n \"\"\"Representation of a sensor.\"\"\"\n\n @property\n def device_class(self):\n \"\"\"Return the device class of the sensor.\"\"\"\n return self._module.get_class(self._channel)\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return self._module.get_state(self._channel)\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit this state is expressed in.\"\"\"\n return self._module.get_unit(self._channel)\n","sub_path":"homeassistant/components/sensor/velbus.py","file_name":"velbus.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"60883611","text":"import csv\r\nimport os\r\nimport os.path\r\nfrom time import time\r\nfrom pyspark import SparkContext, SparkConf\r\nAPP_NAME = \"what is popular\"\r\n\r\ndef To_CSV(records):\r\n \"\"\"Write out CSV lines\"\"\"\r\n with open(\r\n os.path.join(r\"F:/Do an chuyen nganh/TT-TeamRC/DEMO-PROJECT.git/trunk/Save/RECOMMENDATION_SIMILARITY_CSV/\",\r\n \"What_is_popular\" + '.csv'), \"w\",\r\n newline='') as csvfile:\r\n writer = csv.writer(csvfile, delimiter=',',\r\n quotechar=',', quoting=csv.QUOTE_MINIMAL)\r\n writer.writerows(records)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n conf = SparkConf().setAppName(APP_NAME)\r\n conf.set(\"spark.akka.timeout\", \"200s\")\r\n conf = conf.setMaster(\"local[*]\")\r\n sc= SparkContext(conf=conf)\r\n # sc = SparkContext(appName=\"what is popular\")\r\n t0 = time()\r\n movies_raw_data = sc.textFile(\"F:/Do an chuyen nganh/TT-TeamRC/DEMO-PROJECT.git/trunk/Metadata/IMDBMovies.csv\")\r\n small_ratings_raw_data_header = movies_raw_data.take(1)[0]\r\n movies_data = movies_raw_data.filter(lambda line: line != small_ratings_raw_data_header).map(\r\n lambda line: line.split(\",\")).map(\r\n lambda token: (int(token[0]), token[1]))\r\n events_raw_data = sc.textFile(\"F:/Do an chuyen nganh/TT-TeamRC/DEMO-PROJECT.git/trunk/UserProfile/userevent.csv\")\r\n other = sc.parallelize([1])\r\n events_data = events_raw_data.map(lambda line: line.split(\";\")).map(lambda x: (int(x[1]))).cartesian(other)\r\n # movies_data = movies_data.cartesian(other)\r\n\r\n print(movies_data.take(10))\r\n # print(events_data.collect())\r\n # print(result)\r\n # rs =events_data.join(movies_data)\r\n # print(rs.take(5))\r\n result = events_data.reduceByKey(lambda x, y: (x + y)).takeOrdered(25, key=lambda x: -x[1])\r\n # result = events_data.reduceByKey(lambda x, y: (x + y))\r\n print(result)\r\n list = []\r\n for rs in result:\r\n for x in movies_data.collect():\r\n if(rs[0] == x[0]):\r\n array=[]\r\n array.append(x[1])\r\n array.append(rs[1])\r\n list.append(array)\r\n break\r\n print(list)\r\n # print(result)\r\n To_CSV(list)\r\n tt = time() - t0\r\n print(\"Completed collect! 
It take %s\" % round(tt, 3))\r\n # print(result)\r\n\r\n # result = events_data.countByValue().items()\r\n # result = result.map(lambda x: (int(x[0]),int[x[1]]))\r\n # result = result.takeOrdered(25,key=lambda x:-x[0])\r\n # print(events_data)\r\n # To_CSV(events_data)\r\n sc.stop()\r\n","sub_path":"WhatIsPopular/what is popular.py","file_name":"what is popular.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"165285691","text":"#!/usr/bin/env python\n\n\"\"\"\nThis is the basis for other scripts to run specialized sets of batches.\nSee run_glance and run_cadence for examples.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport argparse\nimport matplotlib\nmatplotlib.use('Agg')\nimport lsst.sims.maf.db as db\nimport lsst.sims.maf.metricBundles as mb\nimport lsst.sims.maf.batches as batches\nimport lsst.sims.maf.utils as mafUtils\n\n\n\n\ndef connectDb(dbfile):\n version = db.testOpsimVersion(dbfile)\n if version == \"Unknown\":\n opsdb = db.Database(dbfile)\n colmap = batches.ColMapDict('barebones')\n elif version == \"V3\":\n opsdb = db.OpsimDatabaseV3(dbfile)\n colmap = batches.ColMapDict('OpsimV3')\n elif version == \"V4\":\n opsdb = db.OpsimDatabaseV4(dbfile)\n colmap = batches.ColMapDict('OpsimV4')\n return opsdb, colmap\n\n\ndef setSQL(opsdb, sqlConstraint=None):\n # Fetch the proposal ID values from the database\n propids, proptags = opsdb.fetchPropInfo()\n # Construct a WFD SQL where clause so multiple propIDs can query by WFD:\n wfdWhere = opsdb.createSQLWhere('WFD', proptags)\n ddWhere = opsdb.createSQLWhere('DD', proptags)\n if sqlConstraint is not None:\n wfdWhere = '(%s) and (%s)' % (sqlConstraint, wfdWhere)\n ddWhere = '(%s) and (%s)' % (sqlConstraint, ddWhere)\n sqltags = {'WFD': wfdWhere, 'DD': ddWhere}\n return (propids, proptags, sqltags)\n\n\ndef run(bdict, opsdb, colmap, args):\n resultsDb = db.ResultsDb(outDir=args.outDir)\n group = mb.MetricBundleGroup(bdict, opsdb, outDir=args.outDir, resultsDb=resultsDb)\n group.runAll()\n group.plotAll()\n resultsDb.close()\n mafUtils.writeConfigs(opsdb, args.outDir)\n\n\ndef replot(bdict, opsdb, colmap, args):\n resultsDb = db.ResultsDb(outDir=args.outDir)\n group = mb.MetricBundleGroup(bdict, opsdb, outDir=args.outDir, resultsDb=resultsDb)\n group.readAll()\n group.plotAll()\n resultsDb.close()\n\n\ndef parseArgs(subdir='out', parser=None):\n if parser is None:\n # Let the user set up their own argparse Parser, in case they need to add new args.\n parser = argparse.ArgumentParser(description=\"Run or replot a set of metric bundles.\")\n # Things we always need.\n parser.add_argument(\"dbfile\", type=str, help=\"Sqlite file of observations (full path).\")\n parser.add_argument(\"--runName\", type=str, default=None, help=\"Run name.\"\n \"Default is based on dbfile name.\")\n parser.add_argument(\"--outDir\", type=str, default=None, help=\"Output directory.\"\n \"Default is runName/%s.\" % (subdir))\n parser.add_argument('--plotOnly', dest='plotOnly', action='store_true',\n default=False, help=\"Reload the metric values from disk and re-plot them.\")\n parser.add_argument('--sqlConstraint', type=str, default=None,\n help=\"SQL constraint to apply to all metrics. 
\"\n \" e.g.: 'night <= 365' or 'propId = 5' \"\n \" (**may not work with slew batches)\")\n args = parser.parse_args()\n\n if args.runName is None:\n args.runName = os.path.basename(args.dbfile).replace('_sqlite.db', '')\n args.runName = args.runName.replace('.db', '')\n if args.outDir is None:\n args.outDir = os.path.join(args.runName, subdir)\n return args\n\n\nif __name__ == '__main__':\n args = parseArgs()\n opsdb, colmap = connectDb(args.dbfile)\n bdict = setBatches(opsdb, colmap, args)\n if args.plotOnly:\n replot(bdict, opsdb, colmap, args)\n else:\n run(bdict, opsdb, colmap, args)\n opsdb.close()\n","sub_path":"bin.src/run_generic.py","file_name":"run_generic.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"554202617","text":"# coding:utf-8\n\nimport sys\n\nN = int(input())\na = list(map(int, input().split()))\n\nb = []\nfor i in range(N):\n b.append(i+1)\nif sorted(a)!=b:\n print(\"MuriyarokonnNaN\")\n sys.exit()\ncnt = 0\nans = []\n\nfor i in range(N-1, 0, -1):\n if cnt>100000:\n break\n while a[i]!=i+1 and i>0:\n k = a.index(i+1)\n # print(a)\n # print(i,k,i-k, a[i], a[k])\n if i-k==a[i] or i-k==a[k]:\n a[i], a[k] = a[k], a[i]\n ans.append([i+1, k+1])\n cnt += 1\n i -= 1\n else:\n x = a[i]\n # print(i, i-x, a[i], a[i-x])\n a[i], a[i-x] = a[i-x], a[i]\n ans.append([i+1, i-x+1])\n cnt += 1\nif cnt>100000:\n print(\"MuriyarokonnNaN\")\nelse:\n print(cnt)\n for i in range(cnt):\n print(ans[i][0], ans[i][1])","sub_path":"library_python/AtCoder_Event/ttpc2015/i.py","file_name":"i.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"189826190","text":"import fileinput\r\nimport numpy as np\r\nimport math\r\nfrom scipy import stats\r\n\r\n\r\ndef main():\r\n arrayX = []\r\n arrayY = []\r\n n = 0\r\n i = 0\r\n\r\n for line in fileinput.input():\r\n n += 1\r\n\r\n for line in fileinput.input():\r\n if(i == 0):\r\n xk = int(line)\r\n elif(i <= n/2):\r\n arrayX.append(line)\r\n else:\r\n arrayY.append(line)\r\n i += 1\r\n for i in range(int(n/2)):\r\n arrayX[i] = float(arrayX[i])\r\n arrayY[i] = float(arrayY[i])\r\n\r\n r = correlation(int(n/2), arrayX, arrayY)\r\n b1 = getB1(int(n/2), arrayX, arrayY)\r\n b0 = getB0(int(n/2), b1, arrayX, arrayY)\r\n yk = b0 + b1 * xk\r\n o = stdDeviation(int(n/2), arrayX, arrayY, b0, b1)\r\n x = getX(int(n/2), r)\r\n t = getT(int(n/2), o, arrayX, arrayY)\r\n df = int(n/2) - 2\r\n p = 1 - stats.t.cdf(t,df=df)\r\n print(\"p = {}\".format(p))\r\n\r\n rang = getRange(int(n/2), o, t, xk, arrayX)\r\n upi = yk + rang\r\n lpi = yk - rang\r\n\r\n print(\"r = {}\".format(r))\r\n print(\"r2 = {}\".format(r**2))\r\n print(\"b1 = {}\".format(b1))\r\n print(\"b0 = {}\".format(b0))\r\n print(\"yk = {}\".format(yk))\r\n print(\"range = {}\".format(rang))\r\n print(\"UPI = {}\".format(upi))\r\n print(\"LPI = {}\".format(lpi))\r\n\r\ndef getT(n, o, arrayX, arrayY):\r\n meanX = mean(n, arrayX)\r\n meanY = mean(n, arrayY)\r\n t = (meanX - meanY)/(o*np.sqrt(2/n))\r\n return t\r\n\r\ndef getX(n, rxy):\r\n x = ( abs(rxy) * math.sqrt(n-2) ) / math.sqrt(1 - (rxy ** 2))\r\n return x\r\n\r\n\r\ndef mean(n, arr):\r\n sumArray = 0\r\n for i in range(n):\r\n sumArray +=float(arr[i])\r\n return sumArray/n\r\n\r\ndef correlation(n, arrayX, arrayY):\r\n xi_mean = 0\r\n yi_mean = 0\r\n sum_xi_mean_yi_mean = 0\r\n sum_xi_mean_2 = 0\r\n sum_yi_mean_2 = 0\r\n meanX = mean(n, arrayX)\r\n meanY = 
mean(n, arrayY)\r\n for i in range(n):\r\n xi_mean =float(arrayX[i]) - meanX\r\n yi_mean =float(arrayY[i]) - meanY\r\n sum_xi_mean_yi_mean += xi_mean * yi_mean\r\n sum_xi_mean_2 += xi_mean ** 2\r\n sum_yi_mean_2 += yi_mean ** 2\r\n r = ( sum_xi_mean_yi_mean / math.sqrt ( sum_xi_mean_2 * sum_yi_mean_2 ) )\r\n return r\r\n\r\ndef getB1(n, arrayX, arrayY):\r\n sum_xiyi = 0\r\n sum_x2 = 0\r\n meanX = mean(n, arrayX)\r\n meanY = mean(n, arrayY)\r\n for i in range(n):\r\n sum_xiyi +=float(arrayX[i]) *float(arrayY[i])\r\n x =float(arrayX[i])\r\n x2 = x ** 2\r\n sum_x2 += x2\r\n b1 = (sum_xiyi - (n * meanX * meanY)) / (sum_x2 - (n * meanX ** 2))\r\n return b1\r\n\r\ndef getB0(n, b1, arrayX, arrayY):\r\n meanX = mean(n, arrayX)\r\n meanY = mean(n, arrayY)\r\n b0 = meanY - (b1 * meanX)\r\n return b0\r\n\r\ndef stdDeviation(n, arrayX, arrayY, b0, b1):\r\n sum_yi_B0_B1Xi = 0\r\n for i in range(n):\r\n sum_yi_B0_B1Xi += (float(arrayY[i]) - b0 - (b1 *float(arrayX[i]))) ** 2\r\n o = math.sqrt( (1/(n-2)) * sum_yi_B0_B1Xi )\r\n return o\r\n\r\ndef getRange(n, o, t, xk, arrayX):\r\n sum_xi_mean2 = 0\r\n meanX = mean(n, arrayX)\r\n t = 1.1081348148903627\r\n for i in range(n):\r\n sum_xi_mean2 = (float(arrayX[i]) - meanX) ** 2\r\n rang = t * o * math.sqrt( ( 1 + 1/n + ( ( ( xk - meanX ) ** 2) / sum_xi_mean2 )))\r\n return rang\r\n\r\n\r\n\"\"\"\r\nMain program\r\n\"\"\"\r\nmain()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"14957634","text":"#!/usr/bin/python3\n\nimport numpy as np\nimport csv\nimport sys\n\nsys.path.append('Class')\nfrom Dataset import *\nfrom Classifier import *\nfrom MultiClassifier import *\nfrom Math import *\nfrom IOHelper import *\n\nfrom sklearn.metrics import accuracy_score\n\n# np.set_printoptions(precision=4)\nnp.set_printoptions(threshold='nan')\nnp.set_printoptions(suppress=True)\n\n#############################################################\n\ndef getHouseByIndex(d, index):\n\thouse = d.getDataset()[index][1]\n\treturn house\n\ndef getIndex(X, querie):\n\tfor i,x in enumerate(X):\n\t\tif x == querie:\n\t\t\treturn int(i+1)\n\treturn -1\n\ndef getInputInDataset(d, index, featuresId, inFloat=False):\n\tX = []\n\tif inFloat == True:\n\t\tfor i in featuresId:\n\t\t\ttmp = d.getDataset(index, i)\n\t\t\tif len(tmp) > 0:\n\t\t\t\tX.append(float(tmp))\n\t\t\telse:\n\t\t\t\t# X.append(float(0))\n\t\t\t\t# X.append(float(d.mean(d.getFeature(i))))\n\t\t\t\treturn None\n\telse:\n\t\tfor i in featuresId:\n\t\t\tX.append(d.getDataset(index, i))\n\n\treturn np.array(X)\n\ndef generateDataset(d, featuresId, index=-1):\n\n\tX = []\n\tY = []\n\n\thouseArray = d.getFeature(1, uniq=True)\n\n\tfor i in range(d.getLength()):\n\t\tx = getInputInDataset(d, i, featuresId, inFloat=True)\n\t\ty = getIndex(houseArray, getHouseByIndex(d, i))\n\n\t\tif x is not None: \n\t\t\tif index == -1 or (y == index):\n\t\t\t\tif len(x) > 0 and y >= 0:\n\t\t\t\t\tX.append(x)\n\t\t\t\t\tY.append(y)\n\n\tX = np.array(X)\n\tY = np.array(Y)\n\tif len(X) != len(Y):\n\t\tprint(\"Error when generate dataset\")\n\t\texit(1)\n\tif len(X) <= 0:\n\t\tprint(\"Error Empty dataset\")\n\t\texit(1)\n\treturn X, Y\n\ndef generatePrediction(allclassifier, X, Y):\n\ty_pred = []\n\ty_true = []\n\n\tfor i,data in enumerate(X):\n\t\toutput = allclassifier.getMax(data) + 1\n\n\t\ty_pred.append(output)\n\t\ty_true.append(Y[i])\n\n\tif len(y_true) != len(y_pred):\n\t\tprint(\"Error when generate 
prediction\")\n\t\texit(1)\n\n\treturn np.array(y_true), np.array(y_pred)\n\n##############################\n############ MAIN ############\n##############################\n\ndef main():\n\n\tnbInput = 0\n\tepoch = 99999\n\n\tfile = IOHelper().checkArg(sys.argv)\n\tif (len(file) < 2):\n\t\tprint(\"Missing file\")\n\t\texit(1)\n\n\tfeaturesId = range(7, 19)\n\n\t### train\n\td = Dataset()\n\td.loadFile(file[0])\n\n\t# nbInput = len(featuresId)\n\tX, Y = generateDataset(d, featuresId)\n\n\tX, nbInput = d.featureExpand(d, X)\n\tX = d.featureRescale(d, X)\n\n\t### test\n\td_test = Dataset()\n\td_test.loadFile(file[1])\n\n\tX_test, Y_test = generateDataset(d_test, featuresId)\n\n\tX_test, nbInput = d_test.featureExpand(d_test, X_test)\n\tX_test = d_test.featureRescale(d_test, X_test)\n\n\thouseArray = d.getFeature(1, uniq=True)\n\n\tallclassifier = MultiClassifier(nbInput, houseArray)\n\n\tlr = 10.0\n\toldLoss = 9e+9\n\tallclassifier.setLr(lr)\n\n\t# allclassifier.printInfo()\n\n\tfor j in range(epoch):\n\t\tloss = allclassifier.train(X, Y)\n\n\t\tallLoss = loss.sum()\n\n\t\tif abs(allLoss) > abs(oldLoss) and lr > 0.000000001:\n\t\t\tlr /= 10\n\t\t\tprint(\"DECREASE TO \" + str(lr))\n\t\t\tallclassifier.setLr(lr)\n\t\toldLoss = allLoss\n\n\t\tallclassifier.saveWeight()\n\t\t\n\t\t# y_true, y_pred = generatePrediction(allclassifier, X, Y)\n\t\ty_true, y_pred = generatePrediction(allclassifier, X_test, Y_test)\n\n\t\t# print(y_true)\n\t\t# print(y_pred)\n\n\t\tacc = accuracy_score(y_true, y_pred) * 100\n\t\tprint(\"epoch: {0:<15.5g} Loss1: {1:<15.5g} Loss2: {2:<15.5g} Loss3: {3:<15.5g} Loss4: {4:<15.5g} LOSS: {5:<15.5g} Accuracy: {6: UnversionedCoordinate:\n coordinate_parts = coord.split(\":\")\n if len(coordinate_parts) != 2:\n raise ValueError(f\"Invalid coordinate specifier: {coord}\")\n return UnversionedCoordinate(group=coordinate_parts[0], artifact=coordinate_parts[1])\n\n\n@dataclass(frozen=True)\nclass AvailableThirdPartyArtifacts:\n \"\"\"Maps JVM artifact coordinates (with only group and artifact set) to the `Address` of each\n target specifying that coordinate.\"\"\"\n\n artifacts: FrozenDict[UnversionedCoordinate, FrozenOrderedSet[Address]]\n\n def addresses_for_coordinates(\n self, coordinates: Iterable[UnversionedCoordinate]\n ) -> FrozenOrderedSet[Address]:\n candidate_artifact_addresses: Set[Address] = set()\n for coordinate in coordinates:\n candidates = self.artifacts.get(coordinate, FrozenOrderedSet())\n candidate_artifact_addresses.update(candidates)\n return FrozenOrderedSet(candidate_artifact_addresses)\n\n\nclass MutableTrieNode:\n def __init__(self):\n self.children: dict[str, MutableTrieNode] = {}\n self.recursive: bool = False\n self.coordinates: set[UnversionedCoordinate] = set()\n\n def ensure_child(self, name: str) -> MutableTrieNode:\n if name in self.children:\n return self.children[name]\n node = MutableTrieNode()\n self.children[name] = node\n return node\n\n\n@frozen_after_init\nclass FrozenTrieNode:\n def __init__(self, node: MutableTrieNode) -> None:\n children = {}\n for key, child in node.children.items():\n children[key] = FrozenTrieNode(child)\n self._children: FrozenDict[str, FrozenTrieNode] = FrozenDict(children)\n self._recursive: bool = node.recursive\n self._coordinates: FrozenOrderedSet[UnversionedCoordinate] = FrozenOrderedSet(\n node.coordinates\n )\n\n def find_child(self, name: str) -> FrozenTrieNode | None:\n return self._children.get(name)\n\n @property\n def recursive(self) -> bool:\n return self._recursive\n\n @property\n def 
coordinates(self) -> FrozenOrderedSet[UnversionedCoordinate]:\n return self._coordinates\n\n def __hash__(self) -> int:\n return hash((self._children, self._recursive, self._coordinates))\n\n def __eq__(self, other: Any) -> bool:\n if not isinstance(other, FrozenTrieNode):\n return False\n return (\n self._children == other._children\n and self.recursive == other.recursive\n and self.coordinates == other.coordinates\n )\n\n def __repr__(self):\n return f\"FrozenTrieNode(children={repr(self._children)}, recursive={self._recursive}, coordinate={self._coordinates})\"\n\n\nclass AllJvmArtifactTargets(Targets):\n pass\n\n\n@rule(desc=\"Find all jvm_artifact targets in project\", level=LogLevel.DEBUG)\ndef find_all_jvm_artifact_targets(targets: AllTargets) -> AllJvmArtifactTargets:\n return AllJvmArtifactTargets(\n tgt for tgt in targets if tgt.has_fields((JvmArtifactGroupField, JvmArtifactArtifactField))\n )\n\n\n@dataclass(frozen=True)\nclass ThirdPartyJavaPackageToArtifactMapping:\n mapping_root: FrozenTrieNode\n\n\n@rule\nasync def find_available_third_party_artifacts(\n all_jvm_artifact_tgts: AllJvmArtifactTargets,\n) -> AvailableThirdPartyArtifacts:\n artifact_mapping: dict[UnversionedCoordinate, set[Address]] = defaultdict(set)\n for tgt in all_jvm_artifact_tgts:\n group = tgt[JvmArtifactGroupField].value\n if not group:\n raise ValueError(\n f\"The {JvmArtifactGroupField.alias} field of target {tgt.address} must be set.\"\n )\n\n artifact = tgt[JvmArtifactArtifactField].value\n if not artifact:\n raise ValueError(\n f\"The {JvmArtifactArtifactField.alias} field of target {tgt.address} must be set.\"\n )\n\n key = UnversionedCoordinate(group=group, artifact=artifact)\n artifact_mapping[key].add(tgt.address)\n\n return AvailableThirdPartyArtifacts(\n FrozenDict({key: FrozenOrderedSet(value) for key, value in artifact_mapping.items()})\n )\n\n\n@rule\nasync def compute_java_third_party_artifact_mapping(\n java_infer_subsystem: JavaInferSubsystem,\n) -> ThirdPartyJavaPackageToArtifactMapping:\n def insert(mapping: MutableTrieNode, imp: str, coordinate: UnversionedCoordinate) -> None:\n imp_parts = imp.split(\".\")\n recursive = False\n if imp_parts[-1] == \"**\":\n recursive = True\n imp_parts = imp_parts[0:-1]\n\n current_node = mapping\n for imp_part in imp_parts:\n child_node = current_node.ensure_child(imp_part)\n current_node = child_node\n\n current_node.coordinates.add(coordinate)\n current_node.recursive = recursive\n\n mapping = MutableTrieNode()\n for imp_name, imp_action in {\n **JVM_ARTIFACT_MAPPINGS,\n **java_infer_subsystem.third_party_import_mapping,\n }.items():\n value = UnversionedCoordinate.from_coord_str(imp_action)\n insert(mapping, imp_name, value)\n\n return ThirdPartyJavaPackageToArtifactMapping(FrozenTrieNode(mapping))\n\n\ndef find_artifact_mapping(\n import_name: str,\n mapping: ThirdPartyJavaPackageToArtifactMapping,\n available_artifacts: AvailableThirdPartyArtifacts,\n) -> FrozenOrderedSet[Address]:\n imp_parts = import_name.split(\".\")\n current_node = mapping.mapping_root\n\n found_nodes = []\n for imp_part in imp_parts:\n child_node_opt = current_node.find_child(imp_part)\n if not child_node_opt:\n break\n found_nodes.append(child_node_opt)\n current_node = child_node_opt\n\n if not found_nodes:\n return FrozenOrderedSet()\n\n # If the length of the found nodes equals the number of parts of the package path, then there\n # is an exact match.\n if len(found_nodes) == len(imp_parts):\n addresses = 
available_artifacts.addresses_for_coordinates(found_nodes[-1].coordinates)\n return addresses\n\n # Otherwise, check for the first found node (in reverse order) to match recursively, and use its coordinate.\n for found_node in reversed(found_nodes):\n if found_node.recursive:\n addresses = available_artifacts.addresses_for_coordinates(found_node.coordinates)\n return addresses\n\n # Nothing matched so return no match.\n return FrozenOrderedSet()\n\n\ndef rules():\n return collect_rules()\n","sub_path":"src/python/pants/backend/java/dependency_inference/artifact_mapper.py","file_name":"artifact_mapper.py","file_ext":"py","file_size_in_byte":7453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"264295154","text":"def sol(depth):\n if depth == M:\n print(' '.join(map(str, out)))\n return\n\n for i in range(N):\n if not visited[i]:\n out.append(i+1)\n sol(depth+1)\n out.pop()\n\n\nN, M = map(int, input().split())\nvisited = [False] * N # 탐사 여부 check\nout = [] # 출력 내용\n\nsol(0)","sub_path":"boj/stepbystep/backtracking/p15651_N과M3.py","file_name":"p15651_N과M3.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"7262071","text":"import os\n\n\ndef find_files(subffix, path):\n if path is None:\n raise ValueError(\"Path should not be None\")\n\n if not os.path.isdir(path):\n print(\"The input path is not a directory\")\n if (os.path.isfile(path)\n and path.endswith(\".{}\".format(subffix))):\n return [path]\n else:\n return []\n\n file_path_list = []\n for sub in os.listdir(path):\n sub_path = os.path.join(path, sub)\n if os.path.isdir(sub_path):\n file_paths = find_files(subffix, sub_path)\n file_path_list.extend(file_paths)\n elif (os.path.isfile(sub_path)\n and sub_path.endswith(\".{}\".format(subffix))):\n file_path_list.append(sub_path)\n return file_path_list\n\n\ndef test_case1():\n test_list = find_files(\"c\", os.path.join(os.getcwd(), r\"File Recursion\\testdir\"))\n\n for path in test_list:\n print(path)\n\n # expected output\n '''\n %current work directory%\\File Recursion\\testdir\\subdir1\\a.c\n %current work directory%\\File Recursion\\testdir\\subdir3\\subsubdir1\\b.c\n %current work directory%\\File Recursion\\testdir\\subdir5\\a.c\n %current work directory%\\File Recursion\\testdir\\t1.c\n '''\n\n\ndef test_case2():\n test_list = find_files(\"b\", os.path.join(os.getcwd(), r\"File Recursion\\testdir\"))\n\n for path in test_list:\n print(path)\n # expected output: empty output\n\n\ndef test_case3():\n test_list = find_files(None, os.path.join(os.getcwd(), r\"File Recursion\\testdir\"))\n\n for path in test_list:\n print(path)\n # expected output: empty output\n\n\ndef test_case4():\n test_list = find_files(\"c\", None)\n\n for path in test_list:\n print(path)\n # expect ValueError message\n '''Path should not be None'''\n\n\ndef test_case5():\n test_list = find_files(\"c\", os.path.join(os.getcwd(), r\"File Recursion\\testdir\\t1.c\"))\n\n for path in test_list:\n print(path)\n # expect output\n '''%current work directory%\\File Recursion\\testdir\\t1.c'''\n\n\nif __name__ == \"__main__\":\n import time\n print(\"***test case 1***\")\n test_case1()\n print(\"\\n\")\n print(\"***test case 2***\")\n time.sleep(1)\n test_case2()\n print(\"\\n\")\n print(\"***test case 3***\")\n time.sleep(1)\n test_case3()\n print(\"\\n\")\n print(\"***test case 4***\")\n time.sleep(1)\n try:\n test_case4()\n except ValueError as er:\n print(er)\n 
print(\"\\n\")\n print(\"***test case 5***\")\n time.sleep(1)\n try:\n test_case5()\n except ValueError as er:\n print(er)\n","sub_path":"Show Me the Data Structures/File Recursion/problem_2.py","file_name":"problem_2.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"148680843","text":"from PIL import Image, ImageDraw\n\n\nclass Grid:\n def __init__(self, file_path):\n self.capture_image(file_path)\n\n def capture_image(self, file_path):\n screenshot = Image.open(file_path)\n screen_width, screen_height = screenshot.size\n columns = screen_width/30\n rows = screen_height/40\n\n block_width = int(((screen_width - 1) // columns) + 1)\n block_height = int(((screen_height - 1) // rows) + 1)\n\n for y in range(0, screen_height, block_height):\n for x in range(0, screen_width, block_width):\n draw = ImageDraw.Draw(screenshot)\n draw.rectangle((x, y, x + block_width, y + block_height), outline=\"red\")\n\n screenshot.save(\"grid.png\")\n\n def process_region(self, image, x, y, width, height):\n region_total = 0\n\n # This is the sensitivity factor, the larger it is the less sensitive the comparison\n factor = 10\n\n for coordinateY in range(y, y + height):\n for coordinateX in range(x, x + width):\n try:\n pixel = image.getpixel((coordinateX, coordinateY))\n region_total += sum(pixel) / 4\n except:\n return\n\n return region_total / factor\n\n\nGrid('screenshots/screen_TEST.png')\n","sub_path":"Specter/specter_grid.py","file_name":"specter_grid.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"543041136","text":"from sys import argv\n\ndef loadfile (fn):\n\tdata = []\n\twith open(fn) as f:\n\t\tfor line in f:\n\t\t\tdata.append([eval(i) for i in line.split()])\n\treturn data \n\ndef sort(data):\n\tgrp = {}\n\tfor l in data:\n\t\tif l[2] in grp:\n\t\t\tgrp[l[2]].append(l)\n\t\telse:\n\t\t\tgrp[l[2]] = [l]\n\treturn grp\n\ndef findNumNode(grp):\n\tNodeCount = {}\n\tallnodes = []\n\tcount = 0 \n\tfor key in grp:\n\t\tarr = grp[key]\n\t\ttemp = []\n\t\tfor i in arr:\n\t\t\ttemp.append(i[0])\n\t\t\ttemp.append(i[1])\n\t\t\tallnodes.append(i[0])\n\t\t\tallnodes.append(i[1])\n\t\tNodeCount[key] = len(set(temp))\n\t\tcount += len(set(temp))\n\tallnodes = set(allnodes)\n\treturn [allnodes, NodeCount, count]\n\n\n\nif len(argv) > 1:\n\tif argv[1] == \"h\":\n\t\tprint(\"python kddOutPut\")\n\telse:\n\t\tfn = argv[1]\n\t\tdata = loadfile(fn)\n\t\tgrp = sort(data)\n\t\tnodes, Ncount ,count = findNumNode(grp)\n\t\tprint(count - len(nodes))\n\n","sub_path":"partition/baseline/kddTest.py","file_name":"kddTest.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"206424611","text":"# 1. 导包\nimport logging\nimport logging.handlers\n\n\ndef set_log_config():\n # 2. 创建日志器对象 / 设置日志级别\n logger = logging.getLogger() # 默认日志器名称为 root\n # logger = logging.getLogger(\"Jay\") # 自定义日志器名称为 Jay\n logger.setLevel(level=logging.INFO)\n # 3. 创建处理器对象: 输出到控制台 + 文件(按时间切割)\n ls = logging.StreamHandler()\n lf = logging.handlers.TimedRotatingFileHandler(filename=\"./log/tpshop.log\", when=\"d\", backupCount=3) # 按天切割\n # 4. 创建格式器对象\n fmt = \"%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(funcName)s:%(lineno)d] - %(message)s\"\n formatter = logging.Formatter(fmt=fmt)\n # 5. 
将格式器添加到处理器\n ls.setFormatter(formatter)\n lf.setFormatter(formatter)\n # 6. 将处理器添加到日志器\n logger.addHandler(ls)\n logger.addHandler(lf)\n","sub_path":"config/logging_config.py","file_name":"logging_config.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"239186333","text":"import logging\nimport multiprocessing as mp\n\ntry:\n from queue import Empty\nexcept ImportError:\n from Queue import Empty\n\nfrom tqdm import tqdm\n\nfrom skeleton_synapses.dto import SkeletonAssociationInput\nfrom skeleton_synapses.helpers.files import write_predictions_synapses, TILE_SIZE\nfrom skeleton_synapses.helpers.images import submit_synapse_slice_data, remap_synapse_slices\nfrom skeleton_synapses.helpers.roi import tile_index_to_bounds, nodes_to_tile_indexes, roi_around_synapse\nfrom skeleton_synapses.constants import TQDM_KWARGS, RESULTS_TIMEOUT_SECONDS\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef populate_tile_input_queue(catmaid, roi_radius_px, workflow_id, node_infos):\n \"\"\"\n Convert node infos into a set of tiles to act on, and populate a queue with that set.\n\n Parameters\n ----------\n catmaid : CatmaidSynapseSuggestionAPI\n roi_radius_px : int\n workflow_id : int\n node_infos : list of NodeInfo\n\n Returns\n -------\n tuple of (mp.Queue, int)\n \"\"\"\n tile_index_set = nodes_to_tile_indexes(node_infos, TILE_SIZE, roi_radius_px)\n\n addressed_tiles = catmaid.get_detected_tiles(workflow_id)\n\n tile_queue = mp.Queue()\n tile_count = 0\n for tile_idx in tqdm(tile_index_set, desc='Populating tile queue', unit='tiles', **TQDM_KWARGS):\n if (tile_idx.x_idx, tile_idx.y_idx, tile_idx.z_idx) in addressed_tiles:\n logger.debug(\"Tile %s has been addressed by this algorithm, skipping\", repr(tile_idx))\n else:\n logger.debug(\"Tile %s has not been addressed, adding to queue\", repr(tile_idx))\n tile_count += 1\n tile_queue.put(tile_idx)\n\n return tile_queue, tile_count\n\n\ndef populate_synapse_queue(catmaid, roi_radius_px, project_workflow_id, stack_info, skeleton_ids):\n \"\"\"\n Given a set of skeleton IDs, find detected synapses near the skeletons and append them to a queue.\n\n Parameters\n ----------\n catmaid : CatmaidSynapseSuggestionAPI\n roi_radius_px : int\n project_workflow_id : int\n stack_info : dict\n skeleton_ids : list of int\n\n Returns\n -------\n tuple of (mp.Queue, int)\n \"\"\"\n synapse_queue = mp.Queue()\n synapse_count = 0\n\n roi_radius_nm = roi_radius_px * stack_info['resolution']['x'] # assumes XY isotropy\n logger.debug('Getting synapses spatially near skeleton {}'.format(skeleton_ids))\n synapses_near_skeleton = catmaid.get_synapses_near_skeletons(skeleton_ids, project_workflow_id, roi_radius_nm)\n logger.debug('Found {} synapse planes near skeleton {}'.format(len(synapses_near_skeleton), skeleton_ids))\n slice_id_tuples = set()\n for synapse in tqdm(synapses_near_skeleton, desc='Populating synapse plane queue', unit='synapse planes',\n **TQDM_KWARGS):\n slice_id_tuple = tuple(synapse['synapse_slice_ids'])\n if slice_id_tuple in slice_id_tuples:\n continue\n\n slice_id_tuples.add(slice_id_tuple)\n roi_xyz = roi_around_synapse(synapse, roi_radius_px)\n\n logger.debug('Getting treenodes in roi {}'.format(roi_xyz))\n item = SkeletonAssociationInput(roi_xyz, slice_id_tuple, synapse['synapse_object_id'])\n logger.debug('Adding {} to neuron segmentation queue'.format(item))\n synapse_queue.put(item)\n synapse_count += 1\n\n return synapse_queue, synapse_count\n\n\nclass 
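# Pattern sketch, not part of queues.py (populate_queue and skip are mine):
# both populate_* helpers above reduce to "filter, enqueue, count, return
# (queue, count)"; the skip predicate stands in for the addressed-tiles and
# duplicate-slice checks.
import multiprocessing as mp

def populate_queue(items, skip=lambda item: False):
    queue, count = mp.Queue(), 0
    for item in items:
        if not skip(item):
            queue.put(item)
            count += 1
    return queue, count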
QueueOverpopulatedException(Exception):\n pass\n\n\ndef iterate_queue(queue, final_size, queue_name=None, timeout=RESULTS_TIMEOUT_SECONDS):\n \"\"\"\n Yield items from a queue until exhausted, raising a QueueOverpopulatedException if the final index mismatches the\n expected queue length\n\n Parameters\n ----------\n queue : mp.Queue\n final_size : int\n queue_name : str or None\n timeout : float\n\n Yields\n ------\n object\n\n Raises\n ------\n QueueOverpopulatedException\n \"\"\"\n if queue_name is None:\n queue_name = repr(queue)\n for idx in range(final_size):\n logger.debug('Waiting for item {} from queue {} (expect {} more)'.format(idx, queue_name, final_size - idx))\n try:\n item = queue.get(timeout=timeout)\n except Empty:\n logger.exception('Result queue timed out after {} seconds'.format(timeout))\n raise\n logger.debug('Got item {} from queue {}: {} (expect {} more)'.format(idx, queue_name, item, final_size-idx-1))\n yield item\n\n if not queue.empty():\n raise QueueOverpopulatedException(\n 'More enqueued items in {} than expected (expected {}, at least {} more)'.format(\n queue_name, final_size, queue.qsize()\n )\n )\n\n\ndef commit_tilewise_result(tile_size, workflow_id, output_path, catmaid, synapse_detection_output):\n \"\"\"\n Commit the output of detecting synapses on a single tile to CATMAID and an HDF5 file.\n\n Parameters\n ----------\n tile_size : int\n workflow_id : int\n output_path : str or PathLike\n catmaid : CatmaidSynapseSuggestionAPI\n synapse_detection_output : SynapseDetectionOutput\n\n Returns\n -------\n None\n \"\"\"\n tile_idx, predictions_xyc, synapse_cc_xy = synapse_detection_output\n bounds_xyz = tile_index_to_bounds(tile_idx, tile_size)\n\n id_mapping = submit_synapse_slice_data(\n bounds_xyz, predictions_xyc, synapse_cc_xy, tile_idx, catmaid, workflow_id\n )\n\n catmaid.agglomerate_synapses(id_mapping.values())\n\n logger.debug('Got ID mapping from CATMAID:\\n{}'.format(id_mapping))\n\n mapped_synapse_cc_xy = remap_synapse_slices(synapse_cc_xy, id_mapping)\n write_predictions_synapses(output_path, predictions_xyc, mapped_synapse_cc_xy, bounds_xyz)\n\n\ndef commit_tilewise_results_from_queue(\n tile_result_queue, output_path, total_tiles, tile_size, workflow_id, catmaid\n):\n \"\"\"\n Commit all results from the given synapse detection output queue as they become available.\n\n Parameters\n ----------\n tile_result_queue : mp.Queue of SynapseDetectionOutput\n output_path : str or PathLike\n total_tiles : int\n tile_size : int\n workflow_id : int\n catmaid : CatmaidSynapseSuggestionAPI\n \"\"\"\n # raise ValueError('Reached commit_tilewise')\n logger.debug('Entering commit_tilewise_results_from_queue')\n result_iterator = tqdm(\n iterate_queue(tile_result_queue, total_tiles, 'tile_result_queue'),\n desc='Synapse detection', unit='tiles', total=total_tiles, **TQDM_KWARGS\n )\n\n logger.info('Starting to commit tile classification results')\n\n for tile_count, synapse_detection_output in enumerate(result_iterator):\n tilename = 'z{}-y{}-x{}'.format(*synapse_detection_output.tile_idx)\n logger.debug('Committing results from tile {}, {} of {}'.format(tilename, tile_count, total_tiles))\n\n commit_tilewise_result(tile_size, workflow_id, output_path, catmaid, synapse_detection_output)\n\n\ndef commit_node_association_results(project_workflow_id, catmaid, skeleton_association_output_list):\n \"\"\"\n Commit a list of results from skeleton association to catmaid\n\n Parameters\n ----------\n project_workflow_id : int\n catmaid : CatmaidSynapseSuggestionAPI\n 
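# Usage sketch (drain_demo is my name), assuming iterate_queue as defined
# above and a queue holding exactly three items; a surplus item would raise
# QueueOverpopulatedException after the loop finishes.
import multiprocessing as mp

def drain_demo():
    queue = mp.Queue()
    for item in ("a", "b", "c"):
        queue.put(item)
    for item in iterate_queue(queue, final_size=3, queue_name="demo"):
        print(item)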
skeleton_association_output_list : list of SkeletonAssociationOutput\n \"\"\"\n assoc_tuples = [\n (result.synapse_slice_id, result.node_id, result.contact_px) for result in skeleton_association_output_list\n ]\n\n logger.debug('Node association results are\\n%s', repr(assoc_tuples))\n logger.info('Inserting new slice:treenode mappings')\n if assoc_tuples:\n catmaid.add_synapse_treenode_associations(assoc_tuples, project_workflow_id)\n\n\ndef commit_node_association_results_from_queue(node_result_queue, total_nodes, project_workflow_id, catmaid):\n \"\"\"\n Commit all results from the given skeleton association output queue as they become available\n\n Parameters\n ----------\n node_result_queue : mp.Queue of SkeletonAssociationOutput\n total_nodes : int\n project_workflow_id : int\n catmaid : CatmaidSynapseSuggestionAPI\n \"\"\"\n logger.debug('Committing node association results')\n\n result_list_generator = tqdm(\n iterate_queue(node_result_queue, total_nodes, 'node_result_queue'),\n desc='Synapse-treenode association', unit='synapse planes', total=total_nodes, **TQDM_KWARGS\n )\n\n logger.debug('Getting node association results')\n for result_list in result_list_generator:\n commit_node_association_results(project_workflow_id, catmaid, result_list)\n","sub_path":"skeleton_synapses/parallel/queues.py","file_name":"queues.py","file_ext":"py","file_size_in_byte":8462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"42940614","text":"# Raspberry Pi tutorial 27 Socket Communication 1\r\nimport socket\r\n#from LeadServer import getStoredValue\r\nhost = ''\r\nport = 5560\r\n\r\nstoredValue = \"8\"\r\n\r\ndef setupServer():\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n print(\"Socket created.\")\r\n try:\r\n s.bind((host, port))\r\n except socket.error as msg:\r\n print(msg)\r\n print(\"socket bind complete.\")\r\n return s\r\n\r\n\r\ndef setupConnection(s):\r\n s.listen(1) # allows 1 connection at a time\r\n conn, address = s.accept()\r\n print(\"connected to: \" + address[0] + \":\" + str(address[1]))\r\n return conn\r\n\r\n\r\ndef GET():\r\n global storedValue\r\n reply = storedValue\r\n print(\"replying with \" + str(reply))\r\n return reply\r\n\r\n\r\ndef REPEAT ( dataMessage):\r\n reply = dataMessage[1]\r\n return reply\r\n\r\ndef CONFIRM ( dataMessage):\r\n reply = dataMessage[0]\r\n return reply\r\n\r\n\r\ndef dataTransfer(conn):\r\n # a big loop that sends and recieves data\r\n global storedValue\r\n x=0\r\n while x <=1: # loop twice\r\n data = conn.recv(1024)\r\n data = data.decode('utf-8')\r\n dataMessage = data.split(' ', 1)\r\n command = dataMessage[0]\r\n if command == 'GET':\r\n value = GET()\r\n reply = str(storedValue)\r\n print(\"getting\" + str(storedValue))\r\n x=x+2\r\n elif command == 'REPEAT':\r\n reply = REPEAT(dataMessage)\r\n print(\"repeating\")\r\n elif command == '0' or '1' or '2' or '3' or '4' or '5' or '6' or'7' or '8' or'8' or '10':\r\n print(str(command))\r\n storedValue = str(command)\r\n reply = \"chk\"\r\n x=x+2\r\n elif command == 'EXIT':\r\n print(\" client has disconnected \")\r\n x=x+1\r\n break\r\n elif command == 'KILL':\r\n print(\"Server is shutting down\")\r\n break\r\n else:\r\n print(\"unknown command\")\r\n reply = 'Unknown Command'\r\n # send the reply back to the client\r\n conn.sendall(str.encode(reply))\r\n #print(\"Data has been sent!\")\r\n conn.close()\r\n\r\n\r\nwhile True:\r\n s = setupServer()\r\n\r\n while True:\r\n try:\r\n conn = setupConnection(s)\r\n 
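# Hypothetical client sketch (send_command is my name; host and port mirror
# the FollowServer above): sends one of the text commands the dataTransfer()
# loop understands -- 'GET', 'REPEAT <msg>', a number, 'EXIT' -- and returns
# the server's reply.
import socket

def send_command(command, host="127.0.0.1", port=5560):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    s.sendall(command.encode("utf-8"))
    reply = s.recv(1024).decode("utf-8")
    s.close()
    return reply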
dataTransfer(conn)\r\n\r\n except:\r\n print(\"exception on while loop\")\r\n conn.close()\r\n s.close()\r\n break\r\n","sub_path":"PlatooningServer/FollowServer.py","file_name":"FollowServer.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"365208946","text":"import discord\nfrom discord.ext import commands\n\nclass infocommands(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.bot_version = bot.bot_version\n self.bot_name = bot.bot_name\n self.author = bot.author\n self.icon_url = bot.icon_url\n self.getDeviceOs = bot.getDeviceOs\n\n @commands.command()\n async def info(self, message):\n embed = discord.Embed(color=0xff0000)\n embed.set_author(name=\"Boomer\", url=\"https://github.com/woosal1337\", icon_url=\"https://i.imgur.com/SxH7Ctg.jpg\")\n embed.set_thumbnail(url=\"https://i.imgur.com/SxH7Ctg.jpg\")\n embed.add_field(name=\"Boomer\", value=\"A BOT who is a Boomer AF\", inline=True)\n embed.add_field(name=\"Github\", value=\"[@woosal1337](https://github.com/woosal1337)\", inline=True)\n embed.add_field(name=\"Instagram\", value=\"[@woosal1337](https://www.instagram.com/woosal1337/)\", inline=True)\n embed.add_field(name=\"Twitter\", value=\"[@woosal1337](https://twitter.com/woosal1337)\", inline=True)\n embed.add_field(name=\"Reddit\", value=\"[@woosal1337](https://www.reddit.com/user/woosal1337)\", inline=True)\n embed.add_field(name=\"Telegram\", value=\"[@woosal1337](https://t.me/woosal1337)\", inline=True)\n embed.add_field(name=\"Author\", value=self.author, inline=True)\n embed.add_field(name=\"Website\", value=\"[woosal1337.me](http://woosal1337.me)\", inline=True)\n embed.set_footer(icon_url=self.icon_url, text=\"A real Boomer BOT\")\n await message.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(infocommands(bot))\n","sub_path":"MEE6 Jr/commands/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"245471186","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 31 21:18:10 2019\n\n@author: anirban-mac\n\"\"\"\n\"\"\"\n145. 
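# Minimal sketch of the same discord.py extension pattern as the infocommands
# cog above (the PingCommands class is mine, not from the record): a Cog with
# one command, registered through setup().
from discord.ext import commands

class PingCommands(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def ping(self, ctx):
        await ctx.send("pong")

def setup(bot):
    bot.add_cog(PingCommands(bot))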
Binary Tree Postorder Traversal\nGiven a binary tree, return the postorder traversal of its nodes' values.\n\nExample:\n\nInput: [1,null,2,3]\n 1\n \\\n 2\n /\n 3\n\nOutput: [3,2,1]\nFollow up: Recursive solution is trivial, could you do it iteratively?\n\"\"\"\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def stringToTreeNode(self,inputValues):\n \n root = TreeNode(int(inputValues[0]))\n nodeQueue = [root]\n front = 0\n index = 1\n while index < len(inputValues):\n node = nodeQueue[front]\n front = front + 1\n \n item = inputValues[index]\n index = index + 1\n if item != \"null\":\n leftNumber = int(item)\n node.left = TreeNode(leftNumber)\n nodeQueue.append(node.left)\n \n if index >= len(inputValues):\n break\n \n item = inputValues[index]\n index = index + 1\n if item != \"null\":\n rightNumber = int(item)\n node.right = TreeNode(rightNumber)\n nodeQueue.append(node.right)\n return root\n \n def prettyPrintTree(self, node, prefix=\"\", isLeft=True):\n if not node:\n print(\"Empty Tree\")\n return\n \n if node.right:\n self.prettyPrintTree(node.right, prefix + (\"│ \" if isLeft else \" \"), False)\n \n print(prefix + (\"└── \" if isLeft else \"┌── \") + str(node.val))\n \n if node.left:\n self.prettyPrintTree(node.left, prefix + (\" \" if isLeft else \"│ \"), True)\n \n\n def postorderTraversal(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[int]\n \"\"\"\n if root is None: \n return []\n \n stack = [root]\n postOrder = []\n \n while stack:\n root = stack.pop()\n if root:\n postOrder.append(root.val)\n if root.left:\n stack.append(root.left)\n if root.right:\n stack.append(root.right)\n \n return list(reversed(postOrder))\n \n \n \ntreelist = [1,'null',2,3]\ntreeNode = Solution().stringToTreeNode(treelist)\nSolution().prettyPrintTree(treeNode,\"\",True)\nprint(Solution().postorderTraversal(treeNode))","sub_path":"145_postOrderTravelsal.py","file_name":"145_postOrderTravelsal.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"206516833","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\n\n#COMECE AQUI ABAIXO\n\n#ENTRADA\na=int(input('Primeiro numero apostado: '))\nb=int(input('Segundo numero apostado: '))\nc=int(input('Terceiro numero apostado: '))\nd=int(input('Quarto numero apostado: '))\ng=int(input('Quinto numero apostado: '))\nf=int(input('Sexto numero apostado: '))\nq=int(input('Primeiro numero sorteado: '))\nw=int(input('Segundo numero sorteado: '))\ne=int(input('Terceiro numero sorteado: '))\nr=int(input('Quarto numero sorteado: '))\nt=int(input('Quinto numero sorteado: '))\ny=int(input('Sexto numero sorteado: '))\ncontador=0\n#PROCESSAMENTO\nif a==q or b==q or c==q or d==q or g==q or f==q:\n contador=contador+1\nif a==w or b==w or c==w or d==w or g==w or f==w:\n contador=contador+1\nif a==e or b==e or c==e or d==e or g==e or f==e:\n contador=contador+1\nif a==r or b==r or c==r or d==r or g==r or f==r:\n contador=contador+1\nif a==t or b==t or c==t or d==t or g==t or f==t:\n contador=contador+1\nif a==y or b==y or c==y or d==y or g==y or f==y:\n contador=contador+1\n#SAIDA\nif contador==3:\n print (\"terno\")\nif contador==4:\n print (\"quadra\")\nif contador==5:\n print (\"quina\")\nif contador==6:\n print (\"sena\")\nif contador<3:\n print 
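# Equivalent sketch, not from the submission above (PRIZES and prize are my
# names): the six repeated if-chains just count how many bet numbers occur
# among the drawn ones, which set intersection expresses directly.
PRIZES = {3: "terno", 4: "quadra", 5: "quina", 6: "sena"}

def prize(bet, drawn):
    hits = len(set(bet) & set(drawn))
    return PRIZES.get(hits, "azar")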
(\"azar\")","sub_path":"moodledata/vpl_data/32/usersdata/62/10543/submittedfiles/questao2_av1.py","file_name":"questao2_av1.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"574409481","text":"#coding:utf-8\nimport random\nresult_list = []\nfor i in range(50):\n hostname = \"local_%s\"%i\n mac = \"00:0c:2%s:cd:%sb:e%s\"%(random.randint(1,9),random.randint(1,9),random.randint(1,9))\n ip = \"192.168.1.%s\"%i\n sys_type = random.choice([\"Windows\",\"Linux\",\"Mac\"])\n if sys_type == \"Windows\":\n versionList = [\"win95\",\"win98\",\"win2000\",\"win xp\",\"win7\",\"win8\",\"win10\"]\n elif sys_type == \"Linux\":\n versionList = [\"Linux \", \"Ubuntu\", \"OpenSUSE\", \"Fedora\", \"RHEL\", \"CentOS \"]\n else:\n versionList = [\"OS X Mountain Lion\",\"OS X Mavericks\",\"OS X Yosemite\",\"OS X El Capitan\",\"macOS Sierra\",]\n sys_version = random.choice(versionList)\n cpu_count = random.randint(1,4)\n disk = random.choice([\"SEAGATE 200GB SAS 10K 2.5\",\n \"SEAGATE 400GB SAS 10K 2.5\",\n \"SEAGATE 600GB SAS 10K 2.5\"])\n memory = random.choice([\"SAMSUNG DDR4 2400T DDR3 4G\",\n \"SAMSUNG DDR4 2400T DDR3 8G\",\n \"SAMSUNG DDR4 2400T DDR3 16G\",\n \"SAMSUNG DDR4 2400T DDR3 32G\"])\n result = {\n \"hostname\": hostname,\n \"mac\": mac,\n \"ip\": ip,\n \"sys_type\": sys_type,\n \"sys_version\": sys_version,\n \"cpu_count\": cpu_count,\n \"disk\": disk,\n \"memory\": memory\n }\n result_list.append(result)\n\nprint(result_list)","sub_path":"CmdbWeb/testData.py","file_name":"testData.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"69696699","text":"pp,qqq=input().split()\npp=int(pp)\nqqq=int(qqq)\nss=''\nuu=2\nif(pp+qqq<=3):\n for i in range(0,pp+qqq):\n if(i%2!=0):\n ss=ss+'0'\n else:\n ss=ss+'1'\nelse: \n for i in range(0,pp+qqq):\n if(i==uu):\n ss=ss+'0'\n if(uu==qqq):\n uu=uu+2\n else:\n uu=uu+3\n else:\n ss=ss+'1'\nx=len(ss)-1\nif(int(ss[x])==0):\n print('-1') \nelif pp==1 and qqq==2: \n print(\"011\")\nelse:\n print(ss)\n","sub_path":"pro41.py","file_name":"pro41.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"398498804","text":"import os\nimport logging \n\nfrom google.appengine.api import users\nfrom google.appengine.api import memcache\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\nfrom models import Bookmark, Tag\n\nclass MainHandler(webapp.RequestHandler):\n def get(self):\n user = users.get_current_user()\n if user is None:\n self.redirect(users.create_login_url(self.request.uri))\n \n logout_url = users.create_logout_url(self.request.uri)\n username = user.nickname() if user is not None else \"\"\n urls_query = Bookmark.all()\n last_cursor = memcache.get('bookmark_cursor')\n if last_cursor:\n urls_query.with_cursor(last_cursor)\n urls_query.filter('user =', user)\n urls = urls_query.fetch(10)\n memcache.set('bookmark_cursor', urls_query.cursor())\n logging.error(urls)\n template_values = {'user_name': username, 'logout_url': logout_url, 'urls': urls}\n path = os.path.join(os.path.dirname(__file__), 'templates/index.html')\n self.response.out.write(template.render(path, template_values))\n\n 
\n","sub_path":"main_handler.py","file_name":"main_handler.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"636358650","text":"from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.views.generic import FormView, RedirectView, TemplateView\n\nfrom diaryspace_auth import groups\nfrom diaryspace_auth.forms import SchoolAdminCreationForm\nfrom diaryspace_auth.models import User\nfrom schools.models import School\n\n\nclass SchoolAdminCreationView(FormView):\n template_name = \"diaryspace_auth/signup.html\"\n form_class = SchoolAdminCreationForm\n success_url = \"/\"\n\n def form_valid(self, form):\n school = School.objects.create(\n region=form.cleaned_data[\"region\"],\n city=form.cleaned_data[\"city\"],\n school=form.cleaned_data[\"school\"],\n )\n\n User.objects.create_school_admin(\n email=form.cleaned_data[\"email\"],\n password=form.cleaned_data[\"password\"],\n name=form.cleaned_data[\"name\"],\n surname=form.cleaned_data[\"surname\"],\n patronymic=form.cleaned_data[\"patronymic\"],\n school_id=school.id,\n )\n form.send_registration_mail()\n\n return super().form_valid(form)\n\n\nclass UserHomeRedirect(LoginRequiredMixin, RedirectView):\n def get_redirect_url(self, *args, **kwargs):\n user = self.request.user\n if user.is_admin:\n return \"/admin\"\n return reverse(groups.HOME_URLS[user.group.name])\n","sub_path":"diaryspace/diaryspace_auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"265972981","text":"import pandas as pd\nimport sys\n\n# this script gets the top peaks_num peaks based on the \"pileup\" in each peak\n# pileup is the number of fragment at the peak summit\n\npath = sys.argv[1]\npath_xls_file = sys.argv[2]\npeaks_num = int(sys.argv[3]) # here we select the number of peaks we'd like to take \n\npath_combine = path+path_xls_file\n\npeaks = pd.read_csv(path_combine,sep='\\t',comment='#')\npeaks = peaks[peaks['length']<=500] # takes peaks with lenth less than or equal to 500\n\npeaks_sorted_by_pileup = peaks.sort_values(by=['pileup'],ascending=False)\npeaks_sorted_by_pileup_600 = peaks_sorted_by_pileup.head(peaks_num)\n\n# update the trimming steps in the peak file accordingly\n\nfn = path+'/peaks/peaks_sorted_by_pileup_top_'+str(peaks_num)+'.bed'\n# print('file saved to: \\\"' + fn + '\\\"')\npeaks_sorted_by_pileup_600.to_csv(fn, sep='\\t',index=False,header=False)\n","sub_path":"extract_peaks/extract_peaks.py","file_name":"extract_peaks.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"361848779","text":"\"\"\"\nFind the kth largest element in an unsorted array.\nNote that it is the kth largest element in the sorted order, not the kth distinct element.\n\nFor example,\nGiven [3,2,1,5,6,4] and k = 2, return 5.\n\nNote:\nYou may assume k is always valid, 1 ≤ k ≤ array's length.\n\"\"\"\n\nfrom random import shuffle\n\n\nclass Solution1(object):\n def findKthLargest(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n def findKthSmallest(k, lo, hi):\n if lo == hi: return nums[lo]\n\n pivotIdx = partition(lo, hi)\n if pivotIdx < k:\n return findKthSmallest(k, pivotIdx + 1, hi)\n elif pivotIdx > k:\n return findKthSmallest(k, 
lo, pivotIdx - 1)\n return nums[k]\n\n def partition(lo, hi):\n i, j = lo + 1, hi\n while i <= j:\n if nums[i] > nums[lo]:\n exch(i, j)\n j -= 1\n else:\n i += 1\n exch(lo, i - 1)\n return i - 1\n\n def exch(i, j):\n nums[i], nums[j] = nums[j], nums[i]\n\n n = len(nums)\n shuffle(nums)\n return findKthSmallest(n - k, 0, n - 1)\n\n\nclass Solution(object):\n def findKthLargest(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n def partition(lo, hi):\n i, j = lo + 1, hi\n while i <= j:\n if nums[i] > nums[lo]:\n exch(i, j)\n j -= 1\n else:\n i += 1\n exch(i - 1, lo)\n return i - 1\n\n def exch(i, j):\n nums[i], nums[j] = nums[j], nums[i]\n\n n = len(nums)\n shuffle(nums)\n k = n - k\n lo, hi = 0, n - 1\n while lo < hi:\n pivotIdx = partition(lo, hi)\n if pivotIdx < k:\n lo = pivotIdx + 1\n elif pivotIdx > k:\n hi = pivotIdx - 1\n else: return nums[pivotIdx]\n return nums[lo]\n","sub_path":"medium/KthLargestElementinanArray.py","file_name":"KthLargestElementinanArray.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"323382158","text":"\"\"\"\nCreated on Sep 12, 2013\n\n@author: clabauve\n\n\"\"\"\n\nfrom types import LexemeTypes\n\n\nclass Lex(object):\n \"\"\"Base lexer object which keeps track of token stream of input text.\n Implement lex function in child class.\n\n \"\"\"\n\n def __init__(self, stream, file_name='STRING', is_debug=False):\n \"\"\"Initializer. Keeps track of position in file and file name.\"\"\"\n self._line_number = 1\n self._previous_column = None\n self._column = 0\n self._file_name = file_name\n self._is_debug = is_debug\n self._string = stream\n self._stream = stream\n self._current_lexeme = None\n self._indent = ''\n self.advance()\n\n @property\n def string(self):\n \"\"\"Returns the entire input string.\"\"\"\n return self._string\n\n @property\n def stream(self):\n \"\"\"Returns the remainder of the input string--the part not yet\n consumed.\n\n \"\"\"\n\n return self._stream\n\n def spawn(self, t, *args):\n \"\"\"Returns a new Lex object of type t with all necessary information\n from self copied over.\n\n :Parameters:\n - t (class) Class of resulting Lex child class\n - *args (object) variadic list of arguments to pass to child class.\n\n :Returns:\n Newly spawned lex object with debug info copied over.\n\n :Return Type:\n t\n\n \"\"\"\n\n lex = t(*args)\n lex._is_debug = self._is_debug\n lex._line_number = self._line_number\n lex._file_name = self._file_name\n return lex\n\n def error(self, message):\n \"\"\"Raises SyntaxError with debug information and the given message.\"\"\"\n\n raise SyntaxError('%s line %s: %s (%s)'\n % (self._file_name, self._line_number, message, self._current_lexeme))\n\n def check(self, t):\n \"\"\"Returns true if the current token matches type t,\n false otherwise.\n\n \"\"\"\n\n return t == self._current_lexeme.type\n\n def match(self, t):\n \"\"\"Raises SyntaxError if current token matches type t, otherwise\n returns the matched token and consumes it, advancing to the next\n token.\n\n \"\"\"\n\n self._match_no_advance(t)\n if self._is_debug:\n self._debug()\n return self.advance()\n\n def _debug(self):\n \"\"\"Prints debug information about the current token.\"\"\"\n\n if (self._current_lexeme.type not in (LexemeTypes.WSP,\n LexemeTypes.LINE_BREAK)):\n print (self._indent + self._current_lexeme.type + ' '\n + repr(self._current_lexeme.value))\n\n def _lex(self):\n \"\"\"Only function to 
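# One-line alternative, not from the solution above (find_kth_largest is my
# name): heapq gives the same answer as the quickselect in O(n log k) without
# mutating the input list.
import heapq

def find_kth_largest(nums, k):
    return heapq.nlargest(k, nums)[-1]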
implement in child class. This function should\n use the _getch function to read through the character stream and\n return the next matched token.\n\n \"\"\"\n\n raise NotImplementedError()\n\n def advance(self):\n \"\"\"Consumes the current token and advances to the next one.\n Returns the consumed token.\n\n \"\"\"\n\n tmp = self._current_lexeme\n self._current_lexeme = self._lex()\n return tmp\n\n def _getch(self):\n \"\"\"Returns the next character in the input stream.\n Properly updates internal line number and column number.\n\n \"\"\"\n\n if len(self._stream) == 0:\n return None\n ch = self._stream[0]\n self._previous_column = self._column\n if ch == '\\n':\n self._line_number += 1\n self._column = 0\n else:\n self._column += 1\n #print repr(ch), self._column\n self._stream = self._stream[1:]\n return ch\n\n def _ungetch(self, ch):\n \"\"\"Adds character ch back to the input stream, updating internal line\n and column numbers.\n\n \"\"\"\n\n if ch is not None:\n self._stream = ch + self._stream\n if ch == '\\n':\n if self._previous_column is None:\n raise IOError('Cannot unget over multiple lines')\n self._line_number -= 1\n self._column = self._previous_column\n else:\n self._column -= 1\n self._previous_column = None\n return ch\n\n def _match_no_advance(self, t):\n \"\"\"Raises SyntaxError if current token does not match type t.\"\"\"\n\n if not self.check(t):\n self.error('expected token type %s' % t)\n","sub_path":"Perforce/kkhanna_ubuntu-VirtualBox_4296/package/code_generation/pyyen/pyyen/yang/analyzer/lex.py","file_name":"lex.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"175849542","text":"\n\n#calss header\nclass _GLIDE():\n\tdef __init__(self,): \n\t\tself.name = \"GLIDE\"\n\t\tself.definitions = [u'to move easily without stopping and without effort or noise: ', u'to move or progress without difficulty or effort: ', u'to fly by floating on air currents instead of using power from wings or an engine: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_glide.py","file_name":"_glide.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"642468041","text":"from __future__ import unicode_literals\n\nimport math\nimport random\n\nfrom django import forms\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.utils.translation import ugettext_lazy\nfrom django.views.generic import TemplateView\nfrom django_cradmin import crapp\n\nfrom devilry.apps.core.models import Examiner, RelatedExaminer\nfrom devilry.devilry_admin.views.assignment.students import groupview_base\nfrom devilry.devilry_cradmin import devilry_listbuilder\n\n\nclass SelectMethodView(TemplateView):\n template_name = 'devilry_admin/assignment/examiners/bulk_organize/select_method.django.html'\n\n\nclass RandomOrganizeForm(groupview_base.SelectedGroupsForm):\n selected_relatedexaminers_invalid_choice_message = ugettext_lazy(\n 'You must select at least two examiners.')\n selected_relatedexaminers = forms.ModelMultipleChoiceField(\n queryset=RelatedExaminer.objects.none(),\n widget=forms.CheckboxSelectMultiple(),\n label=ugettext_lazy('Select at least two examiners:'),\n required=False\n )\n\n def 
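# Hedged child-class sketch (Lexeme here is a stand-in namedtuple, not the
# project's real class): a trivial _lex() method body that skips whitespace
# and emits single-character tokens via _getch/_ungetch, as the docstring
# above prescribes.
from collections import namedtuple

Lexeme = namedtuple("Lexeme", "type value")

def _lex(self):
    ch = self._getch()
    while ch is not None and ch.isspace():
        ch = self._getch()
    if ch is None:
        return Lexeme("EOF", None)
    return Lexeme("CHAR", ch)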
__make_relatedexaminer_choices(self, relatedexaminerqueryset):\n return [\n (relatedexaminer.id, relatedexaminer.user.get_full_name())\n for relatedexaminer in relatedexaminerqueryset]\n\n def __init__(self, *args, **kwargs):\n selectable_relatedexaminers_queryset = kwargs.pop('selectable_relatedexaminers_queryset')\n super(RandomOrganizeForm, self).__init__(*args, **kwargs)\n self.fields['selected_relatedexaminers'].queryset = selectable_relatedexaminers_queryset\n self.fields['selected_relatedexaminers'].choices = self.__make_relatedexaminer_choices(\n relatedexaminerqueryset=selectable_relatedexaminers_queryset)\n\n def clean(self):\n cleaned_data = super(RandomOrganizeForm, self).clean()\n selected_relatedexaminers = cleaned_data.get(\"selected_relatedexaminers\")\n if selected_relatedexaminers.count() < 2:\n self.add_error(\n 'selected_relatedexaminers',\n self.selected_relatedexaminers_invalid_choice_message)\n\n\nclass RandomOrganizeTargetRenderer(devilry_listbuilder.assignmentgroup.GroupTargetRenderer):\n def get_with_items_title(self):\n return ugettext_lazy('Select at least two students:')\n\n def get_submit_button_text(self):\n return ugettext_lazy('Randomly assign selected students to selected examiners')\n\n def get_field_layout(self):\n return [\n 'selected_relatedexaminers'\n ]\n\n\nclass RandomView(groupview_base.BaseMultiselectView):\n filterview_name = 'random'\n template_name = 'devilry_admin/assignment/examiners/bulk_organize/random.django.html'\n\n def get_target_renderer_class(self):\n return RandomOrganizeTargetRenderer\n\n def get_form_class(self):\n return RandomOrganizeForm\n\n def __get_relatedexaminerqueryset(self):\n assignment = self.request.cradmin_role\n period = assignment.period\n queryset = RelatedExaminer.objects\\\n .filter(period=period)\\\n .select_related('user')\\\n .exclude(active=False)\n return queryset\n\n def get_form_kwargs(self):\n kwargs = super(RandomView, self).get_form_kwargs()\n kwargs['selectable_relatedexaminers_queryset'] = self.__get_relatedexaminerqueryset()\n return kwargs\n\n def get_success_url(self):\n return self.request.cradmin_instance.reverse_url(\n appname='examineroverview',\n viewname=crapp.INDEXVIEW_NAME)\n\n def __clear_examiners(self, groupqueryset):\n Examiner.objects.filter(assignmentgroup__in=groupqueryset).delete()\n\n def __random_organize_examiners(self, groupqueryset, relatedexaminerqueryset):\n relatedexaminers = list(relatedexaminerqueryset)\n groups = list(groupqueryset)\n max_per_examiner = int(math.ceil(len(groups) / len(relatedexaminers)))\n relatedexaminer_to_count_map = {}\n examiners_to_create = []\n for group in groupqueryset:\n relatedexaminer = random.choice(relatedexaminers)\n if relatedexaminer.id not in relatedexaminer_to_count_map:\n relatedexaminer_to_count_map[relatedexaminer.id] = 0\n relatedexaminer_to_count_map[relatedexaminer.id] += 1\n if relatedexaminer_to_count_map[relatedexaminer.id] > max_per_examiner:\n relatedexaminers.remove(relatedexaminer)\n examiner_to_create = Examiner(relatedexaminer=relatedexaminer, assignmentgroup=group)\n examiners_to_create.append(examiner_to_create)\n Examiner.objects.bulk_create(examiners_to_create)\n\n def form_invalid_add_global_errormessages(self, form):\n super(RandomView, self).form_invalid_add_global_errormessages(form=form)\n if 'selected_relatedexaminers' in form.errors:\n for errormessage in form.errors['selected_relatedexaminers']:\n messages.error(self.request, errormessage)\n\n def form_valid(self, form):\n groupqueryset = 
form.cleaned_data['selected_items']\n relatedexaminerqueryset = form.cleaned_data['selected_relatedexaminers']\n self.__clear_examiners(groupqueryset=groupqueryset)\n self.__random_organize_examiners(groupqueryset=groupqueryset,\n relatedexaminerqueryset=relatedexaminerqueryset)\n # messages.success(self.request, self.get_success_message(candidatecount=candidatecount))\n return redirect(self.get_success_url())\n\n\nclass ManualAddOrReplaceExaminersForm(groupview_base.SelectedGroupsForm):\n selected_relatedexaminers_required_message = ugettext_lazy(\n 'You must select at least one examiner.')\n selected_relatedexaminers = forms.ModelMultipleChoiceField(\n queryset=RelatedExaminer.objects.none(),\n widget=forms.CheckboxSelectMultiple(),\n label=ugettext_lazy('Select examiners:'),\n required=True,\n error_messages={\n 'required': selected_relatedexaminers_required_message\n }\n )\n\n def __make_relatedexaminer_choices(self, relatedexaminerqueryset):\n return [\n (relatedexaminer.id, relatedexaminer.user.get_full_name())\n for relatedexaminer in relatedexaminerqueryset]\n\n def __init__(self, *args, **kwargs):\n selectable_relatedexaminers_queryset = kwargs.pop('selectable_relatedexaminers_queryset')\n super(ManualAddOrReplaceExaminersForm, self).__init__(*args, **kwargs)\n self.fields['selected_relatedexaminers'].queryset = selectable_relatedexaminers_queryset\n self.fields['selected_relatedexaminers'].choices = self.__make_relatedexaminer_choices(\n relatedexaminerqueryset=selectable_relatedexaminers_queryset)\n\n\nclass ManualAddOrReplaceTargetRenderer(devilry_listbuilder.assignmentgroup.GroupTargetRenderer):\n\n def get_field_layout(self):\n return [\n 'selected_relatedexaminers'\n ]\n\n\nclass BaseManualAddOrReplaceView(groupview_base.BaseMultiselectView):\n def get_form_class(self):\n return ManualAddOrReplaceExaminersForm\n\n def __get_relatedexaminerqueryset(self):\n assignment = self.request.cradmin_role\n period = assignment.period\n queryset = RelatedExaminer.objects\\\n .filter(period=period)\\\n .select_related('user')\\\n .exclude(active=False)\n return queryset\n\n def get_form_kwargs(self):\n kwargs = super(BaseManualAddOrReplaceView, self).get_form_kwargs()\n kwargs['selectable_relatedexaminers_queryset'] = self.__get_relatedexaminerqueryset()\n return kwargs\n\n def get_success_url(self):\n return self.request.get_full_path()\n\n def clear_existing_examiners_from_groups(self, groupqueryset):\n raise NotImplementedError()\n\n def get_ignored_relatedexaminerids_for_group(self, group):\n raise NotImplementedError()\n\n def __add_examiners(self, groupqueryset, relatedexaminerqueryset):\n examiners = []\n groupcount = 0\n candidatecount = 0\n relatedexaminers = list(relatedexaminerqueryset)\n for group in groupqueryset:\n groupcount += 1\n candidatecount += len(group.candidates.all())\n ignored_relatedexaminers_for_group = self.get_ignored_relatedexaminerids_for_group(group=group)\n for relatedexaminer in relatedexaminers:\n if relatedexaminer.id not in ignored_relatedexaminers_for_group:\n examiner = Examiner(assignmentgroup=group,\n relatedexaminer=relatedexaminer)\n examiners.append(examiner)\n Examiner.objects.bulk_create(examiners)\n return groupcount, candidatecount, relatedexaminers\n\n def form_invalid_add_global_errormessages(self, form):\n super(BaseManualAddOrReplaceView, self).form_invalid_add_global_errormessages(form=form)\n if 'selected_relatedexaminers' in form.errors:\n for errormessage in form.errors['selected_relatedexaminers']:\n messages.error(self.request, 
errormessage)\n\n def get_success_message_formatting_string(self):\n raise NotImplementedError()\n\n def get_success_message(self, candidatecount, relatedexaminers):\n examinernames = [relatedexaminer.user.get_full_name()\n for relatedexaminer in relatedexaminers]\n return self.get_success_message_formatting_string() % {\n 'count': candidatecount,\n 'examinernames': ', '.join(examinernames)\n }\n\n def form_valid(self, form):\n groupqueryset = form.cleaned_data['selected_items']\n relatedexaminerqueryset = form.cleaned_data['selected_relatedexaminers']\n self.clear_existing_examiners_from_groups(groupqueryset=groupqueryset)\n groupcount, candidatecount, relatedexaminers = self.__add_examiners(\n groupqueryset=groupqueryset,\n relatedexaminerqueryset=relatedexaminerqueryset)\n messages.success(self.request, self.get_success_message(candidatecount=candidatecount,\n relatedexaminers=relatedexaminers))\n return redirect(self.get_success_url())\n\n\nclass ManualAddTargetRenderer(ManualAddOrReplaceTargetRenderer):\n def get_submit_button_text(self):\n return ugettext_lazy('Add selected examiners to selected students')\n\n\nclass ManualAddView(BaseManualAddOrReplaceView):\n filterview_name = 'manual-add'\n template_name = 'devilry_admin/assignment/examiners/bulk_organize/manual-add.django.html'\n\n def get_target_renderer_class(self):\n return ManualAddTargetRenderer\n\n def clear_existing_examiners_from_groups(self, groupqueryset):\n pass # We do not clear existing examiners in Add view!\n\n def get_ignored_relatedexaminerids_for_group(self, group):\n # We ignore any examiners currently registered on the group\n return {examiner.relatedexaminer_id for examiner in group.examiners.all()}\n\n def get_success_message_formatting_string(self):\n return ugettext_lazy('Added %(count)s students to %(examinernames)s.')\n\n\nclass ManualReplaceTargetRenderer(ManualAddOrReplaceTargetRenderer):\n def get_submit_button_text(self):\n return ugettext_lazy('Replace selected examiners with current examiners for selected students')\n\n\nclass ManualReplaceView(BaseManualAddOrReplaceView):\n filterview_name = 'manual-replace'\n template_name = 'devilry_admin/assignment/examiners/bulk_organize/manual-replace.django.html'\n\n def get_target_renderer_class(self):\n return ManualReplaceTargetRenderer\n\n def clear_existing_examiners_from_groups(self, groupqueryset):\n # We clear any existing examiners on for selected groups\n Examiner.objects.filter(assignmentgroup__in=groupqueryset).delete()\n\n def get_ignored_relatedexaminerids_for_group(self, group):\n # We do not need to ignore any existing examiners - they are removed\n # by :meth:`.clear_existing_examiners_from_groups`\n return []\n\n def get_success_message_formatting_string(self):\n return ugettext_lazy('Made %(examinernames)s examiner for %(count)s students, replacing any previous '\n 'examiners for those students.')\n\n\nclass App(crapp.App):\n appurls = [\n crapp.Url(r'^$',\n SelectMethodView.as_view(),\n name=crapp.INDEXVIEW_NAME),\n crapp.Url(r'^random/(?P.+)?$',\n RandomView.as_view(),\n name='random'),\n crapp.Url(r'^manual-add/(?P.+)?$',\n ManualAddView.as_view(),\n name='manual-add'),\n crapp.Url(r'^manual-replace/(?P.+)?$',\n ManualReplaceView.as_view(),\n name='manual-replace'),\n ]\n","sub_path":"devilry/devilry_admin/views/assignment/examiners/bulk_organize.py","file_name":"bulk_organize.py","file_ext":"py","file_size_in_byte":12925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} 
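A deterministic alternative sketch (random_assign is my name, standalone, not
part of bulk_organize.py): RandomView above caps each examiner at
ceil(groups / examiners); shuffling once and slicing round-robin gives the
same bound without per-group bookkeeping.

    import random

    def random_assign(groups, examiners):
        shuffled = list(groups)
        random.shuffle(shuffled)
        n = len(examiners)
        return {ex: shuffled[i::n] for i, ex in enumerate(examiners)}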
+{"seq_id":"416057499","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 30 13:13:26 2020\n\n@author: hongwei\n\"\"\"\n\nimport networkx as nx\n\nG1 = nx.read_gpickle(\"G_original.gpickle\")\nG2 = nx.read_gpickle(\"G_patched.gpickle\")\n\nprint(len(G1.nodes()), len(G1.edges()))\nprint(len(G2.nodes()), len(G2.edges()))","sub_path":"src/test_gpickle.py","file_name":"test_gpickle.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"300855357","text":"#!/usr/bin/env python\n#\n# Copyright (c) 2019, Pycom Limited.\n#\n# This software is licensed under the GNU GPL version 3 or any\n# later version, with permitted additional terms. For more information\n# see the Pycom Licence v1.0 document supplied with this file, or\n# available at https://www.pycom.io/opensource/licensing\n#\n\nimport machine\nimport math\nimport network\nimport os\nimport time\nimport utime\nimport gc\nimport socket\n\nfrom network import LoRa\nfrom machine import RTC\nfrom machine import SD\n#from L76GNSS import L76GNSS\nfrom L76GNSV4 import L76GNSS\nfrom pytrack import Pytrack\n\n\ntime.sleep(2)\ngc.enable()\n\n\n#setup LoRa\nlora = network.LoRa(mode=LoRa.LORA, region=LoRa.AS923, frequency=923000000, tx_power=14, bandwidth=LoRa.BW_125KHZ, sf=12, preamble=6, coding_rate=LoRa.CODING_4_5, power_mode=LoRa.ALWAYS_ON, tx_iq=False, rx_iq=False, adr=False, public=True, tx_retries=1, device_class=LoRa.CLASS_A)\ns = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\ns.setblocking(False)\n\n# setup rtc\nrtc = machine.RTC()\nrtc.ntp_sync(\"pool.ntp.org\")\nutime.sleep_ms(750)\nprint('\\nRTC Set from NTP to UTC:', rtc.now())\nutime.timezone(7200)\nprint('Adjusted from UTC to EST timezone', utime.localtime(), '\\n')\n\npy = Pytrack()\nl76 = L76GNSS(py, timeout=30)\n\nsd = SD()\nos.mount(sd, '/sd')\n#a=open(\"/sd/lorastats.csv\", \"w\")\n#a.close()\n#b=open(\"/sd/gps-lora.csv\", \"w\")\n#b.close()\n#c=open(\"/sd/gps-record.csv\", \"w\")\n#c.close()\n#import utime; start = utime.ticks_us();diff = time.ticks_diff(time.ticks_us(), start); print(diff)\ni=0\nwhile (True):\n start = utime.ticks_ms();\n l76_coord = l76.coordinates()\n get_loc = l76.get_location()\n\n gps_stats = [get_loc['latitude'],get_loc['longitude'],get_loc['altitude']]\n print(gps_stats)\n rtcnow = rtc.now()\n for r in rtcnow:\n gps_stats.append(r)\n print(gps_stats)\n\n if s.recv(64) == b'Ping':\n i += 1\n print('Received Ping', i)\n lorastats = lora.stats()\n print(lorastats)\n with open(\"/sd/lorastats_290120.csv\", \"a\") as lora_file:\n lora_file.write(\", \".join(str(j) for j in lorastats) + \"\\n\")\n lora_file.close()\n with open('/sd/gps-lora_290120.csv', 'a') as gps_lora:\n #gps_lora.write(\"{} - {} - {} - {}\\n\".format(get_loc['latitude'], get_loc['longitude'],get_loc['altitude'], rtc.now()))\n gps_lora.write(\", \".join(str(k) for k in gps_stats) + \"\\n\")\n\n gps_lora.close()\n\n with open('/sd/gps-record_290120.csv', 'a') as gps_file:\n #gps_file.write(\"{} - {} - {} - {}\\n\".format(get_loc['latitude'], get_loc['longitude'],get_loc['altitude'], rtc.now()))\n gps_file.write(\", \".join(str(k) for k in gps_stats) + \"\\n\")\n\n gps_file.close()\n #f.write(\"{} - {}\\n\".format(coord, rtc.now()))\n print(\"{} - {} - {} - {}\".format(get_loc['latitude'], get_loc['longitude'],get_loc['altitude'], gc.mem_free()))\n time.sleep(5)\n 
print('Looping')\n","sub_path":"lib/main_290120.py","file_name":"main_290120.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"593339877","text":"from pysqlcipher import dbapi2 as sqlcipher\n#import sqlite3 as sqlcipher\nimport os\n\n\ndef location():\n return os.path.join(os.getcwd())\n\n\nDATABASE = '%s/data.db' % location()\n\n\nclass ManageProxyData:\n def __init__(self):\n conn = sqlcipher.connect(DATABASE)\n conn.executescript('pragma key=\"pazGvFYLRd\"; pragma kdf_iter=64000;')\n cur = conn.cursor()\n cur.execute('''SELECT * FROM configuration WHERE config = ?''', (current_config()[1],))\n user = cur.fetchone()\n conn.close()\n self.data = user\n\n def all(self):\n return self.data\n\n def islocked(self):\n if self.data[12] == 1:\n return True\n else:\n return False\n\n def proxy_ip(self):\n if self.data[6] is None:\n return ''\n else:\n return '%s' % self.data[6]\n\n def proxy_port(self):\n if self.data[7] is None:\n return ''\n else:\n return self.data[7]\n\n def injection_method(self):\n if self.data[5] is None:\n return ''\n else:\n if self.data[5] == 0:\n return 'CONNECT'\n if self.data[5] == 1:\n return 'GET'\n if self.data[5] == 2:\n return 'POST'\n if self.data[5] == 3:\n return 'PUT'\n if self.data[5] == 4:\n return 'HEAD'\n if self.data[5] == 5:\n return 'TRACE'\n if self.data[5] == 6:\n return 'OPTIONS'\n if self.data[5] == 7:\n return 'PATCH'\n if self.data[5] == 8:\n return 'PROPATCH'\n if self.data[5] == 9:\n return 'DELETE'\n\n def request_method(self):\n if self.data[4] is None:\n return ''\n else:\n return self.data[4]\n\n def biquery(self):\n if self.data[15] is None:\n return 0\n else:\n return self.data[15]\n\n def fiquery(self):\n if self.data[14] is None:\n return 0\n else:\n return self.data[14]\n\n def front_query(self):\n if self.data[0] is None:\n return ''\n else:\n return '%s' % self.data[0]\n\n def middle_query(self):\n if self.data[1] is None:\n return ''\n else:\n return '%s' % self.data[1]\n\n def back_query(self):\n if self.data[2] is None:\n return ''\n else:\n return '%s' % self.data[2]\n\n def host_name(self):\n if self.data[3] is None:\n return ''\n else:\n return '%s' % self.data[3]\n\n def host(self):\n if self.data[4] == 0:\n return 'Host'\n else:\n return ''\n\n def online_host(self):\n if self.data[8]:\n return 'X-Online-Host'\n else:\n return ''\n\n def reverse_proxy(self):\n if self.data[9]:\n return 'Proxy-Connection'\n else:\n return ''\n\n def forward_host(self):\n if self.data[10]:\n return 'X-Forward-Host'\n else:\n return ''\n\n def keepalive(self):\n if self.data[11]:\n return 'Keep-Alive'\n else:\n return ''\n\n\ndef connection():\n conn = sqlcipher.connect(DATABASE)\n conn.executescript('pragma key=\"pazGvFYLRd\"; pragma kdf_iter=64000;')\n c = conn.cursor()\n return c, conn\n\n\ndef current_config():\n current = []\n conn = sqlcipher.connect(DATABASE)\n conn.executescript('pragma key=\"pazGvFYLRd\"; pragma kdf_iter=64000;')\n cur = conn.cursor()\n cur.execute('''SELECT * FROM current''')\n current_config_data = cur.fetchone()\n for i in current_config_data:\n current.append(i)\n return current\n\n\ndef openvpn_config():\n conn = sqlcipher.connect(DATABASE)\n conn.executescript('pragma key=\"pazGvFYLRd\"; pragma kdf_iter=64000;')\n cur = conn.cursor()\n cur.execute('''SELECT config,username,password,user_auth FROM openvpn WHERE id = ? 
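# Factoring sketch, not in database.py (query_one is my name): every helper
# above repeats the connect-pragma-execute-close dance; connection(), defined
# above, lets it collapse to one parameterized function.
def query_one(sql, params=()):
    cur, conn = connection()
    cur.execute(sql, params)
    row = cur.fetchone()
    conn.close()
    return row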
''', (current_config()[0],))\n config = []\n for i in cur.fetchone():\n config.append(i)\n conn.close()\n return config\n\n\ndef wifi_config():\n conn = sqlcipher.connect(DATABASE)\n conn.executescript('pragma key=\"pazGvFYLRd\"; pragma kdf_iter=64000;')\n cur = conn.cursor()\n cur.execute('''SELECT wifi_ssid, wifi_password, wpa, wifi_conf FROM current WHERE id = 1''')\n config = []\n for i in cur.fetchone():\n config.append(i)\n conn.close()\n return config","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"636502545","text":"\"\"\"A bunch of helpful functions that don't really fit into any one class, but are helpful.\"\"\"\nimport re, datetime\n\n\ndef str_to_epoch(length_string):\n \"\"\"Takes a number and letter, and returns a length in integer seconds.\"\"\"\n length_string = length_string.lower()\n multi_dict = {\n 'y': int(3.154e+7),\n 'm': int(2.592e+6),\n 'w': 604800,\n 'd': 86400,\n 'h': 60\n }\n number = re.match(r'\\d+', length_string)\n multi = length_string[-1]\n return multi_dict[multi] * int(number[0])\n\n\ndef epoch_from_now(length):\n \"\"\"Adds 'length' seconds to the current unix timestamp.\"\"\"\n return round(datetime.datetime.now().timestamp()) + length\n\n\ndef frmt_td(time_delta):\n \"\"\"Formats a datetime.timedelta into a 'time remaining' string.\"\"\"\n t = {}\n seconds = round(time_delta.total_seconds())\n if seconds < 0:\n return 'date has been passed'\n t['months'], seconds = divmod(seconds, 2.628e+6)\n t['days'], seconds = divmod(seconds, 86400)\n t['hours'], seconds = divmod(seconds, 3600)\n t['minutes'], seconds = divmod(seconds, 60)\n t_string = ''\n for k,v in t.items():\n if v != 0:\n t_string += f'{int(v)} {k}, '\n return t_string[:-2]\n\n\ndef is_steamid64(steamid64):\n \"\"\"Basic check to see if the given ID is likely a steam ID (not perfect).\"\"\"\n try:\n int(steamid64)\n if len(steamid64) != 17:\n return False\n return True\n except ValueError:\n return False\n","sub_path":"miscellaneous/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"175651029","text":"from datetime import datetime\nfrom typing import List, Union\nfrom fastapi import FastAPI, Depends, Request, Form\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.templating import Jinja2Templates\n\nfrom sqlalchemy.orm import Session\nfrom db import models, schemas\nfrom db.database import SessionLocal, engine\n\nmodels.Base.metadata.create_all(bind=engine)\n\napp = FastAPI()\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\ntemplates = Jinja2Templates(directory=\"templates\")\n\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\n# https://stackoverflow.com/questions/42552696/sqlalchemy-nearest-datetime\n@app.get(\"/news\", response_model=List[schemas.SiteBase])\ndef get_news(date: Union[str, datetime] = datetime.now(),\n db: Session = Depends(get_db),\n site: str = 'tvn24',\n limit: int = 1):\n headline = models.Headline\n if isinstance(date, str):\n date_format = '%Y-%m-%dT%H:%M:%S'\n if len(date) <= 10:\n date += 'T12:00:00'\n else:\n date_format = date_format[:len(date) - 2]\n date = datetime.strptime(date[:19], date_format)\n return db.query(headline).filter(headline.time_stamp <= date).\\\n 
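# Table-driven sketch (breakdown is my name): the divmod cascade in frmt_td
# above generalizes to a list of unit sizes; note that a seconds-per-hour
# factor is 3600, whereas str_to_epoch above maps 'h' to 60.
def breakdown(seconds, units=(("days", 86400), ("hours", 3600), ("minutes", 60))):
    parts = {}
    for name, size in units:
        parts[name], seconds = divmod(seconds, size)
    return parts, seconds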
order_by(headline.time_stamp.desc()).limit(limit).all()\n\n\n@app.post(\"/news\") # , response_model=schemas.NewsOut)\ndef add_news(date: Union[str, datetime] = datetime.now(),\n db: Session = Depends(get_db),\n site: str = 'tvn24'):\n headline = models.Headline(\n headline='test',\n time_stamp=datetime.now(),\n site=site\n )\n db.add(headline)\n db.commit()\n return True\n\n\n@app.get(\"/\", response_class=HTMLResponse)\nasync def root(request: Request):\n return templates.TemplateResponse(\n \"index.html\",\n {\"request\": request}\n )\n\n\n@app.post(\"/\")\nasync def form_submit(request: Request,\n db: Session = Depends(get_db),\n date: str = Form(...),\n site: str = Form(...)):\n results = get_news(date=date,\n db=db,\n site=site)\n return templates.TemplateResponse(\n \"index.html\",\n {\"request\": request, \"results\": results}\n )\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"524260058","text":"import threading\nimport unittest\n\nfrom sdnlg.libs.signals.signals import Signal, called_on\n\npacket_in = Signal()\nport_status = Signal()\nlist_pkin1 = []\nlist_pkin2 = []\nlist_pstatus1 = []\nlist_thread = []\n\n\n@called_on(packet_in)\ndef pkin1(pkt):\n list_pkin1.append(pkt)\n\n\n@called_on(packet_in, 'one')\ndef pkin2(pkt):\n list_pkin2.append(pkt)\n\n\n@called_on(port_status)\ndef pstatus1(pkt):\n list_pstatus1.append(pkt)\n\n\nclass TestSignals(unittest.TestCase):\n\n def setUp(self):\n global list_pkin1, list_pkin2, list_pstatus1, list_thread\n list_pkin1 = []\n list_pkin2 = []\n list_pstatus1 = []\n list_thread = []\n\n def test_signals(self):\n packet_in.send(pkt='First packet')\n packet_in.send(sender='one', pkt='Second packet')\n port_status.send(pkt='Third packet')\n packet_in.send(pkt='Fourth packet')\n port_status.send(sender='two', pkt='Fifth packet')\n\n self.assertEqual(list_pkin1, ['First packet', 'Second packet', 'Fourth packet'])\n self.assertEqual(list_pkin2, ['Second packet'])\n self.assertEqual(list_pstatus1, ['Third packet', 'Fifth packet'])\n\n def test_threads(self):\n\n self.assertEqual(len(list_pkin1), 0)\n\n def thread_signal():\n packet_in.send(pkt='Thread sent 1')\n packet_in.send(sender='one', pkt='Thread sent 2')\n\n packet_in.send(pkt='Main sent 1')\n thread = threading.Thread(target=thread_signal)\n thread.start()\n\n self.assertEqual(list_pkin1, ['Main sent 1', 'Thread sent 1', 'Thread sent 2'])\n self.assertEqual(list_pkin2, ['Thread sent 2'])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_signals.py","file_name":"test_signals.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"396586707","text":"#https://www.pyimagesearch.com/2016/06/20/detecting-cats-in-images-with-opencv/ #18.4.19\n#https://pypi.org/project/paho-mqtt/\n#https://pysource.com/2019/03/12/face-landmarks-detection-opencv-with-python/ #15.5.19\nimport paho.mqtt.client as mqtt\nimport time\nimport base64\nimport cv2\nfrom collections import deque\nfrom threading import Thread\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport RPi.GPIO as GPIO\nimport os\nimport sys\nfrom Robot import *\nimport dlib\nimport math\n\nspeed=0\nsmoothstrength=2\n\ntry:\n GPIO.setmode(GPIO.BCM)\n\n #GPIO PWM starten\n GPIO.setup(5, GPIO.OUT)\n GPIO.setup(6, GPIO.OUT)\n GPIO.setup(13, GPIO.OUT)\n GPIO.setup(19, 
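# Guess at the semantics test_signals.py above exercises (the real sdnlg
# Signal/called_on implementation is not in the record): handlers optionally
# filter by sender, and send() fans keyword arguments out to every match.
class Signal(object):
    def __init__(self):
        self._handlers = []                    # list of (sender_filter, fn)

    def connect(self, fn, sender=None):
        self._handlers.append((sender, fn))

    def send(self, sender=None, **kwargs):
        for wanted, fn in self._handlers:
            if wanted is None or wanted == sender:
                fn(**kwargs)

def called_on(signal, sender=None):
    def decorator(fn):
        signal.connect(fn, sender=sender)
        return fn
    return decorator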
GPIO.OUT)\n\n rechtsvor = GPIO.PWM(19, 50)\n rechtszuruck = GPIO.PWM(13, 50)\n linksvor = GPIO.PWM(5, 50)\n linkszuruck = GPIO.PWM(6, 50)\n\n rechtsvor.start(0)\n rechtszuruck.start(0)\n linksvor.start(0)\n linkszuruck.start(0)\n\n class WebcamVideoStream:\n def __init__(self, resx, resy, framerate):\n self.newframe = None\n\n self.cap = PiCamera()\n self.cap.resolution = (resx, resy)\n self.cap.framerate = framerate\n self.rawcap = PiRGBArray(self.cap, size=(resx, resy))\n\n self.stopped = False\n\n def start(self):\n Thread(target=self.update, args=()).start()\n return self\n\n def update(self):\n for self.img in self.cap.capture_continuous(self.rawcap, format=\"bgr\", use_video_port=True):\n if self.stopped:\n return\n\n self.newframe = self.img.array\n self.rawcap.truncate(0)\n\n def read(self):\n while self.newframe is None:\n pass\n return self.newframe.copy()\n\n def stop(self):\n self.stopped = True\n\n smoothx = deque(maxlen=smoothstrength)\n for i in range(smoothstrength):\n smoothx.appendleft(0)\n\n def on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n \n client = mqtt.Client()\n client.on_connect = on_connect\n\n client.connect_async(\"localhost\", port=1883, keepalive=10, bind_address=\"\")\n client.loop_start()\n\n resx = 640#960 #864\n resy = 480#720 #432\n\n vs = WebcamVideoStream(resx, resy, 24).start()\n\n #haarcascade_lowerbody.xml \n #haarcascade_frontalface_default.xml #detections = detector.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=2, minSize=(3, 3))\n #haarcascade_fullbody.xml\n #haarcascade_frontalcatface.xml\n #haarcascade_frontalface_alt_tree.xml\n\n #detector und predictor initialisieren\n detectorfrontal = cv2.CascadeClassifier(\"haarcascades/haarcascade_frontalface_default.xml\")\n detectorprofile = cv2.CascadeClassifier(\"haarcascades/haarcascade_profileface.xml\")\n predictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n\n while True:\n frame = vs.read()[0:int(resy/1.8),0:resx]\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n #gesichter per haarcascade finden\n detectionsfrontal = detectorfrontal.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=2, minSize=(3, 3))\n detectionsprofile = detectorprofile.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=2, minSize=(3, 3))\n\n #größtes gesicht herausfinden\n biggestsize=0\n biggestdetection=[0, 0, 0, 0]\n for (i, (x, y, w, h)) in enumerate(detectionsfrontal):\n if (w + h) > biggestsize:\n biggestsize = w + h\n x1=x+w\n y1=y+h\n biggestdetection = [x, y, x1, y1]\n for (i, (x, y, w, h)) in enumerate(detectionsprofile):\n if (w + h) > biggestsize:\n biggestsize = w + h\n x1=x+w\n y1=y+h\n biggestdetection = [x, y, x1, y1]\n\n #Facial landmarks in bild einzeichnen und bild auf gesicht zuschneiden\n if biggestsize is not 0:\n face = frame[biggestdetection[1]:biggestdetection[3],biggestdetection[0]:biggestdetection[2]]\n landmarks = predictor(face, dlib.rectangle(0,0,biggestdetection[3]-biggestdetection[1],biggestdetection[2]-biggestdetection[0]))\n for n in range(0, 68):\n x = landmarks.part(n).x\n y = landmarks.part(n).y\n cv2.circle(face, (x, y), 2, (255, 0, 0), -1)\n smoothx.appendleft((((biggestdetection[0]+biggestdetection[2])/2)-0.5*resx)/resx/2*20.0)\n else:\n smoothx.appendleft(0)\n face = frame\n\n Thread(target=client.publish, args=(\"image\", b\"data:image/png;base64,\" + base64.b64encode(cv2.imencode('.jpg', face)[1].tostring()), 0, False)).start()\n #Thread(target=client.publish, args=(\"image\", 
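# Assumption: smpos() is star-imported from Robot and not shown in the
# record; the fixed-length deque usage above suggests it is a simple moving
# average over the smoothing window.
def smpos(window):
    return sum(window) / len(window)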
b\"data:image/png;base64,\" + base64.b64encode(cv2.imencode('.jpg', frame)[1].tostring()), 0, False)).start()\n \n\n X=math.pow(2*smpos(smoothx),2)\n if smpos(smoothx)<0:\n X=-X\n if X>=-2 and X<=-10:\n X=-10\n if X<=2 and X>=10:\n X=10\n if X>-2 and X<2:\n X=0\n print(str(X) + \" | \"+ str(smpos(smoothx)))\n Thread(target=client.publish, args=(\"posx\", str(X), 0, False)).start()\n MotorL=checkValue(X+speed)\n MotorR=checkValue(-X+speed)\n PWMmotoren(MotorR,MotorL,rechtsvor,rechtszuruck,linksvor,linkszuruck)\n\nexcept KeyboardInterrupt:\n print(\"Keyboard interrupt\")\nexcept Exception as e:\n print(str(e))\nfinally:\n vs.stop()\n GPIO.cleanup()\n print(\"Cleaning up\")\n","sub_path":"Python/Track_Face.py","file_name":"Track_Face.py","file_ext":"py","file_size_in_byte":5623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"463744095","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport json\nimport time\nimport csv\n\ntry:\n import urllib.parse as urllib\n import http.client as httplib\n from http.client import ssl\nexcept ImportError:\n import urllib\n import httplib\n from httplib import ssl\n\nimport six\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse\nfrom django.forms.models import modelform_factory\nfrom django.template.loader import render_to_string\nfrom django.views.generic.base import View\nfrom django.http import HttpResponse\nimport pystache\nfrom django.http import QueryDict\nfrom django.shortcuts import render\nfrom django.db.models import Q\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import ugettext\nfrom django.utils.dateparse import parse_date\n\nfrom devices.models import Device, Room, Building, Manufacturer, Lending\nfrom users.models import Lageruser, Department\nfrom mail.models import MailTemplate\nfrom devicetypes.models import Type\nfrom devices.forms import AddForm\nfrom devicegroups.models import Devicegroup\nfrom devicetags.models import Devicetag\nfrom csv import QUOTE_ALL\n\n\nclass AutocompleteDevice(View):\n def post(self, request):\n name = request.POST[\"name\"]\n devices = Device.objects.filter(name__icontains=name).values(\"name\").distinct()[:20]\n results = []\n for device in devices:\n device_json = {}\n device_json['id'] = device[\"name\"]\n device_json['label'] = device[\"name\"]\n device_json['value'] = device[\"name\"]\n results.append(device_json)\n return HttpResponse(json.dumps(results), content_type='application/json')\n\n\nclass AutocompleteSmallDevice(View):\n def post(self, request):\n name = request.POST[\"name\"]\n devices = Lending.objects.filter(smalldevice__icontains=name).values(\"smalldevice\").distinct()\n results = []\n for device in devices:\n device_json = {}\n device_json['label'] = device[\"smalldevice\"]\n results.append(device_json)\n return HttpResponse(json.dumps(results), content_type='application/json')\n\n\nclass AutocompleteName(View):\n def post(self, request):\n name = request.POST[\"name\"]\n classtype = request.POST[\"classtype\"]\n if classtype == \"type\":\n objects = Type.objects.filter(name__icontains=name)[:20]\n urlname = \"type-detail\"\n elif classtype == \"room\":\n objects = Room.objects.filter(name__icontains=name)[:20]\n urlname = \"room-detail\"\n elif classtype == \"building\":\n objects = Building.objects.filter(name__icontains=name)[:20]\n urlname = \"building-detail\"\n elif classtype == \"manufacturer\":\n 
objects = Manufacturer.objects.filter(name__icontains=name)[:20]\n urlname = \"manufacturer-detail\"\n elif classtype == \"manufacturer\":\n objects = Manufacturer.objects.filter(name__icontains=name)[:20]\n urlname = \"manufacturer-detail\"\n else:\n return HttpResponse(\"\")\n if len(objects) > 0:\n retobjects = [\"
<li><a href='{0}'>{1}</a></li>
  • \".format(\n reverse(urlname, kwargs={\"pk\": obj[0]}), obj[1])\n for obj in objects.values_list(\"pk\", \"name\")]\n return HttpResponse(json.dumps(retobjects), content_type='application/json')\n else:\n return HttpResponse(\"\")\n\n\nclass AddDeviceField(View):\n def post(self, request):\n dform = QueryDict(query_string=six.text_type(request.POST[\"form\"]).encode('utf-8'))\n classname = dform[\"classname\"]\n if classname == \"manufacturer\":\n form = modelform_factory(Manufacturer, exclude=(), form=AddForm)(dform)\n elif classname == \"devicetype\":\n form = modelform_factory(Type, exclude=(), form=AddForm)(dform)\n elif classname == \"room\":\n form = modelform_factory(Room, exclude=(), form=AddForm)(dform)\n elif classname == \"group\":\n form = modelform_factory(Devicegroup, exclude=(), form=AddForm)(dform)\n else:\n return HttpResponse(\"\")\n data = {}\n if form.is_valid():\n if request.user.is_staff:\n classname = form.cleaned_data[\"classname\"]\n if classname == \"manufacturer\":\n newitem = Manufacturer()\n newitem.name = form.cleaned_data[\"name\"]\n newitem.save()\n elif classname == \"devicetype\":\n newitem = Type()\n newitem.name = form.cleaned_data[\"name\"]\n newitem.save()\n elif classname == \"room\":\n newitem = Room()\n newitem.name = form.cleaned_data[\"name\"]\n newitem.building = form.cleaned_data[\"building\"]\n newitem.section = form.cleaned_data[\"section\"]\n newitem.save()\n elif classname == \"group\":\n newitem = Devicegroup()\n newitem.name = form.cleaned_data[\"name\"]\n newitem.save()\n data[\"id\"] = newitem.pk\n data[\"name\"] = newitem.name\n data[\"classname\"] = classname\n else:\n print(form.errors)\n data[\"error\"] = \"Error: {0}\".format(form.non_field_errors())\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\nclass LoadExtraform(View):\n def post(self, request):\n classname = request.POST[\"classname\"]\n if classname == \"manufacturer\":\n form = modelform_factory(Manufacturer, exclude=(), form=AddForm)()\n elif classname == \"devicetype\":\n form = modelform_factory(Type, exclude=(), form=AddForm)()\n elif classname == \"room\":\n form = modelform_factory(Room, exclude=(), form=AddForm)()\n elif classname == \"group\":\n form = modelform_factory(Devicegroup, exclude=(), form=AddForm)()\n else:\n return HttpResponse(\"\")\n\n return HttpResponse(render_to_string('snippets/formfields.html', {\"form\": form}))\n\n\nclass PreviewMail(View):\n def post(self, request):\n template = request.POST[\"template\"]\n device = {\n \"currentlending\": request.POST.get(\"device[currentlending]\", \"\"),\n \"description\": request.POST.get(\"device[description]\", \"\"),\n \"devicetype\": request.POST.get(\"device[devicetype]\", \"\"),\n \"group\": request.POST.get(\"device[group]\", \"\"),\n \"hostname\": request.POST.get(\"device[hostname]\", \"\"),\n \"inventorynumber\": request.POST.get(\"device[inventorynumber]\", \"\"),\n \"manufacturer\": request.POST.get(\"device[manufacturer]\", \"\"),\n \"name\": request.POST.get(\"device[name]\", \"\"),\n \"room\": request.POST.get(\"device[room]\", \"\"),\n \"serialnumber\": request.POST.get(\"device[serialnumber]\", \"\"),\n \"templending\": request.POST.get(\"device[templending]\", \"\"),\n \"webinterface\": request.POST.get(\"device[webinterface]\", \"\")\n }\n if template == \"\":\n return HttpResponse(\"\")\n\n if device[\"manufacturer\"] != \"\":\n device[\"manufacturer\"] = get_object_or_404(Manufacturer, pk=device[\"manufacturer\"])\n else:\n del 
device[\"manufacturer\"]\n\n if device[\"devicetype\"] != \"\":\n device[\"devicetype\"] = get_object_or_404(Type, pk=device[\"devicetype\"])\n else:\n del device[\"devicetype\"]\n\n if device[\"room\"] != \"\":\n device[\"room\"] = get_object_or_404(Room, pk=device[\"room\"])\n else:\n del device[\"room\"]\n\n template = get_object_or_404(MailTemplate, pk=template)\n datadict = {\"device\": device, \"user\": {\n \"username\": request.user.username,\n \"first_name\": request.user.first_name,\n \"last_name\": request.user.last_name\n }}\n data = {\"subject\": pystache.render(template.subject, datadict),\n \"body\": pystache.render(template.body, datadict).replace(\"\\n\", \"
    \")}\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\nclass LoadMailtemplate(View):\n\n def post(self, request):\n template = request.POST[\"template\"]\n recipients = request.POST.get(\"recipients[]\", [])\n if template == \"\":\n return HttpResponse(\"\")\n template = get_object_or_404(MailTemplate, pk=template)\n data = {\"subject\": template.subject, \"body\": template.body}\n if isinstance(recipients, six.text_type):\n recipients = [recipients]\n newrecipients = [obj for obj in recipients]\n newrecipients += [obj.content_type.name[0].lower() + str(obj.object_id) for obj in\n template.default_recipients.all()]\n newrecipients = list(set(newrecipients))\n data[\"recipients\"] = newrecipients\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\nclass LoadSearchoptions(View):\n\n def post(self, request):\n term = request.POST[\"searchTerm\"]\n if term[:4] == \"not \":\n term = term[4:]\n invert = True\n else:\n invert = False\n facet = request.POST[\"facet\"]\n if facet == \"manufacturer\":\n items = Manufacturer.objects.filter(name__icontains=term)\n elif facet == \"devicetype\":\n items = Type.objects.filter(name__icontains=term)\n elif facet == \"room\":\n items = Room.objects.filter(name__icontains=term)\n elif facet == \"devicegroup\":\n items = Devicegroup.objects.filter(name__icontains=term)\n elif facet == \"user\":\n items = Lageruser.objects.filter(username__icontains=term)\n elif facet == \"tag\":\n items = Devicetag.objects.filter(name__icontains=term)\n elif facet == \"department\":\n items = Department.objects.filter(name__icontains=term)\n else:\n return HttpResponse(\"\")\n if invert:\n data = [\n {\"value\": \"not \" + str(object.pk) + \"-\" + six.text_type(object), \"label\": \"not \" + six.text_type(object)}\n for object in items]\n else:\n data = [{\"value\": str(object.pk) + \"-\" + six.text_type(object), \"label\": six.text_type(object)}\n for object in items]\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\nclass UserLendings(View):\n def post(self, request):\n user = request.POST[\"user\"]\n if user == \"\":\n return HttpResponse(\"\")\n user = get_object_or_404(Lageruser, pk=user)\n data = {}\n data[\"devices\"] = [[device[\"device__name\"] if device[\"device__name\"] else device[\"smalldevice\"],\n device[\"device__inventorynumber\"], device[\"device__serialnumber\"],\n device[\"duedate\"].strftime(\"%d.%m.%y\") if device[\"duedate\"] else \"\", device[\"pk\"]]\n for device in user.lending_set.filter(returndate=None).values(\"pk\", \"device__name\",\n \"device__inventorynumber\",\n \"device__serialnumber\",\n \"smalldevice\", \"duedate\")]\n\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\nclass AjaxSearch(View):\n def post(self, request):\n search = json.loads(request.POST.get('search', '{}'))\n searchdict = {}\n if request.user.departments.count() > 0:\n searchdict[\"department__in\"] = list(request.user.departments.all())\n excludedict = {}\n search_q_list = []\n exclude_q_list = []\n textfilter = None\n statusfilter = None\n displayed_columns = []\n searchvalues = [\"id\", \"name\", \"inventorynumber\", \"serialnumber\", \"devicetype__name\", \"room__name\",\n \"room__building__name\", \"currentlending__owner__username\", \"currentlending__owner__id\"]\n for searchitem in search:\n key, value = list(searchitem.items())[0]\n\n if value[:4] == \"not \":\n value = value[4:]\n dictionary = excludedict\n q_list = exclude_q_list\n else:\n dictionary = 
searchdict\n q_list = search_q_list\n\n if key == \"manufacturer\":\n value = value.split(\"-\", 1)[0]\n try:\n value = int(value)\n except:\n break\n if len(displayed_columns) < 8:\n displayed_columns.append((\"manufacturer\", ugettext(\"Manufacturer\")))\n searchvalues.append(\"manufacturer__name\")\n if \"manufacturer__in\" in dictionary:\n dictionary[\"manufacturer__in\"].append(value)\n else:\n dictionary[\"manufacturer__in\"] = [value]\n\n elif key == \"room\":\n value = value.split(\"-\", 1)[0]\n try:\n value = int(value)\n except:\n break\n if \"room__in\" in dictionary:\n dictionary[\"room__in\"].append(value)\n else:\n dictionary[\"room__in\"] = [value]\n\n elif key == \"devicetype\":\n value = value.split(\"-\", 1)[0]\n try:\n value = int(value)\n except:\n break\n if \"devicetype__in\" in dictionary:\n dictionary[\"devicetype__in\"].append(value)\n else:\n dictionary[\"devicetype__in\"] = [value]\n\n elif key == \"devicegroup\":\n value = value.split(\"-\", 1)[0]\n try:\n value = int(value)\n except:\n break\n if len(displayed_columns) < 8:\n displayed_columns.append((\"group\", ugettext(\"Group\")))\n searchvalues.append(\"group__name\")\n if \"group__in\" in dictionary:\n dictionary[\"group__in\"].append(value)\n else:\n dictionary[\"group__in\"] = [value]\n\n elif key == \"user\":\n value = value.split(\"-\", 1)[0]\n try:\n value = int(value)\n dictionary[\"currentlending__owner__id\"] = value\n except:\n if value.lower() == \"null\":\n dictionary[\"currentlending\"] = None\n else:\n q_list.append(Q(currentlending__owner__username__icontains=value)\n | Q(currentlending__owner__first_name__icontains=value)\n | Q(currentlending__owner__last_name__icontains=value))\n\n elif key == \"ipaddress\":\n if len(displayed_columns) < 8:\n displayed_columns.append((\"ipaddress\", ugettext(\"IP-Address\")))\n searchvalues.append(\"ipaddress__address\")\n if value.lower() == \"null\":\n dictionary[\"ipaddress\"] = None\n else:\n dictionary[\"ipaddress__address__icontains\"] = value\n\n elif key == \"inventoried\" or key == \"trashed\" or key == \"archived\":\n if value.startswith(\"before\"):\n value = value[7:]\n modifier = \"__lt\"\n elif value.startswith(\"after\"):\n value = value[6:]\n modifier = \"__gt\"\n else:\n modifier = \"\"\n\n if len(displayed_columns) < 8:\n displayed_columns.append((key, _(\"{0} on\").format(key.capitalize())))\n searchvalues.append(key)\n if key == \"archived\" or key == \"inventoried\":\n statusfilter = \"all\"\n dictionary[key + modifier] = parse_date(value)\n\n elif key == \"tag\":\n value = value.split(\"-\", 1)[0]\n try:\n value = int(value)\n except:\n break\n if \"tags__in\" in dictionary:\n dictionary[\"tags__in\"].append(value)\n else:\n dictionary[\"tags__in\"] = [value]\n\n elif key == \"department\":\n value = value.split(\"-\", 1)[0]\n if value == \"all\":\n del dictionary[\"department__in\"]\n try:\n value = int(value)\n except:\n break\n dictionary[\"department__in\"] = [value]\n\n elif key == \"hostname\":\n if len(displayed_columns) < 8:\n displayed_columns.append((\"hostname\", ugettext(\"Hostname\")))\n searchvalues.append(\"hostname\")\n\n dictionary[\"hostname__icontains\"] = value\n\n elif key == \"inventorynumber\":\n dictionary[\"inventorynumber__icontains\"] = value\n\n elif key == \"serialnumber\":\n dictionary[\"serialnumber__icontains\"] = value\n\n elif key == \"text\":\n textfilter = value\n\n elif key == \"status\":\n statusfilter = value\n\n elif key == \"id\":\n try:\n value = int(value)\n context = {\"device_list\": 
Device.objects.filter(id=value).values(\"id\", \"name\", \"inventorynumber\",\n \"devicetype__name\", \"room__name\",\n \"room__building__name\")}\n return render(request, 'devices/searchresult.html', context)\n except ValueError:\n context = {\n \"wrong_id_format\": True\n }\n return render(request, 'devices/searchempty.html', context)\n except Device.DoesNotExist:\n return render(request, 'devices/searchempty.html')\n elif key == \"shortterm\":\n if value.lower() == \"yes\":\n dictionary[\"templending\"] = True\n else:\n dictionary[\"templending\"] = False\n\n devices = Device.objects.filter(*search_q_list, **searchdict)\n devices = devices.exclude(*exclude_q_list, **excludedict)\n\n if statusfilter == \"all\":\n pass\n elif statusfilter == \"available\":\n devices = devices.filter(currentlending=None, archived=None, trashed=None)\n elif statusfilter == \"unavailable\":\n devices = devices.exclude(currentlending=None).filter(archived=None, trashed=None)\n elif statusfilter == \"archived\":\n devices = devices.exclude(archived=None)\n elif statusfilter == \"trashed\":\n devices = devices.exclude(trashed=None)\n else:\n devices = devices.filter(archived=None, trashed=None)\n\n if textfilter is not None:\n SEARCHSTRIP = getattr(settings, \"SEARCHSTRIP\", [])\n if \"text\" in SEARCHSTRIP:\n textfilter = textfilter.strip(settings.SEARCHSTRIP[\"text\"]).strip()\n try:\n searchid = int(textfilter.replace(\" \", \"\"))\n devices = devices.filter(Q(name__icontains=textfilter)\n | Q(inventorynumber__icontains=textfilter.replace(\" \", \"\"))\n | Q(serialnumber__icontains=textfilter.replace(\" \", \"\"))\n | Q(id=searchid))\n except ValueError:\n devices = devices.filter(Q(name__icontains=textfilter)\n | Q(inventorynumber__icontains=textfilter.replace(\" \", \"\"))\n | Q(serialnumber__icontains=textfilter.replace(\" \", \"\")))\n if \"format\" in request.POST:\n if request.POST[\"format\"] == \"csv\":\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"' + str(int(time.time())) + '_searchresult.csv\"'\n\n writer = csv.writer(response, delimiter=\",\", quotechar='\"', quoting=QUOTE_ALL)\n headers = [ugettext(\"ID\"), ugettext(\"Device\"), ugettext(\"Inventorynumber\"), ugettext(\"Serialnumber\"),\n ugettext(\"Devicetype\"), ugettext(\"Room\"), ugettext(\"Building\")]\n if len(displayed_columns) > 0:\n headers.extend([col[1] for col in displayed_columns])\n writer.writerow(headers)\n for device in devices.values_list(*searchvalues):\n writer.writerow(device)\n\n return response\n context = {\n \"device_list\": devices.values(*searchvalues),\n \"columns\": displayed_columns\n }\n return render(request, 'devices/searchresult.html', context)\n\n\nclass PuppetDetails(View):\n\n def post(self, request):\n searchvalue = request.POST[\"id\"]\n params = urllib.urlencode({'query': '[\"in\", \"certname\",[\"extract\", \"certname\",'\n + '[\"select_facts\",[\"and\",[\"=\", \"name\",\"'\n + settings.PUPPETDB_SETTINGS['query_fact'] + '\"],'\n + '[\"=\",\"value\",\"' + searchvalue + '\"]]]]]'})\n context = ssl.create_default_context(cafile=settings.PUPPETDB_SETTINGS['cacert'])\n context.load_cert_chain(certfile=settings.PUPPETDB_SETTINGS['cert'],\n keyfile=settings.PUPPETDB_SETTINGS['key'])\n conn = httplib.HTTPSConnection(settings.PUPPETDB_SETTINGS['host'],\n settings.PUPPETDB_SETTINGS['port'],\n context=context)\n conn.request(\"GET\", settings.PUPPETDB_SETTINGS['req'] + params)\n res = conn.getresponse()\n if res.status != httplib.OK:\n return 
HttpResponse('Failed to fetch puppet details from '\n + settings.PUPPETDB_SETTINGS['host'])\n context = {\n 'puppetdetails': json.loads(res.read().decode())\n }\n return render(request, 'devices/puppetdetails.html', context)\n\n\nclass PuppetSoftware(View):\n\n def post(self, request):\n searchvalue = request.POST[\"id\"]\n software_fact = settings.PUPPETDB_SETTINGS['software_fact']\n query_fact = settings.PUPPETDB_SETTINGS['query_fact']\n\n params = urllib.urlencode({'query': '[\"and\", [ \"=\", \"name\", \"' + software_fact + '\"],'\n + '[\"in\", \"certname\",[\"extract\", \"certname\",'\n + '[\"select_facts\",[\"and\",[\"=\", \"name\",\"' + query_fact + '\"],'\n + '[\"=\",\"value\",\"' + searchvalue + '\"]]]]]]'})\n context = ssl.create_default_context(cafile=settings.PUPPETDB_SETTINGS['cacert'])\n context.load_cert_chain(certfile=settings.PUPPETDB_SETTINGS['cert'],\n keyfile=settings.PUPPETDB_SETTINGS['key'])\n conn = httplib.HTTPSConnection(settings.PUPPETDB_SETTINGS['host'],\n settings.PUPPETDB_SETTINGS['port'],\n context=context)\n conn.request(\"GET\", settings.PUPPETDB_SETTINGS['req'] + params)\n res = conn.getresponse()\n if res.status != httplib.OK:\n return HttpResponse('Failed to fetch puppet details from '\n + settings.PUPPETDB_SETTINGS['host'])\n\n try:\n res = json.loads(res.read().decode())[0]\n software = res['value']\n context = {\n 'puppetsoftware': list(software.values())\n }\n except:\n return HttpResponse('Malformed puppet software fact.')\n\n return render(request, 'devices/puppetsoftware.html', context)\n","sub_path":"devices/ajax.py","file_name":"ajax.py","file_ext":"py","file_size_in_byte":24562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"422345980","text":"#!/usr/bin/python\n# -*- coding:utf8 -*-\n\nimport os\nimport nltk\nfrom nltk import SnowballStemmer\nrootpath = ''\n\n#获取path目录下所有的文件名\ndef getfiles(path):\n fileList = []\n files = os.listdir(path)\n for f in files:\n if(os.path.isfile(path + '/' + f)):\n fileList.append(f)\n return fileList\n\n\ndef processbagofword(file):\n global rootpath\n content=''\n with open(rootpath+file, 'r') as f:\n content = f.read()\n\n #tokenize\n words = nltk.word_tokenize(content)\n\n #stemming\n ps = nltk.stem.snowball.PortugueseStemmer()\n wordlist=['']\n for word in words:\n word1 = ps.stem(word)\n word1 = str(word1)\n wordlist.append(word1)\n wordlist.pop(0)\n\n #termcount\n termcount = {}\n for word in wordlist:\n termcount[word]=termcount.get(word,0)+1\n\n\n#主函数\nif __name__ == '__main__':\n fileList = []\n #这里是网页文件的文件夹,手工在这个目录里建一个名叫final的文件夹,用于存放处理后的文件\n rootpath = 'webKB'\n fileList = getfiles(rootpath)\n for fl in fileList:\n processbagofword(fl)\n \n \n","sub_path":"webKB with code/htmlprocessing.py","file_name":"htmlprocessing.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"220276517","text":"\"\"\" Модуль с функциями для визуализаций \"\"\"\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\n\ndef draw_cycle_tree(edge_list, node_size_multiplier=30000):\n \"\"\" Рисовалка для циклодеревьев. 
(English: a drawing function for cycle trees.)
\"\"\"\n G = nx.DiGraph(edge_list)\n nodes = list(zip(*edge_list))\n loop_nodes = []\n for index, edge in enumerate(edge_list):\n if edge[0] == edge[1]:\n loop_nodes.append(edge[0])\n\n # source_only_nodes = set(nodes[0]) - set(nodes[1])\n # edge_list = [edge for edge in edge_list if\n # edge[0] not in source_only_nodes]\n in_degrees = []\n loop_nodes_degree = []\n for node, deg in G.in_degree(G.nodes):\n in_degrees.append(deg)\n if node in loop_nodes:\n loop_nodes_degree.append(deg)\n pos = nx.random_layout(G)\n nx.draw_networkx_nodes(G, pos,\n node_color=in_degrees,\n cmap=plt.cm.Blues,\n node_size=[\n node_size_multiplier * degree / len(edge_list)\n for degree in in_degrees]\n )\n nx.draw_networkx_nodes(G, pos,\n nodelist=loop_nodes,\n node_color='red',\n node_size=[\n node_size_multiplier * degree / len(edge_list)\n for degree in\n loop_nodes_degree],\n alpha=1.0,\n node_shape='d')\n plt.axis(\"off\")\n plt.show()\n\n\ndef draw_cayley_table(op):\n pass\n","sub_path":"DMPy/Visualisation/Visualisation.py","file_name":"Visualisation.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"258385867","text":"import RPi.GPIO as GPIO\nimport time\ntouch = 2\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(18,GPIO.OUT)\nGPIO.setup(touch,GPIO.IN,pull_up_down=GPIO.PUD_UP)\nimport os\nMORSE_CODE_DICT = { 'A':'.-', 'B':'-...',\n 'C':'-.-.', 'D':'-..', 'E':'.',\n 'F':'..-.', 'G':'--.', 'H':'....',\n 'I':'..', 'J':'.---', 'K':'-.-',\n 'L':'.-..', 'M':'--', 'N':'-.',\n 'O':'---', 'P':'.--.', 'Q':'--.-',\n 'R':'.-.', 'S':'...', 'T':'-',\n 'U':'..-', 'V':'...-', 'W':'.--',\n 'X':'-..-', 'Y':'-.--', 'Z':'--..',\n '1':'.----', '2':'..---', '3':'...--',\n '4':'....-', '5':'.....', '6':'-....',\n '7':'--...', '8':'---..', '9':'----.',\n '0':'-----'}\n\nimport speech_recognition as sr\ndef record():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n audio = r.listen(source)\n try:\n a = r.recognize_google(audio)\n print(a)\n return a\n #print(\"system predicts:\"+r.recognize_google(audio))\n except Exception:\n print(\"Something went wrong !!\")\n return \"failed\"\n\n\ndef convert_to_morse(message):\n message=message.upper()\n dummy = ''\n for letter in message:\n if letter != ' ': \n dummy += MORSE_CODE_DICT[letter] + ' '\n else:\n dummy += ' '\n return dummy\n\n\n\ndef morse_to_text(message):\n message += ' '\n\n decipher = ''\n citext = ''\n i=0\n for letter in message:\n \n \n if (letter != ' '):\n\n i = 0\n\n citext += letter\n \n \n else:\n \n i += 1\n \n \n if i == 2 :\n \n \n decipher += ' '\n else:\n \n s=str(MORSE_CODE_DICT.values()).find(citext)\n decipher += str(MORSE_CODE_DICT.keys())[str(MORSE_CODE_DICT\n .values()).find(citext)]\n citext = ''\n\n return decipher\n\n\n\ntouchstatus = False\n\n\"\"\"def read_touchsensor():\n global touchstatus\n touchstatus = GPIO.input(touch)\n print(touchstatus)\n if touchstatus:\n t1=time.time()\n print(t1)\n while(True):\n if not GPIO.input(touch):\n \n t2=time.time()\n print(t2)\n break\n t_diff=t2-t1\n print('touched')\n #time.sleep(0.15)\n return t_diff,True\n else:\n print('no')\n #time.sleep(0.15)\n return 0,False\"\"\"\n \ndef morse_to_vibration(result):\n n=0\n while n2:\n GPIO.output(18,GPIO.HIGH)\n print('-')\n time.sleep(0.5)\n GPIO.output(18,GPIO.LOW)\"\"\"\n\ndef generator(t_diff):\n if(t_diff<0.25 and t_diff>0):\n generated_morse_code=generated_morse_code+\".\"\n elif(t_diff>0.25 and t_diff<1):\n 
generated_morse_code=generated_morse_code+\"-\"\n elif(t_diff>1 and t_diff<2):\n generated_morse_code=generated_morse_code+\" \"\n return generated_morse_code\n\n\ndef main():\n t1 = 0\n t2 = 0\n k=0\n while True:\n while True:\n touchstatus = GPIO.input(touch)\n #print(touchstatus)\n if touchstatus:\n k += 1\n if k<2:\n t1=time.time()\n #print(t1)\n t2 = time.time()\n #else:\n #pass\n #t2=time.time()\n #print(t2)\n \n t_diff1=t2-t1\n #print('touched')\n if(t_diff1<2):\n \"\"\"print(\"Start\")\n k=record()\n if \"failed\" not in k:\n q=convert_to_morse(k)\n print(q)\n morse_to_vibration(q)\"\"\"\n elif(t_diff1>2):\n print(t_diff1)\n break\n \n global touchstatus,generator_morse_code\n print('hello')\n generator_morse_code = \"\"\n while True:\n touchstatus = GPIO.input(touch)\n print(touchstatus)\n if touchstatus:\n y += 1\n t1=time.time()\n print(t1)\n else:\n t2=time.time()\n print(t2)\n \n t_diff=t2-t1\n #print('touched')\n if(t_diff<2):\n gernerator_morse_code = generator(t_diff)\n elif(t_diff>2):\n break\n\n \n \n \n \n \n \n \n# Executes the main function\nif __name__ == '__main__':\n main() \n\n \n","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":6344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"350007161","text":"\n\ndef rect_intersect_generic(position1, dimensions1, position2, dimensions2, x, y):\n d1 = dimensions1\n d2 = dimensions2\n\n cx1 = position1[x] + d1[x]/2\n cy1 = position1[y] + d1[y]/2\n cx2 = position2[x] + d2[x]/2\n cy2 = position2[y] + d2[y]/2\n\n ix = max(cx1, cx2) - min(cx1, cx2)\n iy = max(cy1, cy2) - min(cy1, cy2)\n\n return ix < (d1[x]+d2[x])/2 and iy < (d1[y]+d2[y])/2\n\n\ndef item_to_positions(item):\n position = item.position\n dimensions = item.get_dimension()\n return set(\n [(position[0] + dimensions[0], position[1], position[2]),\n (position[0], position[1] + dimensions[1], position[2]),\n (position[0], position[1], position[2] + dimensions[2])],\n )\n\n","sub_path":"py3dbp/auxiliary_methods.py","file_name":"auxiliary_methods.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"278597203","text":"def replace(pattern, replacement, corpus):\n i = 0\n while i < len(corpus):\n if corpus[i] == pattern[0]: # same 1st char as pattern\n if match_strings(pattern, corpus[i:]):\n corpus = corpus[0:i] + replacement + corpus[i + len(pattern):]\n\n i += 1\n return corpus\n\ndef match_strings(pattern, s2):\n same = True\n counter = 0\n while counter < len(pattern):\n if pattern[counter] != s2[counter]:\n same = False\n break\n counter += 1\n return same\n\n\nreplace(\"with\", \"for\", \"I play with a sentence without words\")\n","sub_path":"Les5_Strings_Files/ex5.6_Strings_String_replace.py","file_name":"ex5.6_Strings_String_replace.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"340045277","text":"import matplotlib.pyplot as plt\n\nsquares = range(1, 1000, 10)\nvalue_root = [x ** 2 for x in squares]\n# Select graph styling\nplt.style.use(\"seaborn\")\nfig, ax = plt.subplots()\n# Initiate scatter plot function\nax.scatter(squares, value_root, c=value_root, cmap='Greens', s=10)\n\n# Set the title and labels for the graph\nax.set_title(\"Square of Number\", fontsize=24)\nax.set_xlabel(\"Root of Value\", fontsize=14)\nax.set_ylabel(\"Square of Value\", fontsize=14)\n\n# Set size of tick 
label.\nax.tick_params(axis='both', which='major', labelsize=14)\n\n# Set a range for each axis.\nax.axis([0, 1100, 0, 1100000])\n\nif __name__ == \"__main__\":\n plt.savefig('square_scatterplot.png', bbox_inches='tight') # To\n # automatically save plots\n plt.show()\n","sub_path":"visualization_basics/scatter_plot.py","file_name":"scatter_plot.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"571620485","text":"# --------------\nimport pandas as pd\r\nimport nltk\r\n\r\ndf = pd.read_csv(path)\r\n\r\ndef pos_tagger(sent):\r\n sent = sent.split()\r\n sent_tag = nltk.pos_tag(sent)\r\n return sent_tag\r\n\r\ntagged_titles = df['nominee'].apply(lambda emmy_nominee: pos_tagger(emmy_nominee))\r\ntagged_titles_df = pd.DataFrame(data = tagged_titles)\r\nprint(tagged_titles_df.head())\n\n\n# --------------\nimport matplotlib.pyplot as plt\n\n# Function to create tags\n\ndef count_tags(title_with_tags):\n tag_count = {}\n for word, tag in title_with_tags:\n if tag in tag_count:\n tag_count[tag] += 1\n else:\n tag_count[tag] = 1\n return(tag_count)\n\n\n# Mapping the counts with the tags\ntagged_titles_df['tag_counts'] = tagged_titles_df['nominee'].map(count_tags)\n\n\n# Tagset containing all the possible tags\ntag_set = list(set([tag for tags in tagged_titles_df['tag_counts'] for tag in tags]))\n\n# Creating tag column frequency for each tags\nfor tag in tag_set:\n tagged_titles_df[tag] = tagged_titles_df['tag_counts'].map(lambda x: x.get(tag, 0))\n\n\n# Subsetting the dataframe to contain only the tagset columns \ntop_pos=tagged_titles_df[tag_set]\n\n# Sorting and storing the top 10 frequent tags\ntop_pos=top_pos.sum().sort_values().tail(10)\n \n \n# Plotting the barplot of the tag frequency \ntitle = 'Frequency of POS Tags in Show Titles' \ntop_pos.plot(kind='barh', figsize=(18,10), title=title)\n\n\nplt.show()\n\n\n# --------------\n# Function to create vocabulary of the tags\ndef vocab_creator(tagged_titles):\n vocab = {}\n\n for row in tagged_titles['nominee']:\n for word, tag in row:\n if word in vocab:\n if tag in vocab[word]:\n vocab[word][tag] += 1\n else:\n vocab[word][tag] = 1\n else:\n vocab[word] = {tag: 1}\n \n return vocab \n \n# Creating vocab of our tagged titles dataframe\nvocab= vocab_creator(tagged_titles_df)\n\nvocab_df = pd.DataFrame.from_dict(vocab, orient = 'Index')\nvocab_df.fillna(value = 0, inplace = True)\ntop_verb_nominee = vocab_df['VBG'].sort_values().tail(10)\n\ntitle = 'Top Verbs in Show Titles' \ntop_verb_nominee.plot(kind='barh', figsize=(18,10), title=title)\nplt.show()\n\ntop_noun_nominee = vocab_df['NN'].sort_values().tail(10)\n\ntitle = 'Top Nouns in Show Titles' \ntop_noun_nominee.plot(kind='barh', figsize=(18,10), title=title)\nplt.show()\n\n\n# --------------\n# Subsetting comedy winners\nnew_df=df[(df['winner']==1) & (df['category'].str.contains('Comedy'))]\n\n# Mapping the position tags of the winners\ntagged_titles_winner = new_df['nominee'].str.split().map(pos_tag)\n\n# Creating a dataframe\ntagged_titles_winner_df=pd.DataFrame(tagged_titles_winner)\n\n# Creating a vocabulary of the tags\nvocab= vocab_creator(tagged_titles_winner_df)\n\n# Creating a dataframe from the dictionary\nvocab_df = pd.DataFrame.from_dict(vocab,orient='index')\n\n# Filling the nan values in the dataframe\nvocab_df.fillna(value=0, inplace=True)\n\n# Saving the top 5 most frequent NNP taggged words\nsize = 5\ntag = 'NNP' 
\ntop_proper_noun_nominee=vocab_df[tag].sort_values().tail(size)\n\n\n# Plotting the top 5 most frequent NNP taggged words\ntitle = 'Top {} Most Frequent Words for {} Tag'.format(size, tag)\ntop_proper_noun_nominee.plot(kind='barh', figsize=(12,6), title=title)\nplt.show()\n\n\n# --------------\n\"\"\" After filling and submitting the feedback form, click the Submit button of the codeblock\"\"\"\r\n\n\n\n","sub_path":"pos-tagging-analysis/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"387078260","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n#input('dime algo')\n#a = input('dime algo')\n#print(a)\n\n#CONSTANTE = 13\n#print(CONSTANTE)\n#CONSTANTE = 2\n#print(CONSTANTE)\n\n#a = '3'\n#b = '2'\n#print(a+b)\n\n#a = 3.2 \n#b = 3\n#print(a + b)\n\n#a = 'Holi'\n#print(len(a))\n\n#name = 'Roman'\n#print(name[0])\n#print(name[0:2])\n#print(name[:2])\n#print(name[2:])\n#print(name[:1])\n#print(name[:-4])\n\na = 'Espana'\n#print('yo vivo en {}'.format(a))\n#print(f\"yo vivo en {a}\")\n#print(a + ' yo vivo en ')\n\nn = 'Roman'\n#print(n.upper())\n#print(n)\n#print(n.swapcase())\n#print(n)\n\n#print(n.isnumeric())\n\n#variableInput = input()\n#print(variableInput[:3].upper())\n\n#print(type(n))\n\nnumero = int(input('Escribe un numero: \\n'))\n#numeroNegativo = int('-' + str(numero))\nnumeroNegativo2 = numero * -1 \n#print(numeroNegativo)\nprint(numeroNegativo2)\n\nnumero2 = -4\nprint(numero2)\n\n\n\n\n","sub_path":"src/main/python/Curso_intensivo_Python_CICE/clase_1.py","file_name":"clase_1.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"190207687","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/muntjac/demo/sampler/features/layouts/SplitPanelBasicExample.py\n# Compiled at: 2013-04-04 15:36:38\nfrom muntjac.api import VerticalLayout, VerticalSplitPanel, Label, HorizontalSplitPanel, CheckBox\nfrom muntjac.ui.button import IClickListener\nfrom muntjac.terminal.sizeable import ISizeable\n\nclass SplitPanelBasicExample(VerticalLayout):\n brownFox = 'The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. The quick brown fox jumps over the lazy dog. 
'\n\n def __init__(self):\n super(SplitPanelBasicExample, self).__init__()\n vert = VerticalSplitPanel()\n vert.setHeight('450px')\n vert.setWidth('100%')\n vert.setSplitPosition(150, ISizeable.UNITS_PIXELS)\n self.addComponent(vert)\n vert.addComponent(Label(self.brownFox))\n horiz = HorizontalSplitPanel()\n horiz.setSplitPosition(50)\n vert.addComponent(horiz)\n horiz.addComponent(Label(self.brownFox))\n horiz.addComponent(Label(self.brownFox))\n toggleLocked = CheckBox('Splits locked', LockListener(vert, horiz))\n toggleLocked.setImmediate(True)\n self.addComponent(toggleLocked)\n\n\nclass LockListener(IClickListener):\n\n def __init__(self, vert, horiz):\n self._vert = vert\n self._horiz = horiz\n\n def buttonClick(self, event):\n self._vert.setLocked(event.getButton().booleanValue())\n self._horiz.setLocked(event.getButton().booleanValue())","sub_path":"pycfiles/Muntjac-1.1.2-py2.7/SplitPanelBasicExample.py","file_name":"SplitPanelBasicExample.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"59752869","text":"#!/usr/bin/python\n\n\"\"\"\nCombine mutliple tf combination of the edge scores at difference dbd cutoff.\n\neval_method: cumulative, binned\n\"\"\"\n\nimport sys\nimport argparse\nimport glob\nimport os.path\nimport numpy\nimport matplotlib.pyplot as plt\n\ndef parse_args(argv):\n parser = argparse.ArgumentParser(description=\"Combine chip and pwm evaluations\")\n parser.add_argument('-e', '-eval_method', dest='eval_method', type=str, default='cumulative')\n parsed = parser.parse_args(argv[1:])\n return parsed\n\ndef errprint(st):\n sys.stderr.write(st + \"\\n\")\n\ndef main(argv):\n parsed = parse_args(argv)\n\n \"\"\" evaluate chip and pwm supports on binary gold standard \"\"\"\n # # file initialization\n # combined_method = 'all_methods'\n # dir_network = '/Users/KANG/cgscluster/proj_db_infer_pipe/output/fly_network_combined/'\n # dir_sub_quantile = 'weighed_quantile_combine_' + combined_method + '_baranski_9microarray/analysis_flynet_top20to200k/'\n # fns = []\n # eval_range = ['', '']\n # fns.append(dir_sub_quantile + 'analysis_chip_support.all_methods_combined_by_samples.txt')\n # fns.append(dir_sub_quantile + 'analysis_pwm_support.all_methods_combined_by_samples.txt')\n # fns.append(dir_sub_quantile + 'analysis_chip_support.all_methods_combined_by_experiments.txt')\n # fns.append(dir_sub_quantile + 'analysis_pwm_support.all_methods_combined_by_experiments.txt')\n # fns.append(dir_sub_quantile + 'analysis_chip_support.all_methods_combined_by_perturbations.txt')\n # fns.append(dir_sub_quantile + 'analysis_pwm_support.all_methods_combined_by_perturbations.txt')\n # fns.append(dir_sub_quantile + 'analysis_chip_support.all_methods_combined_equal.txt')\n # fns.append(dir_sub_quantile + 'analysis_pwm_support.all_methods_combined_equal.txt')\n\n # # figure setup\n # colors = ['k:', 'r', 'g', 'b', 'm']\n # labels = []\n # labels.append('chance')\n # labels.append('weighed by samples')\n # labels.append('weighed by experiments')\n # labels.append('weighed by perturbations')\n # labels.append('weighed equally')\n # # x_ticks = ['2k', '4k', '6k', '8k', '10k', '12k', '14k', '16k', '18k', '20k']\n # # x_ticks = ['10k', '20k', '30k', '40k', '50k', '60k', '70k', '80k', '90k', '100k']\n # x_ticks = ['20k', '40k', '60k', '80k', '100k', '120k', '140k', '160k', '180k', '200k']\n\n # # compute chip and pwm supports\n # [eval_chip, eval_pwm] = parse_binary_gold_standard(dir_network, fns, 
parsed.eval_method)\n\n \"\"\" evaluate chip and pwm supports on binding overlap gold standard \"\"\"\n # file initialization\n combined_method = 'all_methods'\n # combined_method = 'all_np'\n dir_network = '/Users/KANG/cgscluster/proj_db_infer_pipe/output/fly_network_combined/'\n dir_sub_quantile = 'weighed_model_average_' + combined_method + '_baranski_9microarray/analysis_binding_indep/chip.bp.np.set.sizes.top4to40k.'\n # dir_sub_quantile = 'weighed_quantile_combine_all_methods_baranski_9microarray/analysis_binding_overlap/chip.bp.np.set.sizes.top20to200k.'\n fns = []\n fns.append(dir_network + dir_sub_quantile + combined_method + '_combined_by_samples.txt')\n fns.append(dir_network + dir_sub_quantile + combined_method + '_combined_by_experiments.txt')\n fns.append(dir_network + dir_sub_quantile + combined_method + '_combined_by_perturbations.txt')\n # fns.append(dir_network + dir_sub_quantile + combined_method + '_combined_equal.txt')\n\n # figure setup\n colors = ['k:', 'r', 'g', 'b']\n labels = []\n labels.append('chance')\n labels.append('weighed by samples')\n labels.append('weighed by experiments')\n labels.append('weighed by perturbations')\n # labels.append('weighed equally')\n # x_ticks = ['2k', '4k', '6k', '8k', '10k', '12k', '14k', '16k', '18k', '20k']\n x_ticks = ['4k', '8k', '12k', '16k', '20k', '24k', '28k', '32k', '36k', '40k']\n # x_ticks = ['10k', '20k', '30k', '40k', '50k', '60k', '70k', '80k', '90k', '100k']\n # x_ticks = ['20k', '40k', '60k', '80k', '100k', '120k', '140k', '160k', '180k', '200k']\n\n # compute chip and pwm supports\n eval_chip = [None] * (len(fns)+1)\n eval_pwm = [None] * (len(fns)+1)\n [eval_chip[0], eval_pwm[0]] = parse_chance_binding_overlap(fns[0])\n for i in range(len(fns)):\n [eval_chip[i+1], eval_pwm[i+1]] = parse_binding_overlap(fns[i], parsed.eval_method)\n\n \"\"\"\" plot figures \"\"\"\n plt.figure(num=None, figsize=(15,8), dpi=80)\n plt.subplot(1,2,1)\n for i in range(len(eval_chip)):\n plt.plot(eval_chip[i], colors[i], label=labels[i])\n plt.xticks(range(len(eval_chip[0])), x_ticks)\n plt.xlabel('Predictions grouped by rank')\n plt.ylabel('Interactions supported by ChIP')\n plt.xlim(-1, len(eval_chip[0]))\n plt.ylim(0, .5)\n plt.legend(loc=\"upper right\")\n\n plt.subplot(1,2,2)\n for i in range(len(eval_pwm)):\n plt.plot(eval_pwm[i], colors[i], label=labels[i])\n plt.xticks(range(len(eval_pwm[0])), x_ticks)\n plt.xlabel('Predictions grouped by rank')\n plt.ylabel('Interactions supported by PWM')\n plt.xlim(-1, len(eval_pwm[0]))\n plt.ylim(0, .5)\n plt.legend(loc=\"upper right\")\n\n plt.show()\n\ndef parse_binary_gold_standard(fns, method):\n eval_chip = numpy.zeros([len(fns)/2+1, 10])\n eval_pwm = numpy.zeros([len(fns)/2+1, 10])\n\n for i in range(len(fns)/2):\n chip_support = numpy.loadtxt(fns[i*2])\n pwm_support = numpy.loadtxt(fns[i*2+1]) \n if i == 0:\n eval_chip[0,:] = chip_support[0,:]\n eval_pwm[0,:] = pwm_support[0,:]\n eval_chip[i+1,:] = chip_support[1,:]\n eval_pwm[i+1,:] = pwm_support[1,:]\n \n if method == 'cumulative':\n temp_eval_chip = numpy.zeros([len(fns)/2+1, 10])\n temp_eval_pwm = numpy.zeros([len(fns)/2+1, 10])\n for j in range(10):\n temp_eval_chip[:,j] = numpy.sum(eval_chip[:,0:(j+1)], axis=1)/(j+1)\n temp_eval_pwm[:,j] = numpy.sum(eval_pwm[:,0:(j+1)], axis=1)/(j+1)\n eval_chip = temp_eval_chip\n eval_pwm = temp_eval_pwm\n\n return [eval_chip, eval_pwm]\n\ndef parse_binding_overlap(fn, method):\n lines = open(fn, \"r\").readlines()\n chip = [0] * (len(lines))\n pwm = [0] * (len(lines))\n\n if method == 
\"cumulative\":\n for i in range(len(lines)):\n line = lines[i].split()\n chip[i] = float(line[5])/float(line[2])\n pwm[i] = float(line[4])/float(line[2])\n \n elif method == \"binned\":\n for i in range(len(lines)):\n line = lines[i].split()\n if i == 0:\n chip[i] = float(line[5])/float(line[2])\n pwm[i] = float(line[4])/float(line[2])\n else:\n chip[i] = (float(line[5]) - float(prevline[5]))/(float(line[2]) - float(prevline[2]))\n pwm[i] = (float(line[4]) - float(prevline[4]))/(float(line[2]) - float(prevline[2]))\n prevline = line \n\n return [chip, pwm]\n\ndef parse_chance_binding_overlap(fn):\n line = open(fn, 'r').readline()\n line = line.split()\n chip = [float(line[1])/float(line[8]) for _ in range(10)]\n pwm = [float(line[0])/float(line[7]) for _ in range(10)]\n\n return [chip, pwm]\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"temp/eval_weighed_model_average_baranski+9microarray.py","file_name":"eval_weighed_model_average_baranski+9microarray.py","file_ext":"py","file_size_in_byte":7193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"59964106","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import ModuleList\nfrom transformers.modeling_bert import BertPreTrainedModel, BertModel\nfrom model.utils import Vocab\nfrom model.modules import BertFeature, Encoding, Interaction, FeatureExtractor\n\nclass BIIN(nn.Module):\n def __init__(self, config, vocab, hidden_size, enc_num_layers=1, ):\n super(BIIN, self).__init__()\n self._input = BertFeature(config, vocab)\n self._dropout = nn.Dropout(p=0.15)\n self._enc_num_layers = enc_num_layers\n\n assert len(hidden_size) == enc_num_layers\n if isinstance(hidden_size, list):\n input_dim = [768] + [hidden_size[0] * 2, hidden_size[1] * 2] # encoder is bidirectionL\n self._encoder = ModuleList([Encoding(i, h) for i, h in zip(input_dim, hidden_size)])\n self._extractor = FeatureExtractor(hidden_size[2] * 2) # encoder is bidirectionL\n else:\n self._encoder = Encoding(768, hidden_size)\n self._extractor = FeatureExtractor(hidden_size * 2) # encoder is bidirectionL\n\n self._interaction = Interaction()\n\n\n def forward(self, inputs):\n q_1, q_2 = inputs\n bert_1 = self._dropout(self._input(q_1)[1])\n bert_2 = self._dropout(self._input(q_2)[1])\n\n if self._enc_num_layers == 1:\n encoded_1 = self._encoder(bert_1)\n encoded_2 = self._encoder(bert_2)\n else:\n x_1 = bert_1\n x_2 = bert_2\n for i in range(len(self._encoder)):\n encoded_1 = self._encoder[i](x_1)\n encoded_2 = self._encoder[i](x_2)\n x_1 = encoded_1\n x_2 = encoded_2\n\n interaction_output = self._interaction(encoded_1, encoded_2)\n output = self._extractor(interaction_output)\n\n return output\n","sub_path":"model/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"168958073","text":"# -*- coding:utf-8 -*-\n\nimport CONSTANT\nfrom wrapper import keypair\nfrom wrapper.client import Client\nfrom tools import load_json\nfrom wrapper import builder as BUILDER\n\nconstant=CONSTANT.Constant('test')\n\n# fund a random account\ndef fund_a_random_account():\n seed=constant.SEED\n client=Client(seed,api_server=constant.API_SERVER)\n destination=keypair.Keypair.random()\n address=destination.address().decode().encode('ascii')\n print(address)\n client.fund(destination=address,amount=30)\n\n# fund an account\ndef fund_an_account(id,amount):\n seed=constant.SEED\n 
client=Client(seed,api_server=constant.API_SERVER)\n address=id\n print(address)\n client.fund(destination=address,amount=amount)\n\n# fund the first 100 accounts in keys.json\ndef fund_accounts_in_keys_dot_json(lower_bound=0, upper_bound=1):\n seed=constant.SEED\n client=Client(seed,api_server=constant.API_SERVER)\n builder=BUILDER.Builder(secret=seed)\n keys=None\n keys=load_json.file2json('keys.json')[lower_bound:upper_bound]\n for key in keys:\n address=key['public_key']\n print(address)\n builder.append_create_account_op(destination=address, starting_balance=100)\n builder.sign()\n builder.submit()\n\n\nif __name__=='__main__':\n import time\n # while True:\n # fund_a_random_account();\n # time.sleep(5)\n # fund_accounts_in_keys_dot_json(lower_bound=102, upper_bound=103)\n fund_an_account('GCHZDZXYLZ76XADS7735LK3OJUFZ2TBSXAR23YXKXCXXHUEEVT5C37PY',100000)\n","sub_path":"fund_account.py","file_name":"fund_account.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"251054794","text":"# -*- coding: cp936 -*-\nimport os\n\nif __name__==\"__main__\":\n\n## print(\"当前工作路径:\",os.getcwd())\n\n\n wellName=os.path.basename(os.getcwd())\n print ('Current WellName...'+wellName)\n fileName_wellHead=\"$wellHead#.txt\"\n fileName_NeedDeal=\"B6_new.txt\"\n openFileWrite=\"addWellHead.txt\"\n\n\n fileOpened_wellHead_dic=open(fileName_wellHead,'r')\n fileOpend_NeedDealJH=open(fileName_NeedDeal,'r')\n fileWrited=open(openFileWrite,'w')\n\n welllName_list=[]\n KB_list=[]\n X_list=[]\n Y_list=[]\n lineZone_list=[]\n \n\n \n for lineZone in fileOpened_wellHead_dic.readlines():\n if lineZone!=\"\":\n lineZone_list.append(lineZone)\n splitlineZone=lineZone.split()\n welllName_list.append(splitlineZone[0]) \n X_list.append(splitlineZone[1])\n Y_list.append(splitlineZone[2])\n KB_list.append(splitlineZone[3])\n\n \n lineIndex=0\n lineFlag=1\n\n for lineLayer in fileOpend_NeedDealJH.readlines():\n if lineLayer!=\"\":\n lineIndex+=1\n print(lineIndex)\n splitLine=lineLayer.split()\n originalValue=0\n if lineIndex>=1:\n jh=splitLine[0]\n for i in range(0,len(X_list)):\n if welllName_list[i]==jh :\n splitLine.append(X_list[i])\n splitLine.append(Y_list[i])\n splitLine.append(KB_list[i])\n lineLayer='\\t'.join(splitLine)+'\\n'\n fileWrited.write(lineLayer)\n\n fileWrited.close()\n \n","sub_path":"文件读写及处理/添加井位信息/添加井位信息.py","file_name":"添加井位信息.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"560260547","text":"import requests\nimport os\nimport json\nimport sys\n\n\nFILE_PATH = os.path.dirname(os.path.abspath(__file__))\nAPI_INFO_PATH = os.path.join(FILE_PATH, \"apiinfo.json\")\n\n\nclass APIException(Exception):\n pass\n\n\ndef _get_api_info():\n # Load the api key from file or get the api key from the user\n if not os.path.exists(API_INFO_PATH):\n api_url = input(\"Please input the API url: \").strip()\n api_key = input(\"Please input your given api key: \").strip()\n api_info = {\"api_key\": api_key, \"api_url\": api_url}\n with open(API_INFO_PATH, 'w') as f:\n f.write(json.dumps(api_info))\n print(f\"Successfully saved api key to {API_INFO_PATH}. 
Please delete/edit this file if you gave the wrong key.\")\n else:\n with open(API_INFO_PATH) as f:\n api_info = json.load(f)\n return api_info\n\n\ndef _get_api_url():\n # Load the api key from file or get the api key from the user\n if not os.path.exists(API_INFO_PATH):\n api_key = input(\"Please input your given api key: \").strip()\n with open(API_INFO_PATH, 'w') as f:\n f.write(api_key)\n print(\"Successfully saved api key to {}. Please delete/edit this file if you gave the wrong key.\".format(API_INFO_PATH))\n else:\n with open(API_INFO_PATH) as f:\n api_key = f.read().strip()\n return api_key\n\n\ndef score(predictions):\n \"\"\"\n Submit the given predictions to the API and score them\n\n Args:\n predictions (list): The predictions to submit.\n\n Example:\n score([1,2,3]) -> 0.12345\n\n Returns:\n float: The score.\n \"\"\"\n\n # Check that predictions has correct type\n if not isinstance(predictions, list):\n raise TypeError(\"Predictions must be a list, not {}\".format(type(predictions)))\n\n # Create the data object to send\n data = {\n \"predictions\": predictions\n }\n\n api_info = _get_api_info()\n\n # Assign the header values\n headers = {\n \"Content-Type\": \"application/json\",\n \"x-api-key\": api_info[\"api_key\"]}\n\n # Submit the request\n response = requests.post(api_info[\"api_url\"], data=json.dumps(data), headers=headers)\n\n try:\n return float(response.text)\n except:\n raise APIException(response.text)\n\n\ndef score_predictions_from_file(file_path):\n \"\"\"\n Load predictions from a file and score them. The file containing the predictions is expected to contain\n a prediction for each sample separated by commas.\n\n Example of valid file content: \"1,4,3,10,1,5\"\n\n Args:\n file_path (str): The path to the file.\n\n Returns:\n float: The score.\n \"\"\"\n with open(file_path, encoding='utf8') as f:\n predictions = [float(x) for x in f.read().strip().split(\",\")]\n return score(predictions)\n\n\nif __name__ == '__main__':\n x = [5] * 4000\n print(score(x))","sub_path":"score_predictions.py","file_name":"score_predictions.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"376262530","text":"import torch\r\nfrom torch import optim, nn\r\nimport torch.utils.data as Data\r\nimport torchvision\r\n\r\nimport argparse\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"--cuda-able\", action=\"store_true\", default=False)\r\nargs = parser.parse_args()\r\n\r\nbatch_size = 100\r\nnum_epochs = 20\r\nlearning_rate = 1e-3\r\n\r\ntrain_dataset = torchvision.datasets.MNIST(\"./mnist/\", train=True, transform=torchvision.transforms.ToTensor(), download=False)\r\ntrain_loader = Data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\r\n\r\nclass RNN(nn.Module):\r\n def __init__(self, in_dim, hidden_dim, n_layer, n_class):\r\n super(RNN, self).__init__()\r\n self.lstm = nn.LSTM(in_dim, hidden_dim, n_layer, batch_first=True)\r\n self.classifier = nn.Linear(hidden_dim, n_class)\r\n \r\n def forward(self, x):\r\n out, _ = self.lstm(x)\r\n out = out[:, -1, :]\r\n out = self.classifier(out)\r\n return out\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() and args.cuda_able else \"cpu\")\r\n\r\nmodel = RNN(28, 128, 2, 10).to(device)\r\ncriterion = nn.CrossEntropyLoss()\r\noptimizer = optim.Adam(model.parameters(), lr=learning_rate)\r\n\r\nnum_samples = train_dataset.targets.size(0)\r\nnum_batches = num_samples // batch_size\r\nfor epoch in 
range(num_epochs):\r\n total_correct = 0\r\n total_loss = 0\r\n for i, (img, label) in enumerate(train_loader):\r\n img = img.squeeze(1).to(device)\r\n label = label.to(device)\r\n out = model(img)\r\n num_correct = (torch.argmax(out, dim=1) == label).sum()\r\n total_correct += num_correct\r\n loss = criterion(out, label)\r\n total_loss += loss\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n print(\"epoch[{} / {}], loss: {:.3f}, acc: {:.3f}\".format(epoch + 1, num_epochs, total_loss.item() / num_batches, total_correct.item() / num_samples))\r\n","sub_path":"pytorch-learning/RNN.py","file_name":"RNN.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"622426346","text":"x=1\nwynikN=0\nwynikP=0\nwhile x<50:\n if x%2!=0:\n wynikN=wynikN+x\n else:\n wynikP=wynikP+x\n x=x+1\nprint(\"Suma liczb parzystych to:\",wynikP,\".\",\"Suma liczb nieparzystych to: \",wynikN,\".\")","sub_path":"02-ControlStructures/02-17.py","file_name":"02-17.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"271537373","text":"#!/usr/bin/env python3\n\"\"\"Error Analysis module\"\"\"\nimport numpy as np\n\n\ndef specificity(confusion):\n \"\"\"Calculates the specificity for each class in a confusion matrix\n\n Args:\n confusion (numpy.ndarray): Is a confusion matrix of shape\n (classes, classes) where the row indices is the correct labels\n and the column indices represent the predicted label\n\n Returns:\n numpy.ndarray: Containing the specificity of each class of shape\n (classes,)\n\n \"\"\"\n FP = confusion.sum(axis=0) - np.diag(confusion)\n FN = confusion.sum(axis=1) - np.diag(confusion)\n TP = np.diag(confusion)\n TN = confusion.sum() - (FP + FN + TP)\n return TN / (TN + FP)\n","sub_path":"supervised_learning/0x04-error_analysis/3-specificity.py","file_name":"3-specificity.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"651100957","text":"'''\nTraining of one task.\n__Author__ == 'Haowen Xu'\n__Date__ == '08-25-2018'\n'''\n\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport importlib\nimport itertools\nimport matplotlib\nmatplotlib.use('Agg')\nimport time\nimport threading\nfrom multiprocessing import Pool\nimport numpy as np\nimport os\nimport pdb\nimport pickle\nimport subprocess\nimport sys\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport threading\nimport scipy.misc\nfrom skimage import color\nimport init_paths\nfrom models.sample_models import *\nfrom lib.data.synset import *\nimport scipy\nimport skimage\nimport skimage.io\nimport transforms3d\nimport math\nimport matplotlib.pyplot as plt\nfrom PIL import Image, ImageDraw, ImageFont\nfrom task_viz import *\nimport random\nimport models.architectures as architectures\nfrom data.load_ops import resize_rescale_image\nfrom data.load_ops import rescale_image\nimport utils\nimport lib.data.load_ops as load_ops\n\nparser = argparse.ArgumentParser(description='Viz Single Task')\n\nparser.add_argument('--source_tasks', dest='source_tasks')\nparser.set_defaults(source_tasks='NONE')\n\nparser.add_argument('--target_tasks', dest='target_tasks')\nparser.set_defaults(target_tasks='NONE')\n\nparser.add_argument('--img', 
dest='im_name')\nparser.set_defaults(im_name='NONE')\n\nparser.add_argument('--store', dest='store_name')\nparser.set_defaults(store_name='NONE')\n\nparser.add_argument('--store-rep', dest='store_rep', action='store_true')\nparser.set_defaults(store_rep=False)\n\nparser.add_argument('--store-pred', dest='store_pred', action='store_true')\nparser.set_defaults(store_pred=False)\n\nparser.add_argument('--on-screen', dest='on_screen', action='store_true')\nparser.set_defaults(on_screen=False)\n\ntf.logging.set_verbosity(tf.logging.ERROR)\n\nlist_of_tasks = 'autoencoder curvature denoise edge2d edge3d \\\nkeypoint2d keypoint3d colorization jigsaw \\\nreshade rgb2depth rgb2mist rgb2sfnorm \\\nroom_layout segment25d segment2d vanishing_point \\\nsegmentsemantic class_1000 class_places inpainting_whole'\nlist_of_tasks = list_of_tasks.split()\n\ndef generate_cfg(task):\n repo_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n CONFIG_DIR = os.path.join(repo_dir, 'experiments/final', task)\n ############## Load Configs ##############\n import utils\n import data.load_ops as load_ops\n from general_utils import RuntimeDeterminedEnviromentVars\n cfg = utils.load_config( CONFIG_DIR, nopause=True )\n #RuntimeDeterminedEnviromentVars.register_dict( cfg )\n #NOTE: Commented out by Haowen, only needed in reference setting\n #cfg['batch_size'] = 1\n #if 'batch_size' in cfg['encoder_kwargs']:\n # cfg['encoder_kwargs']['batch_size'] = 1\n cfg['model_path'] = os.path.join( repo_dir, 'temp', task, 'model.permanent-ckpt' )\n cfg['saved_model_path'] = os.path.join(cfg['saved_model_dir'],\n task, 'model.development-ckpt')\n cfg['root_dir'] = repo_dir\n cfg['max_steps'] = utils.get_max_steps(cfg['num_samples_epoch'], cfg)\n return cfg\n\ndef loading_fn_thread(inputs, thread_id, training_runners, cfg):\n loading_fn = inputs['load_and_enqueue']\n input_placeholder = inputs['input_placeholder']\n target_placeholder = inputs['target_placeholder']\n mask_placeholder = inputs['mask_placeholder']\n data_idx_placeholder = inputs['data_idx_placeholder']\n\n input_filepaths = ['/home/haowen/GitHub/taskonomy/taskbank/lib/data/unit_0_domain_rgb.json']\n unit_size = cfg['unit_size']\n num_samples_epoch = cfg['num_samples_epoch']\n enqueue_op = inputs['enqueue_op']\n\n sess = training_runners['sess']\n loading_fn(thread_id, sess, training_runners['coord'], input_filepaths,\n step=cfg['num_read_threads'],\n unit_size=unit_size,\n num_samples_epoch=num_samples_epoch,\n input_placeholder=input_placeholder,\n target_placeholder=target_placeholder,\n mask_placeholder=mask_placeholder,\n data_idx_placeholder=data_idx_placeholder,\n rs_dim=0,\n enqueue_op=enqueue_op,\n is_training=True,\n cfg=cfg)\n\ndef training_fn(model, inputs, training_runners, cfgs):\n sess = training_runners['sess']\n coord = training_runners['coord']\n sess.run(model['init_fn'])\n\n #for i in range(100):\n # sess.run(inputs['rgb2sfnorm']['target_batch'])\n # print(i)\n\n #coord.request_stop()\n #return\n\n #img = sess.run(inputs['target_batch'])\n #img = img[0:1,:,:,:]\n #target_name = 'assets/target_img.png'\n #simple_rescale_img(img, target_name)\n #coord.request_stop()\n #return\n\n train_step_fn = model['train_step_fn']\n train_step_kwargs = model['train_step_kwargs']\n train_op = model['train_op']\n last_save_time = time.time()\n count = 0\n while True:\n count += 1\n if count < 0:\n task = 'rgb2sfnorm'\n else:\n task = 'curvature'\n cfg = cfgs[task]\n\n g_losses, should_stop = train_step_fn(sess, model['model'], inputs,\n train_op, 
model['global_step'],\n task,\n return_accuracy=False,\n train_step_kwargs=train_step_kwargs)\n if count % 20 == 0:\n print('task:{}, step{}: g_losses: {}, should_stop: {}'.format(task, count, g_losses, should_stop))\n\n if should_stop:\n break\n cur_time = time.time()\n if cur_time - last_save_time > cfg['checkpoint_save_every_secs']:\n model[ 'saver_op' ].save(sess, cfg[ 'saved_model_path' ],\n global_step=count)\n last_save_time = cur_time\n\n coord.request_stop()\n\ndef train_to_task():\n import general_utils\n from general_utils import RuntimeDeterminedEnviromentVars\n\n tf.logging.set_verbosity(tf.logging.ERROR)\n\n args = parser.parse_args()\n\n source_tasks = args.source_tasks.split()\n target_tasks = args.target_tasks.split()\n\n cfgs = {}\n tasks = []\n for task in source_tasks:\n if task not in list_of_tasks:\n raise ValueError('Task not supported')\n cfgs[task] = generate_cfg(task)\n cfgs[task]['stid'] = 'source'\n tasks.append(task)\n for task in target_tasks:\n if task not in list_of_tasks:\n raise ValueError('Task not supported')\n cfgs[task] = generate_cfg(task)\n cfgs[task]['stid'] = 'target'\n tasks.append(task)\n\n\n general_utils = importlib.reload(general_utils)\n tf.reset_default_graph()\n training_runners = { 'sess': tf.InteractiveSession(), 'coord': tf.train.Coordinator() }\n\n ############## Set Up Inputs ##############\n # tf.logging.set_verbosity( tf.logging.INFO )\n setup_input_fn = utils.setup_input\n inputs = {}\n for task in source_tasks:\n inputs[task] = setup_input_fn( cfgs[task], is_training=True, use_filename_queue=False )\n for task in target_tasks:\n inputs[task] = setup_input_fn( cfgs[task], is_training=True, use_filename_queue=False )\n\n start_time = time.time()\n\n ############## Set Up Model ##############\n model = utils.setup_model_multi_decoders( inputs, cfgs, tasks, is_training=True )\n m = model[ 'model' ]\n cfg = cfgs[tasks[1]]\n model[ 'saver_op' ].restore( training_runners[ 'sess' ], cfg[ 'model_path' ] )\n #print('Loading model from {}'.format(cfgs[tasks[0]]['saved_model_path']))\n #model[ 'saver_op' ].restore( training_runners[ 'sess' ], cfgs[tasks[0]][ 'saved_model_path' ] )\n\n ############## Training Model ##############\n # TODO: Haowen added.\n #cur_time = time.time()\n #threads = []\n #for task in tasks:\n # cfg = cfgs[task]\n # threads = threads + [threading.Thread(target=loading_fn_thread,\n # args=(inputs[task], i, training_runners, cfg))\n # for i in range(cfg['num_read_threads'])]\n\n #threads.append(threading.Thread(target=training_fn,\n # args=(model, inputs, training_runners, cfgs)))\n #for t in threads:\n # t.start()\n #training_runners['coord'].join(threads)\n #print(time.time() - cur_time)\n\n ############## Test ##################\n task = tasks[1]\n cfg = cfgs[task]\n img = load_raw_image_center_crop( args.im_name )\n img = skimage.img_as_float(img)\n scipy.misc.toimage(np.squeeze(img), cmin=0.0, cmax=1.0).save(args.im_name)\n\n # Since we observe that areas with pixel values closes to either 0 or 1 sometimes overflows, we clip pixels value\n low_sat_tasks = 'autoencoder curvature denoise edge2d edge3d \\\n keypoint2d keypoint3d \\\n reshade rgb2depth rgb2mist rgb2sfnorm \\\n segment25d segment2d room_layout'.split()\n if task in low_sat_tasks:\n cfg['input_preprocessing_fn'] = load_ops.resize_rescale_image_low_sat\n\n if task == 'jigsaw' :\n img = cfg[ 'input_preprocessing_fn' ]( img, target=cfg['target_dict'][random.randint(0,99)],\n **cfg['input_preprocessing_fn_kwargs'] )\n else:\n img = cfg[ 
'input_preprocessing_fn' ]( img, **cfg['input_preprocessing_fn_kwargs'] )\n\n #img = img[np.newaxis,:]\n img = [img for i in range(cfg['batch_size'])]\n\n #model[ 'saver_op' ].restore( training_runners[ 'sess' ], cfg[ 'model_path' ] )\n\n predicted, representation = training_runners['sess'].run(\n [ m.decoder_outputs[task], m.encoder_output ], feed_dict={m.input_imgs: img} )\n predicted = predicted[0:1]\n representation = representation[0:1]\n\n if args.store_rep:\n s_name, file_extension = os.path.splitext(args.store_name)\n with open('{}.npy'.format(s_name), 'wb') as fp:\n np.save(fp, np.squeeze(representation))\n\n if args.store_pred:\n s_name, file_extension = os.path.splitext(args.store_name)\n with open('{}_pred.npy'.format(s_name), 'wb') as fp:\n np.save(fp, np.squeeze(predicted))\n\n if task == 'segment2d' or task == 'segment25d':\n segmentation_pca(predicted, args.store_name)\n return\n if task == 'colorization':\n single_img_colorize(predicted, img , args.store_name)\n return\n\n if task == 'curvature':\n curvature_single_image(predicted, args.store_name)\n return\n\n just_rescale = ['autoencoder', 'denoise', 'edge2d',\n 'edge3d', 'keypoint2d', 'keypoint3d',\n 'reshade', 'rgb2sfnorm' ]\n\n if task in just_rescale:\n simple_rescale_img(predicted, args.store_name)\n return\n\n just_clip = ['rgb2depth', 'rgb2mist']\n if task in just_clip:\n depth_single_image(predicted, args.store_name)\n return\n\n if task == 'inpainting_whole':\n inpainting_bbox(predicted, args.store_name)\n return\n\n if task == 'segmentsemantic':\n semseg_single_image( predicted, img, args.store_name)\n return\n\n if task in ['class_1000', 'class_places']:\n synset = get_synset(task)\n classification(predicted, synset, args.store_name)\n return\n\n if task == 'vanishing_point':\n _ = plot_vanishing_point_smoothed(np.squeeze(predicted), (np.squeeze(img) + 1. )/2., args.store_name, [])\n return\n\n if task == 'room_layout':\n mean = np.array([0.006072743318127848, 0.010272365569691076, -3.135909774145468,\n 1.5603802322235532, 5.6228218371102496e-05, -1.5669352793761442,\n 5.622875878174759, 4.082800262277375, 2.7713941642895956])\n std = np.array([0.8669452525283652, 0.687915294956501, 2.080513632043758,\n 0.19627420479282623, 0.014680602791251812, 0.4183827359302299,\n 3.991778013006544, 2.703495278378409, 1.2269185938626304])\n predicted = predicted * std + mean\n plot_room_layout(np.squeeze(predicted), (np.squeeze(img) + 1. )/2., args.store_name, [], cube_only=True)\n return\n\n if task == 'jigsaw':\n predicted = np.argmax(predicted, axis=1)\n perm = cfg[ 'target_dict' ][ predicted[0] ]\n show_jigsaw((np.squeeze(img) + 1. 
)/2., perm, args.store_name)\n        return\n\n    ############## Clean Up ##############\n    #training_runners[ 'coord' ].request_stop()\n    #training_runners[ 'coord' ].join()\n    print(\"Done: {}\".format(args.im_name))  # was 'config_name', which is undefined anywhere in this script\n\n    ############## Reset graph and paths ##############\n    tf.reset_default_graph()\n    training_runners['sess'].close()\n    return\n\nif __name__ == '__main__':\n    train_to_task()\n\n","sub_path":"taskbank/tools/train_img_autoTransfer.py","file_name":"train_img_autoTransfer.py","file_ext":"py","file_size_in_byte":12706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"505640446","text":"# -*- coding: utf-8 -*-\nimport calendar\nfrom decimal import Decimal\nimport json\nimport jwt\nimport time\n\nfrom django.conf import settings\n\nimport fudge\nfrom fudge.inspector import arg\nimport mock\nfrom nose.tools import eq_\nfrom pyquery import PyQuery as pq\nimport waffle.models\n\nfrom addons.models import Addon\nimport amo\nimport amo.tests\nfrom amo.urlresolvers import reverse\nfrom market.models import PreApprovalUser\nfrom paypal import PaypalError\nfrom stats.models import Contribution\nfrom users.models import UserProfile\n\nfrom mkt.inapp_pay.models import (InappPayment, InappConfig, InappPayLog,\n                                  InappImage)\n\n\nclass InappPaymentUtil:\n\n    def make_contrib(self, **contrib_kw):\n        payload = self.payload()\n        uuid_ = '12345'\n        kw = dict(addon_id=self.app.pk, amount=payload['request']['price'],\n                  source='', source_locale='en-US',\n                  currency=payload['request']['currency'],\n                  uuid=uuid_, type=amo.CONTRIB_INAPP_PENDING,\n                  paykey='some-paykey', user=self.user)\n        kw.update(contrib_kw)\n        return Contribution.objects.create(**kw)\n\n    def make_payment(self, contrib=None):\n        app_payment = self.payload()\n        if not contrib:\n            contrib = self.make_contrib()\n        return InappPayment.objects.create(\n            config=self.inapp_config,\n            contribution=contrib,\n            name=app_payment['request']['name'],\n            description=app_payment['request']['description'],\n            app_data=app_payment['request']['productdata'])\n\n    def payload(self, app_id=None, exp=None, iat=None,\n                typ='mozilla/payments/pay/v1', extra=None):\n        if not app_id:\n            app_id = self.app_id\n        if not iat:\n            iat = calendar.timegm(time.gmtime())\n        if not exp:\n            exp = iat + 3600  # expires in 1 hour\n        req = {'price': '0.99',\n               'currency': 'USD',\n               'name': 'My bands latest album',\n               'description': '320kbps MP3 download, DRM free!',\n               'productdata': 'my_product_id=1234'}\n        if extra:\n            req.update(extra)\n        return {\n            'iss': app_id,\n            'aud': settings.INAPP_MARKET_ID,\n            'typ': typ,\n            'exp': exp,\n            'iat': iat,\n            'request': req\n        }\n\n\n@mock.patch.object(settings, 'DEBUG', True)\nclass PaymentTest(InappPaymentUtil, amo.tests.TestCase):\n    fixtures = ['webapps/337141-steamcube', 'base/users']\n\n    @mock.patch.object(settings, 'DEBUG', True)\n    def setUp(self):\n        self.app = self.get_app()\n        cfg = self.inapp_config = InappConfig(addon=self.app,\n                                              status=amo.INAPP_STATUS_ACTIVE)\n        cfg.public_key = self.app_id = InappConfig.generate_public_key()\n        self.app_secret = InappConfig.generate_private_key()\n        cfg.save()\n        cfg.set_private_key(self.app_secret)\n        self.app.paypal_id = 'app-dev-paypal@theapp.com'\n        self.app.save()\n\n    def get_app(self):\n        return Addon.objects.get(pk=337141)\n\n    def request(self, app_secret=None, payload=None, **payload_kw):\n        if not app_secret:\n            app_secret = self.app_secret\n        if not payload:\n            payload = json.dumps(self.payload(**payload_kw))\n        encoded = jwt.encode(payload, app_secret, algorithm='HS256')\n        return 
unicode(encoded) # django always passes unicode\n\n\nclass PaymentViewTest(PaymentTest):\n\n def setUp(self):\n super(PaymentViewTest, self).setUp()\n waffle.models.Switch.objects.create(name='in-app-payments-ui',\n active=True)\n self.user = UserProfile.objects.get(email='regular@mozilla.com')\n assert self.client.login(username='regular@mozilla.com',\n password='password')\n\n\n@mock.patch.object(settings, 'DEBUG', True)\nclass PayFlowTest(PaymentViewTest):\n\n def setUp(self):\n super(PayFlowTest, self).setUp()\n PreApprovalUser.objects.create(user=self.user,\n paypal_key='fantasmic')\n\n def start(self, req=None, extra_request=None):\n if not req:\n payload = self.payload()\n if extra_request:\n payload['request'].update(extra_request)\n req = self.request(payload=json.dumps(payload))\n return self.client.get(reverse('inapp_pay.pay_start'),\n data=dict(req=req))\n\n\n@mock.patch.object(settings, 'DEBUG', True)\n@mock.patch('mkt.inapp_pay.tasks.fetch_product_image')\nclass TestPayStart(PayFlowTest):\n\n def test_missing_pay_request_on_start(self, fetch_prod_im):\n rp = self.client.get(reverse('inapp_pay.pay_start'))\n eq_(rp.status_code, 400)\n\n def test_pay_start(self, fetch_prod_im):\n rp = self.start()\n eq_(rp.status_code, 200)\n assert 'x-frame-options' in rp, \"Must deny with x-frame-options\"\n self.assertTemplateUsed(rp, 'inapp_pay/pay_start.html')\n\n log = InappPayLog.objects.get()\n eq_(log.action, InappPayLog._actions['PAY_START'])\n eq_(log.config.pk, self.inapp_config.pk)\n assert log.session_key, 'Unexpected session_key: %r' % log.session_key\n assert fetch_prod_im.delay.called, 'product image fetched'\n\n def test_not_logged_in(self, fetch_prod_im):\n self.client.logout()\n rp = self.start()\n eq_(rp.status_code, 200)\n self.assertTemplateUsed(rp, 'inapp_pay/login.html')\n\n def test_no_preapproval(self, fetch_prod_im):\n self.user.preapprovaluser.delete()\n rp = self.start()\n eq_(rp.status_code, 200)\n self.assertTemplateUsed(rp, 'inapp_pay/nowallet.html')\n\n def test_empty_preapproval(self, fetch_prod_im):\n self.user.preapprovaluser.update(paypal_key='')\n rp = self.start()\n eq_(rp.status_code, 200)\n self.assertTemplateUsed(rp, 'inapp_pay/nowallet.html')\n\n @fudge.patch('mkt.inapp_pay.models.inapp_cef.log')\n def test_pay_start_error(self, fetch_prod_im, cef):\n self.inapp_config.addon.support_url = 'http://friendlyapp.org/support'\n self.inapp_config.addon.support_email = 'help@friendlyapp.org'\n self.inapp_config.addon.save()\n\n def inspect_msg(msg):\n assert 'RequestVerificationError' in msg, (\n 'CEF log should have exception message')\n return True\n\n cef.expects_call().with_args(arg.any(), 'unknown',\n 'inapp_pay_error',\n arg.passes_test(inspect_msg),\n severity=arg.any())\n\n rp = self.start(req=self.request(app_secret='invalid'))\n eq_(rp.status_code, 200)\n doc = pq(rp.content)\n eq_(doc('h3').text(), 'Payment Error')\n self.assertContains(rp, 'mailto:help@friendlyapp.org')\n self.assertContains(rp, 'friendlyapp.org/support')\n\n log = InappPayLog.objects.get()\n eq_(log.action, InappPayLog._actions['EXCEPTION'])\n eq_(log.app_public_key, self.inapp_config.public_key)\n eq_(log.exception, InappPayLog._exceptions['RequestVerificationError'])\n assert log.session_key, 'Unexpected session_key: %r' % log.session_key\n assert not fetch_prod_im.delay.called, (\n 'product image not fetched on error')\n\n def test_pay_error_no_app_id(self, fetch_prod_im):\n self.inapp_config.addon.support_url = 'http://friendlyapp.org/support'\n 
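# support contact details must NOT leak into the error page when no app id is supplied\n        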
self.inapp_config.addon.support_email = 'help@friendlyapp.org'\n self.inapp_config.addon.save()\n rp = self.start(req='')\n eq_(rp.status_code, 200)\n self.assertNotContains(rp, 'mailto:help@friendlyapp.org')\n self.assertNotContains(rp, 'friendlyapp.org/support')\n\n def test_pay_error_no_support(self, fetch_prod_im):\n self.inapp_config.addon.support_url = None\n self.inapp_config.addon.support_email = None\n self.inapp_config.addon.save()\n rp = self.start(req=self.request(app_secret='invalid'))\n eq_(rp.status_code, 200)\n self.assertNotContains(rp, 'mailto:help@friendlyapp.org')\n self.assertNotContains(rp, 'friendlyapp.org/support')\n\n @mock.patch.object(settings, 'INAPP_VERBOSE_ERRORS', True)\n def test_verbose_error(self, fetch_prod_im):\n rp = self.start(req=self.request(app_secret='invalid'))\n eq_(rp.status_code, 200)\n self.assertContains(rp, 'RequestVerificationError')\n\n\n@mock.patch.object(settings, 'DEBUG', True)\nclass TestPay(PaymentViewTest):\n\n def setUp(self):\n super(TestPay, self).setUp()\n self.complete_url = reverse('inapp_pay.pay_status',\n args=[self.inapp_config.pk, 'complete'])\n self.cancel_url = reverse('inapp_pay.pay_status',\n args=[self.inapp_config.pk, 'cancel'])\n self.netreq = mock.patch('mkt.inapp_pay.tasks.requests')\n self.netreq.start()\n\n def tearDown(self):\n super(TestPay, self).tearDown()\n self.netreq.stop()\n\n def assert_payment_done(self, payload, contrib_type):\n cnt = Contribution.objects.get(addon=self.app)\n eq_(cnt.addon.pk, self.app.pk)\n eq_(cnt.type, contrib_type)\n eq_(cnt.amount, Decimal(payload['request']['price']))\n eq_(cnt.currency, payload['request']['currency'])\n\n pmt = InappPayment.objects.get(contribution=cnt)\n eq_(pmt.config, self.inapp_config)\n eq_(pmt.name, payload['request']['name'])\n eq_(pmt.description, payload['request']['description'])\n eq_(pmt.app_data, payload['request']['productdata'])\n\n def test_missing_pay_request(self):\n rp = self.client.post(reverse('inapp_pay.pay'))\n eq_(rp.status_code, 400)\n\n def test_invalid_pay_request(self):\n rp = self.client.post(reverse('inapp_pay.pay'),\n data=dict(req=self.request(app_id='unknown')))\n eq_(rp.status_code, 200)\n doc = pq(rp.content)\n eq_(doc('h3').text(), 'Payment Error')\n\n @fudge.patch('paypal.get_paykey')\n def test_paykey_exception(self, get_paykey):\n get_paykey.expects_call().raises(PaypalError())\n res = self.client.post(reverse('inapp_pay.pay'),\n data=dict(req=self.request()))\n self.assertContains(res, 'Payment Error')\n eq_(InappPayLog.objects.get().action,\n InappPayLog._actions['PAY_ERROR'])\n\n @mock.patch.object(settings, 'INAPP_VERBOSE_ERRORS', True)\n @fudge.patch('paypal.get_paykey')\n def test_verbose_paypal_error(self, get_paykey):\n get_paykey.expects_call().raises(PaypalError())\n res = self.client.post(reverse('inapp_pay.pay'),\n data=dict(req=self.request()))\n self.assertContains(res, 'PaypalError')\n\n @fudge.patch('paypal.get_paykey')\n def test_no_preauth(self, get_paykey):\n payload = self.payload()\n (get_paykey.expects_call()\n .with_matching_args(addon_id=self.app.pk,\n amount=payload['request']['price'],\n currency=payload['request']['currency'],\n email=self.app.paypal_id)\n .returns(['some-pay-key', '']))\n req = self.request(payload=json.dumps(payload))\n res = self.client.post(reverse('inapp_pay.pay'), dict(req=req))\n assert 'some-pay-key' in res['Location'], (\n 'Unexpected redirect: %s' % res['Location'])\n\n log = InappPayLog.objects.get()\n eq_(log.action, InappPayLog._actions['PAY'])\n eq_(log.config.pk, 
self.inapp_config.pk)\n\n self.assert_payment_done(payload, amo.CONTRIB_INAPP_PENDING)\n\n @fudge.patch('paypal.check_purchase')\n @fudge.patch('paypal.get_paykey')\n @fudge.patch('mkt.inapp_pay.tasks.payment_notify')\n def test_preauth_ok(self, check_purchase, get_paykey, payment_notify):\n payload = self.payload()\n\n get_paykey.expects_call().returns(['some-pay-key', 'COMPLETED'])\n check_purchase.expects_call().returns('COMPLETED')\n payment_notify.expects('delay').with_args(arg.any()) # pay ID to-be\n\n req = self.request(payload=json.dumps(payload))\n self.client.post(reverse('inapp_pay.pay'), dict(req=req))\n\n logs = InappPayLog.objects.all().order_by('created')\n eq_(logs[0].action, InappPayLog._actions['PAY'])\n eq_(logs[1].action, InappPayLog._actions['PAY_COMPLETE'])\n\n @fudge.patch('paypal.check_purchase')\n @fudge.patch('paypal.get_paykey')\n def test_unverified_preauth(self, check_purchase, get_paykey):\n get_paykey.expects_call().returns(['some-pay-key', 'COMPLETED'])\n check_purchase.expects_call().returns('') # unverified preauth\n res = self.client.post(reverse('inapp_pay.pay'),\n dict(req=self.request()))\n assert 'some-pay-key' in res['Location'], (\n 'Unexpected redirect: %s' % res['Location'])\n eq_(Contribution.objects.get().type, amo.CONTRIB_INAPP_PENDING)\n\n @fudge.patch('mkt.inapp_pay.tasks.payment_notify')\n def test_pay_complete(self, notify_app):\n cnt = self.make_contrib()\n payment = self.make_payment(contrib=cnt)\n notify_app.expects('delay').with_args(payment.pk)\n res = self.client.get(self.complete_url, {'uuid': cnt.uuid})\n eq_(res.status_code, 200)\n #self.assertContains(res, 'Payment received')\n cnt = Contribution.objects.get(pk=cnt.pk)\n eq_(cnt.type, amo.CONTRIB_INAPP)\n eq_(InappPayLog.objects.get().action,\n InappPayLog._actions['PAY_COMPLETE'])\n\n @fudge.patch('mkt.inapp_pay.views.inapp_cef.log')\n def test_invalid_contrib_uuid(self, cef):\n cef.expects_call().with_args(arg.any(), self.inapp_config.addon,\n 'inapp_pay_status', arg.any(),\n severity=arg.any())\n res = self.client.get(self.complete_url, {'uuid': 'invalid-uuid'})\n self.assertContains(res, 'Payment Error')\n\n def test_non_ascii_invalid_uuid(self):\n res = self.client.get(self.complete_url, {'uuid': u'Азәрбајҹан'})\n self.assertContains(res, 'Payment Error')\n\n def test_missing_uuid(self):\n res = self.client.get(self.complete_url)\n self.assertContains(res, 'Payment Error')\n\n\n@mock.patch.object(settings, 'DEBUG', True)\n@mock.patch('mkt.inapp_pay.tasks.fetch_product_image')\nclass TestProductImage(PayFlowTest):\n\n def setUp(self):\n super(TestProductImage, self).setUp()\n self.image_url = '/my/image.jpg'\n InappImage.objects.create(config=self.inapp_config,\n image_url=self.image_url,\n valid=True)\n\n def start(self, req=None, extra_request=None):\n if not extra_request:\n extra_request = {'imageURL': self.image_url}\n return super(TestProductImage, self).start(req=req,\n extra_request=extra_request)\n\n def test_show_image(self, fetch_image):\n resp = self.start()\n doc = pq(resp.content)\n eq_(doc('.product-details img').attr('src'),\n self.inapp_config.image_url(self.image_url))\n\n def test_show_default(self, fetch_image):\n InappImage.objects.all().delete()\n resp = self.start()\n doc = pq(resp.content)\n eq_(doc('.product-details img').attr('src'),\n InappImage.default_image_url())\n\n def test_handle_multiple(self, fetch_image):\n InappImage.objects.create(config=self.inapp_config,\n image_url='/some/other.jpg',\n valid=True)\n resp = self.start()\n doc = 
pq(resp.content)\n        eq_(doc('.product-details img').attr('src'),\n            self.inapp_config.image_url(self.image_url))\n","sub_path":"mkt/inapp_pay/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":16246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"335740228","text":"#-*- encoding: utf-8 -*-\nimport sys\nr=sys.stdin.readline\n\nB_r, B_c = map(int, r().split()) # Bessie's coordinates\nD_r, D_c = map(int, r().split()) # Daisy's coordinates\nJ_r, J_c = map(int, r().split()) # John's coordinates\n\ndistance_D = abs(J_r - D_r) + abs(J_c - D_c) # time it takes Daisy to reach John\ndistance_B = abs(J_r - B_r) + abs(J_c - B_c) # time it takes Bessie to reach John\n\ndistance_B -= min(abs(J_r - B_r), abs(J_c - B_c))\n\nprint('bessie' if distance_B < distance_D else 'daisy' if distance_B > distance_D else 'tie')\n","sub_path":"Algorithm/Baekjoon/16431 베이시와 데이지/16431.py","file_name":"16431.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"330155134","text":"import copy\nfrom typing import Dict, Tuple\n\nimport orjson\n\nfrom drepr.models import ResourceType, DRepr, LiteralNode, DataNode, Attr, Path, \\\n    IndexExpr, Sorted, Edge, Resource, ClassNode, RangeAlignment, ResourceData, ResourceDataString\n\n\ndef patch(repr: DRepr, resources: Dict[str, ResourceData]) -> Tuple[DRepr, Dict[str, ResourceData]]:\n    \"\"\"\n    This patch turns classes that have only static properties to have one attribute in order to keep the\n    algorithm implemented in Rust simple and fast\n    \"\"\"\n    patch_classes = []\n    for u in repr.sm.iter_class_nodes():\n        if all(isinstance(repr.sm.nodes[e.target_id], LiteralNode) for e in repr.sm.iter_outgoing_edges(u.node_id)):\n            patch_classes.append(u)\n\n    if len(patch_classes) > 0:\n        repr = copy.deepcopy(repr)\n        resources = {k: resources[k] for k in resources}\n\n        resource_id = '__static_patch_resource__'\n        assert resource_id not in resources\n        resource_data = {}\n\n        existing_attrs = {a.id for a in repr.attrs}\n        for u in patch_classes:\n            replace_edge = None\n            for e in repr.sm.iter_outgoing_edges(u.node_id):\n                if e.label == 'drepr:uri':\n                    replace_edge = e\n                    break\n                replace_edge = e\n            v = repr.sm.nodes[replace_edge.target_id]\n\n            new_attr_id = f'a87k219da_{u.node_id}_{replace_edge.edge_id}_{v.node_id}'\n            assert new_attr_id not in existing_attrs\n            resource_data[new_attr_id] = v.value\n\n            # remove literal node\n            repr.sm.remove_node(replace_edge.target_id)\n            # repr.sm.edges.pop(replace_edge.edge_id)\n\n            # add new attribute and link to it\n            repr.attrs.append(Attr(new_attr_id, resource_id, Path([IndexExpr(new_attr_id)]), [], unique=True, sorted=Sorted.Ascending))\n            v = DataNode(f'dnode:{new_attr_id}', new_attr_id)\n            repr.sm.nodes[v.node_id] = v\n            repr.sm.edges[replace_edge.edge_id] = Edge(replace_edge.edge_id, replace_edge.source_id, v.node_id, replace_edge.label, is_subject=True)\n\n            # add alignment with incoming class, add all attributes since we don't know which attribute will be the subject\n            for e in repr.sm.iter_incoming_edges(u.node_id):\n                if isinstance(repr.sm.nodes[e.source_id], ClassNode):\n                    for ie in repr.sm.iter_outgoing_edges(e.source_id):\n                        if isinstance(repr.sm.nodes[ie.target_id], DataNode):\n                            repr.aligns.append(RangeAlignment(repr.sm.nodes[ie.target_id].attr_id, new_attr_id, []))\n            for e in repr.sm.iter_outgoing_edges(u.node_id):\n                if isinstance(repr.sm.nodes[e.target_id], ClassNode):\n                    for ie in repr.sm.iter_outgoing_edges(e.target_id):\n                        if 
isinstance(repr.sm.nodes[ie.target_id], DataNode):\n                            repr.aligns.append(RangeAlignment(new_attr_id, repr.sm.nodes[ie.target_id].attr_id, []))\n\n        repr.resources.append(Resource(resource_id, ResourceType.JSON))\n        resources[resource_id] = ResourceDataString(orjson.dumps(resource_data))\n\n    return repr, resources\n\n\n","sub_path":"python/drepr/patches/static_class_patch.py","file_name":"static_class_patch.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"466360555","text":"import psycopg2\nfrom sqlalchemy import create_engine\nimport os\nfrom os import environ\nimport secret\n\n# There are two different ways in which Python can connect to the postgres db. One of them is psycopg2 and the other is sqlalchemy. There are minor differences between them.\n\n# This is the psycopg2 method\n\ntry:\n    connection = psycopg2.connect(\n        user = secret.db_user,\n        password = secret.db_password,\n        host = secret.db_host,\n        port = secret.db_port,\n        database = secret.db_database)\n    cursor = connection.cursor()\n    # Print PostgreSQL Connection properties\n    print(\"Connection is successful\")\n    print('PostgreSQL database version:')\n    cursor.execute('SELECT version()')\n    db_version = cursor.fetchone()\n    print(f\"db_version is {db_version}\")\n    cursor.close()\n    connection.close()\n    print('Database connection closed.')\n\nexcept (Exception, psycopg2.Error) as error :\n    print (\"Error while connecting to PostgreSQL\", error)\n\n\n# This is the sqlalchemy method\n\n# user_database_url = environ.get('DATABASE_URL', secret.user_database_url)\n# engine = create_engine(user_database_url)\n# connection = engine.connect()\n# print(\"Connection using sqlalchemy is also successful\")\n# result = connection.execute(\"select * from users\")\n# for row in result:\n#     print(\"users:\", row['email'])\n# connection.close()\n","sub_path":"postgres_connection.py","file_name":"postgres_connection.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"544891415","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 5 01:43:56 2018\n\nPython Package For transverse field ising model (PPFtfim)\n\nM represents the Hamiltonian\n\n\n@author: Wang, Jian\n\"\"\"\n\nimport numpy as np\nfrom pf import pf # the Python Package of Assessing Pfaffian (PPAP)\nimport time # for test \n\n\n\nclass tfim(object):\n    def __init__(self,M): \n        # each M gives a quenched disorder Hamiltonian configuration; \n        # in case of disorder average, this object needs to be constructed multiple times\n        # with different Ms satisfying some distribution.\n        self.M=M # M is an L by L matrix, contains all parameters of a chain\n        self.L=M.shape[0] # L is the length of 1 dimensional Ising chain\n        self.u, self.s, self.vh = np.linalg.svd(M)\n        \n        self.AABBt0=self.aux_matrix_dynamics(0).real\n        \n\n    \n    def aux_matrix_dynamics_block_for_test(self,t):\n        # this is for test purposes\n        A1=np.zeros((self.L, self.L), dtype=complex)\n        A2=A1+0\n        A3=A1+0\n        A4=A1+0\n        for i in range(self.L):\n            for j in range(self.L):\n                A1[i,j]=(i+1)+(j+1)*1j\n                A2[i,j]=(i+1)-(j+1)*1j\n                A3[i,j]=-(i+1)+(j+1)*1j\n                A4[i,j]=-(i+1)-(j+1)*1j\n        \n        for i in range(self.L):\n            for j in range(self.L):\n                A1[i,j]=1+t\n                A2[i,j]=2+t\n                A3[i,j]=3+t\n                A4[i,j]=4+t\n        A12=np.concatenate((A1, A2), axis=1)\n        A34=np.concatenate((A3, A4), axis=1)\n        return np.concatenate((A12,A34),axis=0) # not multiplied by 2 on purpose, see 'Derivation 3'\n    \n    def 
aux_matrix_dynamics(self,t):\n        W=np.diag(np.exp(-2.0*1j*t*self.s))\n        A1=np.matmul(np.matmul(self.vh.transpose(), W), self.vh)\n        A2=np.matmul(np.matmul(self.vh.transpose(), W), self.u.transpose())\n        A3=np.matmul(np.matmul(self.u, W), self.vh)\n        A4=np.matmul(np.matmul(self.u, W), self.u.transpose())\n        A12=np.concatenate((A1, A2), axis=1)\n        A34=np.concatenate((A3, A4), axis=1)\n        return np.concatenate((A12,-A34),axis=0) # not multiplied by 2 on purpose, see 'Derivation 3'\n    \n    def aux_pfaffian_constructor_t(self,i,j,t):\n        # i,j are integers\n        # 0=jnew:\n                    S[inew,jnew]=0\n        S=S-S.transpose()\n        return S\n\n\n    def aux_pfaffian_constructor_t_AABB(self,i,j,t,AABB):\n        S=np.zeros((2*(i+j-1),2*(i+j-1)), dtype=complex) # DEBUG: dtype=complex must not be omitted, otherwise values get cast to their real part\n\n        Mt=AABB[t,:,:]\n        #M0=self.aux_matrix_dynamics(0.0) # NEED TO BE IMPROVED\n        M0=self.AABBt0\n        \n        AtA=Mt[0:self.L,0:self.L]\n        AtB=Mt[0:self.L,self.L:]\n        BtA=Mt[self.L:,0:self.L]\n        BtB=Mt[self.L:,self.L:]\n        \n        AA=M0[0:self.L,0:self.L]\n        AB=M0[0:self.L,self.L:]\n        BA=M0[self.L:,0:self.L]\n        BB=M0[self.L:,self.L:]\n        #print(BB.shape)\n\n        a=2*i-1\n        b=2*(i+j-1)\n        \n        S[0:a:2,0:a:2]=AA[0:i,0:i]\n        S[0:a:2,1:a:2]=AB[0:i,0:(i-1)]\n        S[1:a:2,0:a:2]=BA[0:(i-1),0:i]\n        S[1:a:2,1:a:2]=BB[0:(i-1),0:(i-1)]\n\n        S[a:b:2,a:b:2]=AA[0:j,0:j]\n        S[a:b:2,(a+1):b:2]=AB[0:j,0:(j-1)]\n        S[(a+1):b:2,a:b:2]=BA[0:(j-1),0:j]\n        S[(a+1):b:2,(a+1):b:2]=BB[0:(j-1),0:(j-1)]\n        \n        S[0:a:2,a:b:2]=AtA[0:i,0:j]\n        S[0:a:2,(a+1):b:2]=AtB[0:i,0:(j-1)]\n        S[1:a:2,a:b:2]=BtA[0:(i-1),0:j]\n        S[1:a:2,(a+1):b:2]=BtB[0:(i-1),0:(j-1)]\n        \n        for inew in range(b):\n            for jnew in range(b):\n                if inew>=jnew:\n                    S[inew,jnew]=0\n        S=S-S.transpose()\n        return S\n\n\n    def aux_pfaffian_constructor(self,i,j):\n        S=np.zeros((2*(i+j-1),2*(i+j-1))) \n        M0=self.AABBt0\n        AA=M0[0:self.L,0:self.L]\n        AB=M0[0:self.L,self.L:]\n        BA=M0[self.L:,0:self.L]\n        BB=M0[self.L:,self.L:]\n        a=2*i-1\n        b=2*(i+j-1) \n        S[0:a:2,0:a:2]=AA[0:i,0:i]\n        S[0:a:2,1:a:2]=AB[0:i,0:(i-1)]\n        S[1:a:2,0:a:2]=BA[0:(i-1),0:i]\n        S[1:a:2,1:a:2]=BB[0:(i-1),0:(i-1)]\n        S[a:b:2,a:b:2]=AA[0:j,0:j]\n        S[a:b:2,(a+1):b:2]=AB[0:j,0:(j-1)]\n        S[(a+1):b:2,a:b:2]=BA[0:(j-1),0:j]\n        S[(a+1):b:2,(a+1):b:2]=BB[0:(j-1),0:(j-1)] \n        S[0:a:2,a:b:2]=AA[0:i,0:j]\n        S[0:a:2,(a+1):b:2]=AB[0:i,0:(j-1)]\n        S[1:a:2,a:b:2]=BA[0:(i-1),0:j]\n        S[1:a:2,(a+1):b:2]=BB[0:(i-1),0:(j-1)]\n        \n        for inew in range(b):\n            for jnew in range(b):\n                if inew>=jnew:\n                    S[inew,jnew]=0\n        S=S-S.transpose()\n        return S\n    \n    def correlator_equal_time(self,i,j):\n        S=self.aux_pfaffian_constructor(i,j)\n        return np.real(pf(S))\n    \n    def correlator_dynamics(self,i,t,j):\n        S=self.aux_pfaffian_constructor_t(i,j,t)\n        return pf(S)\n\n    def correlator_dynamics_AABB(self,i,t,j,AABB):\n        S=self.aux_pfaffian_constructor_t_AABB(i,j,t,AABB)\n        return pf(S)\n    \n    def correlator_equal_time_Matrix(self):\n        output=np.zeros((self.L, self.L))\n        for i in range(self.L):\n            for j in range(i,self.L):\n                output[i,j]=self.correlator_equal_time(i+1,j+1)\n                # attention (1): i,j run over all sites; i,j is not the ith or jth site\n                # attention (2): the equal-time correlator is supposed to be real\n        \n        for i in range(self.L):\n            for j in range(i):\n                output[i,j]=output[j,i]\n        return output\n    \n    def correlator_dynamics_sector(self,i,j,dt,tSteps):\n        # return a complex matrix of dimension (tSteps,(i-j+1))\n        # 1<=i None:\n    subprocess.run(['echo', string])\n\n\nasync def get_layout(i3: Connection, e: Event) -> None:\n    root = await i3.get_tree()\n    win = root.find_focused()\n    if win.type == 'con' and win.name:\n        if win.parent.layout.startswith('split'):\n            echo('﩯')\n        elif 
win.parent.layout == 'stacked':\n            echo('类')\n        elif win.parent.layout == 'tabbed':\n            echo('裡')\n        else:\n            echo('')\n\n\nasync def run() -> None:\n    i3 = await Connection().connect()\n    i3.on(Event.TICK, get_layout)\n    i3.on(Event.WINDOW, get_layout)\n    i3.on(Event.WORKSPACE, get_layout)\n    while True:\n        await asyncio.sleep(1)\n\n\nasyncio.get_event_loop().run_until_complete(run())\n","sub_path":"i3/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"521878780","text":"from django.conf.urls import url\nfrom . import views\n\n# NOTE: the named capture groups below lost their identifiers during extraction\n# (angle-bracketed names were stripped); '<num>' is a guessed name and may\n# differ from the original source\nurlpatterns = [\n\turl(r'^$', views.ficha_list, name = 'ficha_list'),\n\turl(r'^crear/$', views.crear_ficha, name = 'crear_ficha'),\n\turl(r'^especialista$', views.especialista, name = 'especialista'),\n\turl(r'^especialista/borrar/(?P<num>\\d+)$', views.borrar_ficha, name = 'borrar_ficha'),\n\turl(r'^ficha/editar/(?P<num>\\d+)/$', views.editar_ficha, name = 'editar_ficha'),\n]","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
+{"seq_id":"639002214","text":"# Copyright 2016 Canonical Ltd\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport mock\n\nfrom nova import context\nfrom nova import exception\nfrom nova.network import model as network_model\nfrom nova import test\nfrom nova.tests.unit import fake_instance\nfrom nova.virt.lxd import vif\n\nGATEWAY = network_model.IP(address='101.168.1.1', type='gateway')\nDNS_BRIDGE = network_model.IP(address='8.8.8.8', type=None)\nSUBNET = network_model.Subnet(\n    cidr='101.168.1.0/24', dns=[DNS_BRIDGE], gateway=GATEWAY,\n    routes=None, dhcp_server='191.168.1.1')\nNETWORK = network_model.Network(\n    id='network-id-xxx-yyy-zzz', bridge='br0', label=None,\n    subnets=[SUBNET], bridge_interface=None, vlan=99, mtu=1000)\nVIF = network_model.VIF(\n    id='0123456789abcdef', address='ca:fe:de:ad:be:ef',\n    network=NETWORK, type=network_model.VIF_TYPE_OVS,\n    devname='tap-012-345-678', ovs_interfaceid='9abc-def-000')\nINSTANCE = fake_instance.fake_instance_obj(\n    context.get_admin_context(), name='test')\n\n\nclass GetVifDevnameTest(test.NoDBTestCase):\n    \"\"\"Tests for get_vif_devname.\"\"\"\n\n    def test_get_vif_devname_devname_exists(self):\n        an_vif = {\n            'id': '0123456789abcdef',\n            'devname': 'oth1',\n        }\n\n        devname = vif.get_vif_devname(an_vif)\n\n        self.assertEqual('oth1', devname)\n\n    def test_get_vif_devname_devname_nonexistent(self):\n        an_vif = {\n            'id': '0123456789abcdef',\n        }\n\n        devname = vif.get_vif_devname(an_vif)\n\n        self.assertEqual('nic0123456789a', devname)\n\n\nclass GetConfigTest(test.NoDBTestCase):\n    \"\"\"Tests for get_config.\"\"\"\n\n    def setUp(self):\n        super(GetConfigTest, self).setUp()\n        self.CONF_patcher = mock.patch('nova.virt.lxd.vif.conf.CONF')\n        self.CONF = self.CONF_patcher.start()\n        self.CONF.firewall_driver = 'nova.virt.firewall.NoopFirewallDriver'\n\n    
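# stop the CONF patcher so later tests see the real settings again\n    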
def tearDown(self):\n super(GetConfigTest, self).tearDown()\n self.CONF_patcher.stop()\n\n def test_get_config_bad_vif_type(self):\n \"\"\"Unsupported vif types raise an exception.\"\"\"\n an_vif = network_model.VIF(\n id='0123456789abcdef', address='ca:fe:de:ad:be:ef',\n network=NETWORK, type='invalid',\n devname='tap-012-345-678', ovs_interfaceid='9abc-def-000')\n\n self.assertRaises(\n exception.NovaException, vif.get_config, an_vif)\n\n def test_get_config_bridge(self):\n expected = {'bridge': 'br0', 'mac_address': 'ca:fe:de:ad:be:ef'}\n an_vif = network_model.VIF(\n id='0123456789abcdef', address='ca:fe:de:ad:be:ef',\n network=NETWORK, type='bridge',\n devname='tap-012-345-678', ovs_interfaceid='9abc-def-000')\n\n config = vif.get_config(an_vif)\n\n self.assertEqual(expected, config)\n\n def test_get_config_ovs_bridge(self):\n expected = {\n 'bridge': 'br0', 'mac_address': 'ca:fe:de:ad:be:ef'}\n an_vif = network_model.VIF(\n id='0123456789abcdef', address='ca:fe:de:ad:be:ef',\n network=NETWORK, type='ovs',\n devname='tap-012-345-678', ovs_interfaceid='9abc-def-000')\n\n config = vif.get_config(an_vif)\n\n self.assertEqual(expected, config)\n\n def test_get_config_ovs_hybrid(self):\n self.CONF.firewall_driver = 'AnFirewallDriver'\n\n expected = {\n 'bridge': 'qbr0123456789a', 'mac_address': 'ca:fe:de:ad:be:ef'}\n an_vif = network_model.VIF(\n id='0123456789abcdef', address='ca:fe:de:ad:be:ef',\n network=NETWORK, type='ovs',\n devname='tap-012-345-678', ovs_interfaceid='9abc-def-000')\n\n config = vif.get_config(an_vif)\n\n self.assertEqual(expected, config)\n\n def test_get_config_tap(self):\n expected = {'mac_address': 'ca:fe:de:ad:be:ef'}\n an_vif = network_model.VIF(\n id='0123456789abcdef', address='ca:fe:de:ad:be:ef',\n network=NETWORK, type='tap',\n devname='tap-012-345-678', ovs_interfaceid='9abc-def-000')\n\n config = vif.get_config(an_vif)\n\n self.assertEqual(expected, config)\n\n\nclass LXDGenericVifDriverTest(test.NoDBTestCase):\n \"\"\"Tests for LXDGenericVifDriver.\"\"\"\n\n def setUp(self):\n super(LXDGenericVifDriverTest, self).setUp()\n self.vif_driver = vif.LXDGenericVifDriver()\n\n @mock.patch('nova.virt.lxd.vif.os_vif')\n def test_plug(self, os_vif):\n self.vif_driver.plug(INSTANCE, VIF)\n\n self.assertEqual(\n 'tap-012-345-678', os_vif.plug.call_args[0][0].vif_name)\n self.assertEqual(\n 'instance-00000001', os_vif.plug.call_args[0][1].name)\n\n @mock.patch('nova.virt.lxd.vif.os_vif')\n def test_unplug(self, os_vif):\n self.vif_driver.unplug(INSTANCE, VIF)\n\n self.assertEqual(\n 'tap-012-345-678', os_vif.unplug.call_args[0][0].vif_name)\n self.assertEqual(\n 'instance-00000001', os_vif.unplug.call_args[0][1].name)\n","sub_path":"nova/tests/unit/virt/lxd/test_vif.py","file_name":"test_vif.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"85663419","text":"\n\"\"\"\nModule Docstring\n\"\"\"\n\n__author__ = \"Enrico Maiorino\"\n__version__ = \"0.1.0\"\n__license__ = \"MIT\"\n\nimport matplotlib.pyplot as plt\nimport argparse\nimport sys\nimport numpy as np\nsys.path.append('../')\nfrom funcs import utils\nfrom funcs.utils import non_nan\nfrom os import listdir\nfrom os.path import join, exists\nimport pandas as pd\n\nparser = argparse.ArgumentParser(description='Plot coexpression boxplots in healthy conditions')\nparser.add_argument('--config', type=str, default=None, help='Configuration file')\nparser.add_argument('--dry', action='store_true', help='Dry 
run')\nparser.add_argument('--no_show', action='store_true', help='Do not show plots')\nparser.add_argument('--out_file', default=None, help='Override output filepath')\nargs = parser.parse_args()\n\nif args.out_file is None:\n out_file = '../gendata/plots/coexpr_healthy_boxplots.pdf'\nelse:\n out_file = args.out_file\n\nif args.config is not None:\n config = utils.read_config(args.config)\n\nboxprops={'linewidth':1,'zorder':3}\nwhiskerprops={'linewidth':1,'zorder':3}\ncapprops={'linewidth':1,'zorder':3}\nmedianprops={'color':'red', 'linewidth':1.5,'zorder':6}\nflierprops={'marker':'o','markeredgecolor':'lightgray','zorder':1}\nlabels = ['Type A', 'Type B', 'FC']\nshort_labels = ['TA', 'TB', 'FC']\nshortest_labels = ['A','B','F']\n\ndef create_labels(groups):\n out_labels = []\n if len(groups) <= 2:\n short_level = 0\n elif len(groups) <= 4:\n short_level = 1\n else:\n short_level = 2\n for g in groups:\n if short_level==0:\n newtriple = list(labels)\n newtriple[1] = utils.trim_text(newtriple[1] + '\\n' + g.replace(' ','\\n',1), 40)\n elif short_level==1:\n newtriple = list(short_labels)\n newtriple[1] = newtriple[1] + '\\n' + g.split(' ')[0] + '...'\n else:\n newtriple = list(shortest_labels)\n newtriple[1] = newtriple[1] + '\\n' + g.split(' ')[0][:3]\n out_labels += newtriple\n return out_labels\n\nalldata = {}\nallgroups = {}\nmaxpvals = {}\nfor i,gseid in enumerate([gseid_ for gseid_ in listdir('../data/expression') if gseid_.startswith('GSE')]):\n maxpvals[gseid] = 0\n alldata[gseid] = {}\n groups = pd.read_csv(join('../data/expression', gseid,'processed/group_labels.tsv'), index_col='group_name', sep='\\t')\n allgroups[gseid] = groups[groups.group_condition=='healthy']\n for j,group in enumerate(allgroups[gseid].index.tolist()):\n alldata[gseid][group] = {}\n alldata[gseid][group]['fc'] = utils.non_nan(np.loadtxt('../gendata/coexpression/{}/seqcorrs_{}_1000.dat'.format(gseid, group)))\n alldata[gseid][group]['rdma'] = utils.non_nan(np.loadtxt('../gendata/coexpression/{}/seqcorrs_{}_rdm_a_1000.dat'.format(gseid, group)))\n alldata[gseid][group]['rdmb'] = utils.non_nan(np.loadtxt('../gendata/coexpression/{}/seqcorrs_{}_rdm_b_1000.dat'.format(gseid, group)))\n pval_a = utils.mwpval(alldata[gseid][group]['fc'],alldata[gseid][group]['rdma'])\n pval_b = utils.mwpval(alldata[gseid][group]['fc'], alldata[gseid][group]['rdmb'])\n maxpvals[gseid] = max([maxpvals[gseid], pval_a, pval_b])\n\n#sorted_gseid = [k for k in sorted(maxpvals, key=maxpvals.get, reverse=False)]\nsorted_gseid = maxpvals.keys()\n\nplt.figure(figsize=[20,10])\nfor i,gseid in enumerate(sorted_gseid):\n plotdata = []\n groups = allgroups[gseid]\n pos = np.arange(1,4) # 1,2,3\n plt.subplot(4, 5, i + 1)\n if len(groups):\n for j,group in enumerate(groups.index.tolist()):\n gpname = groups.loc[group]\n corrvals = alldata[gseid][group]['fc']\n rdmavals = alldata[gseid][group]['rdma']\n rdmbvals = alldata[gseid][group]['rdmb']\n bplot = plt.boxplot([rdmavals, rdmbvals, corrvals], showfliers=False, positions=pos, patch_artist=True,\n widths=config['go_boxplot_width'], medianprops=medianprops, boxprops=boxprops,\n whiskerprops=whiskerprops, capprops=capprops, flierprops=flierprops);\n colors = [config['rdm_a_boxplot_color'], config['rdm_b_boxplot_color'], config['fc_path_boxplot_color']]\n utils.plot_significances_paths([rdmavals, rdmbvals, corrvals], pos, no_fliers=True)\n for patch, color in zip(bplot['boxes'], colors):\n patch.set_facecolor(color)\n\n pos = pos + 3\n if groups.shape[0] > 2:\n plt.xticks(range(1, 3 * 
groups.shape[0] + 1), create_labels(groups.group_label.tolist()))\n else:\n plt.xticks(range(1,3*groups.shape[0]+1), create_labels(groups.group_label.tolist()))\n for lnx in np.arange(3.5,groups.shape[0]*3,3):\n plt.axvline(lnx, linestyle=':', color='lightgray')\n else:\n plt.text(0, 0, 'No control group', fontsize=15, ha='center')\n plt.xlim([-1, 1])\n plt.ylim([-1, 1])\n #for k in range(len(groups)):\n # plt.text(2 + k * 3, -3, (groups.iloc[k].group_label).split(' ')[0], ha='center', va='top')\n plt.title(gseid,fontsize=18)\nplt.tight_layout()\n\nif not args.dry:\n plt.savefig(out_file)\n\nif not args.no_show:\n plt.show()\n\n","sub_path":"src/9_plots/plot_coexpr_healthy_boxplots.py","file_name":"plot_coexpr_healthy_boxplots.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"78449960","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\n\nFLAGS = None\n\n\ndef model(x):\n variables = {}\n y = tf.quantize(x, -2.0, 2.0, tf.quint8, name='ys')\n return y, variables\n\ndef main(args):\n dirname = os.path.dirname(os.path.abspath(__file__))\n exportbase = os.path.join(dirname, \"export\")\n if not os.path.isdir(dirname):\n raise NameError('could not find dir {}'.format(dirname))\n\n # Create the model\n x = tf.placeholder(tf.float32, [None, 784])\n y, variables = model(x)\n\n # Variables initializer\n sess = tf.InteractiveSession()\n tf.global_variables_initializer().run()\n\n # Summary\n summary_writer = tf.summary.FileWriter(os.path.join(dirname, \"summary\"),\n graph=tf.get_default_graph())\n\n\n # Save GraphDef\n pb_path = tf.train.write_graph(sess.graph_def, dirname, \"model.pb\", False)\n print(\" GraphDef saved in file: %s\" % pb_path)\n\n # Save Checkpoints\n if len(variables) > 0:\n saver = tf.train.Saver()\n ckpt_path = saver.save(sess, os.path.join(dirname, \"ckpts\", \"model.ckpt\"))\n print(\" Model saved in file: %s\" % ckpt_path)\n\n # Import data\n batch_xs = (np.random.rand(10, 784) - 0.5) * 4\n batch_xs = batch_xs.astype('float32')\n\n # Run test\n ys = sess.run(y, feed_dict={x: batch_xs})\n\n # Save to npy\n np.save(os.path.join(exportbase, 'batch_xs.npy'), batch_xs)\n np.save(os.path.join(exportbase, 'ys.npy'), ys)\n\n # Save weights to npy\n for k in variables:\n v = variables[k]\n np.save(os.path.join(exportbase, '{}.npy'.format(k)), v.eval())\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n args, unparsed = parser.parse_known_args()\n\n main(args)\n","sub_path":"sandbox/ops/quant-ops/quantize/quantize.py","file_name":"quantize.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"18802211","text":"def BST(n):\n\n c = []\n c = [0 for x in range(n+1)]\n c[0] = c[1] = 1\n for i in range(2, n+1):\n for j in range(i):\n c[i] += c[j]*c[i-j-1]\n\n print(c[i])\n\n\ndef BSTR(n):\n if n == 0 or n == 1:\n return 1\n else:\n nums=0\n for i in range(2,n+1):\n nums+= BSTR(i-1)*BSTR(n-i)\n return nums\n\n \n\n\nif __name__ == \"__main__\":\n print(BSTR(6))\n","sub_path":"Algorithms/DP/TotalBSTwithNKeys.py","file_name":"TotalBSTwithNKeys.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"127087701","text":"import 
datetime\nfrom types import GeneratorType\nfrom multiprocessing import Manager, Process\n\nimport time\n\n\ndef t(l):\n while True:\n for i in l:\n print(i)\n time.sleep(1)\n\n\ndef d(l):\n while True:\n print(l.remove(l[0]))\n print('d')\n time.sleep(2)\n\n\nif __name__ == '__main__':\n with Manager() as m:\n l = m.list(range(10000))\n p = Process(target=t, args=(l,))\n p2 = Process(target=d, args=(l,))\n p.start()\n p2.start()\n p.join()\n p2.join()\n\n\n# def loop() -> GeneratorType:\n# while True:\n# now = datetime.datetime.now()\n# print('now ', now)\n# yield now\n# break\n\n\n# t = loop()\n# for i in t:\n# print(i)\n# print(next(t))\n# print(next(t, None))\n\n\n# def get_linear_section(value, linear_array) -> ((int, int) or None, (int, int) or None):\n# # 获取渐变区间\n# prev_target = None\n# next_target = None\n# for i in linear_array:\n# if i[0] > value:\n# next_target = i\n# break\n# else:\n# prev_target = i\n# else:\n# next_target = None\n# return prev_target, next_target\n\n\n# ar = [(0, 0), (10, 10), (20, 10), (30, 0)]\n# res = get_linear_section(91, ar)\n# print(res)\n\n\n# def _ms_equation_time(v0, prev_timing, interval, linear_section):\n# # 按时间计算方程\n# prev_section, next_section = get_linear_section(\n# prev_timing, linear_section)\n# if prev_section is None:\n# return next_section[1], 0\n# if next_section is None:\n# return prev_section[1], prev_section[1] * interval\n# a = (next_section[1] - prev_section[1]) / \\\n# (next_section[0] - prev_section[0])\n\n# if prev_timing + interval > next_section[0]:\n# t = next_section[0] - prev_timing\n# next_speed = v0 + a * t\n# speed, mileage = _ms_equation_time(\n# next_speed, next_section[0], interval - t, linear_section)\n# return next_speed + speed, (next_speed + v0) * t/2 + mileage\n# else:\n# return v0 + a * interval, v0 * interval + a * pow(interval, 2) / 2\n\n\n# print(_ms_equation_time(9, 9, 30, ar))\n","sub_path":"tests/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"141020877","text":"from Account import Account\n\n\ndata = [\n {'PARTY': 'Justin',\n 'AMOUNT': 599.99,\n 'TOTAL_AMOUNT': 599.99,\n 'FIRST_COST': 199.99,\n 'DATE': '08-02-28',\n 'TYPE': 'ORDER',\n },\n {'PARTY': 'Justin',\n 'AMOUNT': 599.99,\n 'TOTAL_AMOUNT': 599.99,\n 'FIRST_COST': 199.99,\n 'DATE': '08-02-28',\n 'TYPE': 'ORDER',\n },\n {'PARTY': 'Justin',\n 'AMOUNT': 599.99,\n 'TOTAL_AMOUNT': 599.99,\n 'FIRST_COST': 199.99,\n 'DATE': '08-02-28',\n 'TYPE': 'ORDER',\n },\n]\n\n\nclass Controller(object):\n\n def __init__(self):\n self.account = Account('Sebastian Wolf', 1000)\n\n def run(self, data):\n for transaction in data:\n self.account.create_transaction(\n party=transaction['PARTY'],\n amount=transaction['AMOUNT'],\n total_amount=transaction['TOTAL_AMOUNT'],\n date=transaction['DATE'],\n type=transaction['TYPE'],\n first_cost=transaction['FIRST_COST']\n )\n\n for transaction in self.account.transactions:\n print(transaction.party, transaction.amount, transaction.date)\n\n\nif __name__ == '__main__':\n app = Controller()\n app.run(data)\n\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"260069050","text":"# import the necessary packages\nfrom collections import deque\nimport numpy as np\nimport argparse\nimport imutils\nimport cv2\nimport time\nimport numpy\n \nimport pygame\nfrom pygame.locals 
import *\npygame.init()\nWIDTH = 640\nHEIGHT = 480\nwindowSurface = pygame.display.set_mode((WIDTH, HEIGHT), 0,32)\npygame.font.init()\nmyfont = pygame.font.SysFont(\"monospace\", 20)\nplasmalogo = pygame.image.load(\"logoBlackPurpleSmall.jpg\")\n\n\n####################(0,0,0) (190,190,233)#################### 178,210,233\n\n\nradius=0\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-v\", \"--video\",\n\thelp=\"path to the (optional) video file\")\nap.add_argument(\"-b\", \"--buffer\", type=int, default=64,\n\thelp=\"max buffer size\")\nargs = vars(ap.parse_args())\n\n\n# define the lower and upper boundaries of the \"green\"\n# ball in the HSV color space, then initialize the\n# list of tracked points\ntapeHSVLower = (55,80,18) #(29, 86, 6)\ntapeHSVUpper = (90,175,25) #(64, 255, 255)\npts = deque(maxlen=args[\"buffer\"])\n \n# if a video path was not supplied, grab the reference\n# to the webcam\nif not args.get(\"video\", False):\n\tcamera = cv2.VideoCapture(0)\n \n# otherwise, grab a reference to the video file\nelse:\n\tcamera = cv2.VideoCapture(args[\"video\"])\n\n\n\n\n\n# keep looping\nwhile True:\n\t# grab the current frame\n\t(grabbed, frame) = camera.read()\n \n\t# if we are viewing a video and we did not grab a frame,\n\t# then we have reached the end of the video\n\tif args.get(\"video\") and not grabbed:\n\t\tbreak\n \n\t# resize the frame, blur it, and convert it to the HSV\n\t# color space\n\tframe = imutils.resize(frame, width=600)\n\t#hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n\tblurred = cv2.GaussianBlur(frame, (3, 3), 0)\n\thsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)\n \n\t# construct a mask for the retroreflective tape, then perform\n\t# a series of dilations and erosions to remove any small\n\t# blobs left in the mask\n\tmask = cv2.inRange(hsv, tapeHSVLower, tapeHSVUpper)\n\tmask = cv2.erode(mask, None, iterations=2)\n\tmask = cv2.dilate(mask, None, iterations=2) #8 GETS RID OF HOLES\n\t#maskBGR = cv2.cvtColor(mask, cv2.COLOR_HSV2BGR)\n\t#gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)\n\t#mask = cv2.erode(mask, None, iterations=2)\n\t#mask = cv2.dilate(mask, None, iterations=2)\n# find contours in the mask and initialize the current\n\t# (x, y) center of the ball\n\t#cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]\n\t#edged = cv2.Canny(mask, 10, 250)\n\t#kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7,7))\n\t#closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)\n\tmask, contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\ttotal = 0\n\n\tfor c in contours:\n peri = cv2.arcLength(c,True)\n approx = cv2.approxPolyDP(c, 0.02 * peri, True)\n\n if len(approx) == 4:\n cv2.drawContours(frame, [approx], -1, (0,255,0), 2)\n total += 1\n x, y, w, h = cv2.boundingRect(c)\n print (\"w = \" + str(w) + \", h = \" + str(h) + \"Est Dist: \" + str(int((127*2/(w/78)/13))))\n\n\t\n\t#cnt = contours[0]\n\t#x, y, w, h = cv2.boundingRect(cnt)\n\t#cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2)\n\t#cv2.drawContours(frame, contours, -1, (0,255,0), 3)\n\n\n\n\t\t# show the frame to our screen\n\t#cv2.imshow(\"Frame\", frame)\n\t\n\tframe=cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n\tframe=numpy.rot90(frame)\n\timg = pygame.surfarray.make_surface(frame)\n\twindowSurface.blit(img, (0, 0)) #Replace (0, 0) with desired coordinates\n\tpygame.draw.rect(windowSurface, (0,0,0) ,[0,0,640,60],0)\n\twindowSurface.blit(plasmalogo,(0,0))\n\tif radius 
> 5:\n\t\ttextradius=myfont.render(\"Radius: {:.2f} px\".format( radius) ,1,(233,233,233))\n\t\twindowSurface.blit(textradius,(70,10))\n\t\t\n\t\ttextradius=myfont.render(\"Est Dist: \"+str(int((127*4/(radius/78))/25.4)) + \" in.\", 1, (233,233,233))\n\t\twindowSurface.blit(textradius,(70,30))\n\tpygame.display.flip()\n\n\n\tkey = cv2.waitKey(1) & 0xFF\n \n\t# if the 'q' key is pressed, stop the loop\n\tif key == ord(\"q\"):\n\t\tbreak\n\t\n# cleanup the camera and close any open windows\ncamera.release()\ncv2.destroyAllWindows()\npygame.quit()\nsys.exit()\n","sub_path":"python/airship_vision.py","file_name":"airship_vision.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"492663766","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom datetime import datetime, timedelta\nfrom dateutil.relativedelta import relativedelta\nimport time\nfrom openerp import pooler\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nfrom openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare\nimport openerp.addons.decimal_precision as dp\nfrom openerp import netsvc\n\nclass product_product(osv.osv):\n _inherit = 'product.product'\n def dsco_pricelist(self,cr,uid,ids,field_name, arg,context=None):\n res = {}\n for id in ids:\n res.update(self.onchange_pricelist(cr, uid, [id],3, context))\n return res\n \n def p_final(self,cr,uid,ids,field_name, arg,context=None):\n res = {}\n for id in ids:\n res.update(self.product_id_change(cr, uid, [id]))\n return res\n \n _columns = {\n 'default_discount' : fields.function(dsco_pricelist,string='Descuento(%)',type='float', max_length = 5), \n 'price_final' : fields.function(p_final,string='Precio Final',type='float', max_length = 9), \n }\n \n def onchange_pricelist(self,cr, uid, ids,pricelist_id, context=None):\n date_order = time.strftime(DEFAULT_SERVER_DATE_FORMAT)\n value = {}\n res = {}\n descuento = 0.0 \n flag = False\n prd = self.browse(cr,uid,ids[0]) \n categ_id = prd.categ_id.id\n if pricelist_id:\n obj_price_list = self.pool.get('product.pricelist').browse(cr,uid,pricelist_id) \n \n for ver in obj_price_list.version_id:\n if ver.active == True and date_order >= ver.date_start: \n if ver.date_end and date_order <= ver.date_end :\n for desc in ver.items_id: \n if desc.product_id:\n flag = True\n if desc.product_id.id == ids[0]:\n flag = False\n if desc.price_discount < 0:\n descuento = desc.price_discount * -100\n else:\n descuento = 0.00\n break \n if desc.categ_id:\n flag = True\n if desc.categ_id.id == categ_id:\n flag = 
False\n if desc.price_discount < 0:\n descuento = desc.price_discount * -100\n else:\n descuento = 0.00\n break\n \n if flag == True:\n continue\n else:\n if desc.price_discount < 0: \n descuento = desc.price_discount * -100\n else:\n descuento = 0.00\n else:\n for desc in ver.items_id: \n if desc.product_id:\n flag = True\n if desc.product_id.id == ids[0]:\n flag = False\n if desc.price_discount < 0:\n descuento = desc.price_discount * -100\n else:\n descuento = 0.00 \n if desc.categ_id:\n flag = True\n if desc.categ_id.id == categ_id:\n flag = False\n if desc.price_discount < 0:\n descuento = desc.price_discount * -100\n else:\n descuento = 0.00 \n if flag == True:\n continue\n else:\n if desc.price_discount < 0: \n descuento = desc.price_discount * -100\n else :\n descuento = 0.00 \n else:\n res[ids[0]] = descuento\n self.write(cr,uid,ids,{'default_discount': descuento })\n #value.update(self.product_id_change(cr, uid, ids, pricelist_id,list_price, qty=1.0, date_order=False))\n return res \n res[ids[0]] = descuento\n return res\n #value.update(self.product_id_change(cr, uid, ids, pricelist_id,list_price, qty=1.0, date_order=False))\n\n def onchange_discount(self,cr,uid,ids,default_discount,list_price,context=None):\n price_final = list_price\n if default_discount >0 and list_price >0:\n list_price = list_price * (100 - default_discount) / 100 \n return {'value': {'price_final': list_price }}\n return {'value': {'price_final': list_price }}\n \n def product_id_change(self, cr, uid, ids, qty=1.0, date_order=False):\n pricelist = False\n product = ids[0]\n date_order = time.strftime(DEFAULT_SERVER_DATE_FORMAT) \n cr.execute(\"SELECT id FROM product_pricelist where name like 'TARIFA CON IVA'\")\n pricelist = cr.fetchone()\n result= {}\n if pricelist:\n if not pricelist:\n warn_msg = _('You have to select a pricelist or a customer in the sales form !\\n'\n 'Please set one before choosing a product.')\n warning_msgs += _(\"No Pricelist ! 
: \") + warn_msg +\"\\n\\n\"\n else:\n prices = self.pool.get('product.pricelist').price_get_2(cr, uid, [pricelist[0]],product, qty or 1.0,1) \n if prices['price'] > 0: \n result[ids[0]] = prices['price']\n else:\n prd = self.browse(cr,uid,ids[0])\n result[ids[0]] = prd.list_price \n else:\n prd = self.browse(cr,uid,ids[0])\n result[ids[0]] = prd.list_price\n return result\n \nproduct_product()\n\nclass product_pricelist(osv.osv): \n _inherit = \"product.pricelist\" \n def price_get_2(self, cr, uid, ids, prod_id, qty, partner=None, context=None):\n res_multi = self.price_get_multi(cr, uid, pricelist_ids=ids, products_by_qty_by_partner=[(prod_id, qty, partner)], context=context)\n vals = {'item_id' : False}\n \n res = res_multi[prod_id]\n vals['price']= res_multi[prod_id]\n res.update({'item_id': {ids[-1]: res_multi.get('item_id', ids[-1])}}) \n vals['price']= res[ids[0]]\n return vals\n\nproduct_pricelist()\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n\n\n\n","sub_path":"econube_producto_tree/product_product.py","file_name":"product_product.py","file_ext":"py","file_size_in_byte":8390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"472994789","text":"import os\n\nfrom flask_classy import FlaskView, route\nfrom flask import render_template, redirect, url_for, request, jsonify\nfrom flask_login import logout_user, login_required, current_user\n\nfrom app import service, get_runtime_folder\nfrom app.home.forms import SettingsForm\nfrom app.service.forms import ServiceSettingsForm\nfrom app.service.service_settings import ServiceSettings\nfrom .forms import ActivateForm\n\n\n# activate license\ndef activate_service(form: ActivateForm):\n if not form.validate_on_submit():\n return render_template('user/activate.html', form=form)\n\n lic = form.license.data\n service.activate(lic)\n return redirect(url_for('UserView:dashboard'))\n\n\ndef _add_service(method: str):\n server = ServiceSettings()\n form = ServiceSettingsForm(obj=server)\n if method == 'POST' and form.validate_on_submit():\n new_entry = form.make_settings()\n new_entry.save()\n return jsonify(status='ok'), 200\n\n return render_template('service/add.html', form=form)\n\n\ndef edit_service(method: str, server: ServiceSettings):\n form = ServiceSettingsForm(obj=server)\n\n if method == 'POST' and form.validate_on_submit():\n server = form.update_entry(server)\n server.save()\n return jsonify(status='ok'), 200\n\n return render_template('service/edit.html', form=form)\n\n\n# routes\nclass UserView(FlaskView):\n route_base = \"/\"\n\n def __init__(self):\n service_settings = ServiceSettings.objects().first() # FIXME\n if not service_settings:\n service_settings = ServiceSettings()\n service.set_settings(service_settings)\n\n @login_required\n def dashboard(self):\n # services = ServiceSettings.objects()\n # services_choices = []\n # for serv in services:\n # services_choices.append((str(serv.id), serv.name))\n\n streams = service.get_streams()\n front_streams = []\n for stream in streams:\n front_streams.append(stream.to_front())\n serv = service.to_front()\n return render_template('user/dashboard.html', streams=front_streams, service=serv)\n\n @route('/settings', methods=['POST', 'GET'])\n @login_required\n def settings(self):\n form = SettingsForm(obj=current_user.settings)\n\n if request.method == 'POST':\n if form.validate_on_submit():\n form.update_settings(current_user.settings)\n current_user.save()\n return render_template('user/settings.html', 
form=form)\n\n        return render_template('user/settings.html', form=form)\n\n    @login_required\n    def logout(self):\n        logout_user()\n        return redirect(url_for('HomeView:index'))\n\n    @login_required\n    def connect(self):\n        service.connect()\n        return redirect(url_for('UserView:dashboard'))\n\n    @login_required\n    def disconnect(self):\n        service.disconnect()\n        return redirect(url_for('UserView:dashboard'))\n\n    @route('/activate', methods=['POST', 'GET'])\n    @login_required\n    def activate(self):\n        form = ActivateForm()\n        if request.method == 'POST':\n            return activate_service(form)\n\n        return render_template('user/activate.html', form=form)\n\n    @login_required\n    def stop_service(self):\n        service.stop(1)\n        return redirect(url_for('UserView:dashboard'))\n\n    @login_required\n    def get_log_service(self):\n        service.get_log_service()\n        return redirect(url_for('UserView:dashboard'))\n\n    @login_required\n    def view_log_service(self):\n        path = os.path.join(get_runtime_folder(), service.id)\n        try:\n            with open(path, \"r\") as f:\n                content = f.read()\n\n            return content\n        except OSError as e:\n            print('Caught exception OSError : {0}'.format(e))\n            return '''    Not found, please use get log button firstly.    '''\n\n    @login_required\n    def ping_service(self):\n        service.ping()\n        return redirect(url_for('UserView:dashboard'))\n\n    @login_required\n    @route('/add/service', methods=['GET', 'POST'])\n    def add_service(self):\n        return _add_service(request.method)\n\n    @route('/edit/<sid>', methods=['GET', 'POST'])\n    @login_required\n    def edit_service(self, sid):\n        server = ServiceSettings.objects(id=sid).first()\n        if server:\n            return edit_service(request.method, server)\n\n        response = {\"status\": \"failed\"}\n        return jsonify(response), 404\n\n    @route('/service_log/<filename>', methods=['POST'])\n    def service_log(self, filename):\n        # len = request.headers['content-length']\n        new_file_path = os.path.join(get_runtime_folder(), filename)\n        with open(new_file_path, 'wb') as f:\n            data = request.stream.read()\n            f.write(b'    ')\n            f.write(data)\n            f.write(b'
    ')\n f.close()\n return jsonify(status='ok'), 200\n","sub_path":"app/user/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":4926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"33298419","text":"import Ecommerce.shopping\nEcommerce.shopping.calc_shopping()\n\nfrom Ecommerce import shopping\nshopping.calc_shopping()\n\nimport random\nfor i in range(3):\n print(random.random())\n print(random.randint(1,100))\n\nmembers = ['john', 'Simi', 'Raman' , 'Marry' , 'BatMan']\nleader = random.choice(members)\n# print(leader)\n# name = input()\n\nprint (f'Hello {leader}')\n\n\n ","sub_path":"others/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"574898734","text":"#!/usr/bin/env python3\nimport rclpy\nfrom rclpy.node import Node\nfrom geometry_msgs.msg import Twist\nfrom cutomsrv.srv import CustomSrv \nfrom turtlesim.msg import Pose \nimport numpy as np\nimport time\nimport math\nclass my_node(Node):\n def __init__(self,node_name):\n super().__init__(node_name)\n self.obj_pub=self.create_publisher(Twist,\"/turtle1/cmd_vel\",10)\n self.obj_sub=self.create_subscription(Pose,\"/turtle1/pose\",self.call_back_subscription,10)\n self.create_service(CustomSrv,\"CustomSrv\",self.call_back_server)\n self.x=0.0\n self.y=0.0\n self.theta=0.0\n self.flag=False\n self.done=False\n self.out_theta=0.0\n self.out_r=0.0\n self.reqx=0.0\n self.reqy=0.0\n self.stillworking=False\n \n def call_back_server(self,req,rsp):\n if(self.stillworking== False):\n self.reqx=req.x\n self.reqy=req.y\n self.stillworking=True\n self.done=False\n\n\n self.get_logger().info(\"other node \"+\" \"+str(self.reqx)+\" \"+\" \"+str(self.reqy))\n self.get_logger().info(\"my node \"+str((self.theta/np.pi)*180)+\" \"+\" \"+str(self.x)+\" \"+\" \"+str(self.y)+\" \")\n\n if(self.done == True ):\n rsp.ack=True\n self.stillworking=False\n self.flag =False \n return rsp\n\n if(self.flag ==False):\n A = self.reqy - self.y\n B = self.reqx - self.x\n self.out_r = np.sqrt(A**2 + B**2)\n # self.out_theta = (np.arcsin(abs(A)/self.out_r))\n # if A > 0 and B < 0: #rob3 tany + 90\n # self.out_theta+=(np.pi/2)\n # elif A < 0 and B > 0 :# rob3 rab3 + 270\n # self.get_logger().info(\"rob333333333 333\")\n # self.out_theta-=(np.pi)\n # elif A < 0 and B < 0 :# rob3 talet + 180\n # self.out_theta-=(np.pi/2)\n self.out_theta=math.atan2(A,B)\n self.flag =True \n\n rsp.ack=False\n return rsp\n\n\n def call_back_subscription(self,msg):\n self.y=msg.y\n self.x=msg.x\n self.theta=msg.theta\n A = self.reqy - self.y\n B = self.reqx - self.x\n self.out_r_cur = np.sqrt(A**2 + B**2)\n\n if(self.flag==True):\n self.req_for_twist=Twist()\n self.get_logger().info(\"out theta \"+str(self.out_theta*(180/np.pi)) +\" mytheta \"+ str(self.theta*(180/np.pi)))\n self.get_logger().info(\"out R \"+str(self.out_r) +\" my R \"+ str(self.out_r_cur))\n if( abs(self.out_theta - self.theta) > 0.005 ):\n self.req_for_twist.linear.x = 0.0\n self.req_for_twist.angular.z = ((self.out_theta) - (self.theta))*1.5\n self.obj_pub.publish(self.req_for_twist)\n else:\n self.req_for_twist.linear.x = (self.out_r_cur)*0.8\n self.req_for_twist.angular.z = ((self.out_theta) - (self.theta))*0.9\n self.obj_pub.publish(self.req_for_twist)\n if(self.out_r_cur<0.5):\n self.get_logger().info(\"killlll\")\n self.flag =False \n self.done =True \n\n\n \n\ndef main (args=None):\n rclpy.init(args=args)\n 
node1=my_node(\"control_node\")\n\n\n rclpy.spin(node1)\n rclpy.shutdown()\n\nif __name__ == \"__main__\":\n main()","sub_path":"ROS_LABS/ROS_project/projectWs/build/ros2_project/build/lib/ros2_project/control_node.py","file_name":"control_node.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"103595240","text":"from django.urls import path, re_path, include\nfrom .views import main, courses, assignments, quizzes\n\n\nurlpatterns = [\n path('', main.MainView.as_view(), name='main'),\n path('courses', courses.CourseView.as_view(), name='courses'),\n path('joinCourse', courses.joinCourse, name='joinCourse'),\n path('course//update', courses.CourseUpdateView.as_view(), name='updateCourse'),\n path('course//delete', courses.CourseDeleteView.as_view(), name='deleteCourse'),\n path('course//students', assignments.students, name='students'),\n path('course//tasks', assignments.course, name='course'),\n path('course//tasks/', assignments.exam_evaluation, name='exams'),\n path('course//tasks//bubble_sheet', assignments.bubble_sheet, name='bubble_sheet'),\n path('course//tasks//list',assignments.task, name='task'),\n path('course//quizzes', quizzes.home, name='quizzes'),\n path('course//quizzes/create/', quizzes.create, name= 'quizzes-create'),\n path('course//quizzes/vote//', quizzes.vote, name= 'quizzes-vote'),\n path('course//quizzes/results//', quizzes.results, name= 'quizzes-results'),\n]\n","sub_path":"qmain/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"380230838","text":"import pygraphviz\nfrom networkx.drawing import nx_agraph\nimport networkx as nx\nimport sys\nimport json\nimport os\nimport collections\n\n# INPUT FILE: source-CFG.dot\n# OUTPUT FILE: source-edgeCoverageRequirements.txt, source-CFGmapping.json, source-CFGnodeGrpByFunction.txt\n\n\ndef createMappingFromDot(baseDir, dotinput):\n labels=[]\n '''\n creates mapping of CFG node to the line number and code statement\n '''\n\n with open(baseDir+os.path.sep+dotinput,'r') as dotfile:\n for line in dotfile:\n line=line.strip()\n line=line.strip('\\\"];')\n if '// graph-edges' in line:\n break\n if line and line[0]=='v':\n try:\n info=[l.strip() for l in line.split(':') if l.strip()]\n verlab=[l.strip() for l in info[0].split(' ') if l.strip()]\n vertex=verlab[0] #node\n label=verlab[1].split('=')[1].strip().replace('\"','') #lineNumber\n classinfo=info[1] #statement\n labels.append([vertex,label,classinfo])\n except Exception as e:\n info=[l.strip() for l in line.split('=') if l.strip()]\n vertex=[l.strip() for l in info[0].split(' ') if l.strip()][0] #node\n label=verlab[1].split('=')[1].strip().replace('\"','') #lineNumber\n classinfo=info[1]\n labels.append([vertex,0,classinfo]) #statement\n with open(baseDir+os.path.sep+dotinput.split('.')[0]+'mapping.json', 'w') as mappingFile:\n todump=[]\n for label in labels:\n label={label[0]:{'line':label[1],'statement':label[2]}}\n todump.append(label)\n json.dump(todump,mappingFile,indent=2)\n print(mappingFile.name,\" CREATED\")\n # mappingFile.write(label[0]+','+label[1]+','+label[2]+'\\n')\n return labels\n\ndef createNodeGroupsByMethod(allPaths, baseDir, sourceFileName):\n '''\n dumps nodes grouped by the method that they belong to. 
Includes root node as well as child node in a method.\n Need in comparison later.\n '''\n with open(baseDir+os.path.sep+sourceFileName.split('.')[0]+'nodeGrpByFunction.json','w') as f:\n toDump=[]\n for root,childNodes in allPaths.items():\n childNodes.add(root)\n childWithRoot=list(childNodes)\n tempMap = {int(item[1:]):item for item in childWithRoot}\n od = collections.OrderedDict(sorted(tempMap.items()))\n sortedChildNodes = [v for k,v in od.items()]\n # childWithRoot=sorted(list(childNodes))\n label = {root:sortedChildNodes}\n toDump.append(label)\n # allNodesByFunc = str(allPaths).split(\"},\")\n # for grp in allNodesByFunc:\n json.dump(toDump,f)\n # f.write(str(root)+':'+str(childNodes))\n # f.write('\\n')\n print(f.name,\" CREATED\")\n\n# def createEdgePath(roots, baseDir, sourceFileName): #STORES ROOTS FOR LATER, NEED TO MAKE CHANGE IN COMPARATORS\n# with open(baseDir+os.path.sep+sourceFileName.split('.')[0]+'edgepaths.txt','w') as f:\n# for root in roots:\n# # print(root)\n# f.write(str(root)+',')\n# f.write('\\n')\n# print(f.name,\" CREATED\")\n\n\ndef createSimplePaths(roots,leaves,digraphobject):\n '''\n Find all loopless path between roots and leaves\n '''\n allPaths =[]\n for root in roots:\n for leaf in leaves:\n newRoot=\"\"\n for allPath in nx.all_simple_paths(digraphobject,source=root, target=leaf):\n allPaths.append(allPath)\n return allPaths\n\ndef findPotentialLoops(edges, digraphobject,loops):\n '''\n If there is an edge from vLargerNumber to vSmallerNumber, there could be a loop and save that\n '''\n for start, end in edges:\n if int(start[1:])>int(end[1:]) and nx.has_path(digraphobject, end,start):\n loops.append((start,end))\n return loops\n\ndef createSubPathForLoops(loops, digraphobject, subPathForLoops):\n '''\n Find all simple path between loop start node and loop end node\n '''\n for loop in loops:\n for path in nx.all_simple_paths(digraphobject,source=loop[1], target=loop[0]):\n subPathForLoops.append({loop:path})\n return subPathForLoops\n\ndef addPathWithLoops(subPathForLoops, allPaths):\n '''\n Find all loopless path between roots and leaves\n '''\n for loopsToWorkOn in subPathForLoops:\n print(loopsToWorkOn)\n for loopToAdd,pathToAdd in loopsToWorkOn.items():\n toModifyList=[p for p in allPaths if loopToAdd[0] in p]\n for modifyPath in toModifyList:\n nodeindex=modifyPath.index(loopToAdd[0])\n newPath=modifyPath[0:nodeindex+1]+pathToAdd+modifyPath[nodeindex+1:]\n allPaths.append(newPath)\n return allPaths\n\ndef createEdgeRequirements(edges, baseDir, sourceFileName):\n with open(baseDir+os.path.sep+sourceFileName.split('.')[0]+'edgeCoverageRequirements.txt','w') as f:\n for edge in edges:\n newline=\"\"\n for p in edge:\n if(newline!=\"\"):\n newline=newline+\",\"+str(p)\n else:\n newline=str(p)\n f.write(newline)\n f.write('\\n')\n print(f.name,\" CREATED\")\n\ndef main():\n if(len(sys.argv)>1):\n version=sys.argv[-1]\n sourceClass = sys.argv[-2]\n else:\n print('Usage: python3 graphparser ../output/\\nSource without .java')\n return\n dotFileName = sourceClass+'-CFG.dot'\n print(\"CFG GRAPH PARSING STARTED OF...\",sourceClass+\".java\")\n baseDir = os.path.abspath(version)\n\n abspathofDotFile=baseDir+os.path.sep+dotFileName\n try:\n labels=createMappingFromDot(baseDir,dotFileName)\n except OSError as e:\n print(\"No dot file present!\")\n return\n dotobject=nx_agraph.read_dot(abspathofDotFile)\n digraphobject=nx.DiGraph(dotobject)\n\n roots = [k for k,v in digraphobject.in_degree() if v==0]\n leaves = [k for k,v in digraphobject.out_degree() if v==0]\n 
edges = digraphobject.edges()\n #print(edges)\n createEdgeRequirements(edges, baseDir, dotFileName)\n allNodesInAFunc ={}\n for root in roots:\n des = nx.algorithms.dag.descendants(digraphobject,root)\n allNodesInAFunc[root]=des\n createNodeGroupsByMethod(allNodesInAFunc,baseDir,dotFileName)\n\n # print(allNodesInAFunc)\n\n # allPaths = createSimplePaths(roots, leaves, digraphobject)\n #\n # loops=[]\n # loops = findPotentialLoops(edges,digraphobject,loops)\n #\n # subPathForLoops=[]\n # subPathForLoops = createSubPathForLoops(loops,digraphobject,subPathForLoops)\n # print(subPathForLoops,\"---------SUBPATH FOR LOOPS\")\n # # print(allPaths,\"-----------ALLPATH\")\n # allPaths = addPathWithLoops(subPathForLoops, allPaths)\n # print(allPaths)\n # createEdgePath(roots,baseDir, dotFileName)\n\n\nif __name__=='__main__':\n main()\n","sub_path":"ReplicationSupport/PythonScripts/graphparser.py","file_name":"graphparser.py","file_ext":"py","file_size_in_byte":7122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"539728822","text":"#!/usr/bin/env python3\n\nimport sys\n\nif len(sys.argv) != 3:\n print(\"Run me as {} NUMBER FACTOR\".format(sys.argv[0]), file=sys.stderr)\n sys.exit(2)\n\nnum, factor = int(sys.argv[1], base=0), int(sys.argv[2], base=0)\nfactor2 = num // factor\nnum2 = factor * factor2\nif num == num2:\n# print(\"OK\")\n sys.exit(0)\nelse:\n# print(\"NOT OK: {} {}\".format(num, factor))\n sys.exit(1)\n","sub_path":"verify_factor.py","file_name":"verify_factor.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"187800409","text":"#coding: utf-8\r\n__author__ = 'yumere'\r\n\r\nimport MeCab\r\nimport math\r\n\r\nclass vector_search(object):\r\n\tdef __init__(self):\r\n\t\tself.m = MeCab.Tagger('-d /usr/local/lib/mecab/dic/mecab-ko-dic')\r\n\r\n\tdef extract_ngrams(self, query):\r\n\t\tquery = query.encode(\"utf8\")\r\n\t\tmorphems = self.m.parseToNode(query)\r\n\r\n\t\tngrams = dict()\r\n\t\ttmp_mor_list = list()\r\n\r\n\t\twhile morphems:\r\n\t\t\tfeature = morphems.feature.split(\",\")[0]\r\n\r\n\t\t\t# Collect consecutive NNG or NNP\r\n\t\t\tif (feature == \"NNG\" or feature == \"NNP\") and len(tmp_mor_list) < 3:\r\n\t\t\t\ttmp_mor_list.append(morphems.surface)\r\n\r\n\t\t\t# Extract N-grams\r\n\t\t\telif len(tmp_mor_list) > 0:\r\n\t\t\t\tfor i in range(len(tmp_mor_list)):\r\n\t\t\t\t\tfor j in range(i, len(tmp_mor_list)):\r\n\t\t\t\t\t\tngram = \"\".join(tmp_mor_list[i:j+1])\r\n\t\t\t\t\t\tngrams.setdefault(ngram, [0,0,0]) # N-gram, [Frequency, Weight, Ns]\r\n\t\t\t\t\t\tngrams[ngram][0] += 1\r\n\t\t\t\t\t\tngrams[ngram][2] = j-i+1\r\n\r\n\t\t\t\t# Reset collected NNG or NNP list\r\n\t\t\t\ttmp_mor_list = list()\r\n\r\n\t\t\telse:\r\n\t\t\t\ttmp_mor_list = list()\r\n\r\n\t\t\tmorphems = morphems.next\r\n\r\n\t\treturn ngrams\r\n\r\n\tdef make_frequency(self, title=\"\", body=\"\", tags=\"\"):\r\n\t\t# Extract N-grams in each place\r\n\t\ttitle_dict = self.extract_ngrams(title)\r\n\t\tbody_dict = self.extract_ngrams(body)\r\n\t\ttags_dict = self.extract_ngrams(tags)\r\n\r\n\t\tresult_dict = dict()\r\n\r\n\t\t# Get term frequency from title\r\n\t\tfor keyword, info in title_dict.items():\r\n\t\t\tresult_dict.setdefault(keyword, [0,0,info[2]])\r\n\t\t\tresult_dict[keyword][0] += info[0]+10\r\n\r\n\t\t# Get term frequency from body\r\n\t\tfor keyword, info in body_dict.items():\r\n\t\t\tresult_dict.setdefault(keyword, 
[0,0,info[2]])\r\n\t\t\tresult_dict[keyword][0] += info[0]\r\n\r\n\t\t# Get term frequency from tags\r\n\t\tfor keyword, info in tags_dict.items():\r\n\t\t\tresult_dict.setdefault(keyword, [0,0,info[2]])\r\n\t\t\tresult_dict[keyword][0] += info[0]+20\r\n\r\n\t\t# Assign more frequency according to their Ns\r\n\t\tfor keyword, info in result_dict.items():\r\n\t\t\tresult_dict[keyword][0] += info[2]**2\r\n\r\n\t\t# Get rid of item less than 3 frequency\r\n\t\tresult_dict = filter(lambda x:x[1][0]>5, result_dict.items())\r\n\t\treturn result_dict\r\n\r\n\tdef make_weight(self):\r\n\t\tpass\r\n\r\n\tdef consineSim(self, d1_vector, d2_vector):\r\n\t\tterm_list = set(d1_vector.keys()).intersection(set(d2_vector.keys()))\r\n\r\n\t\tnumerator = sum([d1_vector[term] * d2_vector[term] for term in term_list])\r\n\t\tdenominator = math.sqrt(sum([d1_vector[term]**2 for term in d1_vector.keys()])) * math.sqrt(sum([d2_vector[term]**2 for term in d2_vector.keys()]))\r\n\r\n\t\ttry:\r\n\t\t\treturn numerator / float(denominator)\r\n\t\texcept ZeroDivisionError:\r\n\t\t\treturn 0\r\n\r\n\tdef __del__(self):\r\n\t\tpass","sub_path":"vector_space_model/model_class.py","file_name":"model_class.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"357675072","text":"def func(k,a):\r\n if len(a)>=k+1:\r\n for i in range(len(a)-k-1,len(a)):\r\n if a[i]:\r\n return i + 1 + func(k,a[:(i-k)])\r\n for i in range(1,n):\r\n if a[-i]:\r\n cost = len(a)-i+1\r\n for j in range(len(a)-i+k+i, len(a)):\r\n cost += j+1\r\n return cost + func(k,a[:(i-k)])\r\n # else:\r\n # for i in range(0,len(a)):\r\n # if a[i]:\r\n # return i + 1 + func(k,a[:(i-k)])\r\n # for i in range(1,n):\r\n # if a[-i]:\r\n # cost = len(a)-i+1\r\n # for j in range(len(a)-i+k+i, len(a)):\r\n # cost += j+1\r\n # return cost + func(k,a[:(i-k)])\r\n \r\n\r\n\r\nn,k = input().split(' ')\r\nn = int(n)\r\nk = int(k)\r\ns = input()\r\na = []\r\nis_connected = []\r\nfor i in range(0,n):\r\n if s[i] == '0':\r\n a.append(False)\r\n else:\r\n a.append(True)\r\n is_connected.append(False)\r\n\r\nprint(func(k,a))","sub_path":"Olimp/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"590177335","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('replays', '0068_auto_20150316_1940'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Tag',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.TextField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='modification',\n name='tags',\n field=models.ManyToManyField(to='replays.Tag', null=True, blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='replay',\n name='upload_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 3, 16, 19, 43, 26, 812000), null=True, blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='textpost',\n name='upload_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 3, 16, 19, 43, 26, 813000), null=True, blank=True),\n preserve_default=True,\n ),\n 
]\n","sub_path":"replays/migrations/0069_auto_20150316_1943.py","file_name":"0069_auto_20150316_1943.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"209873719","text":"# simu - Robot simulation. {{{\n#\n# Copyright (C) 2009 Nicolas Schodet\n#\n# APBTeam:\n# Web: http://apbteam.org/\n# Email: team AT apbteam DOT org\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n# }}}\n\"\"\"Simulation interface.\"\"\"\nfrom simu.inter.drawable import *\nfrom Tkinter import *\n\nclass TableView (DrawableCanvas):\n \"\"\"This class handle the view of the table and every items inside it.\"\"\"\n\n TABLE_WIDTH = 3000\n TABLE_HEIGHT = 2100\n MARGIN = 150\n\n def __init__ (self, master = None,\n width = TABLE_WIDTH, height = TABLE_HEIGHT):\n DrawableCanvas.__init__ (self,\n width + 2 * self.MARGIN, height + 2 * self.MARGIN,\n -width / 2, -height / 2,\n master, borderwidth = 1, relief = 'sunken',\n background = 'white')\n\nclass ActuatorView (DrawableCanvas):\n \"\"\"This class handle the view of the actuators inside the robot.\"\"\"\n\n UNIT = 120\n\n def __init__ (self, master = None):\n DrawableCanvas.__init__ (self, 1, 1, 0, 0, master,\n borderwidth = 1, relief = 'sunken', background = 'white')\n self.configure (width = self.UNIT, height = self.UNIT)\n self.size = 0.0\n\n def add_view (self, width = 1.0, height = 1.0):\n \"\"\"Return a drawable suitable for an actuator view.\"\"\"\n ratio = float (height) / float (width)\n self.size += ratio\n self.resize (1, self.size, 0, self.size / 2)\n self.configure (width = self.UNIT, height = self.UNIT * self.size)\n d = Drawable (self)\n d.trans_translate ((0, - self.size + ratio / 2))\n d.trans_scale (1.0 / width)\n return d\n\nclass Inter (Frame):\n \"\"\"Robot simulation interface.\"\"\"\n\n def __init__ (self, master = None):\n Frame.__init__ (self, master)\n self.pack (expand = True, fill = 'both')\n self.create_widgets ()\n\n def create_widgets (self):\n # Main layout.\n self.right_frame = Frame (self)\n self.right_frame.pack (side = 'right', fill = 'y')\n self.quit_button = Button (self.right_frame, text = 'Quit',\n command = self.quit)\n self.quit_button.pack (side = 'top')\n # Actuator view.\n self.actuator_view = ActuatorView (self.right_frame)\n self.actuator_view.pack (side = 'bottom', fill = 'x')\n # Sensor frame.\n self.sensor_frame = Frame (self.right_frame, borderwidth = 1,\n relief = 'sunken')\n self.sensor_frame.pack (side = 'bottom', fill = 'x')\n # Table view.\n self.table_view = TableView (self)\n self.table_view.pack (expand = True, fill = 'both')\n\n def update (self, *args):\n self.table_view.update ()\n self.actuator_view.update ()\n\nif __name__ == '__main__':\n class TestTable (Drawable):\n def __init__ (self, onto, w, h):\n Drawable.__init__ (self, onto)\n self.w, self.h = w, h\n 
def draw (self):\n self.reset ()\n w, h = self.w, self.h\n self.draw_rectangle ((0, 0), (w, h), fill = 'blue')\n Drawable.draw (self)\n class TestRectangle (Drawable):\n def __init__ (self, onto, w, h, c1, c2):\n Drawable.__init__ (self, onto)\n self.w, self.h = 0.9 * w, 0.9 * h\n self.c1, self.c2 = c1, c2\n def draw (self):\n self.reset ()\n w, h = self.w, self.h\n self.draw_rectangle ((-w/2, -h/2), (w/2, h/2), fill = self.c1)\n self.draw_rectangle ((0, 0), (w/2, h/2), fill = self.c2)\n Drawable.draw (self)\n class TestSensor:\n def __init__ (self, master):\n self.button = Checkbutton (master, text = 'Sensor',\n indicatoron = False)\n self.button.pack (side = 'top')\n app = Inter ()\n TestTable (app.table_view, 3000, 2100)\n TestRectangle (app.actuator_view.add_view (1, 1), 1, 1, 'red', 'green')\n TestRectangle (app.actuator_view.add_view (2, 1), 2, 1, 'green', 'blue')\n TestRectangle (app.actuator_view.add_view (1, 2), 1, 2, 'blue', 'red')\n TestSensor (app.sensor_frame)\n app.mainloop ()\n","sub_path":"host/simu/inter/inter.py","file_name":"inter.py","file_ext":"py","file_size_in_byte":4786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"201455598","text":"import os\r\nfrom scipy import spatial\r\nimport numpy as np\r\nimport gensim\r\nimport nltk\r\nfrom keras.models import load_model\r\n\r\nmod = load_model('LSTM500.h5')\r\nmod = gensim.models.KeyedVectors.load_word2vec_format(\"GoogleNews-vectors-negative300.bin\",binary=True)\r\n\r\nwhile(True):\r\n x=str(input(\"Enter the message:\"));\r\n sentend=np.ones((300,),dtype=np.float32) \r\n\r\n sent=nltk.word_tokenize(x.lower())\r\n sentvec = [mod[w] for w in sent if w in mod.wv.vocab]\r\n\r\n sentvec[14:]=[]\r\n sentvec.append(sentend)\r\n if len(sentvec)<20:\r\n for i in range(20-len(sentvec)):\r\n sentvec.append(sentend) \r\n sentvec=np.array([sentvec])\r\n \r\n predictions = mod.predict(sentvec)\r\n outputlist=[mod.most_similar([predictions[0][i]])[0][0] for i in range(20)]\r\n output=' '.join(outputlist)\r\n print(output)\r\n","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"207351744","text":"from myThread import MyThread\nfrom time import ctime, sleep\n\ndef fib(x): #斐波那契\n sleep(0.005)\n if x < 2: return 1\n return fib(x-1)+fib(x-2)\n\ndef fac(x):#阶乘\n sleep(0.1)\n if x < 2 : return 1\n return x*fac(x-1)\n\ndef sum_(x):#累加\n sleep(0.2)\n if x < 2 : return 1\n return x + sum_(x-1)\n\nfuncs = [fib, fac, sum_]\nx = 12\n\ndef main():\n funcindex = range(len(funcs))\n\n print('**** SINGLE THREAD')\n for i in funcindex:\n print('starting', funcs[i].__name__, 'at:', ctime())\n print(funcs[i](x))\n print(funcs[i].__name__, 'finished at:', ctime())\n \n print('\\n ***** MULTIPLE THREADS')\n threads = []\n for i in funcindex:\n t = MyThread(funcs[i], (x,), funcs[i].__name__)\n threads.append(t)\n \n for i in funcindex:\n threads[i].start()\n \n for i in funcindex:\n threads[i].join()\n print(threads[i].getResult())\n \n print('all Done')\n\nif __name__ == '__main__':\n main()","sub_path":"threadings/sleep2.py","file_name":"sleep2.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"479338031","text":"import os\nimport re\nimport pickle as cPickle\nimport copy\n\nimport numpy\nimport torch\nimport nltk\nfrom nltk.corpus import ptb\n\nword_tags = ['CC', 
'CD', 'DT', 'EX', 'FW', 'IN', 'JJ', 'JJR', 'JJS', 'LS', 'MD', 'NN', 'NNS', 'NNP', 'NNPS', 'PDT',\n 'POS', 'PRP', 'PRP$', 'RB', 'RBR', 'RBS', 'RP', 'SYM', 'TO', 'UH', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ',\n 'WDT', 'WP', 'WP$', 'WRB']\ncurrency_tags_words = ['#', '$', 'C$', 'A$']\nellipsis = ['*', '*?*', '0', '*T*', '*ICH*', '*U*', '*RNR*', '*EXP*', '*PPA*', '*NOT*']\npunctuation_tags = ['.', ',', ':', '-LRB-', '-RRB-', '\\'\\'', '``']\npunctuation_words = ['.', ',', ':', '-LRB-', '-RRB-', '\\'\\'', '``', '--', ';', '-', '?', '!', '...', '-LCB-', '-RCB-']\nfrom nltk.corpus import BracketParseCorpusReader\ncorpus_root = r\"/home/am8676/nltk_data/corpora/PTB/\"\nfile_pattern = r\".*/WSJ_.*\\.MRG\"\nptb = BracketParseCorpusReader(corpus_root, file_pattern)\n\nfile_ids = ptb.fileids()\ntrain_file_ids = []\nvalid_file_ids = []\ntest_file_ids = []\nrest_file_ids = []\nfor id in file_ids:\n if 'WSJ/00/WSJ_0000.MRG' <= id <= 'WSJ/24/WSJ_2499.MRG':\n train_file_ids.append(id)\n elif 'WSJ/22/WSJ_2200.MRG' <= id <= 'WSJ/22/WSJ_2299.MRG':\n valid_file_ids.append(id)\n # elif 'WSJ/23/WSJ_2300.MRG' <= id <= 'WSJ/23/WSJ_2399.MRG':\n # test_file_ids.append(id)\n # elif 'WSJ/00/WSJ_0000.MRG' <= id <= 'WSJ/01/WSJ_0199.MRG' or 'WSJ/24/WSJ_2400.MRG' <= id <= 'WSJ/24/WSJ_2499.MRG':\n # rest_file_ids.append(id)\n#train_file_ids = train_file_ids[:30]\n#valid_file_ids = train_file_ids\nclass Corpus(object):\n def __init__(self, path):\n from pytorch_pretrained_bert import OpenAIGPTTokenizer\n tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n self.train, self.train_sens, self.train_trees = self.tokenize(train_file_ids, tokenizer)\n self.valid, self.valid_sens, self.valid_trees = self.tokenize(valid_file_ids, tokenizer)\n self.test, self.test_sens, self.test_trees = self.tokenize(test_file_ids, tokenizer)\n self.rest, self.rest_sens, self.rest_trees = self.tokenize(rest_file_ids, tokenizer)\n\n def filter_words(self, tree):\n words = []\n for w, tag in tree.pos():\n words.append(w)\n return words\n\n def add_words(self, file_ids):\n # Add words to the dictionary\n for id in file_ids:\n sentences = ptb.parsed_sents(id)\n for sen_tree in sentences:\n words = self.filter_words(sen_tree)\n words = [''] + words + ['']\n\n def tokenize(self, file_ids, gpt_tokenizer):\n\n def tree2list(tree):\n if isinstance(tree, nltk.Tree):\n if tree.label() in word_tags:\n return tree.leaves()[0]\n else:\n root = []\n for child in tree:\n c = tree2list(child)\n if c != []:\n root.append(c)\n if len(root) > 1:\n return root\n elif len(root) == 1:\n return root[0]\n return []\n\n sens_idx = []\n sens = []\n trees = []\n for id in file_ids:\n sentences = ptb.parsed_sents(id)\n for sen_tree in sentences:\n words = self.filter_words(sen_tree)\n words = [''] + words + ['']\n # if len(words) > 50:\n # continue\n ### now GPT tokenization\n words = gpt_tokenizer.tokenize(\" \".join(words))\n ########\n sens.append(words)\n idx = gpt_tokenizer.convert_tokens_to_ids(words)\n sens_idx.append(torch.LongTensor(idx))\n trees.append(tree2list(sen_tree))\n\n return sens_idx, sens, trees\n","sub_path":"data_ptb.py","file_name":"data_ptb.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"369428672","text":"import cv2\r\nimport numpy as np\r\n#import pylab as 
plt\r\n\r\nwidth=320\r\nheight=240\r\n\r\ncap1=cv2.VideoCapture(0)\r\ncap2=cv2.VideoCapture(1)\r\n\t\r\nret=cap1.set(3,width);\r\nret=cap1.set(4,height);\r\nret=cap2.set(3,width);\r\nret=cap2.set(4,height);\r\n\r\nwhile True:\r\n\tret,frame1 = cap1.read()\r\n\tret,frame2 = cap2.read()\r\n\t#gray1 = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)\r\n\t#filtered = cv2.GaussianBlur(gray1,(11,11),0)\r\n\tcv2.imshow(\"camera1\",frame1)\r\n\tcv2.imshow(\"camera2\",frame2)\r\n\t#cv2.imshow(\"gray1\",gray1)\r\n\t#cv2.imshow(\"gaussian\",filtered)\r\n\r\n\tk=cv2.waitKey(1)\r\n\tif k == 27:\r\n\t\tbreak\r\n\t\r\ncap1.release()\r\ncap2.release()\r\ncv2.destroyAllwindows()\r\n","sub_path":"python/ninsiki.py","file_name":"ninsiki.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"434008611","text":"from django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, get_object_or_404, redirect\n\n# Create your views here.\nfrom django.views import View\n\nfrom apps.product.forms import ProductForm\nfrom apps.product.models import Product\n\n\nclass ProductListView(LoginRequiredMixin, View):\n def get(self, request):\n products = Product.objects.filter().order_by('-id')\n search_product = request.GET.get('product_name', \"\")\n if search_product != \"\":\n products = products.filter(name__icontains=search_product)\n if request.is_ajax(): # using ajax search in home page\n product_list = products.values('id', 'name', 'stock', 'code', 'unit_price', 'unit_type')[:5]\n return JsonResponse({\"product_count\": len(product_list), 'products': list(product_list)}, status=200)\n pagination = Paginator(products, 30)\n context = {'products': pagination.get_page(request.GET.get('page'))}\n\n return render(request, 'product/product-list.html', context=context)\n\n\nclass ProductAddView(LoginRequiredMixin, View):\n\n def get(self, request, product_id):\n # edit or create view\n if product_id is not None:\n product = get_object_or_404(Product, pk=product_id)\n else:\n product = None\n context = {'form': ProductForm(instance=product), 'product_id': product_id}\n\n return render(request, 'product/product-add.html', context=context)\n\n def post(self, request, product_id):\n # update or new create view\n if product_id is not None: # product_id will be None as it's set in create url\n product = get_object_or_404(Product, pk=product_id)\n message = 'Product updated successfully'\n else:\n product = None\n message = 'Product created successfully'\n\n product_form = ProductForm(request.POST, instance=product)\n if product_form.is_valid():\n product_form.save()\n messages.success(self.request, message)\n return redirect(\"product:product-list\")\n messages.error(self.request, 'Failed to update product')\n context = {'form': product_form, 'product_id': product_id}\n return render(request, 'product/product-add.html', context=context)\n\n\nclass ProductDetailsView(LoginRequiredMixin, View):\n def get(self, request, product_id):\n product = get_object_or_404(Product, id=product_id)\n return render(request, 'product/product-details.html', {\"product\": product})\n\n\nclass ProductDeleteView(LoginRequiredMixin, View):\n def get(self, request, product_id):\n if request.user.is_superuser:\n product = 
Product.objects.filter(id=product_id).first()\n if product:\n product.delete()\n messages.success(self.request, 'Product deleted successfully')\n else:\n messages.warning(self.request, 'Only Admin can delete a product')\n return redirect(request.META.get('HTTP_REFERER'))\n","sub_path":"apps/product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"134162861","text":"import sys\nimport pathlib\nimport threading\nimport unittest\nimport json\nimport time\nparent_dir = str(pathlib.Path(__file__).parent.parent.resolve())\nsys.path.append(parent_dir)\nfrom common.c_messenger import Sender, Receiver\n\nTEST_PROCESS_ID = 1\n\n\ndef func1(json_mess):\n print('exec func1')\n\n\ndef func2(json_mess):\n print('exec func2')\n\n\ndef func3(json_mess):\n print('exec func3')\n\nreceiver = Receiver(TEST_PROCESS_ID)\nd_callbacks = {'func1': func1, 'func2': func2, 'func3': func3}\nrecv_thread = threading.Thread(target=receiver.open, args=(d_callbacks,))\nrecv_thread.start()\n\nclass TestMessenger(unittest.TestCase):\n def setUp(self):\n print('TestMessenger setup')\n self.sender = Sender()\n \n def test_send_mess_regular(self):\n print(sys._getframe().f_code.co_name + ' start')\n d_message = {'func': 'func2', 'arg': 'test'}\n j_message = json.dumps(d_message)\n result = self.sender.send(TEST_PROCESS_ID, j_message)\n self.assertEqual(True, result)\n \n def test_send_mess_nostring(self):\n print(sys._getframe().f_code.co_name + ' start')\n d_message = {}\n j_message = json.dumps(d_message)\n result = self.sender.send(TEST_PROCESS_ID, j_message)\n self.assertEqual(True, result)\n\n def test_send_mess_invalid_func(self):\n print(sys._getframe().f_code.co_name + ' start')\n d_message = {'func': 'func4', 'arg': 'test'}\n j_message = json.dumps(d_message)\n result = self.sender.send(TEST_PROCESS_ID, j_message)\n self.assertEqual(True, result)\n \n def tearDown(self):\n print('TestMessenger teardown')\n # self.receiver.close()\n # self.recv_thread.join()\n del self.sender\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test_messenger.py","file_name":"test_messenger.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"92832964","text":"from pathlib import Path\n\n# For os.path.expandvars (no pathlib equivalent cf.\n# https://bugs.python.org/issue21301)\nimport os\n\n# Import abstract placeholder class.\nfrom . import PathPlaceHolder\n\n\nclass OutputDir(PathPlaceHolder):\n def __init__(self, path: str) -> None:\n # Expand environment variables within context (eg. 
$SLURM_TMPDIR within\n        # hpc compute node).\n        path = os.path.expandvars(path)\n\n        # Process path using pathlib.\n        path = Path(path)\n\n        # Convert to abs path + resolve symlinks.\n        path = path.resolve(strict=False)\n\n        # Generate folder.\n        if not path.exists():\n            path.mkdir(parents=True, exist_ok=True)\n\n        # Check path type.\n        if not path.is_dir():\n            raise FileNotFoundError(f'path is not a directory: {path}')\n\n        # Call super constructor.\n        path = str(path)\n        super().__init__(path)\n","sub_path":"src/path/OutputDir.py","file_name":"OutputDir.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"438868330","text":"# -*- coding: utf-8 -*-\n# @Time : 2019-05-11 16:28\n# @Author : ssdcxy\n# @Email : 18379190862@163.com\n# @File : File.py\n\nfrom setting import Constant\n\n\ndef store_csv(data, file_path, date_format=\"%Y%m%d\", index=None, encoding=\"utf-8\", header=True):\n    data.to_csv(path_or_buf=file_path, date_format=date_format, index=index, encoding=encoding,\n                header=header)\n\n\nif __name__ == \"__main__\":\n    print()\n","sub_path":"Competition/TIANCHI/Large-scaleCapitalInflowAndOutflowForecast/TimeSeriesAnalyse/handle/File.py","file_name":"File.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"382004314","text":"import time\nimport torch\n\nimport mlflow\nimport mlflow.pytorch\n\nmodel = mlflow.pytorch.load_model(\"models:/sensor_cartpole/1\")\n\nimport gym\nenv = gym.envs.make(\"CartPole-v0\")\n\ndef select_action(model, state):\n    with torch.no_grad():\n        state = torch.tensor(state, dtype=torch.float)\n        action = model(state)\n    return action.argmax().item()\n\n# The nice thing about the CARTPOLE is that it has very nice rendering functionality (if you are on a local environment). 
Let's have a look at an episode\nobs = env.reset()\nenv.render()\ndone = False\nwhile not done:\n action = select_action(model, obs)\n obs, reward, done, _ = env.step(action)\n env.render()\n time.sleep(0.05)\nenv.close() # Close the environment or you will have a lot of render screens soon","sub_path":"reinforcement_learning/sensor_cartpole/Q-learning/sensor_cartpole_run.py","file_name":"sensor_cartpole_run.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"174487889","text":"import Debug\nimport decimal, os\n\nedgePrefix = \"e_\"\nendStmt = \";\"\nplus = \" + \"\nequals = \" = \"\nltOrEqual = \" <= \"\ncomma = \", \"\nnewLine = \"\\n\"\n\nclass LinearProgram ():\n def __init__(self, ipg, traceData, basename, basepath):\n self.__wcet = 0\n outputFilename = basepath + os.sep + basename + \".ilp\"\n with open(outputFilename, 'w') as f:\n self.__writeObjectiveFunction(f, ipg, traceData)\n self.__writeStructuralConstraints(f, ipg)\n self.__writeCountConstraints(f, ipg, traceData)\n self.__writeNonNegativeConstraints(f, ipg)\n self.__solve(ipg, outputFilename)\n \n def getWCET (self):\n return self.__wcet\n \n def __solve(self, ipg, ilpFile):\n from subprocess import Popen, PIPE\n import shlex\n Debug.debugMessage(\"Solving ILP\", 10)\n command = \"lp_solve %s\" % ilpFile \n proc = Popen(command, shell=True, executable=\"/bin/bash\", stdout=PIPE, stderr=PIPE)\n returnCode = proc.wait()\n if returnCode != 0:\n Debug.exitMessage(\"Running '%s' failed\" % command)\n for line in proc.stdout.readlines():\n if line.startswith(\"Value of objective function\"):\n lexemes = shlex.split(line)\n wcet = long(decimal.Decimal(lexemes[-1]))\n self.__wcet = wcet\n \n def __writeObjectiveFunction(self, f, ipg, traceData):\n f.write(\"max: \")\n count = 0\n numOfEdges = ipg.numOfEdges()\n for vertexID in ipg.vertices:\n v = ipg.getVertex(vertexID)\n for succe in v.getSuccessorEdges ():\n count += 1\n edgeID = succe.getEdgeID()\n wcet = traceData.getWCETOfEdge(edgeID)\n f.write(\"%d %s%d\" % (wcet, edgePrefix, edgeID))\n if count < numOfEdges:\n f.write(plus)\n f.write(\"%s%s\" % (endStmt, newLine))\n \n def __writeStructuralConstraints(self, f, ipg):\n f.write(newLine)\n for vertexID in ipg.vertices:\n v = ipg.getVertex(vertexID)\n count = 0\n for prede in v.getPredecessorEdges ():\n count += 1\n f.write(\"%s%d\" % (edgePrefix, prede.getEdgeID()))\n if count < v.numberOfPredecessors():\n f.write(plus)\n \n f.write(equals)\n \n count = 0\n for succe in v.getSuccessorEdges ():\n count += 1\n f.write(\"%s%d\" % (edgePrefix, succe.getEdgeID()))\n if count < v.numberOfSuccessors():\n f.write(plus)\n \n f.write(\"%s%s\" % (endStmt, newLine))\n \n def __writeCountConstraints(self, f, ipg, traceData):\n f.write(newLine)\n for vertexID in ipg.vertices:\n v = ipg.getVertex(vertexID)\n for succe in v.getSuccessorEdges ():\n edgeID = succe.getEdgeID()\n wcec = traceData.getWCECOfEdge(edgeID)\n if vertexID == ipg.getExitID():\n wcec = 1\n f.write(\"%s%d%s%d\" % (edgePrefix, edgeID, ltOrEqual, wcec))\n f.write(\"%s%s\" % (endStmt, newLine))\n \n def __writeNonNegativeConstraints(self, f, ipg):\n f.write(newLine)\n f.write(\"int \")\n count = 0\n numOfEdges = ipg.numOfEdges()\n for vertexID in ipg.vertices:\n v = ipg.getVertex(vertexID)\n for succe in v.getSuccessorEdges ():\n edgeID = succe.getEdgeID()\n count += 1\n f.write(\"%s%d\" % (edgePrefix, edgeID))\n if count < numOfEdges:\n f.write(comma)\n f.write(\"%s%s\" % (endStmt, 
newLine))\n    ","sub_path":"GPUTimingAnalysis/src/WCET.py","file_name":"WCET.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"276591326","text":"\nimport sys, json, random, string\nfrom redfish import RedfishClient\nfrom redfish.rest.v1 import ServerDownOrUnreachableError\n\nfrom get_resource_directory import get_resource_directory\n\n\ndef get_logicalvolume_actions(volumeIds):\n    # change bios properties in the action settings\n\n    params = {\n        \"LogicalDrives\": [],\n        \"DataGuard\": \"Permissive\"\n    }\n    for id in volumeIds:\n        action = {\n            \"Actions\": \"[Action: LogicalDriveDelete]\"\n        }\n        # item['VolumeUniqueIdentifier'] = id\n        # params['LogicalDrives'].append(action)\n    body = dict()\n    body[\"LogicalDrives\"] = dict()\n    body[\"LogicalDrives\"][\"Actions\"] = dict()\n    body[\"LogicalDrives\"][\"Actions\"][\"Action\"] = \"LogicalDriveDelete\"\n    body[\"LogicalDrives\"][\"VolumeUniqueIdentifier\"] = str(volumeIds[0])\n    body[\"DataGuard\"] = \"Permissive\"\n    \n    print(body)\n    return body\n\n\ndef create_logicaldrive_json(StorageTotalSize, locations):\n    # creating logical drive disks with sorting the disks for which raid\n\n    numberOfDisks = len(locations)\n    diskSize = StorageTotalSize / numberOfDisks\n    if len(locations) == 2:\n        totalStorage = diskSize\n        raid_type = 'Raid1'\n    elif len(locations) >= 3:\n        totalStorage = (numberOfDisks - 1) * diskSize\n        raid_type = 'Raid5'\n    elif len(locations) < 2:\n        print(\"ERROR!\")\n        return None\n    body = dict()\n    body['CapacityGiB'] = totalStorage\n    body['Raid'] = raid_type\n    body['StripSizeBytes'] = 262144\n    source = string.digits\n    body['LogicalDriveName'] = 'RADCOM'+''.join((random.choice(source) for i in range(3)))\n    body['DataDrives'] = list()\n    for location in locations:\n        body['DataDrives'].append(location)\n    body['Accelerator'] = 'ControllerCache'\n\n    print(json.dumps(body, indent=4))\n\n# resp = _redfishobj.put(smartstorage_uri_config, body)\n\n\n\ndef createLogicalDrive(_redfishobj):\n    # Creates a new logical drive on the selected controller\n\n    resource_instances = get_resource_directory(_redfishobj)\n    if DISABLE_RESOURCE_DIR or not resource_instances:\n        #if we do not have a resource directory or want to force it's non use to find the\n        #relevant URI\n\n        systems_uri = _redfishobj.root.obj['Systems']['@odata.id']\n        systems_response = _redfishobj.get(systems_uri)\n        systems_members_uri = next(iter(systems_response.obj['Members']))['@odata.id']\n        systems_members_response = _redfishobj.get(systems_members_uri)\n        smart_storage_uri = systems_members_response.obj.Oem.Hpe.Links\\\n            ['SmartStorage']['@odata.id']\n        smart_storage_arraycontrollers_uri = _redfishobj.get(smart_storage_uri).obj.Links \\\n            ['ArrayControllers']['@odata.id']\n        smartstorage_response = _redfishobj.get(smart_storage_arraycontrollers_uri).obj['Members']\n    else:\n        drive_locations = []\n        totalStorage = 0\n        for instance in resource_instances:\n            #Use Resource directory to find the relevant URI\n            if '#HpeSmartStorageArrayController.' 
in instance['@odata.type']:\n smartstorage_uri = instance['@odata.id']\n smartstorage_resp = _redfishobj.get(smartstorage_uri).obj\n sys.stdout.write(\"Logical Drive URIs for Smart Storage Array Controller \" \\\n \"'%s\\' : \\n\" % smartstorage_resp.get('Id'))\n PysicalDrives_uri = smartstorage_resp.Links['PhysicalDrives']['@odata.id']\n Pysicaldrives_resp = _redfishobj.get(PysicalDrives_uri)\n if not Pysicaldrives_resp.dict['Members']:\n sys.stderr.write(\"\\tPysical drives are not available for this controller.\\n\")\n for drives in Pysicaldrives_resp.dict['Members']:\n sys.stdout.write(\"\\t An associated logical drive: %s\\n\" % drives)\n drive_data = _redfishobj.get(drives['@odata.id']).dict\n # drive_ids.append(drive_data[\"VolumeUniqueIdentifier\"])\n print(drive_data[\"Location\"])\n drive_locations.append(str(drive_data[\"Location\"]))\n totalStorage += drive_data[\"CapacityGB\"]\n print(totalStorage)\n print(drive_locations)\n create_logicaldrive_json(totalStorage, drive_locations)\n elif '#SmartStorageConfig.' in instance['@odata.type']:\n smartstorage_uri_config = instance['@odata.id']\n # print(smartstorage_uri_config)\n print(\"uri\")\n\n\n\n\n\ndef change_temporary_boot_order(_redfishobj, boottarget):\n #getting response boot\n\n systems_members_uri = None\n systems_members_response = None\n\n resource_instances = get_resource_directory(_redfishobj)\n if DISABLE_RESOURCE_DIR or not resource_instances:\n systems_uri = _redfishobj.root.obj['Systems']['@odata.id']\n systems_response = _redfishobj.get(systems_uri)\n systems_members_uri = next(iter(systems_response.obj['Members']))['@odata.id']\n systems_members_response = _redfishobj.get(systems_members_uri)\n else:\n for instance in resource_instances:\n if '#ComputerSystem.' in instance['@odata.type']:\n systems_members_uri = instance['@odata.id']\n systems_members_response = _redfishobj.get(systems_members_uri)\n\n if systems_members_response:\n print(\"\\n\\nShowing bios attributes before changes:\\n\\n\")\n print(json.dumps(systems_members_response.dict.get('Boot'), indent=4, sort_keys=True))\n body = {'Boot': {'BootSourceOverrideTarget': boottarget}}\n resp = _redfishobj.patch(systems_members_uri, body)\n\n #If iLO responds with soemthing outside of 200 or 201 then lets check the iLO extended info\n #error message to see what went wrong\n if resp.status == 400:\n try:\n print(json.dumps(resp.obj['error']['@Message.ExtendedInfo'], indent=4, sort_keys=True))\n except Exception as excp:\n sys.stderr.write(\"A response error occurred, unable to access iLO Extended Message \"\\\n \"Info...\")\n elif resp.status != 200:\n sys.stderr.write(\"An http response of \\'%s\\' was returned.\\n\" % resp.status)\n else:\n print(\"\\nSuccess!\\n\")\n print(json.dumps(resp.dict, indent=4, sort_keys=True))\n if systems_members_response:\n print(\"\\n\\nShowing boot override target:\\n\\n\")\n print(json.dumps(systems_members_response.dict.get('Boot'), indent=4, sort_keys=True))\n\n\n\ndef reboot_server(_redfishobj):\n # Reboot a server\n\n systems_members_response = None\n\n resource_instances = get_resource_directory(_redfishobj)\n if DISABLE_RESOURCE_DIR or not resource_instances:\n #if we do not have a resource directory or want to force it's non use to find the\n #relevant URI\n systems_uri = _redfishobj.root.obj['Systems']['@odata.id']\n systems_response = _redfishobj.get(systems_uri)\n systems_uri = next(iter(systems_response.obj['Members']))['@odata.id']\n systems_response = _redfishobj.get(systems_uri)\n else:\n for instance in 
resource_instances:\n #Use Resource directory to find the relevant URI\n if '#ComputerSystem.' in instance['@odata.type']:\n systems_uri = instance['@odata.id']\n systems_response = _redfishobj.get(systems_uri)\n\n if systems_response:\n system_reboot_uri = systems_response.obj['Actions']['#ComputerSystem.Reset']['target']\n body = dict()\n body['Action'] = 'ComputerSystem.Reset'\n body['ResetType'] = \"ForceRestart\"\n resp = _redfishobj.post(system_reboot_uri, body)\n #If iLO responds with soemthing outside of 200 or 201 then lets check the iLO extended info\n #error message to see what went wrong\n if resp.status == 400:\n try:\n print(json.dumps(resp.obj['error']['@Message.ExtendedInfo'], indent=4, \\\n sort_keys=True))\n except Exception as excp:\n sys.stderr.write(\"A response error occurred, unable to access iLO Extended \"\n \"Message Info...\")\n elif resp.status != 200:\n sys.stderr.write(\"An http response of \\'%s\\' was returned.\\n\" % resp.status)\n else:\n print(\"Success!\\n\")\n print(json.dumps(resp.dict, indent=4, sort_keys=True))\n\ndef delete_SmartArray_LogicalDrives(_redfishobj):\n #deleting the logical drives\n\n smartstorage_response = []\n smartarraycontrollers = dict()\n\n resource_instances = get_resource_directory(_redfishobj)\n if DISABLE_RESOURCE_DIR or not resource_instances:\n #if we do not have a resource directory or want to force it's non use to find the\n #relevant URI\n\n systems_uri = _redfishobj.root.obj['Systems']['@odata.id']\n systems_response = _redfishobj.get(systems_uri)\n systems_members_uri = next(iter(systems_response.obj['Members']))['@odata.id']\n systems_members_response = _redfishobj.get(systems_members_uri)\n smart_storage_uri = systems_members_response.obj.Oem.Hpe.Links\\\n ['SmartStorage']['@odata.id']\n smart_storage_config_uri = systems_members_response.obj.Oem.Hpe.Links\\\n ['SmartStorageconfig']['@odata.id']\n print(smart_storage_config_uri)\n smart_storage_arraycontrollers_uri = _redfishobj.get(smart_storage_uri).obj.Links \\\n ['ArrayControllers']['@odata.id']\n smartstorage_response = _redfishobj.get(smart_storage_arraycontrollers_uri).obj['Members']\n else:\n drive_ids = []\n for instance in resource_instances:\n #Use Resource directory to find the relevant URI\n if '#HpeSmartStorageArrayController.' in instance['@odata.type']:\n smartstorage_uri = instance['@odata.id']\n smartstorage_resp = _redfishobj.get(smartstorage_uri).obj\n sys.stdout.write(\"Logical Drive URIs for Smart Storage Array Controller \" \\\n \"'%s\\' : \\n\" % smartstorage_resp.get('Id'))\n logicaldrives_uri = smartstorage_resp.Links['LogicalDrives']['@odata.id']\n logicaldrives_resp = _redfishobj.get(logicaldrives_uri)\n if not logicaldrives_resp.dict['Members']:\n sys.stderr.write(\"\\tLogical drives are not available for this controller.\\n\")\n for drives in logicaldrives_resp.dict['Members']:\n sys.stdout.write(\"\\t An associated logical drive: %s\\n\" % drives)\n drive_data = _redfishobj.get(drives['@odata.id']).dict\n drive_ids.append(drive_data[\"VolumeUniqueIdentifier\"])\n print(drive_data[\"VolumeUniqueIdentifier\"])\n elif '#SmartStorageConfig.' 
in instance['@odata.type']:\n smartstorage_uri_config = instance['@odata.id']\n # print(smartstorage_uri_config)\n print(\"uri\")\n\n\n body = get_logicalvolume_actions(drive_ids)\n print(smartstorage_uri_config)\n print(body)\n # res = _redfishobj.put(\"https://febm-probe3.ilo.ps.radcom.co.il/redfish/v1/Systems/1/SmartStorageConfig/Settings/\", )\n resp = _redfishobj.put(smartstorage_uri_config, body)\n print(resp)\n\n\n\n\ndef get_SmartArray_LogicalDrives(_redfishobj):\n #List all logical drives associated with a smart array controller\n\n smartstorage_response = []\n smartarraycontrollers = dict()\n\n resource_instances = get_resource_directory(_redfishobj)\n if DISABLE_RESOURCE_DIR or not resource_instances:\n #if we do not have a resource directory or want to force it's non use to find the\n #relevant URI\n systems_uri = _redfishobj.root.obj['Systems']['@odata.id']\n systems_response = _redfishobj.get(systems_uri)\n systems_members_uri = next(iter(systems_response.obj['Members']))['@odata.id']\n systems_members_response = _redfishobj.get(systems_members_uri)\n smart_storage_uri = systems_members_response.obj.Oem.Hpe.Links\\\n ['SmartStorage']['@odata.id']\n smart_storage_arraycontrollers_uri = _redfishobj.get(smart_storage_uri).obj.Links\\\n ['ArrayControllers']['@odata.id']\n smartstorage_response = _redfishobj.get(smart_storage_arraycontrollers_uri).obj['Members']\n else:\n for instance in resource_instances:\n #Use Resource directory to find the relevant URI\n if '#HpeSmartStorageArrayController.' in instance['@odata.type']:\n smartstorage_uri = instance['@odata.id']\n smartstorage_resp = _redfishobj.get(smartstorage_uri).obj\n sys.stdout.write(\"Logical Drive URIs for Smart Storage Array Controller \" \\\n \"'%s\\' : \\n\" % smartstorage_resp.get('Id'))\n logicaldrives_uri = smartstorage_resp.Links['LogicalDrives']['@odata.id']\n logicaldrives_resp = _redfishobj.get(logicaldrives_uri)\n if not logicaldrives_resp.dict['Members']:\n sys.stderr.write(\"\\tLogical drives are not available for this controller.\\n\")\n for drives in logicaldrives_resp.dict['Members']:\n sys.stdout.write(\"\\t An associated logical drive: %s\\n\" % drives)\n drive_data = _redfishobj.get(drives['@odata.id']).dict\n print(json.dumps(drive_data, indent=4, sort_keys=True))\n\ndef get_SmartArray_EncryptionSettings(_redfishobj, desired_properties):\n #Obtain Smart Array controller encryption property data\n\n smartstorage_response = []\n smartarraycontrollers = dict()\n\n resource_instances = get_resource_directory(_redfishobj)\n if DISABLE_RESOURCE_DIR or not resource_instances:\n #if we do not have a resource directory or want to force it's non use to find the\n #relevant URI\n systems_uri = _redfishobj.root.obj['Systems']['@odata.id']\n systems_response = _redfishobj.get(systems_uri)\n systems_members_uri = next(iter(systems_response.obj['Members']))['@odata.id']\n systems_members_response = _redfishobj.get(systems_members_uri)\n smart_storage_uri = systems_members_response.obj.Oem.Hpe.Links\\\n ['SmartStorage']['@odata.id']\n smart_storage_arraycontrollers_uri = _redfishobj.get(smart_storage_uri).obj.Links\\\n ['ArrayControllers']['@odata.id']\n smartstorage_response = _redfishobj.get(smart_storage_arraycontrollers_uri).obj['Members']\n else:\n for instance in resource_instances:\n #Use Resource directory to find the relevant URI\n if '#HpeSmartStorageArrayControllerCollection.' 
in instance['@odata.type']:\n smartstorage_uri = instance['@odata.id']\n smartstorage_response = _redfishobj.get(smartstorage_uri).obj['Members']\n break\n\n for controller in smartstorage_response:\n smartarraycontrollers[controller['@odata.id']] = _redfishobj.get(controller['@odata.id']).\\\n obj\n sys.stdout.write(\"Encryption Properties for Smart Storage Array Controller \\'%s\\' : \\n\" \\\n % smartarraycontrollers[controller['@odata.id']].get('Id'))\n for data in smartarraycontrollers[controller['@odata.id']]:\n if data in desired_properties:\n sys.stdout.write(\"\\t %s : %s\\n\" % (data, smartarraycontrollers[controller\\\n ['@odata.id']].get(data)))\n\nif __name__ == \"__main__\":\n # When running on the server locally use the following commented values\n #SYSTEM_URL = None\n #LOGIN_ACCOUNT = None\n #LOGIN_PASSWORD = None\n\n # When running remotely connect using the secured (https://) address,\n # account name, and password to send https requests\n # SYSTEM_URL acceptable examples:\n # \"https://10.0.0.100\"\n # \"https://ilo.hostname\"\n SYSTEM_URL = \"https://febm-probe3.ilo.ps.radcom.co.il\"\n LOGIN_ACCOUNT = \"admin\"\n LOGIN_PASSWORD = \"Radmin1234\"\n\n #list of desired properties related to Smart Array controller encryption\n DESIRED_PROPERTIES = [\"Name\", \"Model\", \"SerialNumber\", \"EncryptionBootPasswordSet\",\\\n \"EncryptionCryptoOfficerPasswordSet\",\\\n \"EncryptionLocalKeyCacheEnabled\", \"EncryptionMixedVolumesEnabled\",\\\n \"EncryptionPhysicalDriveCount\", \"EncryptionRecoveryParamsSet\",\\\n \"EncryptionStandaloneModeEnabled\", \"EncryptionUserPasswordSet\"]\n # flag to force disable resource directory. Resource directory and associated operations are\n # intended for HPE servers.\n DISABLE_RESOURCE_DIR = False\n\n try:\n # Create a Redfish client object\n REDFISHOBJ = RedfishClient(base_url=SYSTEM_URL, username=LOGIN_ACCOUNT, \\\n password=LOGIN_PASSWORD)\n # Login with the Redfish client\n REDFISHOBJ.login()\n except ServerDownOrUnreachableError as excp:\n sys.stderr.write(\"ERROR: server not reachable or does not support RedFish.\\n\")\n sys.exit()\n\n # get_SmartArray_EncryptionSettings(REDFISHOBJ, DESIRED_PROPERTIES)\n # delete_SmartArray_LogicalDrives(REDFISHOBJ)\n createLogicalDrive(REDFISHOBJ)\n print(\"\")\n # reboot_server(REDFISHOBJ)\n\n\n REDFISHOBJ.logout()\n","sub_path":"radcom/raid.py","file_name":"raid.py","file_ext":"py","file_size_in_byte":17874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"269515082","text":"__author__ = 'lopli_000'\n\nimport csv\nimport json\nimport ipdb\n\nwith open('verbs.csv', newline='', encoding='utf-8') as file:\n file.readline()\n reader = csv.reader(file, delimiter=',')\n\n dictionary = dict()\n\n for row in reader:\n word = row[0]\n\n #if word == 'so\\u00f1ar':\n # ipdb.set_trace()\n\n if word not in dictionary:\n dictionary[word] = dict(\n definition=row[1]\n )\n\n word = dictionary[word]\n\n mood = row[3]\n if mood not in word:\n word[mood] = dict()\n\n mood = word[mood]\n tense = row[5]\n\n mood[tense] = dict(\n definition=row[6],\n form1s=row[7] or None,\n form2s=row[8] or None,\n form3s=row[9] or None,\n form1p=row[10] or None,\n form2p=row[11] or None,\n form3p=row[12] or None,\n )\n\nwith open('js/spanish/infinitives.json', 'w', encoding=\"utf-8\") as outfile:\n json.dump(dictionary, outfile)\n\n#0 infinitive\n#1 definition\n#3 mood\n#5 tense\n#6 moodTenseDefinition\n#form1-3s 
form1-3p\n#gerund","sub_path":"parsecsv.py","file_name":"parsecsv.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"431717384","text":"import os\nimport json\nimport time\nimport csv\nfrom datetime import datetime\nfrom logging import getLogger\nfrom tqdm import tqdm\nimport numpy as np\nimport cupy as cp\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\n\nfrom config import Config\nfrom lib import optimizers\nfrom lib.losses import mean_square_error, accuracy\nfrom visualize import Visualize\n\nlogger = getLogger(__name__)\n\ndef start(config: Config):\n return Trainer(config).start()\n\n\nclass Trainer:\n def __init__(self, config: Config):\n self.config = config\n self.model = None\n self.dataset = None\n self.optimizer = None\n self.visualize = Visualize(config)\n \n def start(self):\n self.model = self.load_model()\n self.training()\n\n def training(self):\n tc = self.config.trainer\n self.compile_model()\n self.dataset = self.load_dataset()\n self.fit(x=self.dataset[0][0], y=self.dataset[0][1], epochs=tc.epoch, batch_size=tc.batch_size, validation_data=self.dataset[1], is_visualize=tc.is_visualize, is_accuracy=tc.is_accuracy)\n self.evaluate(self.dataset[2][0], self.dataset[2][1])\n self.save_model()\n self.save_result()\n \n def compile_model(self):\n self.optimizer = optimizers.get(self.config)\n self.loss = mean_square_error\n self.accuracy = accuracy\n \n def fit(self, x=None, y=None, epochs=1, batch_size=1, validation_data=None, is_shuffle=True, is_visualize=False, is_accuracy=False):\n if x is None or y is None:\n raise ValueError(\"There is no fitting data\")\n n_train = len(x)\n self.losses = []\n if validation_data is not None: self.losses_val = []\n if is_accuracy: self.accuracies = []\n if validation_data is not None and is_accuracy: self.accuracies_val = []\n\n t = cp.asnumpy(self.model.t)\n \n logger.info(\"training start\")\n cp.cuda.Stream.null.synchronize()\n start_time = time.time()\n for epoch in range(epochs):\n if is_shuffle:\n x, y = shuffle(x, y)\n error = 0.\n if is_accuracy: accuracy = 0.\n with tqdm(range(0, n_train, batch_size), desc=\"[Epoch: {}]\".format(epoch+1)) as pbar:\n for i, ch in enumerate(pbar):\n x_bs = cp.array(x[i:i+batch_size])\n y_bs = cp.array(y[i:i+batch_size])\n self.model.params = self.optimizer(self.model.params, self.model.gradient(x_bs, y_bs))\n y_pred = self.model(x_bs)\n error += self.loss(y_pred, y_bs) * len(y_bs)\n if is_accuracy: accuracy += self.accuracy(y_pred, y_bs) * len(y_bs)\n error /= n_train\n self.losses.append(error)\n if validation_data is None:\n if not is_accuracy:\n message = \"Epoch:{} Training loss: {:.5f}\".format(epoch+1, error)\n if is_visualize:\n params = tuple(cp.asnumpy(param) for param in self.model.params)\n self.visualize.plot_realtime(t, params, [self.losses])\n else:\n accuracy /= n_train\n self.accuracies.append(accuracy)\n message = \"Epoch:{} Training loss:{:.5f} Training accuracy:{:.5f}\".format(epoch+1, error, accuracy)\n if is_visualize:\n params = tuple(cp.asnumpy(param) for param in self.model.params)\n self.visualize.plot_realtime(t, params, [self.losses], [self.accuracies])\n else:\n error_val = 0.\n if is_accuracy: accuracy_val = 0.\n for i in range(0, len(validation_data[0]), batch_size):\n x_bs = cp.array(validation_data[0][i:i+batch_size])\n y_bs = cp.array(validation_data[1][i:i+batch_size])\n y_pred = self.model(x_bs)\n error_val += self.loss(y_pred, 
y_bs) * len(y_bs)\n if is_accuracy: accuracy_val += self.accuracy(y_pred, y_bs) * len(y_bs)\n error_val /= len(validation_data[0])\n self.losses_val.append(error_val)\n if not is_accuracy:\n message = \"Epoch:{} Training loss:{:.5f} Validation loss:{:.5f}\".format(epoch+1, error, error_val)\n if is_visualize:\n params = tuple(cp.asnumpy(param) for param in self.model.params)\n self.visualize.plot_realtime(t, params, [self.losses, self.losses_val])\n else:\n accuracy /= n_train\n self.accuracies.append(accuracy)\n accuracy_val /= len(validation_data[0])\n self.accuracies_val.append(accuracy_val)\n message = \"Epoch:{} Training loss:{:.5f} Validation loss:{:.5f} Training accuracy:{:.5f} Validation accuracy:{:.5f}\".format(epoch+1, error, error_val, accuracy, accuracy_val)\n if is_visualize:\n params = tuple(cp.asnumpy(param) for param in self.model.params)\n self.visualize.plot_realtime(t, params, [self.losses, self.losses_val], [self.accuracies, self.accuracies_val])\n logger.info(message)\n cp.cuda.Stream.null.synchronize()\n interval = time.time() - start_time\n logger.info(\"end of training\")\n logger.info(\"time: {}\".format(interval))\n logger.info(message)\n \n def evaluate(self, x, y):\n batch_size = self.config.trainer.batch_size\n error = 0\n is_accuracy = self.config.trainer.is_accuracy\n if is_accuracy: accuracy = 0\n for i in range(0, len(x), batch_size):\n x_bs = cp.array(x[i:i+batch_size])\n y_bs = cp.array(y[i:i+batch_size])\n y_pred = self.model(x_bs)\n error += self.loss(y_pred, y_bs) * len(y_bs)\n if is_accuracy: accuracy += self.accuracy(y_pred, y_bs) * len(y_bs)\n error /= len(x)\n if is_accuracy:\n accuracy /= len(x)\n message = \"Test loss:{} Test accuracy:{}\".format(error, accuracy)\n else:\n message = \"Test loss:{}\".format(error)\n logger.info(message)\n \n def load_model(self):\n from model import NeuralODEModel\n model = NeuralODEModel(self.config)\n model.load(self.config.resource.model_path)\n return model\n \n def save_model(self):\n rc = self.config.resource\n model_id = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n model_dir = os.path.join(rc.model_dir, \"model_{}\".format(model_id))\n os.makedirs(model_dir, exist_ok=True)\n config_path = os.path.join(model_dir, \"parameter.conf\")\n model_path = os.path.join(model_dir, \"model.json\")\n self.model.save(config_path, model_path)\n \n def load_dataset(self):\n data_path = self.config.resource.data_path\n if os.path.exists(data_path):\n logger.debug(\"loading data from {}\".format(data_path))\n with open(data_path, \"rt\") as f:\n datasets = json.load(f)\n x = datasets.get(\"Input\")\n y = datasets.get(\"Output\")\n if x is None or y is None:\n raise TypeError(\"Dataset does not exists in {}\".format(data_path))\n if len(x[0]) != self.config.model.dim_in:\n raise ValueError(\"Input dimensions in config and dataset are not equal: {} != {}\".format(self.config.model.dim_in, len(x[0])))\n if len(y[0]) != self.config.model.dim_out:\n raise ValueError(\"Output dimensions in config and dataset are not equal: {} != {}\".format(self.config.model.dim_out, len(y[0])))\n x_train, x_test, y_train, y_test = train_test_split(np.array(x, dtype=np.float32), np.array(y, dtype=np.float32), test_size=self.config.trainer.test_size)\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=self.config.trainer.validation_size)\n train = (x_train, y_train)\n validation = (x_val, y_val)\n test = (x_test, y_test)\n return (train, validation, test)\n else:\n raise FileNotFoundError(\"Dataset file can not 
loaded!\")\n \n def save_result(self):\n rc = self.config.resource\n tc = self.config.trainer\n result_id = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n result_dir = os.path.join(rc.result_dir, \"result_train_{}\".format(result_id))\n os.makedirs(result_dir, exist_ok=True)\n result_path = os.path.join(result_dir, \"learning_curve.csv\")\n e = [i for i in range(1, tc.epoch+1)]\n try:\n self.visualize.save_plot_loss([self.losses, self.losses_val], xlabel=\"Epoch\", ylabel=\"Loss\", title=\"Loss\", save_file=os.path.join(result_dir, \"loss.png\"))\n if tc.is_accuracy:\n result_csv = [e, self.losses, self.losses_val, self.accuracies, self.accuracies_val]\n columns = [\"epoch\", \"loss_train\", \"loss_val\", \"accuracy_train\", \"accuracy_val\"]\n self.visualize.save_plot_accuracy([self.accuracies, self.accuracies_val], xlabel=\"Epoch\", ylabel=\"Accuracy\", title=\"Accuracy\", save_file=os.path.join(result_dir, \"accuracy.png\"))\n else:\n result_csv = [e, self.losses, self.losses_val]\n columns = [\"epoch\", \"loss_train\", \"loss_val\"]\n except AttributeError:\n self.visualize.save_plot_loss([self.losses], xlabel=\"Epoch\", ylabel=\"Loss\", title=\"Loss\", save_file=os.path.join(result_dir, \"loss.png\"))\n if tc.is_accuracy:\n result_csv = [e, self.losses, self.accuracies]\n columns = [\"epoch\", \"loss_train\", \"accuracy_train\"]\n self.visualize.save_plot_accuracy([self.accuracies], xlabel=\"Epoch\", ylabel=\"Accuracy\", title=\"Accuracy\", save_file=os.path.join(result_dir, \"accuracy.png\"))\n else:\n result_csv = [e, self.losses]\n columns = [\"epoch\", \"loss_train\"]\n logger.debug(\"save result to {}\".format(result_path))\n with open(result_path, \"wt\") as f:\n writer = csv.writer(f)\n writer.writerow(columns)\n writer.writerows(list(zip(*result_csv)))\n if tc.is_visualize:\n save_params_path = [os.path.join(result_dir, \"alpha.png\"), os.path.join(result_dir, \"beta.png\"), os.path.join(result_dir, \"gamma.png\"), os.path.join(result_dir, \"params.png\")]\n self.visualize.save_plot_params(cp.asnumpy(self.model.t), tuple(cp.asnumpy(param) for param in self.model.params), save_file=save_params_path)","sub_path":"src/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":10733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"379454525","text":"#!/usr/bin/env python\n\nimport sys\n\nfrom PyQt5.QtCore import QThreadPool\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\n\nfrom reuther_digitization_client.config import load_config\nfrom reuther_digitization_client.database import create_connection\nfrom reuther_digitization_client.items_window import Items\nfrom reuther_digitization_client.projects_window import Projects\n\nfrom reuther_digitization_client.ui.application_window import Ui_ApplicationWindow\n\n\nclass DigitizationClient(QMainWindow, Ui_ApplicationWindow):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setupUi(self)\n self.config = load_config()\n self.threadpool = QThreadPool()\n\n self.projects = Projects(self)\n self.stackedWidget.addWidget(self.projects)\n\n self.items = Items(self)\n self.stackedWidget.addWidget(self.items)\n\n def show_projects(self):\n self.projects.load_projects()\n self.stackedWidget.setCurrentIndex(0)\n\n def load_items(self, project_id, project_dir):\n self.items.load_items(project_id, project_dir)\n self.stackedWidget.setCurrentIndex(1)\n\n\ndef main():\n app = QApplication(sys.argv)\n if not create_connection():\n sys.exit(1)\n form = 
DigitizationClient()\n form.show()\n sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"reuther_digitization_client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"401335766","text":"from __future__ import print_function\nimport collections\nimport os, sys\nimport tensorflow as tf\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Activation, Embedding, Dropout, TimeDistributed\nfrom keras.layers import LSTM\nfrom keras.optimizers import Adam\nfrom keras.utils import to_categorical\nfrom keras.callbacks import ModelCheckpoint\nimport numpy as np\nimport argparse\nimport MeCab\n\n\"\"\"To run this code, you'll need to first download and extract the text dataset\n from here: http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz. Change the\n data_path variable below to your local exraction path\"\"\"\n\nresult_path = 'result'\nrun_opt = 1 # 2にしたかったらコマンドライン引数に2を追加\nargs = sys.argv\nif args[1]:\n run_opt = args[1]\n print(run_opt)\n\ndef remove_values_from_list(the_list, val):\n return [value for value in the_list if value != val]\n\ndef read_words(filename):\n f = open(filename)\n text = f.read()\n tagger = MeCab.Tagger(\"-Owakati\")\n text = tagger.parse(text)\n text = list(map(str, text.split(' ')))\n text = remove_values_from_list(text,\"\\u3000\") \n return text\n\ndef build_vocab(filename):\n \"\"\"\n サンプルテキスト内の単語を頻度順に並べて全てvocabularyに利用\n \"\"\"\n data = read_words(filename)\n\n counter = collections.Counter(data)\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n\n return word_to_id\n\ndef file_to_word_ids(filename, word_to_id):\n \"\"\"\n Blogのサンプルだと全ての単語を利用しているので word in data not ifは起きない \n \"\"\"\n data = read_words(filename)\n return [word_to_id[word] for word in data if word in word_to_id]\n\ndef load_data():\n # get the data paths\n train_path = os.path.join(\"akai_kabutomushi.txt\")\n valid_path = os.path.join(\"akai_heya.txt\")\n test_path = os.path.join(\"ichimaino_kippu.txt\")\n\n # build the complete vocabulary, then convert text data to list of integers\n word_to_id = build_vocab(train_path)\n train_data = file_to_word_ids(train_path, word_to_id)\n valid_data = file_to_word_ids(valid_path, word_to_id)\n test_data = file_to_word_ids(test_path, word_to_id)\n vocabulary = len(word_to_id)\n reversed_dictionary = dict(zip(word_to_id.values(), word_to_id.keys()))\n\n print(\"train_data[:5]\",train_data[:5])\n # print(word_to_id) This is 10000 words dictionary\n print(\"vocabulary:\",vocabulary)\n print(\" \".join([reversed_dictionary[x] for x in train_data[:10]]))\n return train_data, valid_data, test_data, vocabulary, reversed_dictionary\n\ntrain_data, valid_data, test_data, vocabulary, reversed_dictionary = load_data()\n\nclass KerasBatchGenerator(object):\n\n def __init__(self, data, num_steps, batch_size, vocabulary, skip_step=5):\n self.data = data\n self.num_steps = num_steps\n self.batch_size = batch_size\n self.vocabulary = vocabulary\n # this will track the progress of the batches sequentially through the\n # data set - once the data reaches the end of the data set it will reset\n # back to zero\n self.current_idx = 0 # バッチの数\n # skip_step is the number of words which will be skipped before the next\n # batch is skimmed from the data set\n self.skip_step = 
skip_step\n\n def generate(self):\n x = np.zeros((self.batch_size, self.num_steps))\n y = np.zeros((self.batch_size, self.num_steps, self.vocabulary))\n while True:\n for i in range(self.batch_size):\n if self.current_idx + self.num_steps >= len(self.data):\n # reset the index back to the start of the data set\n self.current_idx = 0\n x[i, :] = self.data[self.current_idx:self.current_idx + self.num_steps]\n temp_y = self.data[self.current_idx + 1:self.current_idx + self.num_steps + 1]\n # convert all of temp_y into a one hot representation\n y[i, :, :] = to_categorical(temp_y, num_classes=self.vocabulary)\n self.current_idx += self.skip_step\n yield x, y\n\nnum_steps = 30 # modelに与える文章内の単語数\nbatch_size = 20 # 1バッチあたりいくつの文章を与えるか\ntrain_data_generator = KerasBatchGenerator(train_data, num_steps, batch_size, vocabulary,\n skip_step=num_steps)\nvalid_data_generator = KerasBatchGenerator(valid_data, num_steps, batch_size, vocabulary,\n skip_step=num_steps)\n\nhidden_size = 500\nuse_dropout=True\nmodel = Sequential()\nmodel.add(Embedding(vocabulary, hidden_size, input_length=num_steps)) # model.add(BatchNormalization(axis=-1)) があったほうが良いかも\nmodel.add(LSTM(hidden_size, return_sequences=True))\nmodel.add(LSTM(hidden_size, return_sequences=True)) # model.add(BatchNormalization(axis=-1)) があったほうが良いかも\nif use_dropout:\n model.add(Dropout(0.5))\nmodel.add(TimeDistributed(Dense(vocabulary)))\nmodel.add(Activation('softmax'))\n\noptimizer = Adam()\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])\n\nprint(model.summary())\ncheckpointer = ModelCheckpoint(filepath=result_path + '/model-{epoch:02d}.hdf5', verbose=1)\nnum_epochs = 50\nif run_opt == 1:\n model.fit_generator(train_data_generator.generate(), len(train_data)//(batch_size*num_steps), num_epochs,\n validation_data=valid_data_generator.generate(),\n validation_steps=len(valid_data)//(batch_size*num_steps), callbacks=[checkpointer])\n # model.fit_generator(train_data_generator.generate(), 2000, num_epochs,\n # validation_data=valid_data_generator.generate(),\n # validation_steps=10)\n model.save(result_path + \"final_model.hdf5\")\nelif run_opt == 2:\n model = load_model(result_path + \"/model-50.hdf5\")\n dummy_iters = 40\n example_training_generator = KerasBatchGenerator(train_data, num_steps, 1, vocabulary,\n skip_step=1)\n print(\"Training data:\")\n for i in range(dummy_iters):\n dummy = next(example_training_generator.generate())\n num_predict = 10\n true_print_out = \"Actual words: \"\n pred_print_out = \"Predicted words: \"\n for i in range(num_predict):\n data = next(example_training_generator.generate())\n prediction = model.predict(data[0])\n predict_word = np.argmax(prediction[:, num_steps-1, :])\n true_print_out += reversed_dictionary[train_data[num_steps + dummy_iters + i]] + \" \"\n pred_print_out += reversed_dictionary[predict_word] + \" \"\n print(true_print_out)\n print(pred_print_out)\n # test data set\n dummy_iters = 40\n example_test_generator = KerasBatchGenerator(test_data, num_steps, 1, vocabulary,\n skip_step=1)\n print(\"Test data:\")\n for i in range(dummy_iters):\n dummy = next(example_test_generator.generate())\n num_predict = 10\n true_print_out = \"Actual words: \"\n pred_print_out = \"Predicted words: \"\n for i in range(num_predict):\n data = next(example_test_generator.generate())\n prediction = model.predict(data[0])\n predict_word = np.argmax(prediction[:, num_steps - 1, :])\n true_print_out += reversed_dictionary[test_data[num_steps + dummy_iters + i]] + \" \"\n 
pred_print_out += reversed_dictionary[predict_word] + \" \"\n print(true_print_out)\n print(pred_print_out)","sub_path":"sentence_generator/adventures-in-ml-code_01.py","file_name":"adventures-in-ml-code_01.py","file_ext":"py","file_size_in_byte":7695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"36719793","text":"import json\r\nimport gensim.downloader as api\r\nimport gensim\r\nimport spacy\r\nimport pandas as pd\r\nimport re\r\nimport csv\r\nfrom collections import defaultdict as dd\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import RegexpTokenizer\r\nfrom gensim import corpora, models, similarities\r\nfrom gensim.models import CoherenceModel\r\npd.set_option(\"display.width\",None)\r\nstw=stopwords.words('english')\r\nstw.extend(['from', 'subject', 're', 'edu', 'use','etfs','us','esg','qqq','also','pm','billion','million','etf','market','fund','year','month','think','one'])\r\nCLASS_NUM = 3\r\ndef preprocess(sentence):\r\n\tsentence = sentence.replace(\",\",\"\")\r\n\tsentence = sentence.replace(\".\",\"\")\r\n\ttokenizer =RegexpTokenizer(r'\\w+')\r\n\tsentence=sentence.lower()\r\n\tdocs = tokenizer.tokenize(sentence) # token\r\n\tdocs = [doc for doc in docs if doc not in stw] # remove stopword\r\n\t\r\n\treturn docs\r\ndef create_data(file_name):\r\n\tdescription_data = []\r\n\twith open(file_name , 'r' ) as f:\r\n\t\traw_dic = json.load(f)\r\n\tfor page,info in raw_dic.items():\r\n\t\tdescription_data.append(preprocess(info[\"topic\"] +' ' + info['parag'])) \r\n\treturn description_data\r\ndef make_bigrams(texts,bigram_mod):\r\n\treturn [bigram_mod[doc] for doc in texts]\r\ndef make_trigrams(texts,trigram_mod,bigram_mod):\r\n return [trigram_mod[bigram_mod[doc]] for doc in texts]\r\n\r\ndef lemmatization(texts,nlp ,allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\r\n\ttexts_out = []\r\n\tfor sent in texts:\r\n\t\tdoc = nlp(\" \".join(sent)) \r\n\t\ttexts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\r\n\treturn texts_out\r\ndef format_topics_sentences(ldamodel, corpus, texts):\r\n sent_topics_df = pd.DataFrame()\r\n\r\n for i, row_list in enumerate(ldamodel[corpus]):\r\n row = row_list[0] if ldamodel.per_word_topics else row_list \r\n row = sorted(row, key=lambda x: (x[1]), reverse=True)\r\n for j, (topic_num, prop_topic) in enumerate(row):\r\n if j == 0: # => dominant topic\r\n wp = ldamodel.show_topic(topic_num)\r\n topic_keywords = \", \".join([word for word, prop in wp])\r\n sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)\r\n else:\r\n break\r\n sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']\r\n\r\n # Add original text to the end of the output\r\n contents = pd.Series(texts)\r\n sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)\r\n return(sent_topics_df)\r\n\r\n\r\n\r\ndef main():\r\n\tdata_words = create_data(\"output.json\")\r\n\tbigram = gensim.models.Phrases(data_words, min_count=5, threshold=100)\r\n\ttrigram = gensim.models.Phrases(bigram[data_words], threshold=100)\r\n\t\r\n\tbigram_mod = gensim.models.phrases.Phraser(bigram)\r\n\ttrigram_mod = gensim.models.phrases.Phraser(trigram)\r\n\t\r\n\tdata_words_bigrams = make_bigrams(data_words,bigram_mod)\r\n\tdata_words_trigrams = make_trigrams(data_words,trigram_mod,bigram_mod)\r\n\t\r\n\tnlp = spacy.load('en', disable=['parser', 'ner'])\r\n\r\n\tdata_lemmatized = lemmatization(data_words_trigrams, 
nlp,allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])\r\n\t\r\n\tid2word = corpora.Dictionary(data_lemmatized)\r\n\r\n\tcorpus = [id2word.doc2bow(text) for text in data_words_trigrams]\r\n\t#print([[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]])\r\n\r\n\tlda_model = gensim.models.ldamodel.LdaModel(corpus = corpus,id2word = id2word,num_topics = CLASS_NUM,\r\n\t\t\t\t\t\t\t\t\t\t\t\trandom_state = 100,update_every = 1,chunksize = 100,passes = 25,\r\n\t\t\t\t\t\t\t\t\t\t\t\talpha = 'auto',per_word_topics = True)\r\n\t#print(\"lda_model.topic\\n\",lda_model.print_topics())\r\n\tnull_topics = lda_model.print_topics()\r\n\t#print(type(a),a)\r\n\t\r\n\tclassify_dict = {}\r\n\tfor row in null_topics:\r\n\t\tself_type,key_word_combind = row[0],row[1]\r\n\t\tkey_word_list = ''.join(c for c in key_word_combind if (c.isalnum() or c.isspace()) and not c.isdigit()).split()\r\n\t\tfor key_word in key_word_list:\r\n\t\t\tclassify_dict.setdefault(key_word,self_type)\r\n\ttemp_cla_dict = dd(list)\r\n\tfor w ,self_type in classify_dict.items():\r\n\t\ttemp_cla_dict[self_type].append(w)\r\n\tclassify_dict = temp_cla_dict\r\n\r\n\tprint(\"classify word\",classify_dict)\r\n\twith open(\"output.json\" , 'r' ) as f:\r\n\t\toutput = json.load(f) \r\n\t\ttdm = []\r\n\t\tco_tdm = []\r\n\t\tkey_word_list = []\r\n\t\tfor i,words in classify_dict.items():\r\n\t\t\tkey_word_list += words\r\n\t\t\r\n\t\t\r\n\t\tfor index,info in output.items():\r\n\t\t\ttemp_tdm = [0,0,0]\r\n\t\t\ttemp_co_tdm = [0]*len(key_word_list)\r\n\t\t\twords = preprocess(info[\"topic\"] +' ' + info['parag'])\r\n\t\t\twords = [(w,-1) for w in words]\r\n\t\t\tfor w in words:\r\n\t\t\t\tfor key,val in classify_dict.items():\r\n\t\t\t\t\tif w[0] in val:\r\n\t\t\t\t\t\ttemp_tdm[key] = temp_tdm[key]+1\r\n\t\t\t\t\t\tw = (w[0],key)\r\n\t\t\t\tfor key, val in enumerate(key_word_list):\r\n\t\t\t\t\tif w[0] == val:\r\n\t\t\t\t\t\ttemp_co_tdm[key] = temp_co_tdm[key]+1\r\n\t\t\ttdm.append(temp_tdm)\r\n\t\t\tco_tdm.append(temp_co_tdm)\r\n\t\t\tinfo['parag'] = words\r\n\twith open(\"result.json\",'w') as fp:\r\n\t\tjson.dump(output,fp)\r\n\tprint(\"tdm\",tdm)\r\n\tprint(\"\\nco_tdm\",co_tdm)\r\n\ttotal = len(co_tdm)\r\n\t\r\n\tfix_matrix = [0]*len(key_word_list)\r\n\tfor row in co_tdm:\r\n\t\tfor key,val in enumerate(row):\r\n\t\t\tfix_matrix[key] = fix_matrix[key] + val\r\n\tco_ocr_matrix = []\r\n\tfor i in range(len(key_word_list)):\r\n\t\ttemp = [0]*len(key_word_list)\r\n\t\ttemp[i] = 1\r\n\t\tco_ocr_matrix.append(temp)\r\n\r\n\tfor i in range(len(key_word_list)-1):\r\n\t\tfor row in co_tdm:\r\n\t\t\tif(row[i]>0 and row[i+1]>0):\r\n\t\t\t\tco_ocr_matrix[i][i+1] = co_ocr_matrix[i][i+1] + 1/fix_matrix[i]\r\n\t\t\t\tco_ocr_matrix[i+1][i] = co_ocr_matrix[i+1][i] + 1/fix_matrix[i+1]\r\n\tprint(\"key word appear times\",fix_matrix)\r\n\tprint(\"co-occurence-matrix\",co_ocr_matrix)\r\n\tfor key,row in enumerate(co_ocr_matrix):\r\n\t\trow.insert(0,key_word_list[key])\r\n\twith open(\"co_ocr_matrix.csv\",\"w\",newline = '') as f:\r\n\t\twriter = csv.writer(f)\r\n\t\twriter.writerow([\"\"]+key_word_list)\r\n\t\twriter.writerows(co_ocr_matrix)\r\n\r\n\t\t\t\t\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t#print(type(a[0][1]))\r\n\t#doc_lda = lda_model[corpus]\r\n\t#print('\\nPerplexity: ', lda_model.log_perplexity(corpus))\r\n\t#coherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized, dictionary=id2word, coherence='c_v')\r\n\t#coherence_lda = coherence_model_lda.get_coherence()\r\n\t#print('\\nCoherence Score: ', 
coherence_lda)\r\n\t#df_topic_sents_keywords = format_topics_sentences(lda_model,corpus, data_lemmatized)\r\n\r\n# Format\r\n\t#df_dominant_topic = df_topic_sents_keywords.reset_index()\r\n\t#df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']\r\n\t#print(df_dominant_topic.head(10))\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n\r\n\r\n\r\n","sub_path":"hw2/myner.py","file_name":"myner.py","file_ext":"py","file_size_in_byte":6594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"369483556","text":"import os \nimport numpy as np \n\ndef subpbs(path):\n    basename='p'+str(path)\n    for gsw in [0.25,0.50,0.75,1.0]: \n        fname=basename+'/'+'gsw'+str(gsw)+'.vmc.pbs'\n        os.system('qsub '+fname)\n\nif __name__=='__main__':\n    for path in [17]: subpbs(path) \n","sub_path":"undoped/NEW/subpbs.py","file_name":"subpbs.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"396548756","text":"import base64\nimport requests\n\n\ndef fetch(host, slug, auth):\n\tresponse = None\n\tif host is None:\n\t\thost = 'https://teamcity.jetbrains.com'\n\tif auth:\n\t\theaders =\\\n\t\t{\n\t\t\t'Accept': 'application/json',\n\t\t\t'Authorization': 'Basic ' + base64.b64encode(auth.encode('utf-8')).decode('ascii')\n\t\t}\n\t\tif slug:\n\t\t\tresponse = requests.get(host + '/app/rest/buildTypes?locator=affectedProject:(id:' + slug + ')&fields=buildType(builds($locator(running:any),build(id,running,status,buildType(id,projectName))))', headers = headers)\n\t\telse:\n\t\t\tresponse = requests.get(host + '/app/rest/buildTypes/?fields=buildType(builds($locator(user:current,running:any),build(id,running,status,buildType(id,projectName))))', headers = headers)\n\n\t# process response\n\n\tif response and response.status_code == 200:\n\t\tdata = response.json()\n\t\tif 'buildType' in data:\n\t\t\tresult = []\n\t\t\tfor project in data['buildType']:\n\t\t\t\tif project['builds']['build']:\n\t\t\t\t\tresult.extend(normalize_data(project['builds']['build'][0]))\n\t\t\treturn result\n\treturn []\n\n\ndef normalize_data(build):\n\treturn\\\n\t[\n\t\t{\n\t\t\t'provider': 'teamcity',\n\t\t\t'slug': build['buildType']['projectName'],\n\t\t\t'active': True,\n\t\t\t'status': normalize_status(build['running'], build['status'].lower())\n\t\t}\n\t]\n\n\ndef normalize_status(running, status):\n\tif running is True:\n\t\treturn 'process'\n\tif status == 'error':\n\t\treturn 'errored'\n\tif status == 'failure':\n\t\treturn 'failed'\n\treturn 'passed'\n","sub_path":"src/provider/teamcity.py","file_name":"teamcity.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"241950902","text":"\nimport os\nimport time\n# create a child process\npid=os.fork()\nif pid==0:\n    for i in range(10):\n        print(i)\n        time.sleep(0.5)\n# run the parent process\nelif pid>0:\n    for i in range(10):\n        print(\"a\")\n        print(\"b\")\n        time.sleep(0.5)","sub_path":"Py_text/0330/创建进程.py","file_name":"创建进程.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"157644034","text":"# n<=2000 nums[i]<=10^9\r\n# !Each step removes two numbers x,y from the array and appends gcd(x,y) or min(x,y); how many distinct final values are possible?\r\n\r\n# !The final answer is the gcd of some subset of the numbers, and that gcd must be <= the minimum of all numbers\r\n# => gcd of several numbers: factorize every number\r\n# 1819. Number of Different GCDs in Subsequences - enumerate over the value range\r\n\r\nfrom collections import defaultdict\r\nfrom typing import List\r\n\r\n\r\ndef gcdOrMin(nums: List[int]) -> int:\r\n    mp = defaultdict(list)\r\n    for num in nums:\r\n        for factor in getFactors(num):\r\n            mp[factor].append(num)\r\n    groupGcd = defaultdict(int)\r\n    for factor, group in mp.items():\r\n        g = tuple(group)\r\n        groupGcd[g] = max(groupGcd[g], factor)\r\n    min_ = min(nums)\r\n    return sum(v <= min_ for v in groupGcd.values())\r\n\r\n\r\ndef getFactors(n: int) -> List[int]:\r\n    \"\"\"All factors of n in O(sqrt(n))\"\"\"\r\n    if n <= 0:\r\n        return []\r\n    small, big = [], []\r\n    upper = int(n**0.5) + 1\r\n    for i in range(1, upper):\r\n        if n % i == 0:\r\n            small.append(i)\r\n            if i != n // i:\r\n                big.append(n // i)\r\n    return small + big[::-1]\r\n\r\n\r\nn = int(input())\r\nnums = list(map(int, input().split()))\r\nprint(gcdOrMin(nums))\r\n","sub_path":"19_数学/因数筛/F - GCD or MIN-多个数的gcd.py","file_name":"F - GCD or MIN-多个数的gcd.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"485503115","text":"import pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport numpy as np\nimport torch\nfrom nltk.stem.porter import *\nfrom filter_posts import prepro\nfrom sentence_transformers import util\n\n\ndef tfidf_posts(fl_keywords, fl_descriptions, tweet_keywords, path_connect, path_fl_pics, path_fl_descript, path_tweets,\n                path_tweet_pics):\n    fl, tw, connect = prepro(fl_keywords, fl_descriptions, tweet_keywords, path_connect, path_fl_pics, path_fl_descript,\n                             path_tweets, path_tweet_pics)\n\n    # make results reproducible\n    connect = connect.sort_values(by='twitterusername').reset_index(drop=True)\n\n    # stem words\n    ps = PorterStemmer()\n    fl = [ps.stem(x) for x in fl]\n    tw = [ps.stem(x) for x in tw]\n\n    vectorizer = TfidfVectorizer(strip_accents='unicode', lowercase=True, stop_words='english', max_df=0.7)\n    #max_features=10000)\n    vector_fl = vectorizer.fit_transform(fl)\n    vector_tw = vectorizer.transform(tw)\n\n    # convert to tensor for faster cosine calculation\n    fl_emb = torch.tensor(vector_fl.toarray())\n    tw_emb = torch.tensor(vector_tw.toarray())\n\n    # Compute cosine similarities\n    cosine_scores = util.pytorch_cos_sim(fl_emb, tw_emb)\n    cosine_scores[cosine_scores != cosine_scores] = 0\n\n    # get indices of max similarity values\n    ind = torch.argmax(cosine_scores, dim=1)\n\n    # extract usernames\n    fl_names = connect['flickrusername'].tolist()\n    tw_names = connect['twitterusername'].tolist()\n\n    # initialize result\n    result = pd.DataFrame(columns=['flickrusername', 'twitterusername'])\n\n    for i in range(len(fl)):\n        result.loc[i] = [fl_names[i], tw_names[ind[i]]]\n\n    # compare actual matching with tfidf matching\n    df1 = result.merge(connect, on=['flickrusername', 'twitterusername'], how='left', indicator='Exist')\n    df1['Exist'] = np.where(df1.Exist == 'both', True, False)\n\n    # count correct matches\n    cor = df1['Exist'].sum()\n\n    # print performance measures\n    print(f'tf-idf + cosine similarity - Dataset B')\n    print(f'{cor} / {len(df1)}') # correct / total\n    print(cor / len(df1)) # accuracy\n\n\nif __name__ == '__main__':\n    dataset = 'dataset_b'\n\n    path_connect = f'../../../../data/{dataset}/connection.csv'\n    path_fl_pics = f'../../../../data/{dataset}/flickr/flickr_pic_tags/'\n    path_fl_descript = f'../../../../data/{dataset}/flickr/flickr_photo_descriptions/'\n    path_tweets = f'../../../../data/{dataset}/twitter/tweets/'\n    path_tweet_pics = 
f'../../../../data/{dataset}/twitter/tweet_pic_tags/'\n # decide which data to consider (if both fl options are false fl_descriptions are utilized)\n fl_keywords = True\n fl_descriptions = False\n # tweet text is always used, additionally tweet keywords can be considered\n tweet_keywords = False\n\n tfidf_posts(fl_keywords, fl_descriptions, tweet_keywords, path_connect, path_fl_pics, path_fl_descript, path_tweets,\n path_tweet_pics)\n\n\n \"\"\" unlimited features:\n with all data\n tf-idf + cosine similarity - Dataset B\n 254 / 2301\n 0.11038678835289005\n \n without flickr keywords\n tf-idf + cosine similarity - Dataset B\n 259 / 2445\n 0.10593047034764826\n \n without flickr description\n tf-idf + cosine similarity - Dataset B\n 22 / 2325\n 0.00946236559139785\n \n without twitter keywords\n tf-idf + cosine similarity - Dataset B\n 413 / 4931\n 0.08375583046035287\n \n without keywords\n tf-idf + cosine similarity - Dataset B\n 417 / 5233\n 0.07968660424230843\n \n without fl_descriptions and without twitter keywords\n tf-idf + cosine similarity - Dataset B\n 15 / 4983\n 0.0030102347983142685\n \n \"\"\"","sub_path":"baseline_experiments/dataset_b/posts/tf-idf.py","file_name":"tf-idf.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"329931351","text":"from Code.settings import *\nfrom Code.Map.minimaps import Minimap\nfrom Code.buttons import Buttons, Button, ButtonTwoStates\nfrom Code.utils import Interface, Dial\nfrom Code.sound import Music\nfrom Code.texts import TextMaxSizeCenter, max_size_list_text\n\n\nclass Panel:\n def __init__(self, width: int, height: int, pos: Tuple[int, int], pad: int, size: int) -> None:\n self.rect = pg.Rect(*pos, width, height)\n self.surface = pg.Surface((self.rect.width, self.rect.height))\n self.color_background = pg.Color((128, 128, 128))\n self.pad = pad\n self.size = size\n self.interface = Interface(\n pos=(0, self.rect.height // 70), max_width=width, max_height=height,\n indent=(0, self.rect.height // 100), size=(self.rect.width, (height - width) // 13)\n )\n\n def get_absolute_pos(self, x: int, y: int) -> Tuple[int, int]:\n return self.rect.x + x, self.rect.y + y\n\n def render(self) -> None:\n pass\n\n def draw(self, surface: pg.Surface) -> None:\n surface.blit(self.surface, self.rect)\n\n def update(self) -> None:\n pass\n\n def event(self, event: pg.event.Event) -> None:\n pass\n\n\nclass LeftPanel(Panel):\n def __init__(self, width: int, height: int, pos: Tuple[int, int], pad: int, size: int, music: Music = None) -> None:\n super().__init__(width, height, pos, pad, size)\n self.music = music\n # Миникарта\n self.minimap = Minimap(\n (self.pad, self.rect.height - self.pad - self.size), self.size, self.size)\n # Интерфейс\n self.buttons = None\n self.time = None\n self.running_line = None\n self.pos_cursor = None\n self.button_file = None\n self.button_del_file = None\n self.button_info = None\n self.processor = None\n self.dial = None\n #\n\n def init(self, processor) -> None:\n self.interface = Interface(\n pos=(0, self.rect.height // 70), max_width=self.rect.width, max_height=self.rect.height,\n indent=(0, self.rect.height // 100), size=(self.rect.width, (self.rect.height - self.rect.width) // 13)\n )\n self.buttons = Buttons()\n self.processor = processor\n self.init_interface()\n self.update()\n self.render()\n\n def init_interface(self) -> None:\n size = max_size_list_text(\n ['<', '>', '||', '►'], self.interface.width, 
self.interface.height, PT_MONO\n )\n width3, height = int(round(self.interface.width / 3, 0)), 2 * self.interface.height + self.interface.indent[1]\n self.dial = Dial(self.interface.pos, self.interface.height, UPDATE_CHANGE_TIME,\n (255, 255, 255), (255, 255, 255))\n self.time = TextMaxSizeCenter(\n text=f\"\", width=self.interface.width - 4 * self.interface.height, height=self.interface.height,\n pos=(self.interface.pos[0] + self.interface.height, self.interface.pos[1]), font_type=PT_MONO\n )\n self.buttons.add(Button(\n pos=(self.interface.width - 3 * self.interface.height, self.interface.pos[1]),\n width=self.interface.height,\n height=self.interface.height,\n func=self.processor.down_speed, color_disabled=(30, 30, 30), color_active=(40, 40, 40),\n text=TextMaxSizeCenter(text='<', width=self.interface.height, height=self.interface.height,\n font_type=PT_MONO)\n ))\n self.buttons.add(ButtonTwoStates(\n pos=(self.interface.width - 2 * self.interface.height, self.interface.pos[1]), width=self.interface.height,\n height=self.interface.height,\n func=self.processor.change, color_disabled=(30, 30, 30), color_active=(40, 40, 40),\n text=TextMaxSizeCenter(text='||', width=self.interface.height, height=self.interface.height,\n font_type=PT_MONO),\n texts=('►', '||'), get_state=self.processor.get_state\n ))\n self.buttons.add(Button(\n pos=(self.interface.width - self.interface.height, self.interface.pos[1]), width=self.interface.height,\n height=self.interface.height,\n func=self.processor.up_speed, color_disabled=(30, 30, 30), color_active=(40, 40, 40),\n text=TextMaxSizeCenter(text='>', width=self.interface.height, height=self.interface.height,\n font_type=PT_MONO)\n ))\n self.interface.move(0)\n #\n self.running_line = RunningLineMaxSizeCenter(\n text='пример текста', width=self.interface.width, height=self.interface.height,\n pos=self.interface.pos, speed=30, font_type=PT_MONO\n )\n self.interface.move(0)\n button = Button(\n pos=self.interface.pos, width=width3, height=height,\n func=self.music.previous, color_disabled=(30, 30, 30), color_active=(40, 40, 40),\n text=TextCenter(\n text='<', width=width3, height=height, font_type=PT_MONO,\n font_size=size\n )\n )\n self.buttons.add(button)\n self.interface.move(button.rect.width, 0, is_indent=(False, False))\n button = ButtonTwoStates(\n pos=self.interface.pos, width=self.interface.width - 2 * width3, height=height,\n func=self.music.pause_and_play, color_disabled=(30, 30, 30), color_active=(40, 40, 40),\n text=TextCenter(text='||', width=self.interface.width - 2 * width3, height=height,\n font_type=PT_MONO, font_size=size),\n texts=('►', '||'), get_state=self.music.get_state\n )\n self.buttons.add(button)\n self.interface.move(button.rect.width, 0, is_indent=(False, False))\n button = Button(\n pos=self.interface.pos, width=width3, height=height,\n func=self.music.next, color_disabled=(30, 30, 30), color_active=(40, 40, 40),\n text=TextCenter(\n text='>', width=width3, height=height, font_type=PT_MONO,\n font_size=size\n )\n )\n self.buttons.add(button)\n self.interface.move(- self.interface.width + width3, is_indent=(False, False))\n self.interface.move(0)\n self.pos_cursor = TextMaxSizeCenter(\n text='', width=self.interface.width, height=self.interface.height, pos=self.interface.pos, font_type=PT_MONO\n )\n self.interface.move(0)\n height2 = 2 * self.interface.height + self.interface.indent[1]\n self.button_info = Button(\n pos=self.interface.pos, width=self.interface.width, height=height2,\n func=None, color_disabled=(30, 30, 30), 
color_active=(40, 40, 40),\n text=TextMaxSizeCenter(text='Информация', width=self.interface.width,\n height=height2, font_type=PT_MONO)\n )\n self.interface.move(0, height2)\n self.button_file = Button(\n pos=self.interface.pos, width=self.interface.width, height=height2,\n func=None, color_disabled=(30, 30, 30), color_active=(40, 40, 40),\n text=TextMaxSizeCenter(text='Выбрать файл', width=self.interface.width,\n height=height2, font_type=PT_MONO)\n )\n self.interface.move(0, height2)\n self.button_del_file = Button(\n pos=self.interface.pos, width=self.interface.width, height=height2,\n func=None, color_disabled=(30, 30, 30), color_active=(40, 40, 40),\n text=TextMaxSizeCenter(text='Удалить файл', width=self.interface.width,\n height=height2, font_type=PT_MONO)\n )\n self.interface.move(0, height2)\n\n def update(self) -> None:\n self.running_line.update(self.music.get_text())\n if self.processor:\n self.time.set_text(f\"{self.processor.tick_complete}\")\n self.buttons.update_text()\n\n def update_cursor(self, pos_cursor: Tuple[int, int]) -> None:\n text = f'(x: {pos_cursor[0]}, y: {pos_cursor[1]})'\n if pos_cursor and text != self.pos_cursor.text:\n self.pos_cursor.set_text(text)\n\n def render_minimap(self, surface: pg.Surface, pos: Tuple[int, int] = None,\n width: int = None, height: int = None) -> None:\n self.minimap.render(surface, pos, width, height)\n self.render()\n\n def render(self) -> None:\n self.surface.fill(self.color_background)\n #\n self.running_line.draw(self.surface)\n self.time.draw(self.surface)\n self.buttons.draw(self.surface)\n if self.button_file.func:\n self.button_file.draw(self.surface)\n if self.button_del_file.func:\n self.button_del_file.draw(self.surface)\n if self.button_info.func:\n self.button_info.draw(self.surface)\n if self.processor:\n self.dial.draw(self.surface, self.processor.tick_complete)\n self.minimap.draw(self.surface)\n self.pos_cursor.draw(self.surface)\n\n def event(self, event: pg.event.Event) -> None:\n self.buttons.event(event)\n if self.button_file.func:\n self.button_file.event(event)\n if self.button_del_file.func:\n self.button_del_file.event(event)\n if self.button_info.func:\n self.button_info.event(event)\n\n\nclass RightPanel(Panel):\n def __init__(self, width: int, height: int, pos: Tuple[int, int], pad: int, size: int) -> None:\n super().__init__(width, height, pos, pad, size)\n # Интерфейс\n self.info_update = None\n self.inventory_settings = (pos[0] + pad, self.rect.height - pad - size), size, size\n #\n self.counter_line = 10\n self.lines = []\n for _ in range(self.counter_line):\n self.lines.append(TextMaxSizeCenter(\n text='', width=self.interface.width - self.interface.width // 50, height=self.interface.height,\n pos=(self.interface.pos[0] + self.interface.width // 100, self.interface.pos[1]), font_type=PT_MONO\n ))\n self.interface.move(0)\n #\n self.update()\n self.render()\n\n def update(self) -> None:\n pass\n\n def update_text(self, texts: list = None) -> None:\n last = -1\n texts = list() if texts is None else texts\n for ind, text in enumerate(texts):\n if self.lines[ind].text != text:\n self.lines[ind].set_text(text)\n last = ind\n for ind in range(last + 1, self.counter_line):\n if self.lines[ind].text != '':\n self.lines[ind].set_text('')\n self.render()\n\n def render(self) -> None:\n self.surface.fill(self.color_background)\n #\n for line in self.lines:\n 
line.draw(self.surface)\n","sub_path":"Code/info_panel.py","file_name":"info_panel.py","file_ext":"py","file_size_in_byte":10926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"82037641","text":"from typing import Mapping, Sequence, Any, Dict\n\nfrom nn import ErrorCalculator\nfrom nn import sigmoid, batch\nfrom nn.utilities import read_monk\nfrom nn.activation_functions import relu, tanh, tanh_classification\nfrom nn.validation import grid_search, write_on_file\nimport multiprocessing as mp\n\ntrain_data, test_data = read_monk(2)\n\nparams_nn: Dict[str, Sequence[Any]] = dict(\n    error_calculator=[ErrorCalculator.MSE],\n    learning_algorithm=[batch],\n    epochs_limit=[1000],\n    n_init=[10],\n    epsilon=[1e-05],\n    patience=[10],\n)\nparams_architecture: Mapping[str, Sequence[Any]] = dict(\n    size_hidden_layers=[(2,)],\n    activation=[tanh_classification],\n    activation_hidden=[relu],\n    eta=[0.1, 0.01, 0.001],\n    alpha=[0, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],\n    alambd=[0, 0.0001, 0.001, 0.01],\n    eta_decay=[0],\n)\n\ncv_params: Mapping[str, Any] = dict(\n    cv=3,\n    error_calculator=ErrorCalculator.MSE,\n    to_shuffle=True,\n)\n\ngrid_search_results = grid_search(\n    train_data,\n    params_nn=params_nn,\n    params_architecture=params_architecture,\n    cv_params=cv_params,\n\n    n_jobs=8,\n)\n\nwrite_on_file(grid_search_results, 'results/monk2-mse-low_eta')\n","sub_path":"nn/playground/monk/monk2.py","file_name":"monk2.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"596521307","text":"import sys\nimport math\n\n\ndef is_inside_polygon(polygon, x, y):\n    A = []\n    B = []\n    C = []\n    for i in range(len(polygon)):\n        p1x, p1y = polygon[i]\n        p2x, p2y = polygon[(i + 1) % len(polygon)]\n\n        a = -(p2y - p1y)\n        b = p2x - p1x\n        c = -(a * p1x + b * p1y)\n        \n        A.append(a)\n        B.append(b)\n        C.append(c)\n\n    D = []\n    for i in range(len(A)):\n        d = A[i] * x + B[i] * y + C[i]\n        D.append(d)\n\n    t1 = all(d >= 0 for d in D)\n    t2 = all(d <= 0 for d in D)\n    return t1 or t2\n\n\nn = int(input())\ntarget = []\nfor i in range(n):\n    x, y = [int(j) for j in input().split()]\n    target.append((x, y))\nm = int(input())\nfor i in range(m):\n    x, y = [int(j) for j in input().split()]\n    if is_inside_polygon(target, x, y):\n        print(\"hit\")\n    else:\n        print(\"miss\")\n\n","sub_path":"practice/puzzles/easy/Dead_mens_shot.py","file_name":"Dead_mens_shot.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"147008710","text":"data = {\n\t\t\"deviceData\": '{\"entity_id\":\"light.hue_color_lamp_3\",\"rgb_color\": [247,171,81]}',\n\t\t\"deviceDomain\": \"light\",\n\t\t\"deviceId\": \"light.hue_color_lamp_3\",\n\t\t\"deviceService\": \"turn_on\",\n\t\t\"time\": \"07:00:12\",\n\t\t\"dcmPk\": 94,\n\t\t\"usrPk\": [\"3\",\"4\"],\n\t\t\"usrCode\": 3,\n\t\t\"statCode\": 1,\n\t\t\"outCond\": 1,\n\t\t\"tit\": \"테스트 메시지입니다.\",\n\t\t\"message\": '{\"type\":\"01\",\"content\":\"테스트 내용입니다.\"}',\n\t\t\"interv\": 32,\n\t\t\"inDt\": [2019, 5, 10, 15, 1, 58],\n\t\t\"msgtyp\": 103004,\n\t\t\"isgroupmsg\": False,\n\t\t\"isshowmsg\": True,\n\t\t\"dt\": \"2019-05-31 13:36:30 KST+0900\"\n\t}","sub_path":"test_python/chat_data/data_server.py","file_name":"data_server.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"640125263","text":"class Solution:\n    def sortArray(self, nums: List[int]) -> List[int]:\n        nums.sort()\n        return nums\n\n\n# Solution 2\n# Uses counting sort\n# Note the handling of negative numbers; this solution does not use the value range given in the problem\nclass Solution:\n    def sortArray(self, nums: List[int]) -> List[int]:\n        length = len(nums)\n        max_value = max(nums)\n        min_value = min(nums)\n        bias = 0\n        c_length = max_value + 1\n        if min_value < 0:\n            c_length += abs(min_value)\n            bias = abs(min_value)\n        c = [0 for _ in range(c_length)]\n        for num in nums:\n            c[num + bias] += 1\n        z = 0\n        for i in range(c_length):\n            while c[i] > 0:\n                nums[z] = i - bias\n                c[i] -= 1\n                z += 1\n        return nums\n\n# Solution 3\n# Takes into account the array length and value range limits given in the problem\nclass Solution:\n    def sortArray(self, nums: List[int]) -> List[int]:\n        max_length = 50000 + 50000 +1\n        bias = 50000\n        c = [0] * (max_length + 1)\n        for num in nums:\n            c[num + bias] += 1\n        j = 0\n        for i in range(max_length + 1):\n            while c[i] > 0:\n                c[i] -= 1\n                nums[j] = i - bias\n                j += 1\n        return nums","sub_path":"leetcode/python3 version/#912. Sort an Array.py","file_name":"#912. Sort an Array.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"423070797","text":"#Import required packages\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nfrom sklearn.model_selection import train_test_split\r\nfrom keras.utils import np_utils\r\n\r\n#Resize and rotate the loaded image\r\ndef rotate(image):\r\n    image = image.reshape([28, 28])\r\n    image = np.fliplr(image)\r\n    image = np.rot90(image)\r\n    return image\r\n\r\n#Read the input files\r\ntrain = pd.read_csv('emnist-balanced-train.csv')\r\ntest = pd.read_csv('emnist-balanced-test.csv')\r\n\r\n#Load the data\r\nx_train, y_train = train.iloc[:, 1:], train.iloc[:, 0]\r\nx_test, y_test = test.iloc[:, 1:], test.iloc[:, 0]\r\n\r\n#Delete the pandas dataframes that are no longer needed\r\ndel train\r\ndel test\r\n\r\n#Normalization\r\nx_train = np.apply_along_axis(rotate, 1, x_train.values)\r\nx_test = np.apply_along_axis(rotate, 1, x_test.values)\r\nx_train = x_train.astype('float32') / 255\r\nx_test = x_test.astype('float32') / 255\r\n\r\n#Set the number of classes based on the training data\r\nnumber_of_classes = y_train.nunique()\r\ny_train = np_utils.to_categorical(y_train, number_of_classes)\r\ny_test = np_utils.to_categorical(y_test, number_of_classes)\r\n\r\n#Reshape\r\nx_train = x_train.reshape(-1, 28, 28, 1)\r\nx_test = x_test.reshape(-1, 28, 28, 1)\r\n\r\nx_train, X_val, y_train, y_val = train_test_split(x_train, y_train, test_size= 0.10, random_state=88)\r\n\r\n#Create the model\r\nmodel = tf.keras.models.Sequential()\r\nmodel.add(tf.keras.layers.Conv2D(filters=6, kernel_size=(3, 3), activation=tf.nn.relu, input_shape=(28,28,1)))\r\nmodel.add(tf.keras.layers.AveragePooling2D())\r\nmodel.add(tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3), activation=tf.nn.relu))\r\nmodel.add(tf.keras.layers.AveragePooling2D())\r\nmodel.add(tf.keras.layers.Flatten())\r\nmodel.add(tf.keras.layers.Dense(units=120, activation=tf.nn.relu))\r\nmodel.add(tf.keras.layers.Dense(units=84, activation=tf.nn.relu))\r\nmodel.add(tf.keras.layers.Dense(units=47, activation = tf.nn.softmax))\r\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\r\nmodel.fit(x_train, y_train, epochs=10, batch_size=16, verbose=1, validation_data=(X_val, 
y_val))\r\n\r\nmodel.save('char_reader.model')\r\n","sub_path":"make_char_model.py","file_name":"make_char_model.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"93236793","text":"from django.contrib import admin\nfrom django.db import models\n\nfrom django_json_widget.widgets import JSONEditorWidget\nfrom .models import Deal, DealInvite, Bid, BidDoc, BidDocStats\nfrom django.conf import settings\n\nfrom django import template\nfrom .doctools import DocTools\n\nfrom decimal import Decimal\n\n# ML loading\nimport numpy as np\nfrom joblib import dump, load\nimport nltk\n\n# Register your models here.\n\nadmin.site.register(Deal)\nadmin.site.register(Bid)\n# admin.site.register(BidDoc, BidDocAdmin)\nadmin.site.register(DealInvite)\n\n@admin.register(BidDoc)\nclass BidDocAdmin(admin.ModelAdmin):\n list_display = ('status', 'deal', 'original_doc_name')\n formfield_overrides = {\n # fields.JSONField: {'widget': JSONEditorWidget}, # if django < 3.1\n models.JSONField: {'widget': JSONEditorWidget},\n }\n\n@admin.register(BidDocStats)\nclass BidDocStatsAdmin(admin.ModelAdmin):\n change_form_template = \"bidinterpreter/admin/biddocstats.html\"\n\n search_fields = ['original_doc_name']\n list_display = ['original_doc_name_bid', 'start', 'pytesseract_processing_time']\n\n def get_sentence_predictions(self, text):\n\n sentences = nltk.sent_tokenize(text)\n class_names = ['closing', 'dd', 'deposit', 'none', 'purchase_price']\n\n # load model\n model = load('models/sentence_model.joblib')\n y_hat_proba = model.predict_proba(sentences).round(3).tolist()\n y_hat = model.predict(sentences).tolist()\n\n # return sentences, y_hat -- forgive the complex comprehension it's just easier to write\n return [\n {\n \"sentence\": row[0], \n \"predicted\": class_names[row[2]],\n \"y_hat\": {\n item[0]: item[1] for item in list(zip(class_names, row[1]))\n }\n } for row in zip(sentences, y_hat_proba, y_hat)\n ]\n\n def change_view(self, request, object_id, form_url='', extra_context=None):\n extra_context = extra_context or {}\n \n dt = DocTools(django_settings = settings)\n stat = BidDocStats.objects.get(pk=object_id)\n\n if stat.biddoc == None:\n return super().change_view(\n request, object_id, form_url, extra_context=extra_context,\n )\n \n doc_name = stat.biddoc.original_doc_name.split('/')[-1::][0]\n \n img_filepath = f\"{stat.biddoc.deal_id}/{doc_name}.png\"\n pdf_filepath = f\"{stat.biddoc.deal_id}/{doc_name}.png.processed.pdf\"\n doctext, word_coords = stat.biddoc.text, stat.biddoc.word_coords\n\n print(\"img_filepath\", img_filepath)\n print('pdf_filepath', pdf_filepath)\n\n ## Convert back to decimal for legacy code -- we use JSON type for widget in backend.\n word_coords = word_coords = [{name: Decimal(value) if type(value) == float else value for name, value in row.items()} for row in word_coords[0]['words']]\n\n matches = dt.get_entity_matches(doctext, word_coords) # 4. 
Extract matches /w coordinates\n hilight_coords = dt.image_to_highlighted(matches, pdf_filepath, img_filepath) \n entities = dt.map_entities(pdf_filepath, word_coords, doctext = doctext, vocabulary = word_coords)\n\n extra_context['entities'] = entities\n extra_context['sentences'] = self.get_sentence_predictions(doctext)\n \n return super().change_view(\n request, object_id, form_url, extra_context=extra_context,\n )\n\n def original_doc_name(self, row):\n return row.original_doc_name.split(\"/\")[-1:]\n\n def original_doc_name_bid(self, row):\n return row.original_doc_name.split(\"/\")[-1:]\n\n def pytesseract_processing_time(self, row):\n if row.end and row.start:\n return row.end - row.start\n return \n\n formfield_overrides = {\n # fields.JSONField: {'widget': JSONEditorWidget}, # if django < 3.1\n models.JSONField: {'widget': JSONEditorWidget},\n }\n\n","sub_path":"dev/webapp/project/apps/bidinterpreter/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"458436375","text":"import yaml\nimport os,sys,stat\nimport subprocess\nimport uuid\nfrom flask import Flask\nfrom flask import request\nimport logging\nimport logging.config\nimport wget\nimport tarfile\nimport pwd\nimport glob\nfrom ndimCollector import ndimCollector\nimport getpass\n\ndef readconfig(pathtoconfig):\n with open(pathtoconfig, 'r') as f:\n return yaml.load(f)\n\ndef set_jobdirroot(path):\n global jobdirroot\n jobdirroot = path\n\ndef get_jobdirroot():\n return jobdirroot\n\ndef get_wfdir(wfid):\n return os.path.join(jobdirroot,wfid)\n\ndef get_inputdir(wfid):\n return os.path.join(get_wfdir(wfid),\"inputs\")\n\ndef get_jobdir(wfid,jobid):\n return os.path.join(jobdirroot,wfid,jobid)\n\ndef get_sandboxdir(wfid,jobid):\n jobdir = get_jobdir(wfid,jobid)\n return os.path.join(jobdir,'sandbox')\n\ndef get_exe_name(config):\n return config['executable']['filename']\n\ndef get_args(config):\n return config['arguments']\n\ndef create_dir(path):\n if not os.path.exists(path): os.makedirs(path)\n\ndef save_a_file(directory,name,content):\n fullpath = os.path.join(directory,name)\n fo = open(fullpath, \"wb\")\n fo.write(content);\n fo.close()\n return fullpath\n\ndef download_a_file(url,targetdir):\n log.debug(\"- downloading \"+url+\"...\")\n return wget.download(url,out=targetdir,bar=None)\n\ndef unzip_a_file(tgzpath,targetdir):\n tar = tarfile.open(tgzpath)\n tar.extractall(path=targetdir)\n tar.close()\n\ndef input_file_deploy(confinp,confapp,directory):\n inputlist = confapp['inputs']\n for k in inputlist:\n filename = k['name']\n if confinp['name'] == filename:\n filename=filename+\"_\"+str(confinp['index'])\n if 'content' in confinp:\n save_a_file(directory,filename,confinp['content'])\n elif 'tgzURL' in confinp:\n tgzpath = download_a_file(confinp['tgzURL'],directory)\n unzip_a_file(tgzpath,directory)\n untarred = os.path.join(directory,filename)\n os.chown(untarred,pwd.getpwnam('root').pw_uid,pwd.getpwnam('root').pw_gid)\n elif 'url' in confinp:\n download_a_file(confinp['url'],os.path.join(directory,filename))\n elif 'post_file' in confinp:\n f = request.files[confinp['post_file']]\n log.info('Saving file ' + filename + ' to ' + os.path.join(directory,filename))\n f.save(os.path.join(directory,filename))\n else:\n log.error(\"No content, nor url(s) are defined in job for file: \"+filename+\" !\")\n log.debug(\"- inputfile: \"+filename)\n\ndef 
input_files_link(inputdir,input_names,input_fileindexes,sandboxdir,input_filenames):\n for index, ifilename in enumerate(input_filenames):\n if type(ifilename) is list:\n for subindex,onefilename in enumerate(ifilename):\n ifileininputdir = input_names[index]+\"_\"+str(input_fileindexes[index][subindex])\n os.symlink(os.path.join(inputdir,ifileininputdir),os.path.join(sandboxdir,onefilename))\n else:\n ifileininputdir = ifilename+\"_\"+str(input_fileindexes[index])\n os.symlink(os.path.join(inputdir,ifileininputdir),os.path.join(sandboxdir,ifilename))\n return\n\ndef create_executable(confapp,directory):\n filepath = save_a_file(directory,confapp['executable']['filename'],confapp['executable']['content'])\n st = os.stat(filepath)\n os.chmod(filepath, st.st_mode | stat.S_IEXEC)\n log.debug(\"- executable: \"+confapp['executable']['filename'])\n\ndef download_executable(confapp,directory):\n tgzpath = download_a_file(confapp['executable']['tgzURL'],directory)\n log.debug(\"- executable downloaded: \"+tgzpath)\n\n unzip_a_file(tgzpath,directory)\n log.debug(\"- executable extracted.\")\n\n filepath = os.path.join(directory,confapp['executable']['filename'])\n st = os.stat(filepath)\n os.chmod(filepath, st.st_mode | stat.S_IEXEC)\n username = getpass.getuser()\n os.chown(filepath,pwd.getpwnam(username).pw_uid,pwd.getpwnam(username).pw_gid)\n log.debug(\"- executable: \"+confapp['executable']['filename'])\n\ndef pass_to_executor(wfiddir,jobdir):\n newjobdir = \"E_\"+jobdir[2:]\n os.rename(os.path.join(wfiddir,jobdir),os.path.join(wfiddir,newjobdir))\n log.debug(\"- passed for exec: \"+newjobdir)\n return\n\ndef deploy_input_descr(jobdir,descr):\n save_a_file(jobdir,\"inputs.yaml\",yaml.dump(descr))\n return\n\ndef get_naming_format(input):\n for i in confapp['inputs']:\n if i['name']==input and \"format\" in i:\n return i['format']\n return\ndef gen_input_filenames(input_descr):\n #print input_descr\n ifnames = []\n for index,item in enumerate(input_descr['names']):\n if type(input_descr['indexes']['inp_file_indxs'][index]) is list:\n format=get_naming_format(item)\n if not format:\n format = item+\"_%i\"\n newlist=[]\n for collitem in input_descr['indexes']['inp_file_indxs'][index]:\n newlist.append(str(format % collitem))\n ifnames.append(newlist)\n else:\n collitem = input_descr['indexes']['inp_file_indxs'][index] \n ifnames.append(item)\n #ifnames.append(item+\"_\"+str(collitem))\n return ifnames\n\ndef gen_jobdir(input_descr):\n jobdir_name = \"R_job\"\n for index,item in enumerate(input_descr['names']):\n if type(input_descr['indexes']['inp_file_indxs'][index]) is list:\n jobdir_name+=\"_\"+str(input_descr['indexes']['inp_file_indxs'][index][0])\n else:\n jobdir_name+=\"_\"+str(input_descr['indexes']['inp_file_indxs'][index])\n return jobdir_name\n\ndef deploy(wfid,input_descr,confapp):\n log.info(\"Job deployment starts.\")\n\n wfiddir = get_wfdir(wfid)\n log.debug(\"- jobid: \"+input_descr['jobdir'])\n log.debug(\"- wfid: \"+wfid)\n\n jobdir = os.path.join(wfiddir,input_descr['jobdir'])\n create_dir(jobdir)\n\n log.debug(\"- jobinput files: \"+str(input_descr['files']))\n\n sandboxdir = os.path.join(jobdir,\"sandbox\")\n create_dir(sandboxdir)\n\n if 'content' in confapp['executable']:\n create_executable(confapp,sandboxdir)\n elif 'tgzURL' in confapp['executable']:\n download_executable(confapp,sandboxdir)\n else:\n log.critical(\"Application is not defined. 
No content, no url found!\")\n sys.exit(1)\n\n input_files_link(get_inputdir(wfid),input_descr['names'],input_descr['indexes']['inp_file_indxs'],sandboxdir,input_descr['files'])\n\n deploy_input_descr(jobdir,input_descr)\n\n pass_to_executor(wfiddir,input_descr['jobdir'])\n\n log.info(\"Job deployment finished.\")\n\ndef deploy_jobs(wfid,confapp):\n input_names = nDimColl.getDimNames()\n input_lengths = nDimColl.getDimLengths()\n input_indexes = nDimColl.getHitListHead()\n input_descr = {}\n input_descr['names']=input_names\n input_descr['lengths']=input_lengths\n input_descr['wfid']=wfid\n while (input_indexes):\n input_descr['indexes']=input_indexes\n input_descr['files']=gen_input_filenames(input_descr)\n input_descr['jobdir']=gen_jobdir(input_descr)\n deploy(wfid,input_descr,confapp)\n nDimColl.removeHitListHead()\n input_indexes = nDimColl.getHitListHead()\n return\n\ndef loadconfig(sysconfpath):\n global confsys, app, confapp, routepath, log, nDimColl\n confsys = readconfig(sysconfpath)\n log = logging.config.dictConfig(confsys['logging'])\n log = logging.getLogger(\"flowbster.receiver\")\n create_dir(confsys['jobdirroot'])\n set_jobdirroot(confsys['jobdirroot'])\n confapp = readconfig(confsys['appconfigpath'])\n nDimColl = ndimCollector(len(confapp['inputs']))\n\ndef input_set_default(input_item):\n if 'index' not in input_item.keys():\n input_item['index']=0\n input_item['count']=1\n if 'index_list' not in input_item.keys():\n input_item['index_list']=[input_item['index']]\n input_item['count_list']=[input_item['count']]\n else:\n maxind = len(input_item['count_list'])-1\n for ind in range(maxind,-1,-1):\n if input_item['count_list'][ind]== 1 and len(input_item['count_list'])>1:\n del input_item['count_list'][ind]\n del input_item['index_list'][ind]\n maxind-=1\n #print \"DEFAULT:\",input_item\n\ndef input_is_collector(portname):\n isColl = False\n for inputitem in confapp['inputs']:\n if inputitem['name'] == portname and 'collector' in inputitem and inputitem['collector']:\n isColl = True\n #print \"PORT:\",portname,\"COLL:\",isColl\n return isColl\n\ndef input_register(input_item):\n #print \"INPUT_ITEM:\",input_item\n if not nDimColl.checkDimExists(input_item['name']):\n isColl = input_is_collector(input_item['name'])\n nDimColl.addDim(input_item['name'],input_item['count'],nDimColl.getNumOfDim(),isColl,input_item['count_list'])\n nDimColl.addItem(input_item['name'],input_item['index'],input_item['index_list'])\n return\n\nroutepath = \"/flowbster\"\napp = Flask(__name__)\n \n@app.route(routepath,methods=['GET','POST','HEAD'])\ndef receive():\n if request.method in ['GET','HEAD']:\n return \"OK\"\n log.info(\"New input(s) arrived.\")\n yaml_param = request.args.get('yaml', '')\n rdata = request.files[yaml_param]\n confjob = yaml.load(rdata.read())\n wfid = confjob['wfid']\n wfdir = get_wfdir(wfid)\n create_dir(wfdir)\n inputdir = get_inputdir(wfid)\n create_dir(inputdir)\n nDimColl.deserialise(os.path.join(wfdir,\"nDimColl.yaml\"))\n for input_item in confjob['inputs']:\n input_set_default(input_item)\n input_file_deploy(input_item,confapp,inputdir)\n input_register(input_item)\n deploy_jobs(wfid,confapp)\n nDimColl.serialise(os.path.join(wfdir,\"nDimColl.yaml\"))\n return wfid\n\nif len(sys.argv)==3 and sys.argv[1]==\"-c\":\n loadconfig(sys.argv[2])\nelse:\n loadconfig(os.path.join('/etc','flowbster-config-sys.yaml'))\n\nlog.info(\"App config: \"+confsys['appconfigpath'])\nlog.info(\"Job directory: \"+get_jobdirroot())\nlog.info(\"Listening on port 
\"+str(confsys['listeningport'])+\", under url \\\"\"+routepath+\"\\\"\")\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',port=confsys['listeningport'])\n","sub_path":"devel/flowbster_receiver.py","file_name":"flowbster_receiver.py","file_ext":"py","file_size_in_byte":10205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"55958705","text":"import kitchen\nimport logging\nimport sys\n\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\n\ndef run():\n # brew services start rabbitmq\n \n try:\n dark_kitchen = kitchen.Kitchen()\n except KeyboardInterrupt:\n logging.info('Process interrupted')\n finally:\n logging.info('Successfully shutdown the Kitchen service.')\n\nif __name__ == '__main__':\n run()","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"25674819","text":"from model.GA import Population\nfrom model.MOGA import MOGA\nfrom model.MPGA import MPGA\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\n\n# Read file by URL\nurl = \"http://archive.ics.uci.edu/ml/machine-learning-databases/statlog/australian/australian.dat\"\ndata = pd.read_csv(url, header=None, sep = \" \", )\nFullFeatures = ['Att_'+str(i) for i in range(1, data.shape[1])]\nTargetFeature = ['Target']\ndata.columns = FullFeatures + TargetFeature\nscaler = MinMaxScaler()\nscaler.fit(data[FullFeatures])\ndata[FullFeatures] = scaler.transform(data[FullFeatures])\n\n# 1) GA\n\nconfig_GA = {'population_size': 100, 'offspring_ratio': 0.5,\n'crossover_probability': 0.9,\n'selection_method': {'type': 'roulette_wheel', 'k': 10},\n'crossover_method': {'type': '1point', 'parameters': None},\n'mutation_probability': 0.1, 'mutation_ratio': 0.2,\n'generations_number': 15, 'stop_criterion_depth': 100}\n\n\nconfig_init_GA = {\n 'FullFeatures': FullFeatures,\n 'TargetFeature': TargetFeature,\n 'Table': data,\n 'ScoreType': 'accuracy_score',\n 'InitMethod': 'random_init'\n}\n\npopulation_GA = Population.population_initialization(config_init_GA, new_population = None, population_size = 100)\nsolution_GA, fitness_GA, all_best_fitness_GA = population_GA.generate_populations(config=config_GA, verbose=1)\nprint(fitness_GA)\nprint(len(all_best_fitness_GA))\nprint(solution_GA.FeatureSubset)\n\n\n\n# 2) MPGA\nconfig_MPGA = {'population_size': 100, 'offspring_ratio': 0.5,\n'crossover_probability': 0.9,\n'selection_method': {'type': 'roulette_wheel', 'k': 10},\n'crossover_method': {'type': '1point', 'parameters': None},\n'mutation_probability': 0.1, 'mutation_ratio': 0.2,\n'generations_number': 15, 'stop_criterion_depth': 100}\n\n\nconfig_init_MPGA = {\n 'FullFeatures': FullFeatures,\n 'TargetFeature': TargetFeature,\n 'Table': data,\n 'ScoreType': 'accuracy_score',\n 'InitMethod': 'random_init'\n}\npopulation_MPGA = MPGA.gas_initialization(config_init_MPGA, population_size = 100, gas_size=3)\nbest_solution_MPGA, best_fitness_MPGA, all_best_fitness_MPGA = population_MPGA.generate_gas(config_MPGA)\nprint(best_fitness_MPGA)\nprint(len(all_best_fitness_MPGA))\n\n\n# 3) MOGA\nconfig_init_MOGA = {\n 'FullFeatures': FullFeatures,\n 'TargetFeature': TargetFeature,\n 'Table': data,\n 'ScoreType': ['accuracy_score', 'number_feature_subset'],\n 'InitMethod': 'random_init'\n}\n\nR = MOGA.population_initialization(config_init_MOGA, population_size = 100)\n\n\nconfig_MOGA = {'population_size': 100, 
'offspring_ratio': 0.5,\n'crossover_probability': 0.9,\n'selection_method': {'type': 'roulette_wheel', 'k': 2},\n'crossover_method': {'type': '1point', 'parameters': None},\n'mutation_probability': 0.1, 'mutation_ratio': 0.1,\n'generations_number': 5, 'stop_criterion_depth': 100}\n\npopulation_MOGA = R.generate_populations(config_MOGA)\n\nconfig_GA = {'population_size': 100, 'offspring_ratio': 0.5,\n'crossover_probability': 0.9,\n'selection_method': {'type': 'roulette_wheel', 'k': 10},\n'crossover_method': {'type': '1point', 'parameters': None},\n'mutation_probability': 0.1, 'mutation_ratio': 0.2,\n'generations_number': 15, 'stop_criterion_depth': 100}\n\n\nconfig_init_GA = {\n 'FullFeatures': FullFeatures,\n 'TargetFeature': TargetFeature,\n 'Table': data,\n 'ScoreType': 'accuracy_score',\n 'InitMethod': 'random_init'\n}\n\npopulation_MOGA = Population.population_initialization(config_init_GA, new_population = population_MOGA, population_size = 100)\nsolution_MOGA, fitness_MOGA, all_best_fitness_MOGA = population_MOGA.generate_populations(config=config_GA, verbose=1)\n#print(fitness_GA)\n#print(len(all_best_fitness_GA))\n#print(solution_GA.FeatureSubset)\n\n# plot\nx = [i for i in range(len(all_best_fitness_GA))]\nimport matplotlib.pyplot as plt\nwith plt.style.context('Solarize_Light2'):\n plt.plot(x, all_best_fitness_GA, 'o-', label='GA')\n plt.plot(x, all_best_fitness_MPGA, 'o-', label='MPGA')\n plt.plot(x, all_best_fitness_MOGA, 'o-', label='MOGA')\n plt.title('Evolutionary processes of GA, MPGA, and MOGA (Australia credit data set)')\n plt.xlabel('generations', fontsize=9)\n plt.ylabel('accuracy', fontsize=9)\n\nplt.legend(loc='lower right')\nplt.show()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"120553712","text":"import logging\nfrom decimal import Decimal\n\nfrom bs4 import BeautifulSoup\n\nfrom storescraper.categories import GAMING_CHAIR, COMPUTER_CASE, MOTHERBOARD, \\\n USB_FLASH_DRIVE, CPU_COOLER\nfrom storescraper.product import Product\nfrom storescraper.store import Store\nfrom storescraper.utils import html_to_markdown, session_with_proxy\n\n\nclass IsiBook(Store):\n @classmethod\n def categories(cls):\n return [\n 'Notebook',\n 'Tablet',\n 'Printer',\n 'StereoSystem',\n 'StorageDrive',\n 'ExternalStorageDrive'\n 'UsbFlashDrive',\n 'MemoryCard',\n 'SolidStateDrive',\n 'Projector',\n 'Monitor',\n 'AllInOne',\n 'Mouse',\n 'Keyboard',\n 'Headphones',\n 'Processor',\n 'PowerSupply',\n 'Ram',\n 'VideoCard',\n GAMING_CHAIR,\n COMPUTER_CASE,\n MOTHERBOARD,\n CPU_COOLER,\n ]\n\n @classmethod\n def discover_urls_for_category(cls, category, extra_args=None):\n category_paths = [\n ['pc-y-portatiles/all-in-one', 'AllInOne'],\n ['pc-y-portatiles/notebook', 'Notebook'],\n ['pc-y-portatiles/tablet', 'Tablet'],\n ['audio-video-y-fotografia/audifonos', 'Headphones'],\n ['audio-video-y-fotografia/parlantes', 'StereoSystem'],\n ['partes-y-piezas/tarjeta-de-video', 'VideoCard'],\n ['partes-y-piezas/gabinetes', COMPUTER_CASE],\n ['partes-y-piezas/monitores', 'Monitor'],\n ['partes-y-piezas/placas-madre', MOTHERBOARD],\n ['partes-y-piezas/procesadores', 'Processor'],\n ['partes-y-piezas/fuentes-de-poder', 'PowerSupply'],\n ['partes-y-piezas/memorias-ram', 'Ram'],\n ['partes-y-piezas/refrigeracion', CPU_COOLER],\n ['almacenamiento/disco-duros', 'StorageDrive'],\n ['almacenamiento/pendrives-y-memorias-flash', USB_FLASH_DRIVE],\n 
['impresion/multifuncionales-tinta', 'Printer'],\n ['impresion/impresoras-laser', 'Printer'],\n ['accesorios/sillas', GAMING_CHAIR],\n ['accesorios/mouse-teclado-y-mousepad', 'Mouse'],\n ['gamers', 'Motherboard'],\n ]\n\n session = session_with_proxy(extra_args)\n base_url = 'https://www.isibook.cl/{}.html?product_list_limit=64&p={}'\n product_urls = []\n\n for url_extension, local_category in category_paths:\n if category != local_category:\n continue\n\n page = 1\n local_urls = []\n done = False\n\n while not done:\n if page > 10:\n raise Exception('Page overflow')\n\n url = base_url.format(url_extension, page)\n print(url)\n res = session.get(url)\n if res.url != url:\n raise Exception('URL mismatch: ' + url + ' ' + res.url)\n soup = BeautifulSoup(res.text, 'html.parser')\n product_containers = soup.find('ol', 'products')\n\n if not product_containers:\n if page == 1:\n logging.warning('Empty path: ' + url)\n break\n\n products = product_containers.findAll('li', 'item')\n\n for product in products:\n product_url = product.find('a')['href']\n if product_url in local_urls:\n done = True\n break\n local_urls.append(product_url)\n\n page += 1\n\n product_urls.extend(local_urls)\n\n return product_urls\n\n @classmethod\n def products_for_url(cls, url, category=None, extra_args=None):\n print(url)\n session = session_with_proxy(extra_args)\n response = session.get(url)\n\n if response.status_code in [410, 404]:\n return []\n\n soup = BeautifulSoup(response.text, 'html.parser')\n\n name = soup.find('h1', 'page-title').text.strip()\n sku = soup.find('span', 'sku').text.split(':')[1].strip()\n stock = 0\n\n if soup.find('button', {'id': 'product-addtocart-button'}):\n stock = -1\n\n offer_price = Decimal(\n soup.find('span', 'special-price').find('span', 'price')\n .text.replace('$', '').replace('.', ''))\n normal_price = Decimal(\n soup.find('span', 'price-credit-card')\n .text.replace('$', '').replace('.', ''))\n\n picture_urls = [soup.find('div', 'preloaded-image').find('img')['src']]\n\n description = html_to_markdown(\n str(soup.find('div', 'description')))\n\n p = Product(\n name,\n cls.__name__,\n category,\n url,\n url,\n sku,\n stock,\n normal_price,\n offer_price,\n 'CLP',\n sku=sku,\n picture_urls=picture_urls,\n description=description,\n )\n\n return [p]\n","sub_path":"storescraper/stores/isi_book.py","file_name":"isi_book.py","file_ext":"py","file_size_in_byte":5123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"509713215","text":"'''\n로직\n레이저와 만날 때, bar가 끝날 때, bar의 개수는 하나씩 추가된다.\ntime complexity : O(n)\n'''\nfrom collections import deque\ndef solution(arrangement):\n answer = 0\n stack = deque()\n for i in range(len(arrangement)):\n if arrangement[i] == \"(\":\n stack.append(i)\n else:\n element = stack.pop()\n if element == i -1:\n answer += len(stack)\n else:\n answer += 1\n \n return answer\n\n\n '''\n규칙\n 1. ()으로 바로 닫히면 레이저고 아니면 막대기 bar임\n 2. 즉 (로 생긴 막대기들이 ()에 의해 잘려서 생성되는 것. \n 3. 레이저일 경우 스택에 남아있는 ( 개수만큼 막대기 개수가 늘어나고\n 4. bar가 끝나는 경우 하나의 막대기만 생성된다.\n로직\n 1. (이면 스택에 추가하기 \n 2. )이면 레이저인지 막대기인지 확인하기 - 레이저일 경우 바로 그전 배치가 (일 것이다.\n 0.공통적으로 스택에서 하나를 팝함\n 1. 레이저일 경우 스택에 남아있는 ( 수만큼 더해준다.\n 2. 바일 경우 1을 더해준다. 
\n \n'''\ndef solution(arrangement):\n answer = 0\n opened_bracket_count = 0\n for i, bracket in enumerate(arrangement):\n if bracket == \"(\":\n opened_bracket_count += 1\n else:\n opened_bracket_count -= 1\n if arrangement[i-1] == \"(\":\n answer += opened_bracket_count\n else:\n answer += 1\n return answer","sub_path":"week2/lv2_스택_쇠막대기.py","file_name":"lv2_스택_쇠막대기.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"352531728","text":"\n#https://projecteuler.net/problem=2\n#Even Fibonacci numbers\nimport time\n\ndef fibb_recursive(n):\n if n == 0 :\n return 0\n elif n == 1 :\n return 1\n else:\n return fibb_recursive(n - 1) + fibb_recursive(n - 2)\n\n\nt1 = time.clock()\nfibbonaci_seq = (fibb_recursive(i) for i in range(0,35) if fibb_recursive(i) < 4000000)\nprint('4백만보다 작은 피보나치 시퀀스'.format(fibbonaci_seq))\nsum_of_even_fibb = 0\nfor i in fibbonaci_seq:\n if i % 2 == 0:\n sum_of_even_fibb += i\n\nprint('4백만보다 작은 피보나치 시퀀스중 짝수의 합은 {}'.format(sum_of_even_fibb))\nt2 = time.clock()\ntotal_time = t2 - t1\nprint('총 소요된 시간: {:.6f} 초'.format(total_time))\n","sub_path":"MyProject/helloPython/project_ruler/answer-2.py","file_name":"answer-2.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"579873738","text":"import json\nfrom .containers import *\nfrom globals import *\n\nmake_file_safe('resources/rss.json', content='[]\\n')\n\n\ndef load_rss() -> list:\n with open('resources/rss.json', 'r') as f:\n feeds = json.load(f)\n formatted_feeds = []\n for feed in feeds:\n if feed:\n _type = feed['type']\n if _type == 'TwitterFeed':\n formatted_feeds.append(TwitterFeed(**feed))\n elif _type == 'TwitchFeed':\n formatted_feeds.append(TwitchFeed(**feed))\n elif _type == 'YouTubeFeed':\n formatted_feeds.append(YouTubeFeed(**feed))\n\n return formatted_feeds\n\n\ndef save_rss(feeds: list):\n \"\"\"\n :type feeds: list[RssFeed]\n \"\"\"\n with open('resources/rss.json', 'w') as f:\n json.dump([x.as_dict() for x in feeds], f)\n","sub_path":"server/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"551916779","text":"from django.core import mail\nfrom django.test import TestCase\n\nclass SubscribePostValid(TestCase):\n\n def setUp(self):\n data = dict(name='Ivan Barabach', cpf='12345678900',\n email='ivan.barabach@gmail.com', phone='47-99971-4042')\n self.client.post('/inscricao/', data)\n self.email = mail.outbox[0]\n\n def test_subscription_email_subject(self):\n\n expect = 'Confirmacao de inscricao'\n\n self.assertEqual(expect, self.email.subject)\n\n def test_subscription_email_from(self):\n\n expect = 'contato@eventex.com.br'\n\n self.assertEqual(expect,self.email.from_email)\n\n def test_subscription_email_to(self):\n\n expect = ['contato@eventex.com.br','ivan.barabach@gmail.com']\n\n self.assertEqual(expect,self.email.to)\n\n\n def test_subscription_email_body(self):\n\n contents = [\n 'Ivan Barabach',\n '12345678900',\n 'ivan.barabach@gmail.com',\n '47-99971-4042'\n ]\n\n for content in contents:\n with self.subTest():\n 
self.assertIn(content,self.email.body)\n\n","sub_path":"eventex/subscriptions/tests/test_mail_subscribe.py","file_name":"test_mail_subscribe.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"224745207","text":"import os, openpyxl\n\ndef makeDir(filedir):\n for dir in os.listdir(filedir):\n if dir.endswith('.xlsm') or dir.endswith('.xlsx'):\n\n fullpath = os.path.join(filedir, dir) # fullpath come with .xlxm\n filename = os.path.splitext(fullpath)[0] # filename without .xlxm\n outDir = os.path.splitext(dir)[0]\n\n if not os.path.exists(outDir):\n os.makedirs(outDir)\n txtcon(fullpath,outDir)\n\n\ndef txtcon(fullpath,outDir):\n wb = openpyxl.load_workbook(fullpath)\n ws = wb['Display']\n maxrow = ws.max_row + 1\n maxcol = ws.max_column + 1\n\n list = []\n list2 = []\n #k is the number of Drawing in each FCS\n k=0\n\n for col in range(1, maxcol):\n\n for row in range(1, maxrow):\n a = ws.cell(row=row, column=col).value\n if a is not None:\n list.append(a)\n\n #consider only those collums with contents\n if len(list):\n # get the 1st element in the list as the name of the txt file\n i_str = str(list[0])\n # Create the new file name in the file path\n location = str(os.path.join(outDir, i_str))\n file_name = location + '.txt'\n\n # since we take the data from the 2nd element, 1st element is the tile of txt file\n # so we create a new list to store the data\n for i in range(0, len(list) - 1):\n if len(list) :\n b = list[i + 1]\n list2.append(b)\n\n # add a \"return\" ASCII to the each element of list2 for easy reading\n c = \"\\n\".join(str(i) for i in list2)\n #'join'function will return a string, so c will be a string now.\n # c.strip()will consider those none empty string\n if c.strip():\n with open(file_name, 'w') as f:\n f.write(str(c))\n #for each write to txt, k increase by 1\n k=k+1\n list2.clear()\n list.clear()\n\n print('for %s total %d file processed' % (outDir, k))\n\ndef main():\n filepath='target'\n makeDir(filepath)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"Excl2txt.py","file_name":"Excl2txt.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"254493941","text":"import pandas as pd\nimport os\nimport csv\n\n\"\"\"\nDATA CREATION\n\"\"\"\n#Reading in content (data) of all the txt files\npath = os.getcwd() + r'\\dataset'\nfile_content = []\nfor file in os.listdir(path):\n try:\n f = open(path + '\\\\' + file, 'r', encoding='utf8')\n file_content.append([os.path.splitext(file)[0], f.read()])\n except Exception:\n print(\"Error\")\n\n#Putting the data above into a csv file\ncol_names = ['treatyNum', 'content']\nwith open('dataset.csv', 'w', newline='', encoding='utf8',) as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(col_names)\n for i in file_content:\n writer.writerow(i)\n csv_file.close()\n\n\"\"\"\nCLEANING DATA\n\"\"\"\n#This part is here to resolve encoding problem in the print() statement (don't delete)\nimport sys\nsys.stdout.reconfigure(encoding='utf-8')\n\n#Loading the csv file data into panda dataframe\nfile_path = os.path.join(os.path.dirname(__file__), 'dataset.csv')\ndf = pd.read_csv(file_path)\n\n#Uncomment to see the data in this dataframe \n# print (df.head(3))\n\n#Importing appropriate libraries for data cleaning\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\n#uncomment these three lines for 
first execution of the code\n# nltk.download('punkt')\n# nltk.download('wordnet')\n# nltk.download('stopwords')\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_selection import chi2\nimport numpy as np\n\n#Using the df_copy instead of the orginal dataframe\ndf_copy = df.copy(deep=True)\n\n#Displaying the content the second treaty in the dataframe\n#Uncomment to see the data before cleaning\n# print('Before cleaning:')\n# print(df_copy.head(3))\n# print(df_copy.loc[1]['content'])\n\n# Panda dataframes (2d) can be broken down into series (1d)\n# Cleaning a panda 1d array (series)\n# Cleaning \\r and \\n, quotation marks, large spaces, possessive words\ndf_copy['cleanedContent'] = df_copy['content'].str.replace(\"\\r\", \" \")\ndf_copy['cleanedContent'] = df_copy['cleanedContent'].str.replace(\"\\n\", \" \")\ndf_copy['cleanedContent'] = df_copy['cleanedContent'].str.replace(\" \", \" \")\ndf_copy['cleanedContent'] = df_copy['cleanedContent'].str.replace('\"', '')\ndf_copy['cleanedContent'] = df_copy['cleanedContent'].str.replace(\"'s\",\"\")\n\n#Changing all texts to lower case\ndf_copy['cleanedContent'] = df_copy['cleanedContent'].str.lower()\n\n#Cleaning punctuation\npunctuations = list(\"?:!.,;~\u000E\")\n\nfor punctuation in punctuations:\n df_copy['cleanedContent'] = df_copy['cleanedContent'] .str.replace(punctuation, '')\n\n#Initializing a lemmatizer object\nwordnet_lemmatizer = WordNetLemmatizer()\n\n#Lemmtizing\nnrows = len(df_copy)\nlemmatized_text_list = []\n\nfor row in range(0, nrows):\n # Create an empty list containing lemmatized words\n lemmatized_list = []\n\n # Save the text and its words into an object\n text = df_copy.loc[row]['cleanedContent']\n text_words = text.split(\" \")\n\n # Iterate through every word to lemmatize\n for word in text_words:\n lemmatized_list.append(wordnet_lemmatizer.lemmatize(word, pos=\"v\"))\n\n # Join each word back together seperated by a space\n lemmatized_text = \" \".join(lemmatized_list)\n \n # Append to the list containing the texts\n lemmatized_text_list.append(lemmatized_text)\n\ndf['cleanedContent'] = lemmatized_text_list\n\n#Stop words removing\nstop_words = list(stopwords.words('english'))\n\nfor stop_word in stop_words:\n regex_stopword = r\"\\b\" + stop_word + r\"\\b\"\n df_copy['cleanedContent'] = df_copy['cleanedContent'].str.replace(regex_stopword, '')\n\n#Uncomment to see the data after cleaning\n# print('After cleaning:')\n# print(df_copy.head(3))\n# print(df_copy.loc[1]['cleanedContent'])\n\n\"\"\"\nDATA ANALYSIS\n\"\"\"\n#TBD\n\n\n\"\"\"\nFEATURE ENGINEERING\n\"\"\"\n#IN PROGRESS...\n\n\n\n\"\"\"\nMODELING\n\"\"\"\n#IN PROGRESS...","sub_path":"phase 2/phase2.py","file_name":"phase2.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"284708835","text":"\"\"\"\n\tPlease feel free to use the code without citing or crediting the author(s) mentioned below. Cheers to science :-)\n\tI'd be happy to hear from you about how to improve this code, and as to how the code may have been useful to you.\n\t\n\tAuthor: Vipin P. 
Veetil\n\tContact: vipin.veetil@gmail.com\n\t\n\tPaper title: Network Origins of Coordination\n\tPaper URL: http://papers.ssrn.com/sol3/papers.cfm?abstract_id=2621852\n\t\n\tLanguage: Python\n\t\n\tModule name: agents\n\"\"\"\n\n\nfrom __future__ import division\nimport random\n\n\nclass Agent(object):\n\tdef __initi__(self):\n\t\tself.number_of_states = 0\n\t\t\"\"\" number of possible states \"\"\"\n\t\tself.state = 0\n\t\t\"\"\" present state \"\"\"\n\t\tself.frequency_neighbors_states = [0] * self.number_of_states\n\t\t\"\"\" the number of neighbors that have each of the possible states \"\"\"\n\n\tdef update_neighbors_states(self, neighbors_states):\n\t\t\"\"\" record the states of the neighbors \"\"\"\n\t\tself.frequency_neighbors_states = [0] * self.number_of_states\n\t\tfor state in neighbors_states:\n\t\t\tself.frequency_neighbors_states[state] += 1\n\n\tdef update_state(self):\n\t\t\"\"\" update one's own state to the state that is most frequent among neighbors \"\"\"\n\t\tm = max(self.frequency_neighbors_states)\n\t\tmax_states = [state for state, x in enumerate(self.frequency_neighbors_states) if x == m]\n\t\t\"\"\" make a list of the states that have highest frequency, it is possible more than one state has highest frequency \"\"\"\n\t\tself.state = random.choice(max_states)\n","sub_path":"agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"351573704","text":"from CMGTools.RootTools.fwlite.Config import CFG\nfrom CMGTools.HToZZTo4Leptons.tools.EfficiencyCorrector import EfficiencyCorrector\n\n\neff2011 = CFG(\n name='eff',\n muonFile = 'data/eff_mu11.root',\n muonHisto = 'eff2011',\n# muonHisto = 'TH2D_ALL_2012',\n\n eleFile = 'data/eff_ele.root', \n eleHisto = 'heff'\n )\n\n\neff2012 = CFG(\n name='eff',\n muonFile = 'data/eff_mu12.root',\n muonHisto = 'TH2D_ALL_2012',\n eleFile = 'data/eff_ele12.root', \n eleHisto = 'h_electron_scale_factor_RECO_ID_ISO_SIP'\n )\n\n\n","sub_path":"CMGTools/HToZZTo4Leptons/python/setup/Efficiencies.py","file_name":"Efficiencies.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"79439635","text":"import pandas as pd\n\n\ndef word_count(df):\n # by publication\n df['word_count'] = df['content'].apply(lambda x: len(str(x).split(\" \")))\n wc = df.groupby('publication').word_count\n pub_counts = wc.describe().to_dict()\n pub_counts[\"pub_wc\"] = wc.sum().to_dict()\n # just keep article count, mean word count, and total word count\n keys_to_delete = ['sts','min','max','25%','50%','75%']\n for k in keys_to_delete:\n if k in pub_counts:\n del pub_counts[k]\n reorganized = {}\n for i in pub_counts['count'].keys():\n reorganized[i] = {'article_count':pub_counts['count'][i],\n 'mean_word_count':pub_counts['mean'][i],\n 'pub_word_count':pub_counts['pub_wc'][i]}\n # add total word count\n total_wc = df.word_count\n corpus = {'article_count':total_wc.describe().to_dict()['count'],\n 'mean_word_count':total_wc.describe().to_dict()['mean'],\n 'pub_word_count':total_wc.sum()}\n reorganized.update({'corpus':corpus})\n\n # convert to dataframe\n data = pd.DataFrame(reorganized).T\n #data.index.name = 'publication'\n\n return data\n\n","sub_path":"word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} 
+{"seq_id":"468973995","text":"import threading\nimport time\n\nclass MyThread(threading.Thread):\n def __init__(self):\n super(MyThread, self).__init__()\n self.name = self.name.split('-')[1]\n # print(self.name)\n\n def run(self):\n super(MyThread, self).run()\n print('start on threading', self.name)\n time.sleep(5)\n print('你好毒:', self.name)\n\nif __name__ == '__main__':\n print('开启主线程')\n ttr = []\n for i in range(10):\n t = MyThread()\n ttr.append(t)\n for t in ttr:\n t.setDaemon(True)\n t.start()\n t.setName('呵呵你一脸--{}'.format(i))\n print('启动线程', t.getName())\n for t in ttr:\n t.join()\n\n print('------------end-------------')","sub_path":"python-面向对象/线程进程/继承.py","file_name":"继承.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"303722848","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import naive_bayes\nfrom sklearn import model_selection\nfrom sklearn import feature_extraction\nfrom sklearn import metrics\n\nfrom sklearn.cluster import KMeans, MiniBatchKMeans\nfrom time import time\nimport pandas as pd\n\n\ndef initialize_settings():\n pd.set_option('display.max_rows', 20)\n pd.set_option('display.max_columns', None)\n\n\ndef load_data(file_name):\n data = pd.read_csv(file_name\n , encoding=\"ANSI\"\n , header=0\n , names=['target', 'id', 'date', 'flag', 'user', 'text']\n )\n # print(data)\n return data\n\n\ndef preprocess_data(data):\n features = data.iloc[:, 5] \\\n + data.iloc[:, 4] + data.iloc[:, 2]\n labels = data['target']\n # print(features)\n features_train, features_test, labels_train, labels_test = model_selection.train_test_split(\n features, labels\n , test_size=0.2\n # , random_state=42\n , shuffle=True\n , stratify=None\n )\n return features, labels, features_train, labels_train, features_test, labels_test\n\n\ndef vectorize_features(features_train, features_test):\n vectorizer = feature_extraction.text.TfidfVectorizer(\n encoding='ANSI'\n , decode_error='strict'\n , strip_accents='ascii'\n , lowercase=True\n , preprocessor=None\n , tokenizer=None\n , analyzer='word'\n , stop_words='english'\n , ngram_range=(1, 2)\n , max_df=1.0\n , min_df=1\n , max_features=None\n , vocabulary=None\n , binary=False\n , norm='l2'\n , use_idf=True\n , smooth_idf=True\n , sublinear_tf=False)\n\n vectorized_features_train = vectorizer.fit_transform(features_train)\n vectorized_features_test = vectorizer.transform(features_test)\n\n # vocabulary = vectorizer.get_feature_names()\n # print(pd.DataFrame(data=vectorized_features_train.toarray(), columns=vocabulary))\n # vocab_values_sorted = {k: v for k, v in\n # sorted(dict(zip(vocabulary, vectorized_features_train.toarray().mean(axis=0))).items(),\n # key=lambda item: item[1], reverse=True)}\n # print(f\"Average of tf-idf scores across documents:\\n{vocab_values_sorted}\")\n\n return vectorizer, vectorized_features_train, vectorized_features_test\n\n\ndef apply_model(vectorizer, vectorized_features_train, labels_train, vectorized_features_test, labels_test):\n model = KMeans(n_clusters=10, init='k-means++', max_iter=10, n_init=1, verbose=1)\n\n print(\"Clustering sparse data with %s\" % model)\n t0 = time()\n model.fit(vectorized_features_train)\n print(\"done in %0.3fs\" % (time() - t0))\n print()\n\n print(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels, model.labels_))\n print(\"Completeness: %0.3f\" % metrics.completeness_score(labels, model.labels_))\n print(\"V-measure: %0.3f\" % metrics.v_measure_score(labels, 
model.labels_))\n print(\"Adjusted Rand-Index: %.3f\"\n % metrics.adjusted_rand_score(labels, model.labels_))\n print(\"Silhouette Coefficient: %0.3f\"\n % metrics.silhouette_score(vectorized_features_train, model.labels_, sample_size=1000))\n\n print()\n\n\n print(\"Top terms per cluster:\")\n\n order_centroids = model.cluster_centers_.argsort()[:, ::-1]\n\n terms = vectorizer.get_feature_names()\n for i in range(10):\n print(\"Cluster %d:\" % i, end='')\n for ind in order_centroids[i, :10]:\n print(' %s' % terms[ind], end='')\n print()\n\nif __name__ == \"__main__\":\n initialize_settings()\n data = load_data('training.1600000.processed.noemoticon.csv')\n features, labels, features_train, labels_train, features_test, labels_test = preprocess_data(data)\n vectorizer, vectorized_features_train, vectorized_features_test = vectorize_features(features_train, features_test)\n apply_model(vectorizer, vectorized_features_train, labels_train, vectorized_features_test, labels_test)\n","sub_path":"python/ml/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"527510559","text":"#objects\nfrom bot import Bot\nfrom superbot import SuperBot\nfrom flyingbot import FlyingBot\n\n\n#define\nbeep = Bot(\"Beep\", 32, 10, 10)\nsuperbeep = SuperBot(\"Super Beep\")\nflyingbeep = FlyingBot(\"Flying Beep\")\n\n\n#display\nbeep.display_name()\nbeep.display_age()\nbeep.display_energy()\nbeep.display_shield()\nbeep.display_summary()\nprint(\"\\n\")\nsuperbeep.display_name()\nsuperbeep.display_age()\nsuperbeep.display_energy()\nsuperbeep.display_shield()\nsuperbeep.display_summary()\nsuperbeep.get_super_power_level()\nprint(\"\\n\")\nflyingbeep.display_name()\nflyingbeep.display_age()\nflyingbeep.display_energy()\nflyingbeep.display_shield()\nflyingbeep.display_summary()\nflyingbeep.get_hover_distance()\nprint(\"\\n\")\n\n#set\nsuperbeep.set_super_power_level(5)\nflyingbeep.set_hover_distance(5)\n\n\n#decrement\nbeep.decrement_energy(5)\nbeep.decrement_shield(5)\n\n#increment\nbeep.increment_age()\nbeep.increment_energy(15)\nbeep.increment_shield(150)\nbeep.set_name(\"Bob\")\n\n#return\nprint(beep)\nprint(beep.get_age())\nprint(beep.get_energy())\nprint(beep.get_name())\nprint(beep.get_shield())\n","sub_path":"2-guis/1-classes-and-objects/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"30832977","text":"import cv2\nimport image_feature_tensorflow as tfeature\nfrom scipy import spatial\nimport numpy as np\n\n'''\nWhat are the dominant colors of an image?\nJust count the number of pixels in the hue space belonging to each color.\n'''\ndef extract_color_distribution(img):\n hsv_img=cv2.cvtColor(img, cv2.COLOR_BGR2HSV) #convert irgbto hsv\n h,s,v = cv2.split(hsv_img)\n #creating a dictionary of 12 colors sampled from the hue space\n color_names=['Red','Orange','Yellow','Yellow-Green','Green','Aqua',\n 'Cyan','Azure','Blue','Violet','Magenta,','Rose']\n #quantize each pixel from a value in the range (180) to a value between 0 and 11\n h_quant=np.floor(np.divide(h,15))\n #compute distribution over these 12 values\n color_values=np.histogram(h_quant,12)[0]\n color_values=color_values/float(h.shape[0]*h.shape[1])\n #assign a label to each bin of the color distribution\n color_dict={color_names[i]:color_values[i] for i in range(len(color_values))}\n return 
color_dict\n'''\nWhat are the dominant objects of an image?\nuse Imagenet classifier to get probabilities of 1000 objects\n'''\ndef extract_objects(img):\n return tfeature.classify(img)\n\n'''\nHow distant are 2 images? Use cosine distance to infer similarity between 2 images\n'''\ndef compare_images(dic1,dic2):\n '''\n dic1,dic2: dictionaries with each element being a feature vector from a n image, indexed with the feature name\n '''\n return spatial.distance.cosine(list(dic1.values()),list(dic2.values()))\n","sub_path":"get_features.py","file_name":"get_features.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"59862850","text":"'''\n###Sample Input 1:\n8\nGAAATAAA\n\n###Sample Output 1:\n5\n'''\n\nn = int(input())\ns = input()\nA = []\nfor c in s:\n A.append(\"ACTG\".find(c))\ncount = [0]* 4\nfor x in A:\n count[x] += 1\n\neach = n // 4\nans = n\nif all(x <= each for x in count): print(0)\nelse:\n j = 0\n ans = n\n for i, x in enumerate(s):\n while j < len(s) and any(x > each for x in count):\n count[A[j]] -= 1\n j += 1\n if all(x <= each for x in count):\n ans = min(ans, j - i)\n count[A[i]] += 1\n print(ans)\n\n","sub_path":"codechef/CODEX001.py","file_name":"CODEX001.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"169927921","text":"\nimport sys, heapq\n\ninput = lambda : sys.stdin.readline().rstrip()\n\nmin_heap = [] # 양수\nmax_heap = [] # 음수\n\n\nfor _ in range(int(input())):\n x = int(input())\n if x > 0: # 양수\n heapq.heappush(min_heap, x)\n elif x < 0: # 음수\n heapq.heappush(max_heap, x)\n else:\n if len(min_heap):\n if len(max_heap) == 0 or min_heap[0] < max_heap[0]:\n print(heapq.heappop(min_heap))\n else:\n print(-heapq.heappop(max_heap))\n else:\n print(-heapq.heappush(max_heap) if len(max_heap) else 0)","sub_path":"BaekJoon_Levels/Silver_Ⅰ/Boj11286_절댓값힙/Boj11286_절댓값힙_2.py","file_name":"Boj11286_절댓값힙_2.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"442989343","text":"import gi\nimport os\nimport sqlite3\nimport subprocess\n\ntry:\n gi.require_version('Gtk', '3.0')\n from gi.repository import Gtk\nexcept:\n pass\n \nclass Main:\n\n\n def __init__(self):\n self.builder = Gtk.Builder()\n self.builder.add_from_file(os.getcwd() + \"\\Glade\\Maingui.glade\")\n self.window = self.builder.get_object(\"Mainwindow\")\n self.window.set_position(Gtk.WindowPosition.CENTER)\n self.window.show()\n self.builder.connect_signals(self)\n self.tbox = self.builder.get_object(\"tbox\")\n self.msave = self.builder.get_object(\"mnusave\")\n self.mauto = self.builder.get_object(\"mnuauto\")\n self.tsave = self.builder.get_object(\"savefile\")\n self.tauto = self.builder.get_object(\"autoclean\")\n self.mopen = self.builder.get_object(\"mnuopen\")\n self.topen = self.builder.get_object(\"openfile\")\n self.msave.set_sensitive(False)\n self.mauto.set_sensitive(False)\n self.tauto.set_sensitive(False)\n self.tsave.set_sensitive(False)\n\n self.filename=\"\"\n\n\n\n def on_Destroy(self, *args):\n Gtk.main_quit(*args)\n\n def openfile_clicked_cb(self, *args):\n self.dialog = Gtk.FileChooserDialog(\"Please choose a file\",self.window,\n Gtk.FileChooserAction.OPEN,\n (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,\n Gtk.STOCK_OPEN, Gtk.ResponseType.OK))\n\n self.add_fi(self.dialog)\n\n response = self.dialog.run()\n 
if response == Gtk.ResponseType.OK:\n print(\"Open clicked\")\n print(\"File selected: \" + self.dialog.get_filename())\n file = self.dialog.get_filename()\n self.filename=file\n fd = open( file , \"r\" )\n str = fd.read()\n buf = self.tbox.get_buffer()\n buf.set_text(str)\n self.tbox.set_buffer(buf)\n fd.close()\n self.mopen.set_sensitive(False)\n self.topen.set_sensitive(False)\n self.msave.set_sensitive(True)\n self.mauto.set_sensitive(True)\n self.tauto.set_sensitive(True)\n self.tsave.set_sensitive(True)\n elif response == Gtk.ResponseType.CANCEL:\n print(\"Cancel clicked\")\n\n\n self.dialog.destroy()\n\n def openclicked(self):\n None\n\n\n def add_fi(self,dialog):\n filter_py = Gtk.FileFilter()\n filter_py.set_name(\"C Files\")\n filter_py.add_pattern(\"*.c\")\n dialog.add_filter(filter_py)\n\n\n def code_cleaner(self, *args):\n try:\n temp_file = self.filename[0:(len(self.filename)-2)] + \"_temp.c\"\t#Adding the \"_temp.c\" to filename\n main_file_ptr = open(self.filename,\"r\")\t#Creating file pointer for the main code file\n temp_file_ptr = open(temp_file,\"w\")\t#Creating file pointer for temp file\n count=0\t\t\t\t#variable to keep count of the number of \"{\" or \"}\"\n count_close = 0\n count_open = 0\n for line in main_file_ptr:\n spaces = '\\t'*count\t\t#Giving count number of tabs from next line onwards\n if \"{\" in line:\n count+=1\t#incrementing count whenever \"{\"\n #print count\n count_open +=1\n if \"}\" in line:\n count-=1\t\t#Decrementing count whenever \"}\"\n spaces = '\\t'*count\n count_close +=1\n #print count\n temp_file_ptr.write(spaces)\t#First writing spaces into every line\n temp_file_ptr.write(line)\t#Then copy contents of every line from main code\n\n\n #print count_close, count_open\n temp_file_ptr.close()\n main_file_ptr.close()\n readfile = open( temp_file , \"r\" )\n str = readfile.read()\n buf = self.tbox.get_buffer()\n buf.set_text(str)\n self.tbox.set_buffer(buf)\n readfile.close()\n\n if((count_close - count_open) > 0):\n print(\"Looks like you have more number of \\\"}\\\" somewhere\")\n if((count_close - count_open) < 0):\n print(\"Looks like you have more number of \\\"{\\\" somewhere\")\n return count\n except IOError:\n print(\"Sorry, but no such file exists in your current working directory\")\n\n def code_execute(exec_filename):\n exec_file = \"./\"+exec_filename\n subprocess.call([exec_file])\n\n\n def save_file(self,*args):\n try:\n temp_file=self.filename[0:(len(self.filename)-2)] + \"_temp.c\"\n os.remove(self.filename)\t\t\t#Deleting main code file\n os.rename(temp_file,self.filename)\t\t#Renaming the temp file with main code file\n\n str = \"\"\n buf = self.tbox.get_buffer()\n buf.set_text(str)\n self.tbox.set_buffer(buf)\n\n self.mopen.set_sensitive(True)\n self.topen.set_sensitive(True)\n self.msave.set_sensitive(False)\n self.mauto.set_sensitive(False)\n self.tauto.set_sensitive(False)\n self.tsave.set_sensitive(False)\n\n\n dialog = Gtk.MessageDialog(self.window, 0, Gtk.MessageType.INFO,\n Gtk.ButtonsType.OK, \"File Saved\")\n dialog.format_secondary_text(\"\")\n dialog.run()\n dialog.destroy()\n except IOError:\n dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.ERROR,\n Gtk.ButtonsType.CANCEL, \"Cannot Save File\")\n dialog.format_secondary_text(\n \"Some error Occured.\")\n dialog.run()\n dialog.destroy()\n\n\n \nif __name__ == '__main__':\n Window = Main()\n 
Gtk.main()\n","sub_path":"GUI/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":5710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"574167300","text":"from django.contrib import admin\n\nfrom .models import SynapseBank\n\n\nclass SynapseBankAdmin(admin.ModelAdmin):\n list_display = (\n 'bank_id',\n 'account_class',\n 'account_number_string',\n 'account_type',\n 'balance',\n 'bank_name',\n 'email',\n 'synapse_account'\n )\n\nadmin.site.register(SynapseBank, SynapseBankAdmin)\n","sub_path":"debitize/debitize/synapse_banks/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"49232831","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\n\r\nclass Net(nn.Module):\r\n def __init__(self, drop=0.025, norm='bn', num_groups=4):\r\n \"\"\"\r\n Input Param -->\r\n . norm:- (normalization technique to be used)\r\n 'bn': Batch Normalization\r\n 'gn': Group Normalization\r\n 'ln': Layer Normalization\r\n \"\"\"\r\n\r\n super(Net, self).__init__()\r\n #drop = 0.025 # droput value\r\n\r\n def normalize(x, w, h):\r\n if norm=='bn':\r\n return nn.BatchNorm2d(x)\r\n elif norm=='ln':\r\n return nn.GroupNorm(1, x)\r\n elif norm=='gn':\r\n return nn.GroupNorm(num_groups,x) # 4 layers to be grouped\r\n else:\r\n return None\r\n\r\n # Input Block\r\n self.convblock1 = nn.Sequential(\r\n nn.Conv2d(in_channels=1, out_channels=8, kernel_size=(3, 3), padding=0, bias=False),\r\n normalize(8, 26, 26),\r\n nn.ReLU(),\r\n nn.Dropout(drop)\r\n ) # output_size = 26 RF: 2\r\n\r\n # CONVOLUTION BLOCK 1\r\n self.convblock2 = nn.Sequential(\r\n nn.Conv2d(in_channels=8, out_channels=16, kernel_size=(3, 3), padding=0, bias=False),\r\n normalize(16, 24, 24), \r\n nn.ReLU(),\r\n nn.Dropout(drop)\r\n ) # output_size = 24 RF: 5\r\n\r\n self.pool1 = nn.MaxPool2d(2, 2) # output_size = 12 RF: 6\r\n\r\n # TRANSITION BLOCK 1\r\n self.trans1 = nn.Sequential(\r\n nn.Conv2d(in_channels=16, out_channels=8, kernel_size=(1, 1), padding=0, bias=False),\r\n normalize(8, 12, 12),\r\n nn.ReLU()\r\n ) # output_size = 12 RF: 6\r\n\r\n # CONVOLUTION BLOCK 2\r\n self.convblock3 = nn.Sequential(\r\n nn.Conv2d(in_channels=8, out_channels=12, kernel_size=(3, 3), padding=0, bias=False), # output_size = 10 RF: 10\r\n normalize(12, 10 ,10),\r\n nn.ReLU(),\r\n nn.Dropout(drop),\r\n nn.Conv2d(in_channels=12, out_channels=16, kernel_size=(3, 3), padding=0, bias=False), # output_size = 8 RF: 14\r\n normalize(16, 8, 8),\r\n nn.ReLU(),\r\n nn.Dropout(drop),\r\n nn.Conv2d(in_channels=16, out_channels=20, kernel_size=(3, 3), padding=0, bias=False), # output_size = 6 RF: 18\r\n normalize(20, 6, 6),\r\n nn.ReLU(),\r\n nn.Dropout(drop)\r\n ) \r\n \r\n # Global average pooling\r\n self.gap = nn.Sequential(\r\n nn.AvgPool2d(6) # output_size = 1 RF: 28\r\n )\r\n\r\n # Fully connected layer\r\n self.convblock5 = nn.Sequential(\r\n nn.Conv2d(in_channels=20, out_channels=16, kernel_size=(1, 1), padding=0, bias=False), # output_size = 1 RF: 28\r\n normalize(16, 1, 1),\r\n nn.ReLU(),\r\n nn.Dropout(drop),\r\n nn.Conv2d(in_channels=16, out_channels=10, kernel_size=(1, 1), padding=0, bias=False), # output RF: 28\r\n )\r\n\r\n def forward(self, x):\r\n x = self.convblock1(x)\r\n x = self.convblock2(x)\r\n x = self.pool1(x)\r\n x = self.trans1(x)\r\n x = self.convblock3(x)\r\n x = self.gap(x)\r\n x = self.convblock5(x)\r\n x = x.view(-1, 10) # 
convert 2D to 1D\r\n \r\n return F.log_softmax(x, dim=-1)\r\n\r\n","sub_path":"Tushar/ASSIGNMENTS/MNIST_normalization/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"5979149","text":"\"\"\"Utility script to compile artificial neural network C program as python\nextension module.\n\nSee:\n http://docs.python.org/2/extending/building.html\n\n\"\"\"\n\nfrom distutils.core import setup, Extension\n\n\nmod = Extension(\n \"khann\",\n sources=[\"src/khann.c\", \"src/khannmodule.c\", \"src/hashtable.c\"],\n extra_compile_args=[\"--std=c99\"],\n libraries=[\"mongoc\"],\n )\n\nsetup(\n name=\"khann\",\n version=\"1.0\",\n description=\"Kollaborative hosted artificial neural network\",\n ext_modules=[mod])\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"8235659","text":"import sys\n\nfname = '/home/simongle/simulation/auto_rally_catkin_ws/src/autorally/autorally_control/launch/waypoints'\nout_fname = '/home/simongle/simulation/auto_rally_catkin_ws/src/autorally/autorally_control/launch/waypoints_scaled'\n\n\nwith open(fname) as f:\n content = f.readlines()\n \nnew_content = \"\"\nfor line in content:\n\tline = line.split(',')\n\tline[0] = str(float(line[0]) * 3)\n\tline[1] = str(float(line[1]) * 3)\n\tnew_content += str(line[0]) + ',' + str(line[1]) + '\\n'\n\ncontent = \"\".join(new_content)\n\n#print(content)\n \nwith open(out_fname, 'w') as o:\n\to.write(content)\n","sub_path":"msu_autorally/msu_autorally_helper/src/waypoint_modifier.py","file_name":"waypoint_modifier.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"360783856","text":"def factorial(x):\n\tresult = x\n\tnum = x - 1\n\twhile num > 0:\n\t\tresult *= num\n\t\tnum -= 1\n\treturn result\n\nnum = int(input(\"Calcular o fatural de: \"))\nprint(\"Fatorial de\", num, \"é\", factorial(num))","sub_path":"python/others/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"608929650","text":"import tensorflow as tf\nimport pickle\nimport numpy as np\nimport AlexNet, VGG, Layers\nimport TFRecord\n\ndef openPickle(filename):\n\twith open(filename, 'rb') as file:\n\t\tpkl = pickle.load(file)\n\n\treturn pkl\n\ndef input_img_lab(height, width, channel):\n\twith tf.name_scope('inputs'):\n\t\tx = tf.placeholder(tf.float32, [None, height, width, channel], name='x_input')\n\t\ty = tf.placeholder(tf.int64, [None], name='y_input')\n\t\tkeep_prob = tf.placeholder(tf.float32, name='keep_prob')\n\n\treturn x, y, keep_prob\n\ndef loading_tfrecord(filenames, b_size, train=False):\n\tdataset = tf.data.TFRecordDataset(filenames,compression_type='GZIP')\n\tnew_dataset = dataset.map(TFRecord.parse_function)\n\tnew_dataset = new_dataset.batch(b_size)\n\tif train == True:\n\t\t# new_dataset = new_dataset.shuffle(buffer_size=100)\n\t\tnew_dataset = new_dataset.repeat()\n\titerator = new_dataset.make_one_shot_iterator()\n\t# next_element = iterator.get_next()\n\treturn iterator\n\n\nif __name__ == '__main__':\n\n\tIMAGE_HEIGHT = 300\n\tIMAGE_WIDTH = 333\n\tIMAGE_CHANNEL = 1\n\tMODEL = AlexNet\n\n\tx, y, keep_prob = input_img_lab(IMAGE_HEIGHT, IMAGE_WIDTH, 
IMAGE_CHANNEL)\n\tpredictions = MODEL.inference_op(x, keep_prob)\n\n\tloss = MODEL.loss(predictions, y)\n\ttrain_op = tf.train.AdamOptimizer(0.001).minimize(loss)\n\t\n\tlogits = MODEL.get_logits(predictions)\n\taccuracy = MODEL.predict(logits, y)\n\n\t# training_dataset = read_tfrecord([\"validation1017.tfrecord\"])\n\n\n\t# filename_queue = tf.train.string_input_producer(\n\t# \t['test2.tfrecords'], num_epochs=10)\n\t# images, labels = TFRecord.read_and_decode(filename_queue, IMAGE_HEIGHT, IMAGE_WIDTH)\n\n\ttrainingfile = [\"training7119.tfrecords\", \"test_data2034.tfrecords\"]\n\tnext_training = loading_tfrecord(trainingfile, 32, True)\n\n\tvalidfile = [\"validation1017.tfrecords\"]\n\tnext_valid = loading_tfrecord(validfile, 30)\n\n\ttestfile = [\"test_data2034.tfrecords\"]\n\tnext_test = loading_tfrecord(testfile, 2034)\n\n\tchoose = int(input('1: Training CNN model, 2: Generate CNN model graph >> '))\n\n\t\n\tif (choose == 1):\n\t\twith tf.Session() as sess:\n\t\t\tinit_op = tf.group(tf.global_variables_initializer(),\n\t\t\t\t\t\t\ttf.local_variables_initializer())\n\t\t\t\n\t\t\tsess.run(init_op)\n\t\t\t\n\n\t\t\tcoord = tf.train.Coordinator()\n\t\t\tthreads = tf.train.start_queue_runners(coord=coord)\n\t\t\tnext_train_e = next_training.get_next()\n\n\t\t\tfor i in range(5000):\n\t\t\t\timg, lab = sess.run(next_train_e)\n\t\t\t\t# print(sum(lab))\n\t\t\t\t\n\t\t\t\t_, loss_value = sess.run([train_op, loss], feed_dict={x: img, y: lab, keep_prob:0.5})\n\t\t\t\t# print(sess.run(logits, feed_dict={x: img, y: lab, keep_prob:1.0}))\n\t\t\t\tcount = 0\n\t\t\t\tsum_pred = 0.0\n\t\t\t\tif(i % 10 == 0 and i != 0):\n\t\t\t\t\ttry:\n\t\t\t\t\t\twhile True:\n\t\t\t\t\t\t\timg_valid, lab_valid = sess.run(next_valid.get_next())\n\t\t\t\t\t\t\tpred = sess.run(accuracy, feed_dict={x: img_valid, y:lab_valid, keep_prob:0.5})\n\t\t\t\t\t\t\tsum_pred = sum_pred + pred\n\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\texcept tf.errors.OutOfRangeError:\n\t\t\t\t\t\tnext_valid = loading_tfrecord(validfile, 30)\n\n\t\t\t\t\tprint('loss >> {0}, pred >> {1}'.format(loss_value, sum_pred / float(count)))\n\n\t\t\tcoord.request_stop()\n\t\t\tcoord.join(threads)\n\telif (choose == 2):\n\t\twith tf.Session() as sess:\n\t\t\tinit_op = tf.group(tf.global_variables_initializer(),\n\t\t\t\t\t\t\ttf.local_variables_initializer())\n\t\t\t\n\t\t\tsess.run(init_op)\n\t\t\t\n\t\t\twriter = tf.summary.FileWriter(\"logs/\", sess.graph)\n\telif (choose == 3):\n\t\twith tf.Session() as sess:\n\t\t\tinit_op = tf.group(tf.global_variables_initializer(),\n\t\t\t\t\t\t\ttf.local_variables_initializer())\n\t\t\t\n\t\t\tsess.run(init_op)\n\t\t\tcount = 0\n\t\t\tsum_pred = 0.0\n\t\t\tfor i in range(2):\n\t\t\t\ttry:\n\t\t\t\t\twhile True:\n\t\t\t\t\t\timg_valid, lab_valid = sess.run(next_valid.get_next())\n\t\t\t\t\t\tpred = sess.run(accuracy, feed_dict={x: img_valid, y:lab_valid, keep_prob:0.5})\n\t\t\t\t\t\tsum_pred = sum_pred + pred\n\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\tprint('count{0} >> pred >> {1}'.format(count, sum_pred / float(count)))\n\t\t\t\texcept tf.errors.OutOfRangeError:\n\t\t\t\t\tprint('end')\n\t\t\t\t\tnext_valid = loading_tfrecord(validfile, 30)\n\n\t\t\t\tprint('pred >> {0}'.format(sum_pred / float(count)))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"158065741","text":"'''\nSame concept as rotate image\nMove in a clockwise direction\nTime complexity O(mn)\nSpace O(1) -> Since 
output isn't considered\n'''\nclass Solution:\n def spiralOrder(self, matrix: List[List[int]]) -> List[int]:\n \n row_begin = col_begin = 0\n \n row_end = len(matrix) -1 \n col_end = len(matrix[0]) - 1\n \n answer = []\n while row_begin <= row_end and col_begin <= col_end:\n \n # Move along current top column left -> right\n \n for i in range(col_begin,col_end+1):\n answer.append(matrix[row_begin][i])\n \n # Go to next row\n row_begin += 1\n \n \n # Move along rightmost column top -> bottom\n for i in range(row_begin,row_end+1):\n answer.append(matrix[i][col_end])\n \n # Go to previous column\n col_end -= 1\n \n # These cases might happen before breaking off the loop\n if row_begin <= row_end:\n \n # col_begin -1 because of ranges and exclusiveness\n for i in range(col_end,col_begin-1,-1):\n answer.append(matrix[row_end][i])\n \n # Move a row up since last row has been added\n row_end -= 1\n \n if col_begin <= col_end:\n for i in range(row_end,row_begin-1,-1):\n answer.append(matrix[i][col_begin])\n \n # Move a column ahead because column has been added\n col_begin += 1\n \n return answer\n \n ","sub_path":"LeetCode/FAQ/35-Spiral-Matrix-Medium/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"220946877","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"Reducer program to list all the entries with node_type of question and answer.\"\"\"\nimport os\nimport csv\nimport sys\ndef reducer():\n\treader = csv.reader(sys.stdin, delimiter='\\t')\n\twriter = csv.writer(sys.stdout, delimiter='\\t', quotechar='\"', quoting=csv.QUOTE_ALL)\n\tanswer_count = 0\n\tanswer_total_length = 0\n\tquestion_body_length = 0\n\tcurrent_id = None\n\tfor line in reader:\n\t\tif len(line) == 3:\n\t\t\tpost_id = line[0]\n\t\t\t\"\"\"Control break logic. (1) Write Answer Length Average and Question Length. 
(2) Reset all counts and totals\"\"\"\n\t\t\tif current_id is None or post_id != current_id:\n\t\t\t\tif not current_id is None:\n\t\t\t\t\tif answer_count == 0:\n\t\t\t\t\t\taverage_answer_length = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\taverage_answer_length = float(answer_total_length)/float(answer_count)\t\t\t\t\t\t\n\t\t\t\t\twriter.writerow([current_id, question_body_length, average_answer_length])\t\n\t\t\t\tanswer_count = 0\n\t\t\t\tanswer_total_length = 0\n\t\t\t\tquestion_body_length = 0\n\t\t\t\tcurrent_id = post_id\n\t\t\t\t\n\t\t\tnode_type = line[1]\n\t\t\tbody_length = float(line[2])\n\t\t\tif node_type == \"question\":\n\t\t\t\tquestion_body_length = body_length\n\t\t\telse:\n\t\t\t\tanswer_count += 1\n\t\t\t\tanswer_total_length += body_length\n\t\t\t\t\n\tif answer_count == 0:\n\t\twriter.writerow([current_id, question_body_length, \"0\"])\n\telse:\n\t\twriter.writerow([current_id, question_body_length,float(answer_total_length)/float(answer_count)])\n\t\ndef main():\n\treducer()\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n","sub_path":"extras/avg_answer_length_reducer.py","file_name":"avg_answer_length_reducer.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"496064371","text":"from random import randint\nfrom os import getenv\nfrom dotenv import load_dotenv\nimport logging\nfrom aiogram import Bot, Dispatcher, executor, types, exceptions\nfrom asyncio import sleep\nfrom peewee import *\nfrom playhouse.db_url import connect\nimport time\n\nload_dotenv()\nbot = Bot(token=getenv('TG_TOKEN'))\ndp = Dispatcher(bot)\ndb = connect(getenv('DATABASE_URL'))\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger('broadcast')\nmsg_counter = 0\nglobal MSG_PER_SECOND\nMSG_PER_SECOND = 5\n\nclass User(Model):\n id = IntegerField(null=False, unique=True, primary_key=True)\n nickname = CharField(null=False, unique=True, max_length=16)\n class Meta:\n database = db\n db_table = 'users'\n\n\nasync def msg_counter_reset():\n global msg_counter\n while True:\n await sleep(1)\n msg_counter = 0\n\n\nasync def send_message(user_id: int, text: str, disable_notif: bool=False):\n global msg_counter\n while msg_counter > MSG_PER_SECOND:\n print('Too many msgs!')\n log.warning('Too many msgs!')\n await sleep(0.1)\n msg_counter += 1\n try:\n await bot.send_message(user_id, text, \n disable_notification=disable_notif)\n except exceptions.BotBlocked:\n log.error(f\"Target [ID:{user_id}]: blocked by user\")\n except exceptions.ChatNotFound:\n log.error(f\"Target [ID:{user_id}]: invalid user ID\")\n except exceptions.RetryAfter as e:\n log.error(f\"Target [ID:{user_id}]: Flood limit is exceeded.\" +\n \"Sleep {e.timeout} seconds.\")\n await sleep(e.timeout)\n return await send_message(user_id, text, disable_notif)\n except exceptions.UserDeactivated:\n log.error(f\"Target [ID:{user_id}]: user is deactivated\")\n except exceptions.TelegramAPIError:\n log.exception(f\"Target [ID:{user_id}]: failed\")\n else:\n return True\n return False\n\n\n@dp.message_handler(commands=['flood']) # test sender\nasync def flood(message: types.Message):\n start = time.time()\n args = message.text.split()\n if len(args) > 2 and args[2].isdigit():\n MSG_PER_SECOND = args[2]\n if len(args) >= 2 and args[1].isdigit():\n for i in range(int(args[1])):\n await send_message(message.from_user.id, str(i))\n total_time = time.time() - start\n print(f'Общее время: {total_time}')\n print(f'Сообщений в секунду: 
{(int(args[1]))/total_time}')\n\n\n@dp.message_handler(commands=['sleep']) # TMP TEST\nasync def sleeping(message: types.Message):\n for i in range(30, 0, -10):\n await message.answer(i)\n await sleep(10)\n await message.answer(0)\n\n\n@dp.message_handler(commands=['start'])\nasync def start(message: types.Message):\n id = message.from_user.id\n if len(message.text.split()) > 2: # tmp to test db\n try:\n id = int(message.text.split()[1])\n except:\n message.answer('Error!')\n return\n reply = ''\n user = User.select().where(User.id == id)\n if user.exists():\n await send_message(message.from_user.id, f'{user.get().nickname}, ' +\n 'you are already exist in db!')\n else:\n if len(message.text.split()) > 2: # tmp to test db\n try:\n nickname = ''.join(message.text.split()[2:])[:16]\n except:\n nickname = nickname_generator('Player') # end test\n elif type(message.from_user.username) is str:\n nickname = message.from_user.username[:16]\n elif type(message.from_user.first_name) is str:\n nickname = message.from_user.first_name[:16]\n elif type(message.from_user.last_name) is str:\n nickname = message.from_user.last_name[:16]\n else:\n nickname = nickname_generator('Player')\n user = User.select().where(User.nickname == nickname)\n if user.exists():\n reply += f'{nickname}, your name has already been taken.\\n'\n nickname = nickname_generator(nickname)\n reply += f'We will call you {nickname}.\\n'\n User.create(id=id, nickname=nickname)\n await send_message(message.from_user.id, reply+ f'Hello, {nickname}!')\n\n\n@dp.message_handler(commands=['rename'])\nasync def rename(message: types.Message):\n await message.answer('WIP...') # TO DO\n\n\n@dp.message_handler(commands=['roll']) # tmp\nasync def roll(message: types.Message):\n await message.answer('🎲 ' + str(randint(1, 6)))\n\n\n@dp.message_handler(commands=['whoiam']) # userstat\nasync def whoami(message: types.Message):\n userinfo = 'id: ' + str(message.from_user.id)\n if type(message.from_user.username) is str:\n userinfo += '\\n' + 'Nickname: ' + message.from_user.username\n if type(message.from_user.first_name) is str:\n userinfo += '\\n' + 'F.Name: ' + message.from_user.first_name\n if type(message.from_user.last_name) is str:\n userinfo += '\\n' + 'L.Name: ' + message.from_user.last_name\n await message.answer(userinfo)\n\n\n@dp.message_handler(commands=['db']) # test\nasync def print_db(message: types.Message):\n text = ''\n for user in User.select():\n text += str(user.id) + ' ' + user.nickname + '\\n'\n await message.answer(text)\n\n\n@dp.message_handler(commands=['remove']) # test\nasync def db_remove(message: types.Message):\n try:\n id_list = [int(i) for i in message.text.split()[1:]]\n for id in id_list:\n user = User.select().where(User.id == id)\n if user.exists():\n user.get().delete_instance()\n except:\n await message.answer('Error!')\n await print_db(message)\n\n\n@dp.message_handler(commands=['w']) # test, i think\nasync def whisper(message: types.Message):\n if len(message.text.split()) < 3:\n await message.answer('Usage: /w username message')\n return\n await bot.delete_message(chat_id=message.chat.id,\n message_id=message.message_id)\n input_text = message.text.split()[1:]\n target = User.select().where(User.nickname == input_text[0])\n if target.exists():\n target = target.get().id\n sender = User.get(User.id == message.from_user.id).nickname\n text_to_send = sender +': ' + ' '.join(input_text[1:])\n try:\n await bot.send_message(chat_id=target, text=text_to_send)\n await message.answer(text_to_send)\n except:\n await 
message.answer('Error :(\\nTarget user stoped the bot?')\n else:\n await message.answer('User not found.')\n\n\n@dp.message_handler() # test?\nasync def echo(message: types.Message):\n await message.answer(message.text)\n\n\ndef nickname_generator(nickname):\n counter = 1\n check_name = User.select().where(User.nickname == nickname + str(counter))\n while check_name.exists():\n counter += 1\n if len(nickname + str(counter)) > 16:\n return nickname_generator('Player')\n check_name = User.select().where(User.nickname == nickname\n + str(counter))\n return nickname + str(counter)\n\n\nif __name__ == '__main__':\n dp.loop.create_task(msg_counter_reset())\n executor.start_polling(dp)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"393724993","text":"# -*- coding: utf8 -*-\n\n#######################################################\n# OpenTSDB telnet session in a TLS connection example #\n#######################################################\n\nfrom __future__ import print_function, unicode_literals\nfrom socket import socket, AF_INET, SOCK_STREAM, SHUT_RDWR\nfrom ssl import wrap_socket, PROTOCOL_TLSv1\n\n# Place your token here\ntoken_id = 'w3eaaaqaa7pff'\ntoken_key = 'xyz123xyz123xyz123xyz123'\nopentsdb_host = 'opentsdb.iot.runabove.io'\nopentsdb_port = 4243\n\n# Create socket\nclient_socket = socket(AF_INET, SOCK_STREAM)\nclient_socket.settimeout(1)\n\ntls_client = wrap_socket(client_socket, ssl_version=PROTOCOL_TLSv1)\n\nprint('Opening connection')\n# Connect to the echo server\ntls_client.connect((opentsdb_host, opentsdb_port))\n\nprint('Authenticating')\n# Send auth command\ntls_client.send(('auth {}:{}\\n'.format(token_id, token_key)).encode('utf-8'))\n\n# Read received data\ndata_in = tls_client.recv(1024)\n\n# Decode and print message\nresponse = data_in.decode()\nprint('Read response: ' + response)\n\nif response == 'ok\\n':\n # Send data (replace metric, timestamp, data and tags with your own values)\n print('Sending data')\n tls_client.send(b'put home.temp.indoor 1437591600 22.5 source=dht22\\n')\n tls_client.send(b'put home.temp.outdoor 1437591600 28.2 source=ds18b20\\n')\n tls_client.send(b'put home.temp.indoor 1437593400 22.1 source=dht22\\n')\n tls_client.send(b'put home.temp.outdoor 1437593400 27.1 source=ds18b20\\n')\n print('Data sent')\nelse:\n print('Auth failed, not sending data.')\n\n# Send exit command to close connection\ntls_client.send(b'exit\\n')\n\n# Close the socket\ntls_client.shutdown(SHUT_RDWR)\ntls_client.close()\n","sub_path":"Python/python-client-telnet.py","file_name":"python-client-telnet.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"548130963","text":"from typing import List\nfrom typing import Optional\n\nFILENAME = \"input.txt\"\n\n\ndef read_file(path: str) -> List[int]:\n with open(path) as f:\n i = [int(i) for i in f]\n return sorted(i)\n\n\ndef find_nums(nums: List[int]) -> Optional[int]:\n l = 0\n r = len(nums) - 1\n while l < r:\n numsum = nums[l] + nums[r]\n if numsum == 2020:\n print(f\"Found: {nums[l]} and {nums[r]} at indexes {l} and {r}\")\n return nums[l] * nums[r]\n elif numsum > 2020:\n r -= 1\n else:\n l += 1\n return None\n\n\ndef main():\n nums = read_file(FILENAME)\n print(find_nums(nums))\n\n\nif __name__ == \"__main__\":\n 
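# Note: find_nums() relies on read_file() returning a sorted list; the two\n    # pointers close in from both ends, discarding one candidate per comparison,\n    # so the pair summing to 2020 is found in O(n) after the O(n log n) sort.\n    # Sample check: find_nums([299, 721, 1299, 1721]) returns 299 * 1721 = 514579.\n    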
main()\n","sub_path":"day1/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"362535935","text":"box1 = BitBox()\nitems = []\nfor i in range(20):\n item=Item(str(i), i)\n items.append(item)\nbox1.add(items)\n\nitems = []\nbox2 = BitBox()\nfor i in range(9):\n item=Item(str(i), i)\n items.append(item)\nbox2.add(items)\nitems = []\nbox3 = BitKit()\nfor i in range(5):\n item=Item(str(i), i)\n items.append(item)\nbox3.add(items)\n\nrepack_boxes(box1, box2, box3)\n\nprint(box1.count())\nprint(box2.count())\nprint(box3.count())","sub_path":"Box.py","file_name":"Box.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"387853506","text":"from trex_stl_lib.api import *\n\n# 1 clients MAC override the LSB of destination\nclass STLS1(object):\n\n def __init__ (self):\n self.fsize =64; # the size of the packet \n\n\n def create_stream (self):\n\n # create a base packet and pad it to size\n size = self.fsize - 4; # no FCS\n base_pkt = Ether()/IP(src=\"16.0.0.1\",dst=\"48.0.0.1\")/UDP(dport=12,sport=1025)\n base_pkt1 = Ether()/IP(src=\"16.0.0.2\",dst=\"48.0.0.1\")/UDP(dport=12,sport=1025)\n base_pkt2 = Ether()/IP(src=\"16.0.0.3\",dst=\"48.0.0.1\")/UDP(dport=12,sport=1025)\n pad = max(0, size - len(base_pkt)) * 'x'\n\n\n return STLProfile( [ STLStream( isg = 10.0, # star in delay \n name ='S0',\n packet = STLPktBuilder(pkt = base_pkt/pad),\n mode = STLTXSingleBurst( pps = 10, total_pkts = 1),\n next = 'S1'), # point to next stream \n\n STLStream( self_start = False, # stream is disabled enable trow S0\n name ='S1',\n packet = STLPktBuilder(pkt = base_pkt1/pad),\n mode = STLTXSingleBurst( pps = 10, total_pkts = 2),\n next = 'S2' ),\n\n STLStream( self_start = False, # stream is disabled enable trow S0\n name ='S2',\n packet = STLPktBuilder(pkt = base_pkt2/pad),\n mode = STLTXSingleBurst( pps = 10, total_pkts = 3 ),\n action_count = 2, # loop 2 times \n next = 'S0' # back to S0 loop\n )\n ]).get_streams()\n\n\n def get_streams (self, direction = 0, **kwargs):\n # create 1 stream \n return self.create_stream() \n\n\n# dynamic load - used for trex console or simulator\ndef register():\n return STLS1()\n\n\n\n\n","sub_path":"trex_client/stl/profiles/burst_3st_loop_x_times.py","file_name":"burst_3st_loop_x_times.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"638144671","text":"#File: HW3 hw3_part3.py\n#Author: Caleb Massey\n#Date: 9/16/15\n#Section: 23\n#Email: cmassey1@umbc.edu\n#Description: This takes inputs from the user and outputs the medical diagnosis.\n\ndef main():\n#This is where the user inputs which symptoms they have and depending on what\n# symptoms they have it will ask for other symptoms, and then print the \n# diagnosis.\n fever = input(\"Do you have a fever? (y/n) \")\n if fever == 'y':\n rash = input (\"Do you have a rash? (y/n) \")\n if rash == 'y':\n print (\"You have Measles\")\n else:\n earHurt = input(\"Does your ears hurt? (y/n) \")\n if earHurt == 'y':\n print(\"You have an Ear Infection\")\n else:\n print(\"You have the Flu\")\n else:\n stuffNose = input(\"Is your nose stuffy? 
(y/n) \")\n if stuffNose == 'y':\n print(\"You have a Head Cold\")\n else:\n print(\"You have Hypochondriac\")\nmain()\n","sub_path":"Computer Science 201 (Fall 2015)/HW3/hw3_part3.py","file_name":"hw3_part3.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"479955897","text":"import logging\nimport signal\nimport sys\nimport time\n\nimport gevent\nimport zmq.green as zmq\nfrom gevent.queue import JoinableQueue, Queue\n\nfrom job_runner_worker.cleanup import reset_incomplete_runs\nfrom job_runner_worker.config import config\nfrom job_runner_worker.enqueuer import enqueue_actions\nfrom job_runner_worker.events import publish\nfrom job_runner_worker.worker import execute_run, kill_run\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef run():\n \"\"\"\n Start consuming runs and executing them.\n \"\"\"\n context = zmq.Context(1)\n\n greenlets = []\n reset_incomplete_runs()\n concurrent_jobs = config.getint('job_runner_worker', 'concurrent_jobs')\n\n run_queue = Queue()\n kill_queue = Queue()\n event_queue = Queue()\n exit_queue = JoinableQueue()\n event_exit_queue = Queue()\n\n greenlets.append(\n gevent.spawn(\n enqueue_actions,\n context,\n run_queue,\n kill_queue,\n event_queue,\n exit_queue,\n )\n )\n\n for x in range(concurrent_jobs):\n greenlets.append(gevent.spawn(\n execute_run,\n run_queue,\n event_queue,\n exit_queue,\n ))\n\n greenlets.append(gevent.spawn(\n kill_run, kill_queue, event_queue, exit_queue))\n greenlets.append(gevent.spawn(\n publish, context, event_queue, event_exit_queue))\n\n def terminate_callback(*args, **kwargs):\n logger.warning('Worker is going to terminate!')\n for i in range(len(greenlets) - 1):\n # we don't want to kill the event greenlet, since we want to\n # publish events of already running jobs\n exit_queue.put(None)\n\n signal.signal(signal.SIGTERM, terminate_callback)\n\n for greenlet in greenlets[:-1]:\n greenlet.join()\n\n # now terminate the event queue\n event_exit_queue.put(None)\n greenlets[-1].join()\n sys.exit('Worker terminated')\n","sub_path":"job_runner_worker/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"4567815","text":"import sys\nimport os\nimport math\n\n\"\"\"\n Write an efficient algorithm that searches for a value in an m x n matrix. 
This matrix has the following properties:\n\n Integers in each row are sorted from left to right.\n The first integer of each row is greater than the last integer of the previous row.\n For example,\n\n Consider the following matrix:\n\n [\n [1, 3, 5, 7],\n [10, 11, 16, 20],\n [23, 30, 34, 50]\n ]\n Given target = 3, return true.\n\"\"\"\ndef search_matrix(mtx, k):\n m = len(mtx)\n if not m:\n return False\n\n n = len(mtx[0])\n if not n:\n return False\n\n i = 0\n j = n - 1\n while i < m and j >= 0:\n if mtx[i][j] == k:\n return True\n elif mtx[i][j] > k:\n j -= 1\n else:\n i += 1\n return False\n\n\n#mtx = [\n# [1, 3, 5, 7],\n# [10, 11, 16, 20],\n# [23, 30, 34, 50]\n#]\n#mtx = [[1], [3]]\n#print search_matrix(mtx, 3)\n","sub_path":"Py_leetcode/074_searchMatrix.py","file_name":"074_searchMatrix.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"239067669","text":"#!/bin/python\n\nimport sys\nimport requests\nimport json\nimport base64\n\n# Global session identifiers\ntok = \"\"\nusr = \"\"\npwd = \"\"\n\ndef do_status():\n\tprint(\"Logged in as: \" + usr)\n\tprint(\"Token: \" + tok)\n\ndef do_create(u, p):\n\t#print(\"create \" + u + \", \" + p)\n\turl \t= 'http://memorydrop.me:2222/_create'\n\tpayload = {\"usr\": u, \"pwd\": p}\n\tr = requests.post(url, data=json.dumps(payload))\n\tif r.status_code != 200:\n\t\tprint(\"Sorry. User already exists.\")\n\t\treturn\n\tglobal tok\n\tglobal usr\n\tglobal pwd\n\ttok = r.text\n\tusr = u\n\tpwd = p\n\tprint(\"Success. User created and logged in.\")\n\ndef do_login(u, p):\n\t# handle logging out current user somehow.\n\tprint(\"login \" + u + \", \" + p)\n\turl = 'http://memorydrop.me:2222/_login'\n\tpayload = {\"usr\": u, \"pwd\": p}\n\tr = requests.post(url, json.dumps(payload))\n\tif r.status_code != 200:\n\t\tprint(\"Login Failed. Bad username or password\")\n\t\treturn\n\tglobal tok\n\tglobal usr\n\tglobal pwd\n\ttok = r.text\n\tusr = u\n\tpwd = p\n\tprint(\"Success. Logged in.\")\n\ndef do_logout():\n\tglobal tok\n\tglobal usr\n\tglobal pwd\n\tif tok == \"null\":\n\t\tprint(\"No logged in user.\")\n\t\treturn\n\turl = 'http://memorydrop.me:2222/_logout'\n\tpayload = {\"usr\": usr, \"tok\": tok}\n\tr = requests.post(url, json.dumps(payload))\n\tif r.status_code != 200:\n\t\tprint(\"Logout failed. Bad username or token.\")\n\t\treturn\n\ttok = \"null\"\n\tusr = \"null\"\n\tpwd = \"null\"\n\tprint(\"Success. Logged out.\")\n\ndef do_drop(infile, f_type, lon, lat):\n\tglobal tok\n\tglobal usr\n\tglobal pwd\n\t#print(\"drop: \" + infile + \" (\" + f_type + \") at \" + location + \".\");\n\tif tok == \"null\":\n\t\tprint(\"Have to be logged in to drop.\")\n\t\treturn\n\t# open the file for reading\n\ttry:\n\t\twith open(infile, \"rb\") as f:\n\t\t\tdata = f.read()\n\t\tf.closed\n\texcept IOError:\n\t\tprint(\"Error reading file.\")\n\t\treturn\n\turl = 'http://memorydrop.me:2222/_drop'\n\tdata = base64.b64encode(data).decode(\"utf-8\")\n \n\tpayload = {\"usr\":usr, \"tok\":tok, \"lon\":float(lon), \"lat\":float(lat), \"type\":f_type, \"data\":data}\n\tr = requests.post(url, data=json.dumps(payload))\n\tif r.status_code != 200:\n\t\tprint(\"Error dropping.\")\n\t\treturn\n\tprint(\"Success. 
Dropped.\")\n\ndef do_usrlist():\n\tglobal tok\n\tglobal usr\n\tglobal pwd\n\tif tok == \"null\":\n\t\tprint(\"Have to be logged in to get drops.\")\n\t\treturn\n\turl = 'http://memorydrop.me:2222/_usrlist'\n\tpayload = {\"usr\": usr, \"tok\": tok}\n\tr = requests.post(url, data=json.dumps(payload))\n\tif r.status_code != 200:\n\t\tprint(\"Error getting your drops\")\n\t\treturn\n\tdrops = r.json()\n\tif len(drops) == 0:\n\t\tprint(\"Looks like you have no drops...\");\n\t\treturn\n\tprint(\"Your Drops:\")\n\tfor i in range (len(drops)):\n\t\tprint(\"drop #: \" + str(i))\n\t\tprint(drops[i])\n\twhile 1:\n\t\tgetdrop = input(\"Select a drop to get or c to cancel: \")\n\t\tif getdrop == 'c':\n\t\t\tprint(\"okeydoke\")\n\t\t\treturn\n\t\tif int(getdrop) < len(drops):\n\t\t\tprint(\"Get:\")\n\t\t\tgetdrop = drops[int(getdrop)]\n\t\t\tdo_pickup(getdrop['dataid'], getdrop['type'])\n\t\t\treturn\n\t\telse:\n\t\t\tprint(\"Not a valid drop.\")\n\t\t\tcontinue\n\ndef do_pickup(dropid, d_type):\n\tglobal tok\n\tglobal usr\n\turl = 'http://memorydrop.me:2222/_pickup'\n\tpayload = {'usr':usr, 'tok':tok, 'dataid':dropid}\n\tr = requests.post(url, data=json.dumps(payload))\n\tif r.status_code != 200:\n\t\tprint(\"Drop pickup failed.\")\n\t\treturn\n\tdrop = base64.b64decode(r.content)\n\tif (d_type == \"text\"):\n\t\tprint(drop)\n\twith open(dropid, \"wb\") as outfile:\n\t\toutfile.write(drop)\n\toutfile.closed\n\tprint(\"Pickup Success! Written to HD as \" + dropid)\n\ndef do_loclist(lon, lat):\n\tprint(\"Experimental....\");\n\tglobal tok\n\tglobal usr\n\tglobal pwd\n\tif tok == \"null\":\n\t\tprint(\"Have to be logged in to get drops.\")\n\t\treturn\n\turl = 'http://memorydrop.me:2222/_loclist'\n\tpayload = {\"usr\": usr, \"tok\": tok, \"lon\":float(lon), \"lat\":float(lat)}\n\tr = requests.post(url, data=json.dumps(payload))\n\tif r.status_code != 200:\n\t\tprint(\"Error getting the drops\")\n\t\treturn\n\tdrops = r.json()\n\tif len(drops) == 0:\n\t\tprint(\"Sorry no drops nearby\")\n\t\treturn\n\tprint(\"Drops within 20m of :\")\n\tfor i in range (len(drops)):\n\t\tprint(\"drop #: \" + str(i))\n\t\tprint(drops[i])\n\twhile 1:\n\t\tgetdrop = input(\"Select a drop to get (or c to cancel): \")\n\t\tif getdrop == 'c':\n\t\t\tprint(\"okeydoke\")\n\t\t\treturn\n\t\tif int(getdrop) < len(drops):\n\t\t\tprint(\"Get:\")\n\t\t\tgetdrop = drops[int(getdrop)]\n\t\t\tdo_pickup(getdrop['dataid'], getdrop['type'])\n\t\t\treturn\n\t\telse:\n\t\t\tprint(\"Not a valid drop.\")\n\t\t\tcontinue\n\ndef do_help():\n\tprint(\"MemoryDrop Interactive Shell:\")\n\tprint(\"Available commands:\")\n\tprint(\" create -- Creates a new account. Requires username and password\")\n\tprint(\" login -- Logs in as a user. Requires username and password.\")\n\tprint(\" logout -- Logs out the logged in user.\")\n\tprint(\" drop -- Drop a memory. Requires file, filetype and location\")\n\tprint(\" usrlist -- Get a list of your drops, select one to download.\")\n\tprint(\" pickup -- Pickup a specific drop. Requires a dropid.\")\n\tprint(\" loclist -- Experimental. 
Get a list of drops by location.\")\n\nif __name__ == '__main__':\n\n\tprint (\"+-------------------------------------------+\")\n\tprint (\"| MemoryDrop |\")\n\tprint (\"| Create an account or login to get started |\")\n\tprint (\"| type 'help' for available commands |\")\n\tprint (\"+-------------------------------------------+\")\n\n\twhile (1):\n\t\top = input(\"MD: \").split(' ')\n\t\tif (op[0] == \"create\"):\n\t\t\tif len(op) != 3:\n\t\t\t\tprint (\"Need username and password.\")\n\t\t\t\tcontinue\n\t\t\tdo_create(op[1], op[2])\n\n\t\tif (op[0] == \"login\"):\n\t\t\tif len(op) != 3:\n\t\t\t\tprint (\"Need username and password.\")\n\t\t\t\tcontinue\n\t\t\tdo_login(op[1], op[2])\n\n\t\tif (op[0] == \"logout\"):\n\t\t\tdo_logout()\n\n\t\tif (op[0] == \"drop\"):\n\t\t\tif len(op) != 5:\n\t\t\t\tprint(\"Need file, type, and location (lon, lat).\")\n\t\t\t\tcontinue\n\t\t\tdo_drop(op[1], op[2], op[3], op[4])\n\n\t\tif (op[0] == \"loclist\"):\n\t\t\tif len(op) != 3:\n\t\t\t\tprint(\"Need a location (lon, lat)\")\n\t\t\t\tcontinue\n\t\t\tdo_loclist(op[1], op[2])\n\n\t\tif (op[0] == \"usrlist\"):\n\t\t\tdo_usrlist()\n\n\t\tif (op[0] == \"pickup\"):\n\t\t\tif len(op) != 2:\n\t\t\t\tprint(\"Need dropid.\")\n\t\t\t\tcontinue\n\t\t\tdo_pickup(op[1])\n\n\t\tif (op[0] == \"?\"):\n\t\t\tdo_status()\n\n\t\tif (op[0] == \"help\"):\n\t\t\tdo_help()\n\n\t\tif (op[0] == \"exit\"):\n\t\t\tprint(\"peace out.\")\n\t\t\tsys.exit()\n\n\tprint(\"bad argument.\")\n\tsys.exit()\n","sub_path":"md.py","file_name":"md.py","file_ext":"py","file_size_in_byte":6099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"590327111","text":"# encoding:utf-8\n\n# 分析游戏中文资源的情况\n\nimport os\nimport sys\nimport re\nimport string\nfrom contextlib import contextmanager\nimport json\n\nsplit_res_info_file_name = \"res_split_info.json\"\n\nroot_project_path = \"\"\nroot_project_path_res = \"\"\n\ndef LOG(temp_str):\n temp_str = temp_str.decode('utf-8')\n print(temp_str)\n\ndef specificate_path(source_path):\n source_path = source_path.replace('\\\\', '/')\n return source_path\n\nsub_path_pattern_str = r\"/res/((.*)\\.(.*))\"\nsub_path_pattern = re.compile(sub_path_pattern_str)\n\ndef is_file_exist_in_hy(file_path):\n sub_path_in_res = sub_path_pattern.findall(file_path)\n if sub_path_in_res and len(sub_path_in_res) > 0:\n total_path = os.path.join(root_project_path_res, \"res_hy\", sub_path_in_res[0][0])\n total_path = specificate_path(total_path)\n return os.path.exists(total_path)\n return False\n\ndef is_file_exist_in_wy(file_path):\n sub_path_in_res = sub_path_pattern.findall(file_path)\n if sub_path_in_res and len(sub_path_in_res) > 0:\n total_path = os.path.join(root_project_path_res, \"res_wy\", sub_path_in_res[0][0])\n total_path = specificate_path(total_path)\n return os.path.exists(total_path)\n return False\n\nchinese_res_count = 0\ndef handle_file(file_path):\n if is_file_exist_in_wy(file_path) and is_file_exist_in_hy(file_path):\n if is_file_in_split_res_package(file_path):\n pass\n else:\n global chinese_res_count\n chinese_res_count += 1 \n LOG(\"中文资源:\" + file_path)\n \n\ndef traverse_all_res_in_folder(folder_path, filter_folder):\n folder_path = specificate_path(folder_path)\n list = os.listdir(folder_path)\n for i in range(0, len(list)):\n if list[i] not in filter_folder:\n sub_path = os.path.join(folder_path, list[i])\n sub_path = specificate_path(sub_path)\n if os.path.isdir(sub_path):\n traverse_all_res_in_folder(sub_path, [])\n else:\n 
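# The filter list only applies at this top level; recursive calls pass an\n                # empty filter. Plain files reach handle_file(), which counts a file as a\n                # Chinese resource when it exists under both res_wy and res_hy and is not\n                # claimed by any package listed in res_split_info.json.\n                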
handle_file(sub_path)\n\ndef out_put_all_info():\n pass\n\nall_split_res_map = {}\n\ndef is_file_in_split_res_package(file_path):\n if file_path in all_split_res_map:\n return True\n folder_path = os.path.dirname(file_path)\n if folder_path in all_split_res_map:\n return True\n for split_res_key in all_split_res_map:\n if string.find(file_path, split_res_key) == 0:\n return True\n return False\n\ndef get_all_split_res_info_path():\n split_res_path = os.path.join(root_project_path_res, split_res_info_file_name)\n split_res_path = specificate_path(split_res_path)\n\n split_res_file = open(split_res_path)\n file_context = split_res_file.read()\n json_context = json.loads(file_context)\n\n for split_res_name in json_context:\n split_res_info = json_context[split_res_name]\n for sub_path in split_res_info:\n total_path = os.path.join(root_project_path_res, sub_path)\n total_path = specificate_path(total_path)\n all_split_res_map[total_path] = split_res_name\n\ndef main():\n global root_project_path\n root_project_path = sys.argv[1]\n root_project_path = specificate_path(root_project_path)\n assert os.path.isdir(root_project_path)\n\n global root_project_path_res\n root_project_path_res = os.path.join(root_project_path, 'res')\n root_project_path_res = specificate_path(root_project_path_res)\n\n gui_res_path = os.path.join(root_project_path_res, 'gui')\n gui_res_path = specificate_path(gui_res_path)\n\n get_all_split_res_info_path()\n traverse_all_res_in_folder(gui_res_path, ['template', 'ani_template'])\n\n LOG('中文资源个数 ' + str(chinese_res_count))\n out_put_all_info()\n\n LOG(\"分析完成\" + \"\\n\")\n \nif __name__ == \"__main__\":\n main()","sub_path":"分析资源工具/analysis_res_chinese_res.py","file_name":"analysis_res_chinese_res.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"445484722","text":"import tornado.web\nfrom app.domain.deployment import Deployment\nfrom app.database import deployment_db\nimport json\n\n\nclass AbilityApi(tornado.web.RequestHandler):\n\n async def get(self, *args, **kwargs):\n uuid = self.get_argument('uuid', None)\n result = await deployment_db.get_deployments_by_uuid(uuid)\n\n if len(result) > 0:\n deployment = Deployment()\n deployment.__dict__ = result[0]\n deployment.callCount += 1\n await deployment_db.update_deployment_call_count(deployment)\n\n self.write(json.dumps(result))\n","sub_path":"umm-python/app/web/rest/ability_api.py","file_name":"ability_api.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"156413705","text":"import torch\n\n\nclass Sequential(torch.nn.Module):\n \"\"\"An equivalent of the nn.Sequential class that allows the extraction\n of the necessary information to reconstruct the per-example gradients.\n The attributes 'outputs_grad' and 'inputs' contain the required\n information and can be used to construct and maintain a ReduceVar\n object.\n\n Arguments:\n layers (list): a list of the layers of the neural network, in the order\n of their application.\n activations (list): a list of the activation functions that should be\n applied after each layer, in the order of their application.\n \"\"\"\n def __init__(self, layers, activations):\n super().__init__()\n if len(layers) != len(activations):\n raise ValueError('unequal number of layers and activations')\n\n self.layers = layers\n self.activations = activations\n\n for idx, layer in 
enumerate(layers):\n self.add_module(str(idx), layer)\n\n self.inputs = list()\n self.outputs_grad = list()\n\n def hook(self, grad):\n self.outputs_grad = [grad.detach()] + self.outputs_grad\n\n def forward(self, input, cache=False):\n self.clear()\n\n self.inputs.append(input)\n for (layer, activation) in zip(self.layers, self.activations):\n input = layer(input)\n if input.requires_grad and cache:\n input.register_hook(self.hook)\n input = activation(input)\n if input.requires_grad and cache:\n self.inputs.append(input.detach())\n self.inputs.pop()\n return input\n\n def clear(self):\n self.inputs.clear()\n self.outputs_grad.clear()\n","sub_path":"torch_vr/sequential.py","file_name":"sequential.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"653223824","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport random\nimport pickle\n\nimport numpy as np\nimport pandas as pd\n\nimport scipy.sparse as sp\n\ndef data_iterator(data, batch_size):\n\t\"\"\"\n\tA simple data iterator from https://indico.io/blog/tensorflow-data-inputs-part1-placeholders-protobufs-queues/\n\t:param data: list of numpy tensors that need to be randomly batched across their first dimension.\n\t:param batch_size: int, batch_size of data_iterator.\n\tAssumes same first dimension size of all numpy tensors.\n\t:return: iterator over batches of numpy tensors\n\t\"\"\"\n\t# shuffle labels and features\n\tmax_idx = len(data[0])\n\tidxs = np.arange(0, max_idx)\n\tnp.random.shuffle(idxs)\n\tshuf_data = [dat[idxs] for dat in data]\n\n\t# Does not yield last remainder of size less than batch_size\n\tfor i in range(max_idx//batch_size):\n\t\tdata_batch = [dat[i*batch_size:(i+1)*batch_size] for dat in shuf_data]\n\t\tyield data_batch\n\n\ndef map_data(data):\n\t\"\"\"\n\tMap data to proper indices in case they are not in a continues [0, N) range\n\n\tParameters\n\t----------\n\tdata : np.int32 arrays\n\n\tReturns\n\t-------\n\tmapped_data : np.int32 arrays\n\tn : length of mapped_data\n\n\t\"\"\"\n\tuniq = list(set(data))\n\n\tid_dict = {old: new for new, old in enumerate(sorted(uniq))}\n\tdata = np.array(map(lambda x: id_dict[x], data))\n\tn = len(uniq)\n\n\treturn data, id_dict, n\n\n\ndef load_data(fname, seed=1234, verbose=True):\n\t\"\"\" Loads dataset and creates adjacency matrix\n\tand feature matrix\n\n\tParameters\n\t----------\n\tfname : str, dataset\n\tseed: int, dataset shuffling seed\n\tverbose: to print out statements or not\n\n\tReturns\n\t-------\n\tnum_users : int\n\t\tNumber of users and items respectively\n\n\tnum_items : int\n\n\tu_nodes : np.int32 arrays\n\t\tUser indices\n\n\tv_nodes : np.int32 array\n\t\titem (movie) indices\n\n\tratings : np.float32 array\n\t\tUser/item ratings s.t. ratings[k] is the rating given by user u_nodes[k] to\n\t\titem v_nodes[k]. 
Note that all the pairs u_nodes[k]/v_nodes[k] are unique, but\n\t\tnot necessarily all u_nodes[k] or all v_nodes[k] separately.\n\n\tu_features: np.float32 array, or None\n\t\tIf present in dataset, contains the features of the users.\n\n\tv_features: np.float32 array, or None\n\t\tIf present in dataset, contains the features of the items.\n\n\tseed: int,\n\t\tSeed for data shuffling with Python's own random.shuffle, as in CF-NADE.\n\n\t\"\"\"\n\n\tu_features = None\n\tv_features = None\n\n\tprint('Loading dataset', fname)\n\n\tdata_dir = 'data/' + fname\n\n\n\tif fname == 'dataset_1':\n\t\tstore_repo = 'data/dti_store/'\n\n\t\twith open(store_repo + 'graph_1.pkl') as f:\n\t\t\tgraph_info = pickle.load(f)\n\n\t\tnum_users, num_items, u_nodes_ratings, \\\n\t\tv_nodes_ratings, ratings, u_features, \\\n\t\tv_features = graph_info\n\n\telif fname == 'dataset_2':\n\t\tstore_repo = 'data/dti_store/'\n\n\t\twith open(store_repo + 'graph_2.pkl') as f:\n\t\t\tgraph_info = pickle.load(f)\n\n\t\tnum_users, num_items, u_nodes_ratings, \\\n\t\tv_nodes_ratings, ratings, u_features, \\\n\t\tv_features = graph_info\n\n\telse:\n\t\traise ValueError('Dataset name not recognized: ' + fname)\n\n\tif verbose:\n\t\tprint('Number of users = %d' % num_users)\n\t\tprint('Number of items = %d' % num_items)\n\t\tprint('Number of links = %d' % ratings.shape[0])\n\t\tprint('Fraction of positive links = %.4f' % (float(ratings.shape[0]) / (num_users * num_items),))\n\n\treturn num_users, num_items, u_nodes_ratings, v_nodes_ratings, ratings, u_features, v_features","sub_path":"build/lib/dti_gcmc/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"506164497","text":"# _*_ coding:utf-8 _*_\n# @File : multipart.py\n# @Time : 2020-07-21 15:42\n# @Author: zizle\nfrom PySide2.QtNetwork import QHttpPart, QHttpMultiPart, QNetworkRequest\nfrom PySide2.QtCore import QFileInfo\n\n\ndef generate_multipart_data(text_dict=None, file_dict=None):\n    multipart_data = QHttpMultiPart(QHttpMultiPart.FormDataType)\n    if text_dict:\n\n        for key, value in text_dict.items():\n            text_part = QHttpPart()\n            text_part.setHeader(QNetworkRequest.ContentDispositionHeader, \"form-data;name=\\\"%s\\\"\" % key)\n            text_part.setBody(value.encode(\"utf-8\"))\n            multipart_data.append(text_part)\n    if file_dict:\n        for key, file in file_dict.items():\n            file_part = QHttpPart()\n            filename = QFileInfo(file.fileName()).fileName()\n            file_part.setHeader(QNetworkRequest.ContentDispositionHeader, \"form-data; name=\\\"%s\\\"; filename=\\\"%s\\\"\" % (key, filename))\n            file_part.setBodyDevice(file)\n            file.setParent(multipart_data)\n            multipart_data.append(file_part)\n    return multipart_data\n\n","sub_path":"fontGUI/utils/multipart.py","file_name":"multipart.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"}
{"seq_id":"628758703","text":"from os import path\r\n\r\nfrom PIL import Image\r\nfrom lxml import etree\r\nimport vk_api\r\nfrom vk_api import VkUpload\r\nimport requests\r\nfrom textwrap import shorten\r\n\r\nfrom config import YTConfig, Config\r\n\r\n\r\nclass Youtube:\r\n    def __init__(self, ch_list):\r\n        self.authors = []\r\n        self.title = []\r\n        self.link = []\r\n        self.img = []\r\n        self.cl = ch_list\r\n        if __name__ == '__main__':\r\n            self.way = ''\r\n        else:\r\n            self.way = 'youtube/'\r\n        self.channel_parse()\r\n        self.pictures_download()\r\n        
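# Each preview is centre-cropped and 3x-upscaled before upload; note that\r\n        # pictures_correction() below hardcodes Youtube's 480x360 thumbnail\r\n        # geometry, so other thumbnail sizes would need different crop constants.\r\n        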
self.pictures_correction()\r\n self.pictures_upload()\r\n\r\n def xml_download(self, id):\r\n url = YTConfig.ROOT_URL + id\r\n response = requests.get(url).content\r\n with open(path.abspath(self.way + 'temp/xml.xml'), 'wb') as f:\r\n f.write(response)\r\n\r\n def channel_parse(self):\r\n authors = []\r\n title = []\r\n link = []\r\n img = []\r\n for i in range(len(self.cl)):\r\n self.xml_download(self.cl[i])\r\n tree = etree.parse(path.abspath(self.way + 'temp/xml.xml'))\r\n root = tree.getroot()\r\n title.append(root[7][8][0].text)\r\n img.append(root[7][8][2].attrib['url'])\r\n link.append(root[7][4].attrib['href'])\r\n authors.append(root[5][0].text)\r\n self.authors = authors\r\n self.title = title\r\n self.link = link\r\n self.img = img\r\n\r\n def pictures_download(self):\r\n for i in range(len(self.cl)):\r\n response = requests.get(self.img[i]).content\r\n with open(path.abspath(self.way + f'temp/preview{i}.jpg'), 'wb') as f:\r\n f.write(response)\r\n\r\n def pictures_correction(self):\r\n for i in range(len(self.cl)):\r\n img = Image.open(path.abspath(self.way + f'temp/preview{i}.jpg'))\r\n chords = int((480 - 439) / 2), int((360 - 270) / 2), int((480 + 439) / 2), int((360 + 270) / 2)\r\n cropped = img.crop(chords).resize((221 * 3, 136 * 3))\r\n cropped.save(path.abspath(self.way + f'temp/preview{i}.jpg'))\r\n\r\n def pictures_upload(self):\r\n url_list = []\r\n paths = []\r\n session = vk_api.VkApi(token=Config.TOKEN, client_secret=Config.TOKEN)\r\n for i in range(len(self.cl)):\r\n paths.append(path.abspath(self.way + f'temp/preview{i}.jpg'))\r\n resp = VkUpload(session).photo_messages(photos=paths)\r\n for i in range(len(self.cl)):\r\n url_list.append(f\"{resp[i]['owner_id']}_{resp[i]['id']}\")\r\n self.img = url_list\r\n\r\n def dict_generator(self):\r\n elements = []\r\n for i in range(len(self.cl)):\r\n part = {\r\n \"title\": shorten(self.title[i], 80),\r\n \"description\": shorten(self.authors[i], 80),\r\n \"action\": {\r\n \"type\": \"open_link\",\r\n \"link\": self.link[i]\r\n },\r\n \"photo_id\": self.img[i],\r\n \"buttons\": [{\r\n \"action\": {\r\n \"type\": \"open_link\",\r\n \"label\": \"Открыть\",\r\n \"link\": self.link[i],\r\n \"payload\": \"{}\"\r\n }\r\n }]\r\n }\r\n elements.append(part)\r\n template = {\"type\": \"carousel\", \"elements\": elements}\r\n return template\r\n\r\n\r\nif __name__ == '__main__':\r\n Youtube(None)\r\n","sub_path":"youtube/youtube_parse.py","file_name":"youtube_parse.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"224748702","text":"\nimport threading\nimport sys\nimport random\nfrom colores import Colores\n#-------------------------------------------Datos generales, se recuperan de los argumentos\nnum_mesas = int(sys.argv[3])\nnum_meseros = int(sys.argv[2])\n#------------------------------------------------------------------------------------------\n#Multiplex con los que se garantiza que no se ocupen más mesas o meseros de los que tenemos\nmesas = threading.Semaphore(num_mesas)\nmeseros = threading.Semaphore(num_meseros)\n#------------------------------------------------------------------------------------------\n#-------------------------------------------------------Para saber que meseros están libres\nmeserosDisp = []\nmutexmd = threading.Semaphore(1)\n#------------------------------------------------------------------------------------------\n#Para saber cuando cierra el restaurante\nclientes_res = 0\nmutex_cr = 
threading.Semaphore(1)\n#---------------------------------------\n\nclass Mesa:\n #Variables para implementar la barrera cuando todos quieran ordenar\n cuenta = 0\n mutex = threading.Semaphore(1)\n barrera = threading.Semaphore(0)\n #------------------------------------------------------------------\n\n def __init__(self,num_cliente,num_invitados):\n self.num_cliente = num_cliente\n self.num_invitados = num_invitados\n\n class Cliente:\n def __init__(self,num_cliente,num_invitados):\n self.num_cliente = num_cliente\n self.num_invitados = num_invitados\n self.esperarMesa()\n\n #Función desde el inicio de la estadía en el restaurante al fin\n def esperarMesa(self):\n global mesas\n mesas.acquire()\n self.llamarMesero(\"mesa\")\n self.conseguirMesa()\n self.llamarMesero(\"carta\")\n self.ordenarM()\n self.llamarMesero(\"comida\")\n self.llamarMesero(\"cuenta\")\n print(\"La mesa del cliente %d ha sido desocupada\" % (self.num_cliente))\n mesas.release()\n #---------------------------------------------------------------\n\n #--------------Se llama a un mesero y se le pasa la acción para la cual lo requieren\n def llamarMesero(self, accion):\n global meserosDisp, meseros\n esperarMesero = True\n print(Colores.F_NEGRO + Colores.T_BLANCO + \"La mesa del cliente %d esta buscando un mesero\" % (self.num_cliente) + Colores.FIN)\n #-----------Revisar si hay meseros disponibles\n while esperarMesero:\n mutexmd.acquire()\n tam = len(meserosDisp)\n mutexmd.release()\n if tam > 0:\n mutexmd.acquire()\n mesero = meserosDisp.pop(0)\n mutexmd.release()\n esperarMesero = False \n #---------------------------------------------\n #Se despierta al mesero que se saco de la lista de meseros disponibles\n mesero.despertar(accion,self.num_cliente)\n #Después de realizar la acción vuelve a estar disponible para otras mesas\n #meseros.release()\n #---------------------------------------------\n #mutexmd.acquire()\n #print(\"Se desocupo el mesero %d\" % (self.num_cliente)) \n #meserosDisp.append(mesero) \n #mutexmd.release() \n #------------------------------------------------------------------------------------\n\n def conseguirMesa(self):\n print(Colores.F_CIAN + Colores.T_NEGRO + \"El cliente %d ha conseguido una mesa para %d personas\" % (self.num_cliente, self.num_invitados+1) + Colores.FIN)\n\n def revisaCarta(self):\n print(Colores.F_CIAN + Colores.T_NEGRO + \"El cliente número %d está revisando la carta\" % (self.num_cliente) + Colores.FIN)\n\n def decidirOrden(self): \n Mesa.mutex.acquire()\n print(Colores.F_CIAN + Colores.T_NEGRO + \"El cliente número %d está listo para ordenar\" % (self.num_cliente) + Colores.FIN) \n Mesa.cuenta += 1 \n Mesa.mutex.release()\n\n def ordenarM(self): \n self.revisaCarta()\n self.decidirOrden()\n salir = True\n num_hilos = self.num_invitados + 1\n #-------------------------------------------------------------Crea los hilos de sus acompañantes\n for i in range (self.num_invitados):\n threading.Thread(target = Mesa.Invitado, args= [self.num_cliente,i]).start()\n \n #-----------------------------------------------------------------------------------------------\n while salir:\n if Mesa.cuenta == num_hilos:\n Mesa.barrera.release()\n salir = False\n Mesa.barrera.acquire()\n Mesa.barrera.release()\n\n print(Colores.F_VERDE + Colores.T_NEGRO + \"La mesa del cliente número %d ha realizado su orden\" % (self.num_cliente) + Colores.FIN)\n\n class Invitado:\n def __init__(self,num_cliente,num_invitado):\n self.num_cliente = num_cliente\n self.num_invitado = num_invitado\n 
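# Guest threads mirror the client: read the menu, then bump the shared\n            # Mesa.cuenta under Mesa.mutex. The client spins until cuenta reaches\n            # num_invitados + 1 and only then releases the ordering barrier.\n            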
self.revisaCarta()\n self.decidirOrden() \n\n def revisaCarta(self):\n print(Colores.F_PURPURA + Colores.T_NEGRO + \"El acompañante número %d del cliente %d está revisando la carta\" % (self.num_invitado , self.num_cliente) + Colores.FIN)\n\n def decidirOrden(self): \n Mesa.mutex.acquire() \n print(Colores.F_ROJO + Colores.T_NEGRO + \"El acompañante número %d del cliente %d ha decidido que ordenar\" % (self.num_invitado, self.num_cliente) + Colores.FIN) \n Mesa.cuenta += 1\n Mesa.mutex.release()\n\n #--------------------------------\n def iniciarCena(self):\n threading.Thread(target = Mesa.Cliente , args= [self.num_cliente, self.num_invitados]).start()\n \n\n#Falta definir cuando los meseros dejan de trabajar\nclass Mesero:\n def __init__(self, num_mesero):\n self.num_mesero = num_mesero\n self.dormir = threading.Semaphore(0)\n self.iniciar()\n\n def iniciar(self):\n global meserosDisp\n mutexmd.acquire()\n meserosDisp.append(self)\n mutexmd.release()\n #self.dormirSiesta()\n \n #def dormirSiesta(self):\n #self.dormir.acquire()\n\n #Un sólo será despertado cuando sea requerido que realice una acción\n def despertar(self, accion , num_cliente):\n #self.dormir.release()\n if accion == \"mesa\":\n self.llevarMesa(num_cliente)\n elif accion == \"carta\":\n self.mostrarCarta(num_cliente)\n elif accion == \"comida\":\n self.traerPlatillo(num_cliente)\n elif accion == \"cuenta\":\n self.traerCuenta(num_cliente)\n print(Colores.F_AMARILLO + Colores.T_NEGRO + \"Se desocupo el mesero %d\" % (self.num_mesero) + Colores.FIN)\n self.iniciar()\n \n def llevarMesa(self, num_cliente):\n print(Colores.F_BLANCO + Colores.T_NEGRO + \"El mesero número %d esta llevando al cliente %d a su mesa\" % (self.num_mesero,num_cliente) + Colores.FIN)\n\n def mostrarCarta(self, num_cliente):\n print(Colores.F_BLANCO + Colores.T_NEGRO + \"El mesero número %d ha entregado las cartas a la mesa del cliente %d\" % (self.num_mesero, num_cliente)+ Colores.FIN) \n\n def traerPlatillo(self, num_cliente):\n print(Colores.F_BLANCO + Colores.T_NEGRO + \"El mesero número %d está llevando la orden de la mesa %d\" %(self.num_mesero, num_cliente) + Colores.FIN)\n print(Colores.F_AZUL + Colores.T_NEGRO + \"Los platillos de la mesa %d se están preparando\" % (num_cliente) + Colores.FIN)\n print(Colores.F_BLANCO + Colores.T_NEGRO + \"El mesero número %d ha servido los platillos a la mesa del cliente %d\" % (self.num_mesero, num_cliente)+ Colores.FIN)\n\n def traerCuenta(self, num_cliente):\n print(\"El mesero número %d ha llevado la cuenta a la mesa del cliente %d\" % (self.num_mesero, num_cliente))\n\nclass Restaurante:\n def __init__(self, num_meseros, num_clientes):\n self.num_meseros = num_meseros\n self.num_clientes = num_clientes\n \n def recepcion(self):\n #Creamos los hilos de meseros y se agregan a la lista de meseros disponibles\n for i in range(self.num_meseros):\n threading.Thread(target = Mesero, args= [i]).start()\n \n #Creamos los hilos de clientes\n for i in range(self.num_clientes):\n #Generamos el número de acompañantes de manera aleatoria\n num_invitados = random.randrange(0,9) \n Mesa(i,num_invitados).iniciarCena()\n \n\nif __name__ == '__main__':\n # Recupera el valor de los argumentos\n num_clientes = int(sys.argv[1])\n restaurante = Restaurante(num_meseros,num_clientes)\n 
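# CLI layout implied by the sys.argv reads above: clients, waiters, tables;\n    # e.g. python proyecto2.py 10 3 4 runs 10 clients, 3 waiters, 4 tables.\n    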
restaurante.recepcion()","sub_path":"proyectos/2/FloresEmanuel-GarcíaAndrea/proyecto2.py","file_name":"proyecto2.py","file_ext":"py","file_size_in_byte":9032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"540695633","text":"import pygame\r\nfrom pygame.draw import *\r\n\r\npygame.init()\r\n\r\nFPS = 60\r\n\r\n# defining some colors\r\nyellow = (255, 247, 0)\r\nblack = (0, 0, 0)\r\nred = (255, 0, 0)\r\nwhite = (255, 255, 255)\r\nsky_color = (45, 235, 235) # color of the sky\r\nground_color = (26, 166, 13) # color of the ground\r\ndress_color = (232, 30, 225) # color of woman`s dress\r\nman_color = (129, 143, 148) # color of man`s shirt\r\nice_cream_color = (252, 228, 13) # color of ice cream\r\nchocolate_color = (210, 105, 30) # color of chocolate\r\n\r\n\"\"\"\r\nFOR ALL OBJECTS FURTHER\r\n\r\nsurface = surface where the image is needed to be put\r\nx, y = coordinates of the object(where to put object); may both be float and integer;\r\n(x, y) = (0, 0) is in the upper right corner\r\nr = dimension coefficient - how big is the object (float or integer)\r\n\"\"\"\r\n\r\n\r\n# drawing a face of a man and a woman\r\ndef face_draw(surface, x, y, r):\r\n # drawing head and encircling it\r\n circle(surface, yellow, (int(x), int(y)), int(r))\r\n circle(surface, black, (int(x), int(y)), int(r), 2)\r\n # drawing eyes\r\n circle(surface, red, (int(x - r // 3), int(y - r * 4 // 15)), r // 5) # left eye\r\n circle(surface, red, (int(x + r // 3), int(y - r * 4 // 15)), r // 6) # right eye\r\n circle(surface, black, (int(x - r // 3), int(y - r * 11 // 50)), r // 10) # left pupil\r\n circle(surface, black, (int(x + r // 3), int(y - r * 11 // 50)), r // 15) # right pupil\r\n # drawing hair\r\n line(surface, black, (int(x), int(y - r)), (int(x), int(y - 1.28 * r)))\r\n line(surface, black, (int(x - 0.71 * r), int(y - 0.71 * r)),\r\n (int(x - 1.15 * r), int(y - 1.15 * r)))\r\n line(surface, black, (int(x + 0.71 * r), int(y - 0.71 * r)),\r\n (int(x + 1.15 * r), int(y - 1.15 * r)))\r\n # drawing eyebrows\r\n polygon(surface, black, [(int(x - r * 8 // 15), int(y - r * 2 // 3)),\r\n (int(x - r * 2 // 15), int(y - r * 2 // 5)),\r\n (int(x - r * 11 // 50), int(y - r * 1 // 3)),\r\n (int(x - r * 8 // 15), int(y - r * 8 // 15))])\r\n polygon(surface, black, [(int(x + r * 8 // 15), int(y - r * 2 // 3)),\r\n (int(x + r * 2 // 15), int(y - r * 6 // 15)),\r\n (int(x + r * 37 // 150), int(y - r // 3)),\r\n (int(x + r * 3 // 5), int(y - r * 3 // 5))])\r\n # drawing mouth\r\n polygon(surface, black, [(int(x - r // 5), int(y + r * 2 // 3)),\r\n (int(x + r // 5), int(y + r * 2 // 3)),\r\n (int(x + r // 5), int(y + r * 11 // 15)),\r\n (int(x - r // 5), int(y + r * 11 // 15))])\r\n\r\n\r\n# defining dimensions of a screen\r\nscreen_x = 1200 # width of a display\r\nscreen_y = 700 # height of a display\r\n# drawing a screen\r\nscreen = pygame.display.set_mode((screen_x, screen_y))\r\nscreen.fill((250, 200, 100))\r\n# drawing a scenery\r\npolygon(screen, ground_color, [(0, screen_y // 2), (screen_x, screen_y // 2), (screen_x, screen_y), (0, screen_y)])\r\n\r\n\r\n# drawing sun\r\ndef sun_draw(surface, x, y, r):\r\n pygame.draw.circle(surface, (255, 255, 200), (x, y), r)\r\n\r\n\r\n# drawing balloon that is being hold by woman\r\ndef balloon_draw(surface, x, y, r):\r\n line(surface, black, (int(x), int(y)), (int(x), int(y - r)), 2)\r\n polygon(surface, red, ((int(x), int(y - r)), (int(x - r / 4), int(y - 1.5 * r)),\r\n (int(x + r / 4), int(y - 1.5 * r))))\r\n 
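# The two small circles appear meant to round off the balloon triangle's top\r\n    # edge; the 0.14*r offsets look hand-tuned rather than derived.\r\n    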
circle(surface, red, (round(x - 0.14 * r), round(y - 1.58 * r)), r // 6)\r\n circle(surface, red, (round(x + 0.14 * r), round(y - 1.58 * r)), r // 6)\r\n\r\n\r\ndef ice_cream_draw(surface, x, y, r):\r\n # drawing cone\r\n polygon(surface, ice_cream_color, ((int(x), int(y)), (x, int(y - 1.1 * r)),\r\n (int(x - r * 0.9), int(y - r * 0.6))))\r\n # drawing balls\r\n circle(surface, chocolate_color, (round(x - r * 0.7), round(y - r * 0.8)), round(r * 0.3))\r\n circle(surface, red, (round(x - r * 0.3), round(y - r * 1.1)), round(r * 0.3))\r\n circle(surface, white, (round(x - r * 0.65), round(y - r * 1.25)), round(r * 0.3))\r\n\r\n\r\n# drawing arms of a man\r\ndef arms_draw_man(surface, x, y, r):\r\n line(surface, black, (int(x), int(y - r)), (int(x - r * 4 // 5), int(y + r // 5)), 2) # left arm\r\n line(surface, black, (int(x), int(y - r)), (int(x + r * 4 // 5), int(y + r // 5)), 2) # right arm\r\n\r\n\r\n# drawing arms of a woman\r\ndef arms_draw_woman(surface, x, y, r):\r\n # drawing arms(right arm consists of 2 lines(not 1 line as in all previous variations))\r\n line(surface, black, (int(x), int(y - r * 0.6)), (int(x - r * 4 // 5), int(y + r // 5)), 2) # left arm\r\n # first part of right arm\r\n line(surface, black, (int(x), int(y - r * 0.6)), (int(x + r * 2 // 5), int(y - r // 5)), 2)\r\n # second part of right arm\r\n line(surface, black, (int(x + r * 2 // 5), int(y - r // 5)), (int(x + r), int(y - r // 2)), 2)\r\n balloon_draw(surface, int(x + r), int(y - r // 2), int(r))\r\n legs_draw(surface, x, y, r)\r\n\r\n\r\n# drawing legs\r\ndef legs_draw(surface, x, y, r):\r\n # drawing left leg\r\n line(surface, black, (int(x), int(y + r * 3 // 5)), (int(x - r * 0.55), int(y + r * 1.8)), 2)\r\n line(surface, black, (int(x - r * 0.55), int(y + r * 1.8)), (int(x - r * 4 // 5), int(y + r * 1.8)), 2)\r\n # drawing right leg\r\n line(surface, black, (int(x), int(y + r * 3 // 5)), (int(x + r * 0.35), int(y + r * 1.8)), 2)\r\n line(surface, black, (int(x + r * 0.35), int(y + r * 1.8)), (int(x + r * 0.55), int(y + r * 1.8)), 2)\r\n\r\n\r\n# drawing a man using predefined functions\r\ndef man_draw(surface, x, y, r):\r\n arms_draw_man(surface, x, y, r)\r\n ice_cream_draw(surface, int(x - r * 0.75), int(y + r // 5), r // 2) # man holds an ice cream\r\n legs_draw(surface, x, y, r)\r\n # drawing body and encircling it\r\n ellipse(surface, man_color, (int(x - r * 2 // 5), int(y - r), int(r * 4 // 5), int(2 * r)))\r\n ellipse(surface, black, (int(x - r * 2 // 5), int(y - r), int(r * 4 // 5), int(2 * r)), 3)\r\n # using predefined function to draw face\r\n face_draw(surface, x, y - r * 9 // 10, r // 3)\r\n\r\n\r\n# drawing a woman using predefined functions\r\ndef woman_draw(surface, x, y, r):\r\n arms_draw_woman(surface, x, y, r)\r\n # drawing dress and encircling it\r\n polygon(surface, dress_color, ((int(x - r * 0.5), int(y + r)),\r\n (x, int(y - r)), (int(x + r * 0.55), int(y + r))))\r\n polygon(surface, black, ((int(x - r * 0.55), int(y + r)),\r\n (x, int(y - r)), (int(x + r * 0.55), int(y + r))), 3)\r\n # using predefined function to draw face\r\n face_draw(surface, x, y - r * 9 // 10, r // 3)\r\n\r\n\r\n# bringing men and women together(making a pair)\r\ndef family_draw(surface, x, y, r):\r\n man_draw(surface, x - r * 4 // 5, y, int(r))\r\n woman_draw(surface, x + r * 4 // 5, y, int(r))\r\n\r\n\r\nsun_draw(screen, 400, 100, 1)\r\nfamily_draw(screen, 300, 400, 150)\r\nfamily_draw(screen, 900, 455.5, 150)\r\n\r\npygame.display.update()\r\nclock = pygame.time.Clock()\r\nfinished = False\r\n\r\nwhile not 
finished:\r\n clock.tick(FPS)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n finished = True\r\n\r\npygame.quit()\r\n","sub_path":"semya.py","file_name":"semya.py","file_ext":"py","file_size_in_byte":7228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"164420930","text":"#encoding: utf-8\n\n\nfrom django.conf.urls import url\nfrom food import views\n\n# from .views import RegisterView, ActiveView, \\\n# LoginView, LogoutView, \\\n# ResetPasswordView, ResetPasswordConfirmView, \\\n# ModifyPasswordView, ChangePasswordView, \\\n# UserExtBaseView, UserExtAvatarView,\\\n# UserAddressListView, UserAddressCreateView, UserAddressDeleteView, UserAddressUpdateView, \\\n# TestView\n\napp_name = 'food'\n\nurlpatterns = [\n url(r'^test/', views.test, name=\"test\"),\n url(r'^test1/', views.test1, name=\"test1\"),\n]","sub_path":"food/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"134513335","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\ndef removeNthFromEnd(head, n):\n \"\"\"\n :type head: ListNode\n :type n: int\n :rtype: ListNode\n \"\"\"\n\n prevNode = None\n firstNode = head\n currentNode = head\n counter = 0\n\n while currentNode:\n counter += 1\n if counter == n + 1:\n if prevNode:\n prevNode = prevNode.next\n else:\n prevNode = firstNode\n counter -= 1\n currentNode = currentNode.next\n\n if prevNode:\n if prevNode.next.next:\n prevNode.next = prevNode.next.next\n else:\n prevNode.next = None\n else:\n if firstNode:\n firstNode = firstNode.next\n\n return firstNode\n","sub_path":"p19_remove_nth_node_from_end_of_list.py","file_name":"p19_remove_nth_node_from_end_of_list.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"413868215","text":"def arrays_46_Couple(n,list = []):\n re = []\n re_0 = []\n if list[len(list)-1]+list[len(list)-2]= self.model.img_model.img_data.shape[1]:\n new_X0 = self.model.img_model.img_data.shape[1] - w - 1\n if new_Y0 < 0:\n new_Y0 = 0\n elif new_Y0+h >= self.model.img_model.img_data.shape[0]:\n new_Y0 = self.model.img_model.img_data.shape[0] - h - 1\n\n self.widget.line_X0_txt.setText(str(new_X0))\n self.widget.line_Y0_txt.setText(str(new_Y0))\n\n self.plot_lineout()\n\n def load_image(self):\n if self.model.RT_file is not None:\n filt_dx = float(self.widget.gauss_x_txt.text())\n filt_dy = float(self.widget.gauss_y_txt.text())\n if (filt_dx != self.filt_dx) or (filt_dy != self.filt_dy):\n self.model.img_model.load(self.model.RT_file, filt_dx=filt_dx, filt_dy=filt_dy)\n self.filt_dx = filt_dx\n self.filt_dy = filt_dy\n\n def plot_image(self):\n sm = self.widget.grad_sm_cb.isChecked()\n dIdx = self.widget.dIdx_btn.isChecked()\n dIdy = self.widget.dIdy_btn.isChecked()\n dI = self.widget.dI_btn.isChecked()\n if dIdx:\n if sm == True:\n self.widget.img_widget.plot_image(self.model.img_model.dIdx_gauss, \n rescale_factor = self.model.img_model.dIdx_gauss_rescale_factor, autoRange= True)\n else:\n self.widget.img_widget.plot_image(self.model.img_model.dIdx, \n rescale_factor = self.model.img_model.dIdx_rescale_factor, autoRange= True)\n elif dIdy:\n if sm == True:\n self.widget.img_widget.plot_image(self.model.img_model.dIdy_gauss, \n rescale_factor = 
self.model.img_model.dIdy_gauss_rescale_factor, autoRange= True)\n else:\n self.widget.img_widget.plot_image(self.model.img_model.dIdy, \n rescale_factor = self.model.img_model.dIdy_rescale_factor, autoRange= True)\n elif dI:\n if sm == True:\n self.widget.img_widget.plot_image(self.model.img_model.dI_gauss, \n rescale_factor = self.model.img_model.dI_gauss_rescale_factor, autoRange= True)\n else:\n self.widget.img_widget.plot_image(self.model.img_model.dI, \n rescale_factor = self.model.img_model.dI_rescale_factor, autoRange= True)\n else:\n if sm == True:\n self.widget.img_widget.plot_image(self.model.img_model.img_data_gauss, \n rescale_factor = self.model.img_model.gauss_rescale_factor, autoRange= True)\n else:\n self.widget.img_widget.plot_image(self.model.img_model.img_data, \n rescale_factor = self.model.img_model.rescale_factor, autoRange= True)\n self.widget.img_widget.auto_range()\n self.plot_lineout()\n\n\n def plot_lineout(self):\n X0 = int(self.widget.line_X0_txt.text())\n w = int(self.widget.line_w_txt.text())\n Y0 = int(self.widget.line_Y0_txt.text())\n h = int(self.widget.line_h_txt.text())\n\n ind_x, line_x, ind_y, line_y = self.model.img_model.get_lines(self.widget)\n\n if self.line_img_view is not None:\n self.line_img_view.removeItem(self.line_rect)\n del self.line_img_view\n self.line_vals = None\n\n \n self.line_vals = {'X0': X0, 'w':w, 'Y0':Y0, 'h':h}\n self.line_rect, self.line_img_view = self.widget.img_widget.draw_rectangle(self.line_vals)\n\n self.widget.line_x_widget.clear()\n self.widget.line_y_widget.clear()\n self.widget.line_x_widget.plot(x=ind_x, y=line_x)\n self.widget.line_y_widget.plot(x=ind_y, y=line_y)\n\n def dIdy_btn_clicked(self):\n self.widget.dIdx_btn.setChecked(False)\n self.widget.dI_btn.setChecked(False)\n self.update_all()\n\n def dIdx_btn_clicked(self):\n self.widget.dIdy_btn.setChecked(False)\n self.widget.dI_btn.setChecked(False)\n self.update_all()\n\n def dI_btn_clicked(self):\n self.widget.dIdx_btn.setChecked(False)\n self.widget.dIdy_btn.setChecked(False)\n self.update_all()\n\n def update_img_mouse_position_lbl(self, x, y):\n data = self.model.img_model.img_data\n try:\n if (x >= 0 and y > 0) and (x < len(data[1,:]) and y < len(data[:,1])):\n val = (data.T[int(np.round(x)), int(np.round(y))])\n str = 'x: %4.0f y: %4.0f %.2f PSL' % (x, y, val)\n\n else:\n str = 'x: %4.0f y: %4.0f PSL: 0' % (x, y)\n except (IndexError, AttributeError):\n str = 'x: %.0f y: %.0f PSL: 0' % (x, y)\n self.widget.pos_lbl.setText(str)\n\n def update_lineout_mouse_position_lbl(self, x, y):\n try:\n pix_ind = self.model.img_model.x_init\n data = self.model.img_model.y_init\n tm = np.abs(x - pix_ind)\n index = np.argmin(tm)\n if x >= 0:\n val = data.T[index]\n str = u'x: %.1f \\u03BCm y: %.3f \\u03BCm' % (x, val)\n\n else:\n str = u'x: %.1f \\u03BCm y: %.3f \\u03BCm' % (x, y)\n except (IndexError, AttributeError):\n str = u'x: %.1f \\u03BCm y: %.1f \\u03BCm' % (x, y)\n self.widget.pos_lbl.setText(str)\n\n def update_corrected_mouse_position_lbl(self, x, y):\n try:\n pix_ind = self.model.img_model.init_xcor\n data = self.model.img_model.init_ycor\n tm = np.abs(x - pix_ind)\n index = np.argmin(tm)\n if x >= 0:\n val = data.T[index]\n str = u'x: %.1f \\u03BCm y: %.3f \\u03BCm' % (x, val)\n\n else:\n str = u'x: %.1f \\u03BCm y: %.3f \\u03BCm' % (x, y)\n except (IndexError, AttributeError):\n str = u'x: %.1f \\u03BCm y: %.3f \\u03BCm' % (x, y)\n 
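# The mouse-position handlers in this controller all resolve the cursor to the
# nearest data sample with np.argmin over absolute differences; the lookup in
# isolation (array contents are illustrative, not taken from the widget):
import numpy as np

def nearest_index(grid, x):
    """Index of the grid entry closest to x; the grid need not be sorted."""
    return int(np.argmin(np.abs(np.asarray(grid) - x)))

assert nearest_index([0.0, 0.5, 1.25, 2.0], 1.1) == 2   # 1.25 is the closest value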
self.widget.pos_lbl.setText(str)","sub_path":"rt/Controller/LineoutController.py","file_name":"LineoutController.py","file_ext":"py","file_size_in_byte":7884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"301979976","text":"#서울시 애견카페 정보,미세먼지 파싱을 위한 .py 파일입니다.\r\n\r\n\r\nimport requests as rq\r\nimport bs4\r\nimport threading\r\nfrom bs4 import BeautifulSoup\r\n# 아래 4줄을 추가해 줍니다.\r\nimport os\r\n# Python이 실행될 때 DJANGO_SETTINGS_MODULE이라는 환경 변수에 현재 프로젝트의 settings.py파일 경로를 등록합니다.\r\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"djangoreactapi.settings\")\r\n# 이제 장고를 가져와 장고 프로젝트를 사용할 수 있도록 환경을 만듭니다.\r\nimport django\r\ndjango.setup()\r\n\r\n# CafeInfo를 import해옵니다\r\nfrom parsed_data.models import CafeInfo, microDustInfo\r\n\r\n#서울시 구에 따라 애견카페 정보를 나타내는 네이버 html 폼이 다릅니다.\r\n#이러한 이유로 2가지 html 폼에 따라 크롤링을 진행합니다.\r\nPlaces1 = [\"도봉구\",\"강북구\",\"은평구\",\"중랑구\",\"서대문구\",\"강서구\", \"마포구\",\"중구\",\\\r\n\"광진구\",\"영등포구\",\"용산구\",\"구로구\",\"동작구\",\"강남구\",\"관악구\",\"금천구\"]\r\nPlaces2 = [\"노원구\",\"종로구\",\"성북구\",\"동대문구\",\"성동구\",\"양천구\",\"송파구\",\"서초구\"]\r\n\r\n#Places1 딕셔너리에 포함된 '구'에 위치한 애견카페를 크롤링하는 함수\r\ndef places1_get():\r\n #각 구들 마다 크롤링을 진행하기 위한 반복문\r\n for i in range(0,len(Places1)):\r\n url = \"https://search.naver.com/search.naver?sm=tab_hty.top&where=nexearch&query=\"+Places1[i]+\"+애견카페\"\r\n res = rq.get(url)\r\n soup = BeautifulSoup(res.content,\"lxml\") #Python HTML parser\r\n\r\n titles = soup.select('span.tit_inner > a') #['title'],['href'] #카페 이름, 카페 상세페이지 url 가져오기\r\n\r\n\r\n #파싱한 카페 개수만큼 반복문 실행. 각 데이터 요소를 DB에 저장합니다.\r\n for j in range(0,len(titles)):\r\n detail_url = titles[j]['href']\r\n detail_res = rq.get(detail_url) #카페 주소 파싱하기\r\n soup = BeautifulSoup(detail_res.content,\"lxml\") #Python HTML parser\r\n\r\n detail_title = titles[j]['title']\r\n print(detail_title) #카페 이름 출력\r\n\r\n #카페 이미지가 있을 경우 출력, 없을 경우 변수에 'none'문자열 할당\r\n if soup.select('div.thumb > img'):\r\n img_pars=soup.select('div.thumb > img') #카페 이미지 파싱\r\n detail_img = img_pars[0].get('src')\r\n print(detail_img)\r\n else:\r\n detail_img = 'none'\r\n print(detail_img)\r\n #카페 전화번호가 있을 경우 출력, 없을 경우 변수에 'none'문자열 할당\r\n if soup.select('div.list_item.list_item_biztel > div.txt') :\r\n detail_call=soup.select('div.list_item.list_item_biztel > div.txt')[0].text # 카페 전화번호 파싱\r\n print(detail_call)\r\n else:\r\n detail_call = 'none'\r\n print(detail_call)\r\n\r\n #카페 주소가 있을 경우 출력, 없을 경우 변수에 'none'문자열 할당\r\n if soup.select('li > span.addr') :\r\n detail_addr=soup.select('li > span.addr')[0].text #카페 주소 파싱\r\n print(detail_addr)\r\n else :\r\n detail_addr = 'none'\r\n print(detail_addr)\r\n\r\n CafeInfo(cafeTitles=detail_title,cafeCallNums=detail_call,cafeImgs=detail_img,cafeAddrs=detail_addr).save()\r\n\r\n#Places2 딕셔너리에 포함된 '구'에 위치한 애견카페를 크롤링하는 함수\r\ndef places2_get():\r\n #각 구들 마다 크롤링을 진행하기 위한 반복문\r\n for i in range(0,len(Places2)):\r\n url = \"https://search.naver.com/search.naver?sm=tab_hty.top&where=nexearch&query=\"+Places2[i]+\"+애견카페\"\r\n res = rq.get(url)\r\n soup = BeautifulSoup(res.content,\"lxml\") #Python HTML parser\r\n\r\n titles = soup.select('dl.info_area > dt > a') #['title'],['href'] #카페 이름, 카페 상세페이지 url 가져오기\r\n\r\n#파싱한 카페 개수만큼 반복문 실행. 각 데이터 요소를 DB에 저장합니다.\r\n for j in range(0,len(titles)):\r\n #카페 상세 페이지 url을 가져와서. 해당 url로 바로 접속하면 크롤링을\r\n #할 수 없는 화면으로 이동한다. 
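# The places*_get crawlers in this script repeat the "if soup.select(css): ...
# else 'none'" fallback for every scraped field; the same guard as a small helper
# (a sketch, the helper name is not part of the script):
def select_text_or(soup, css, default='none'):
    """Text of the first element matching css, or default when nothing matches."""
    hits = soup.select(css)          # soup is a bs4.BeautifulSoup instance
    return hits[0].text if hits else default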
url의 code data를 통해 우회 접속한다.\r\n #카페 상세 페이지에 접속하기 위한 code data를 split해서 변수에 저장한다.\r\n split_url = titles[j]['href'];\r\n url_split = split_url.split('code=')\r\n #우회 접속할 접속 url\r\n detail_url =\"https://store.naver.com/restaurants/detail?id=\"+url_split[1]+\"&tab=photo\"\r\n detail_res = rq.get(detail_url) #카페 주소 파싱하기\r\n soup = BeautifulSoup(detail_res.content,\"lxml\") #Python HTML parser\r\n\r\n detail_title = titles[j]['title']\r\n print(detail_title) #카페 이름 출력\r\n\r\n #카페 이미지가 있을 경우 출력, 없을 경우 변수에 'none'문자열 할당\r\n if soup.select('div.thumb > img'):\r\n img_pars=soup.select('div.thumb > img') #카페 이미지 파싱\r\n detail_img = img_pars[0].get('src')\r\n print(detail_img)\r\n else:\r\n detail_img = 'none'\r\n print(detail_img)\r\n\r\n #카페 전화번호가 있을 경우 출력, 없을 경우 변수에 'none'문자열 할당\r\n if soup.select('div.list_item.list_item_biztel > div.txt')[0].text :\r\n detail_call=soup.select('div.list_item.list_item_biztel > div.txt')[0].text # 카페 전화번호 파싱\r\n print(detail_call)\r\n else:\r\n detail_call = 'none'\r\n print(detail_call)\r\n\r\n #카페 주소가 있을 경우 출력, 없을 경우 변수에 'none'문자열 할당\r\n if soup.select('li > span.addr')[0].text :\r\n detail_addr=soup.select('li > span.addr')[0].text #카페 주소 파싱\r\n print(detail_addr)\r\n else :\r\n detail_addr = 'none'\r\n print(detail_addr)\r\n\r\n CafeInfo(cafeTitles=detail_title,cafeCallNums=detail_call,cafeImgs=detail_img,cafeAddrs=detail_addr).save()\r\n\r\n\r\n#서울시 각 구의 미세먼지 농도를 크롤링하는 함수\r\ndef microDustInfoGet_func(second):\r\n url = \"https://search.naver.com/search.naver?where=nexearch&sm=tab_etc&mra=blQ3&query=%EC%84%9C%EC%9A%B8%20%EB%AF%B8%EC%84%B8%EB%A8%BC%EC%A7%80\"\r\n res = rq.get(url)\r\n soup = BeautifulSoup(res.content,\"lxml\") #Python HTML parser\r\n\r\n wheres = soup.select('div.main_box > div.map_area.ct09 > a > span.cityname') # 서울시 각 구 이름 크롤링\r\n values = soup.select('div.main_box > div.map_area.ct09 > a > span.value > em') # 서울시 각 구의 미세먼지 농도 크롤링\r\n when = soup.select('div.info_box > div.guide_bx > div.guide > span.update > em')[0].text# 미세먼지 측정 시간 기록\r\n\r\n for i in range(0,len(wheres)):\r\n print(wheres[i].text)\r\n print(values[i].text)\r\n # 서울시 각 구의 미세먼지 데이터를 저장한다.\r\n #저장하는 순서는 다음과 같다. 
1.종로 2.중구 3.용산 4.성동 5.광진 6.동대문 7.중랑 8.성북 9.강북 10.도봉 11.노원 12.은평\r\n # 13.서대문 14.마포 15.양천 16.강서 17.구로 18.금천 19.영등포 20.동작 21.관악 22.서초 23.강남 24.송파 25.강동\r\n\r\n #배열 인덱스 i에 따라 서울시의 각 구의 미세먼지 농도를 db에 입력하는 switch\r\n microDustInfo(측정시간=when,종로=values[0].text,중구=values[1].text,용산=values[2].text,성동=values[3].text,\\\r\n 광진=values[4].text,동대문=values[5].text,중랑=values[6].text,성북=values[7].text,강북=values[8].text,도봉=values[9].text,\\\r\n 노원=values[10].text,은평=values[11].text,서대문=values[12].text,마포=values[13].text,양천=values[14].text,\\\r\n 강서=values[15].text,구로=values[16].text,금천=values[17].text,영등포=values[18].text,동작=values[19].text,\\\r\n 관악=values[20].text,서초=values[21].text,강남=values[22].text,송파=values[23].text,강동=values[24].text).save()\r\n\r\n threading.Timer(second, microDustInfoGet_func, [second]).start()\r\n\r\n\r\n# 이 명령어는 이 파일이 import가 아닌 python에서 직접 실행할 경우에만 아래 코드가 동작하도록 합니다.\r\nif __name__=='__main__':\r\n microDustInfoGet_func(3600.0)\r\n","sub_path":"walkadog-server/backend/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":8639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"228373298","text":"import sys\nimport os\n#input(\"press\")\nnum=23;\nrunning = True;\n\nwhile running:\n\tguess=int(input(\"pls enter an integer:\"))\n\tif guess == num:\n\t\tprint ('Congratuations, you guess it.')\n\t\tprint ('Done')\n\t\trunning = False\n\telif guess < num:\n\t\tprint ('lower.')\n\telse:\n\t\tprint ('higher')\t\nelse:\n\tprint (\"The while loop is over\")\n\t\nos.system(\"pause\")\n\n","sub_path":"while.py","file_name":"while.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"417529034","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom web.views import home, about, project, support, members, contact, faq, report, style\nfrom login.views import login\nfrom auth.views import extra_data, entry\nfrom projects.views import dashboard, user, users, temp\nfrom django.conf import settings\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', home.HomeView.as_view(), name='home'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^about\\.html$', about.AboutView.as_view(), name='about'),\n url(r'^elements$', style.StyleView.as_view(), name='styleguide'),\n url(r'^report\\.html$', report.ReportView.as_view(), name='report'),\n url(r'^faq\\.html$', faq.FaqView.as_view(), name='faq'),\n url(r'^support\\.html$', support.SupportView.as_view(),\n name='support'),\n url(r'^members$', members.MembersView.as_view(), name='members'),\n url(r'^contact$', contact.ContactView.as_view(), name='contact'),\n url(r'^project/(?P[^/]+)\\.html$',\n project.ProjectView.as_view(),\n name='project'),\n url(r'', include('auth.urls', namespace='social')),\n url(r'^email/$', extra_data.ExtraDataView.as_view(), name='require_extra_data'),\n url(r'^logout/$', entry.LogoutView.as_view(), name='logout'),\n url(r'^join/$', login.LoginView.as_view(), name='join'),\n url(r'^user/(?P\\d+)$', user.UserView.as_view(), name='user'),\n url(r'^supplies/$', users.UsersView.as_view(), name='users'),\n url(r'^temp/$', temp.TempView.as_view(), name='temp'),\n url(r'^dashboard/$', dashboard.DashboardView.as_view(), name='dash'),\n)\n\nif settings.DEBUG:\n from django.conf.urls.static import static\n import debug_toolbar\n urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n 
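# microDustInfoGet_func above reschedules itself by arming a threading.Timer at
# the end of every run, so a single call keeps the crawler polling indefinitely;
# the bare pattern, with a hypothetical callback (left commented so nothing loops):
import threading

def every(seconds, fn):
    fn()                                                    # do the work
    threading.Timer(seconds, every, [seconds, fn]).start()  # arm the next run

# every(3600.0, poll_sensors)   # poll_sensors is illustrative, not defined above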
urlpatterns += patterns('',\n url(r'^__debug__/', include(debug_toolbar.urls)),\n )","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"290327062","text":"from django.test import TestCase\nfrom datetime import datetime\nimport pytz\nfrom drs.models import Form, User, Division\n\nclass FormModelTest(TestCase):\n @classmethod\n def setUpTestData(cls):\n # Set up non-modified objects used by all test methods\n test_user1 = User.objects.create_user(email='testuser1@test.com', password='1X 0:\n extrapenalty += showsseen * (afstandtijdtable[vorige][index] / 5)\n if index > 26: # this is a show!\n shownumber = index % 27\n for startingtime in showtijdtable[shownumber]:\n if totaletijd < startingtime:\n # take this one.\n # you could take the total time and leverage the time after any show\n # why might that not work ?\n # Because then no objective evaluation would be made when deciding whether to do an\n # attraction after of before a show..\n # What else can you do?\n # take total time as evaluator, penalize for the time spent on attractions after show.\n # This way, they are preferably done before a show. Penalize heavily?\n\n # So. Put show in first following showtime.\n pausetime += startingtime - totaletijd\n totaletijd = startingtime + showduration[shownumber]\n showsseen += 1\n\n break\n\n if totaletijd > 117:\n totaletijd += 20\n else:\n\n totaletijd += wachttijdentable[index][int(round(totaletijd))] / 5\n\n if showsseen > 0:\n extrapenalty += showsseen * (wachttijdentable[index][int(round(totaletijd))] / 5)\n\n totaletijd += duurtijdtable[index] / 300\n vorige = index\n\n # print((totaletijd-startuur-pausetime+extrapenalty)/12)\n\n return (totaletijd - startuur - pausetime + extrapenalty) / 12\n\n @staticmethod\n def evaluate2(afstandtijdtable, wachttijdentable, duurtijdtable, permutatie, translationtable, location, startuur,\n showtijdtable, showduration, indextable):\n # 1-- [2,5,23,13,9,16,24,8,3,12]\n # 2-- [17,7,11,21,10,19,15,14,20,18]\n # 3-- [22,6,1,4,2,5,23,13,9]\n # 4-- [18,20,21,17,7,11,19,15,14]\n # 5-- [6,22,20,18,23,5,13,9,16]\n\n\n totaletijd = startuur\n vorige = location\n\n schedule = []\n pausetime = 0\n showsseen = 0\n extrapenalty = 0\n for i in permutatie:\n index = translationtable[i]\n schedule.append(makeline(getTime(totaletijd),\n [': wandel ', str(round(afstandtijdtable[vorige][index], 2)), ' minuten']))\n # print(makeline(getTime(totaletijd),[': wandel ', str(round(afstandtijdtable[vorige][index])), ' minuten']))\n totaletijd += afstandtijdtable[vorige][index] / 5\n\n if showsseen > 0:\n extrapenalty += showsseen * (afstandtijdtable[vorige][index] / 5)\n if index > 26: # this is a show!\n shownumber = index % 27\n for startingtime in showtijdtable[shownumber]:\n if totaletijd < startingtime:\n # take this one.\n # you could take the total time and leverage the time after any show\n # why might that not work ?\n # Because then no objective evaluation would be made when deciding whether to do an\n # attraction after of before a show..\n # What else can you do?\n # take total time as evaluator, penalize for the time spent on attractions after show.\n # This way, they are preferably done before a show. Penalize heavily?\n\n # So. 
Put show in first following showtime.\n pausetime += startingtime - totaletijd\n totaletijd = startingtime + showduration[shownumber]\n showsseen += 1\n\n break\n\n if totaletijd > 117:\n print('park closed')\n\n else:\n schedule.append(\n makeline(getTime(totaletijd),\n [': wacht ', str(round(wachttijdentable[index][int(round(totaletijd))], 2)), ' minuten']))\n totaletijd += wachttijdentable[index][int(round(totaletijd))] / 5\n\n if showsseen > 0:\n extrapenalty += showsseen * (wachttijdentable[index][int(round(totaletijd))] / 5)\n schedule.append(\n makeline(getTime(totaletijd),\n [': Joepi! ', str(round(duurtijdtable[index] / 60, 2)), ' minuten op de ',\n str(indextable[index])]))\n totaletijd += duurtijdtable[index] / 300\n vorige = index\n for l in schedule:\n print(''.join(l))\n\n print((totaletijd - startuur - pausetime) / 12, permutatie)\n\n return round(totaletijd)\n\n\ndef makeline(a, b):\n for i in b:\n a.append(i)\n return a\n\n\ndef getTime(uur):\n achtdertig = 102 * 5\n minutentijd = uur * 5\n totalhourinminutes = achtdertig + minutentijd\n hour = totalhourinminutes // 60\n minutes = totalhourinminutes % 60\n if len(str(int(minutes))) == 1:\n minutes = '0' + str(int(minutes))\n else:\n minutes = str(int(minutes))\n return [str(int(hour)), 'h', minutes]\n\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"genetic_PSO/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":6460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"534513569","text":"# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\ntest ldc2d_steady_re10\n\"\"\"\n# 导入模块\nimport os\nimport sys\nimport subprocess\n\nimport pytest\nimport numpy as np\n\nfrom tools.log_analysis import get_last_epoch_loss, get_last_eval_metric\n\n\ndef test_ldc2d_steady_re10_exit_code():\n \"\"\"\n 测试函数:测试 ldc2d_steady_Re10.py 脚本的退出码是否为 0 以保证可视化文件的正常保存\n \"\"\"\n # 定义变量\n output_dir = \"./ldc2d_steady_Re10\" # 输出目录\n epoch_num = 10 # 迭代次数\n py_version = os.getenv(\"py_version\", \"3.8\") # Python 版本号,从环境变量中获取,默认值为3.8\n\n # 执行命令行命令,运行 ldc2d_unsteady_Re10.py 脚本\n command = f\"python{py_version} ../../examples/ldc/ldc2d_steady_Re10.py \\\n --epochs={epoch_num} \\\n --output_dir={output_dir}\"\n process = subprocess.Popen(command, shell=True)\n\n # 等待脚本执行完成,并返回退出码\n exit_code = process.wait()\n\n # 断言退出码为 0\n assert exit_code == 0\n\n\ndef test_ldc2d_steady_re10_loss():\n \"\"\"\n test loss\n \"\"\"\n epoch_num = 10 # 迭代次数\n output_dir = \"./ldc2d_steady_Re10\" # 输出目录\n base_loss = 56.74956 # 基准损失值\n\n # 获取训练过程的日志文件并计算最后一轮迭代的损失值\n log_file = os.path.join(output_dir, \"train.log\")\n last_loss = get_last_epoch_loss(log_file, epoch_num)\n\n # 断言最后一轮迭代的损失值与基准\n assert np.allclose(float(last_loss), base_loss, rtol=1e-6)\n\n\ndef test_ldc2d_steady_re10_metric():\n \"\"\"\n 测试函数:测试 ldc2d_steady_Re10.py 的评估值\n \"\"\"\n output_dir = \"./ldc2d_steady_Re10\" # 输出目录\n loss_function = \"Residual\" # 
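# The evaluator above scans showtijdtable linearly for the first start time after
# the current clock ("put show in first following showtime"); with sorted start
# times, bisect performs the identical strict comparison in O(log n). A sketch
# with made-up times:
from bisect import bisect_right

def next_start(start_times, now):
    """First start strictly after now, or None once the last show has begun."""
    i = bisect_right(start_times, now)
    return start_times[i] if i < len(start_times) else None

assert next_start([10, 40, 70], 40) == 70   # a show starting exactly now is missed
assert next_start([10, 40, 70], 80) is None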
损失函数``\n base_metric = 2347.78743 # 基准评估值\n\n # 获取训练过程的日志文件并计算最后一轮迭代的评估值\n log_file = os.path.join(output_dir, \"train.log\")\n last_metric = get_last_eval_metric(log_file, loss_function)\n\n assert np.allclose(float(last_metric), base_metric, rtol=1e-6)\n\n\nif __name__ == \"__main__\":\n # 使用 pytest 模块运行测试函数\n code = pytest.main([sys.argv[0]])\n sys.exit(code)\n","sub_path":"models/PaddleScience/CI/test_models/test_ldc2d_steady_re10.py","file_name":"test_ldc2d_steady_re10.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"110508547","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 27 11:39:00 2017\n\n@author: muratunal\n\nDefinitions and computations\n http://arxiv.org/abs/1212.2831\n \n\"\"\"\n\nimport numpy as np\nfrom trajectory_entropy import local_entropy\n\ndef calculate_alfa_matrix(P,IND,u,d):\n L = np.copy(P)\n UI = IND[u]\n DI = IND[d]\n \n L[UI,:] = np.zeros((1,L.shape[1]))\n L[UI,UI] = 1\n L[DI,:] = np.zeros((1,L.shape[1]))\n L[DI,DI] = 1\n \n perr = [x for x in range(L.shape[0] - 2)]\n if UI < DI:\n perr.insert(UI,L.shape[0] - 2)\n perr.insert(DI,L.shape[0] - 1)\n elif DI < UI:\n perr.insert(DI,L.shape[0] - 1)\n perr.insert(UI,L.shape[0] - 2)\n per = np.argsort(perr)\n A = L[:,per]\n A = A[per,:]\n \n Q = A[0:-2,0:-2] \n I = np.identity(Q.shape[0])\n temp = (I - Q)\n N = np.linalg.inv(temp)\n R = A[0:-2,-2:]\n res = np.dot(N,R)\n \n ALFAIND = {}\n for area in IND.keys():\n ALFAIND[area] = perr[IND[area]]\n \n return res[:,-2],ALFAIND\n\ndef p_transformation(P,IND,s,u,d):\n L = np.copy(P)\n UI = IND[u]\n DI = IND[d]\n \n L[UI,:] = np.zeros((1,L.shape[1]))\n L[UI,UI] = 1\n L[DI,:] = np.zeros((1,L.shape[1]))\n L[DI,DI] = 1\n \n alfas,ALFAIND = calculate_alfa_matrix(P,IND,u,d)\n INDR = {y:x for x,y in IND.items()}\n \n for row in range(L.shape[0]): \n if row != UI and row != DI:\n if alfas[ALFAIND[INDR[row]]] < 1:\n for col in range(L.shape[1]):\n ai = alfas[ALFAIND[INDR[row]]]\n \n if col == UI:\n aj = 1\n elif col == DI:\n aj = 0\n else:\n aj = alfas[ALFAIND[INDR[col]]]\n \n coff = (1-aj)/(1-ai)\n res = coff*L[row,col]\n L[row,col] = res\n \n return L\n\ndef compute_int_ent(P,IND,s,u,d):\n L = p_transformation(P,IND,s,u,d) \n SI = IND[s]\n UI = IND[u]\n DI = IND[d]\n \n L2 = np.copy(L)\n \n partition = L[:,UI]\n pnodes = np.where(partition == 1)[0]\n for node in reversed(pnodes):\n L2 = np.delete(L2, (node), axis=0)\n L2 = np.delete(L2, (node), axis=1)\n \n SI -= sum(i < SI for i in pnodes)\n DI -= sum(i < DI for i in pnodes)\n \n Q = np.delete(L2, (DI), axis=0)\n Q = np.delete(Q, (DI), axis=1)\n I = np.identity(Q.shape[0])\n temp = (I - Q)\n N = np.linalg.inv(temp)\n ent = local_entropy(Q)\n \n if SI > DI:\n SI -= 1\n \n try:\n res = np.dot(N[SI,:],np.transpose(ent[:,0]))\n except:\n print('Problematic path sequence?')\n \n return res\n\ndef compute_ent(P,IND,s,traj,d):\n DI = IND[d]\n \n summation = 0.0 \n traj.insert(0,s)\n\n ULI = IND[traj[-1]]\n\n if len(traj) > 1:\n for k in range(len(traj) - 1):\n u1 = traj[k]\n u2 = traj[k+1]\n try:\n summation += compute_int_ent(P,IND,u1,d,u2)\n except:\n print('Error in Path Calculation.')\n \n Q = np.delete(P, (DI), axis=0)\n Q = np.delete(Q, (DI), axis=1)\n I = np.identity(Q.shape[0])\n temp = (I - Q)\n N = np.linalg.inv(temp)\n ent = local_entropy(Q)\n \n if ULI > DI:\n ULI -= 1\n \n try:\n last = np.dot(N[ULI,:],np.transpose(ent[:,0]))\n except:\n print('Problematic path sequence?')\n \n summation 
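# calculate_alfa_matrix above is the textbook absorbing-Markov-chain recipe: put
# the transition matrix in canonical form [[Q, R], [0, I]], invert to get the
# fundamental matrix N = (I - Q)^-1, and N @ R yields absorption probabilities.
# A tiny numeric check with made-up transition values:
import numpy as np

Q = np.array([[0.0, 0.5],        # transient -> transient
              [0.5, 0.0]])
R = np.array([[0.5, 0.0],        # transient -> absorbing
              [0.0, 0.5]])
N = np.linalg.inv(np.eye(2) - Q)           # expected visit counts
B = N @ R                                  # B[i, j] = P(absorbed in j | start i)
assert np.allclose(B, [[2/3, 1/3], [1/3, 2/3]])
assert np.allclose(B.sum(axis=1), 1.0)     # every walk is eventually absorbed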
+= last\n \n return summation","sub_path":"cond_traj_entropy.py","file_name":"cond_traj_entropy.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"501686888","text":"import csv\nimport os\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom peer_review.decorators.adminRequired import admin_required\nfrom peer_review.forms import DocumentForm, UserForm\nfrom peer_review.models import User, Document\nfrom peer_review.view.userFunctions import user_error\nfrom peer_review.views import generate_otp, hash_password\n\n\n@admin_required\ndef add_csv_info(user_list):\n for row in user_list:\n otp = generate_otp()\n module_dir = os.path.dirname(__file__)\n file_path = os.path.join(module_dir)\n file = open(file_path + '/../text/otp_email.txt', 'a+')\n file.seek(0)\n email_text = file.read()\n file.close()\n\n password = hash_password(otp)\n\n user = User(userId=row['user_id'], password=password, status=\"U\", title=row['title'],\n initials=row['initials'], name=row['name'], surname=row['surname'],\n cell=row['cell'], email=row['email'])\n\n user.save()\n return # todo return render request\n\n\n@admin_required\ndef submit_csv(request):\n if not request.user.is_authenticated():\n return user_error(request)\n\n global errortype\n file_path = \"\"\n if request.method == 'POST':\n users = User.objects.all\n user_form = UserForm()\n doc_form = DocumentForm()\n\n module_dir = os.path.dirname(__file__)\n file_path = os.path.join(module_dir)\n file = open(file_path + '/../text/email.txt', 'r+')\n email_text = file.read()\n file.close()\n form = DocumentForm(request.POST, request.FILES)\n if form.is_valid():\n newdoc = Document(docfile=request.FILES['docfile'])\n newdoc.save()\n\n file_path = newdoc.docfile.url\n file_path = file_path[1:]\n\n user_list = list()\n error = False\n\n # documents = Document.objects.all()\n\n count = 0\n with open(file_path) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n valid = validate(row)\n count += 1\n if valid == 1:\n # title = row['title']\n # initials = row['initials']\n # name = row['name']\n # surname = row['surname']\n # email = row['email']\n # cell = row['cell']\n #\n # userId = row['user_id']\n # status = row['status']\n # OTP = generate_OTP()\n # generate_otp_email(OTP, name, surname, email, userID)\n # password = hash_password(OTP)\n\n user_list.append(row)\n # ToDo check for errors in multiple rows\n else:\n error = True\n if valid == 0:\n message = \"The format of the CSV is incorrect.\"\n errortype = 0\n return render(request, 'peer_review/userAdmin.html',\n {'message': message,\n 'error': errortype,\n 'users': users,\n 'userForm': user_form,\n 'docForm': doc_form,\n 'email_text': email_text})\n else:\n message = str(count)\n\n rowlist = list()\n rowlist.append(row['title'])\n rowlist.append(row['initials'])\n rowlist.append(row['name'])\n rowlist.append(row['surname'])\n rowlist.append(row['email'])\n rowlist.append(row['cell'])\n rowlist.append(row['user_id'])\n\n if valid == 2:\n errortype = 2\n if valid == 3:\n errortype = 3\n if valid == 4:\n errortype = 4\n\n csvfile.close()\n\n if os.path.isfile(file_path):\n os.remove(file_path)\n\n return render(request, 'peer_review/userAdmin.html',\n {'message': message, \n 'row': rowlist, \n 'error': errortype, \n 'users': users,\n 'userForm': user_form,\n 'docForm': doc_form,\n 'email_text': email_text})\n else:\n form = DocumentForm()\n message = \"Oops! 
Something seems to be wrong with the CSV file.\"\n errortype = \"No file selected.\"\n return render(request, 'peer_review/csvError.html', {'message': message, 'error': errortype})\n\n if not error:\n #todo: add confirmation dialog, and print out names of new users\n add_csv_info(user_list)\n return render(request, 'peer_review/userAdmin.html',\n {'new_users': user_list, \n 'users': users,\n 'userForm': user_form,\n 'docForm': doc_form,\n 'email_text': email_text})\n\n if os.path.isfile(file_path):\n os.remove(file_path)\n return HttpResponseRedirect('../')\n\n\ndef validate(row):\n # 0 = incorrect number of fields\n # 1 = correct\n # 2 = missing value/s\n # 3 = incorrect format\n # 4 = user already exists\n\n if len(row) < 7:\n return 0\n\n for key, value in row.items():\n if value is None:\n return 2\n\n for key, value in row.items():\n if key == \"cell\":\n try:\n int(value)\n except ValueError:\n return 3\n\n user = User.objects.filter(userId=row['user_id'])\n\n if user.count() > 0:\n return 4\n\n return 1","sub_path":"peer_review/view/userAdmin.py","file_name":"userAdmin.py","file_ext":"py","file_size_in_byte":6471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"252020038","text":"#! /usr/bin/env python\n\nimport rospy\nfrom my_python_class.srv import MyCustomServiceMessage, MyCustomServiceMessageResponse \nfrom bb8_move_circle_class import MoveBB8\n\ndef my_callback(request):\n rospy.loginfo(\"The Service move_bb8_in_circle has been called\")\n movebb8_object = MoveBB8()\n i = 0\n while i < request.duration:\n movebb8_object.move_bb8(0.2, 0.2)\n rate.sleep()\n i += 1\n\n movebb8_object.move_bb8(0, 0) \n rospy.loginfo(\"Finished service move_bb8_in_circle\")\n \n response = MyCustomServiceMessageResponse()\n response.success = True\n return response \n\nrospy.init_node('service_move_bb8_in_circle_server') \nmy_service = rospy.Service('/move_bb8_in_circle', MyCustomServiceMessage, my_callback)\nrate = rospy.Rate(1)\nrospy.loginfo(\"Service /move_bb8_in_circle Ready\")\nrospy.spin() # keep the service open.","sub_path":"Robotics/The Construct/ROS Basics in Python/my_python_class/bb8_move_circle_service_server.py","file_name":"bb8_move_circle_service_server.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"642983418","text":"from chalice import Chalice\nfrom os import environ\nfrom random import uniform\nfrom json import loads\nimport pymysql.cursors\n\napp = Chalice(app_name='dvfaas-event-injection-sqli')\ntopic_arn = environ['SNS_TOPIC']\n\ndb = pymysql.connect(\n host = environ['DB_HOST'],\n user = environ['DB_USER'],\n password = environ['DB_PASS'],\n db = environ['DB_DB']\n)\n\n\n@app.route('/test_insert')\ndef index():\n try:\n with db.cursor() as cur:\n cur.execute(\"INSERT INTO sensor_temperature (sensor_name, temperature) VALUES ('%s', '%s')\" % ('sensor_name_2', '60.0'))\n\n db.commit()\n return {\"success\": \"inserted sensor value\"}\n except Exception as e:\n print(e)\n return {\"error\": e.__str__()}\n\n@app.on_sns_message(topic=topic_arn)\ndef sensor_react(event):\n try:\n event_dict = loads(event.message)\n sensor_name = event.subject\n if 'reading' in event_dict:\n with db.cursor() as cur:\n cur.execute(\"INSERT INTO sensor_temperature (sensor_name, temperature) VALUES ('%s',%s)\" % (sensor_name, event_dict['reading']))\n\n db.commit()\n print(\"Successfully inserted values: {} and {} into the sensor temperature 
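# Both handlers in this app splice values into the SQL text with %-formatting,
# which is precisely the injection vector this "sqli" demo exists to exhibit.
# pymysql's execute() takes the values as a separate argument and escapes them;
# a sketch of the safe variant (the function name is illustrative, `db` is the
# module's connection):
def insert_reading_safe(db, sensor_name, reading):
    with db.cursor() as cur:
        cur.execute(
            "INSERT INTO sensor_temperature (sensor_name, temperature)"
            " VALUES (%s, %s)",
            (sensor_name, reading),   # bound as parameters, never interpolated
        )
    db.commit()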
table\".format(sensor_name, event_dict['reading']))\n except Exception as e:\n print(e)","sub_path":"injection/mqtt_rds_sql_event_injection/sqli/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"523756049","text":"import numpy as np\nimport tensorflow as tf\n\n\nclass Sampling(tf.keras.layers.Layer):\n def call(self, inputs):\n z_mean, z_log_var = inputs\n batch = tf.shape(z_mean)[0]\n dim = tf.shape(z_mean)[1]\n epsilon = tf.random.normal(shape=(batch, dim))\n return z_mean + tf.math.exp(.5 * z_log_var) * epsilon\n\n\nclass VAE(tf.keras.models.Model):\n def __init__(self, seq_len, input_dim, hidden_dim, beta, warm_up_iters):\n super(VAE, self).__init__()\n self.hidden_dim = hidden_dim\n self.beta = tf.cast(beta, tf.float32)\n self.warm_up_iters = tf.cast(warm_up_iters, tf.float32)\n self.encoder = tf.keras.models.Sequential([\n tf.keras.layers.Input(shape=(seq_len, input_dim)),\n tf.keras.layers.LSTM(hidden_dim),\n tf.keras.layers.Dropout(.5),\n tf.keras.layers.Dense(hidden_dim * 2)\n ])\n self.decoder = tf.keras.models.Sequential([\n tf.keras.layers.Input(shape=(hidden_dim,)),\n tf.keras.layers.RepeatVector(seq_len),\n tf.keras.layers.LSTM(hidden_dim, return_sequences=True),\n tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(input_dim, activation='sigmoid'))\n ])\n self.train_loss = tf.keras.metrics.Mean(name='train_loss')\n self.train_reconstr_loss = tf.keras.metrics.Mean(name='reconstr_loss')\n self.train_kl_loss = tf.keras.metrics.Mean(name='kl_loss')\n self.dev_loss = tf.keras.metrics.Mean(name='dev_loss')\n\n def call(self, x):\n z_mean, z_log_var = self.encode(x)\n z = self.reparameterize(z_mean, z_log_var)\n return self.decoder(z)\n\n def encode(self, x):\n z_mean, z_log_var = tf.split(self.encoder(x), num_or_size_splits=2, axis=1)\n return z_mean, z_log_var\n\n def reparameterize(self, z_mean, z_log_var):\n batch = tf.shape(z_mean)[0]\n dim = tf.shape(z_mean)[1]\n eps = tf.random.normal(shape=(batch, dim))\n return eps * tf.exp(z_log_var * .5) + z_mean\n\n def compute_loss(self, x):\n def log_normal_pdf(sample, mean, logvar):\n log2pi = tf.math.log(2. * np.pi)\n return tf.reduce_sum(-.5 * ((sample - mean) ** 2. 
* tf.exp(-logvar) + logvar + log2pi), axis=1)\n mean, logvar = self.encode(x)\n z = self.reparameterize(mean, logvar)\n x_reconstr = self.decoder(z)\n logpx_z = tf.reduce_sum(tf.keras.losses.mse(x, x_reconstr), axis=1)\n logpz = log_normal_pdf(z, 0., 1.)\n logqz_x = log_normal_pdf(z, mean, logvar)\n reconstr_loss = tf.reduce_mean(logpx_z)\n kl_loss = tf.reduce_mean(logqz_x - logpz)\n return reconstr_loss, kl_loss\n\n @tf.function\n def train_step(self, x):\n warmup_coef = tf.math.minimum(tf.cast(self.optimizer.iterations, tf.float32) / self.warm_up_iters, tf.cast(1., tf.float32)) ** 3.\n beta = self.beta * warmup_coef\n with tf.GradientTape() as tape:\n reconstr_loss, kl_loss = self.compute_loss(x)\n loss = reconstr_loss + beta * kl_loss\n gradients = tape.gradient(loss, self.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n self.train_loss(loss)\n self.train_reconstr_loss(reconstr_loss)\n self.train_kl_loss(kl_loss)\n return {\n 'loss': self.train_loss.result(),\n 'reconstr_loss': self.train_reconstr_loss.result(),\n 'kl_loss': self.train_kl_loss.result(),\n 'beta': beta\n }\n\n @tf.function\n def test_step(self, x):\n reconstr_loss, kl_loss = self.compute_loss(x)\n loss = reconstr_loss + self.beta * kl_loss\n self.dev_loss(loss)\n return {\n 'loss': self.dev_loss.result()\n }\n","sub_path":"autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"229799106","text":"#!/usr/bin/env python\nimport argparse\nfrom db_scripts.data_load_to_db import load_data_to_sqlite_db\n\nparser=argparse.ArgumentParser()\nparser.add_argument('-d','--db_name', required=True, help='SQLite db name')\nparser.add_argument('-f','--json_data', required=True, help='Message data json file')\nargs=parser.parse_args()\n\ndb_name=args.db_name\njson_data=args.json_data\n\ntry:\n load_data_to_sqlite_db(db_name,json_data)\nexcept Exception as e:\n print('Error: {0}'.format(e))","sub_path":"scripts/data_load_to_db.py","file_name":"data_load_to_db.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"95121660","text":"#!/usr/bin/env python\n\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\nfrom flask_bootstrap import Bootstrap\nfrom flask import Flask, render_template, redirect, url_for, send_from_directory, abort, jsonify\nimport pandas as pd\nimport os\nimport json\ndf = pd.read_table('lisa_results_meta_table_mouse_with_gene_sets.xls')\n\ndf.drop('GEO_id', inplace=True, axis=1)\ndf.drop('DE_col', inplace=True, axis=1)\nindex = [ i for i in range(0, df.shape[0], 2)] \n\ndf = df.iloc[index,:]\ndf = df.iloc[:,:-2]\ninc = pd.read_table('lisa_results_meta_table_mouse_new_selected.xls')\ndf = df.loc[df.ID.isin(inc.loc[:, 'ID']), :]\n\nlabels = [ \"%s_%s\" % (i, j) for i, j in zip(df.iloc[:, 0], df.iloc[:, -1]) ]\nlabels2 = [ \"%s_up\" % (i) for i, j in zip(df.iloc[:, 0], df.iloc[:, -1]) ]\ndf.iloc[:, -1] = list(map(lambda x, y: '{1} & {3}'.format(x, x.split('_')[1], y, y.split('_')[1]), labels, labels2))\ncombined = list(map(lambda x:'' %(x), df.iloc[:,0]))\ndf.drop(['Accession'], inplace=True, axis=1)\n#df.drop(['geo_id'], inplace=True, axis=1)\ndf['geo_id'] = list(map(lambda x:'' %(x, x), df.loc[:, 'geo_id']))\nids = df.ID\ndf.drop(['ID'], inplace=True, axis=1)\ndf['Combined'] = combined\n\ndef generate_page():\n loader = 
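# generate_page here loads gallery_template.html from disk via FileSystemLoader
# and renders it with header/table context; the same Environment/render flow with
# an inline template string, so the snippet runs without any template file:
from jinja2 import Environment

env = Environment()
tmpl = env.from_string("{% for h in header %}<th>{{ h }}</th>{% endfor %}")
print(tmpl.render(header=['geo_id', 'Combined']))  # <th>geo_id</th><th>Combined</th>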
FileSystemLoader('.')\n env = Environment(loader=loader) #autoescape=select_autoescape(['html']))\n template = env.get_template('gallery_template.html')\n #print(template)\n x = template.render(header=df.columns, table=df.values)\n with open('new_gallery_mm.html', 'w') as outf:\n outf.write(x)\n os.system('cp new_gallery_mm.html ../templates/new_gallery_mm.html')\n #os.system('cp new_gallery.html ../templates/new_gallery_mm.html')\n\ndef clean_coef(x, mark, prefix):\n x.columns = ['id', 'coefficients', 'cell_type', 'cell_line', 'tissue']\n print(x.coefficients)\n x.coefficients = x.coefficients.map(lambda x: \"%.2E\" % x)\n x.loc[:, \"coefficient\"] = x.apply(lambda x: \"%s|%s\" % (x[0], x[1]), axis=1)\n x.loc[:, \"download\"] = [\"http://lisa.cistrome.org/gallery/download/%s_gs1.txt.%s.%s.csv\"%(prefix, mark, i) for i in x.id]\n x.drop([\"id\", \"coefficients\"], axis=1, inplace=True)\n x.to_csv(os.path.join('.', '%s.%s.coefs1.csv' % (prefix, mark)), index=False)\n\ndef get_collapse_tf(z, prefix, mark, t):\n a = {}\n p = {}\n \n print(z.head())\n z = z.sort_values('0.1') # pick the top five ones\n \n for i in range(z.shape[0]):\n a[z.iloc[i, 0].split('|')[1]] = a.get(z.iloc[i, 0].split('|')[1], []) + [z.iloc[i,0].split('|')[0]]\n p[z.iloc[i, 0].split('|')[1]] = p.get(z.iloc[i, 0].split('|')[1], []) + [z.iloc[i,1]]\n # p[z.iloc[i, 0].split('|')[1]] = min(p.get(z.iloc[i, 0].split('|')[1],1000), z.iloc[i,1])\n\n out = os.path.join('.', \"%s.%s.%scsv\" % (prefix, mark, t))\n nas = []\n with open(out, 'w') as fout:\n fout.write(\"%s,%s,%s,%s,%s,%s,%s\\n\" % (\"Transcription Factor\", \"1st Sample p-value\", \"2nd Sample p-value\", \"3rd Sample p-value\", \"4th Sample p-value\", \"5th Sample p-value\",\"p\"))\n for j in p:\n # fout.write(\"%s,%s,%s\\n\" % (j, \" | \".join(a[j][:5]), p[j])) # pick the top five ones\n temp = []\n if len(a[j]) < 5:\n nas = (5-len(a[j])) * ['NA']\n else:\n nas = []\n for i, k in list(zip(a[j], p[j]))[:5]:\n temp.append(\"%s;%.2E\" % (i, k))\n temp += nas\n fout.write(\"%s,%s,%s\\n\" % (j.replace(',',' | '), ','.join(temp), min(p[j])))\n z = pd.read_csv(out)\n z = z.sort_values('p')\n z.drop(['p'], axis=1, inplace=True)\n ##z.p = z.p.map(lambda x: \"%.2E\" % x)\n z.to_csv(out, index=False)\n return out\n \ndef generate_htmls(ids):\n bench = '/project/dev/qqin/LISA/lisa_web/figure1/mouse_combined/'\n loader = FileSystemLoader('.')\n env = Environment(loader=loader) #autoescape=select_autoescape(['html']))\n for i in ids:\n template = env.get_template('combined_gallery_multiple_display_mm.html')\n d1 = os.path.join(bench, \"%s_down.gene_symbol_chipseq_cauchy_combine_raw.csv\" % (i))\n d2 = os.path.join(bench, \"%s_up.gene_symbol_chipseq_cauchy_combine_raw.csv\" % (i))\n os.system('cp %s .' %d1)\n os.system('cp %s .' %d2)\n\n if (not os.path.exists(d1)) or (not os.path.exists(d2)):\n template = env.get_template('combined_gallery_single_display_mm.html')\n json_dict = {}\n if not os.path.exists(d1):\n m2 = os.path.join(bench, '%s_up.gene_symbol_motif_cauchy_combine_raw.csv' % i)\n os.system('cp %s .' %d2)\n os.system('cp %s .' %m2)\n\n os.system('zip -r %s.zip %s %s' % (i, os.path.basename(d2), os.path.basename(m2)))\n d2 = pd.read_csv(d2)\n print('%s not exists!' 
% d1)\n di2 = get_collapse_tf(d2, '%s_up' % i, 'combined', 'chipseq.')\n print(m2)\n m2 = pd.read_csv(m2)\n mo2 = get_collapse_tf(m2, '%s_up' % i, 'combined', 'motif.')\n\n json_dict['status'] = '100%'\n json_dict['result'] = di2\n json_dict['result_2'] = mo2\n json_dict['result_zip'] = '%s.zip' % i\n test = '%s_combined.json' % (i)\n with open('%s_combined.html' % (i), 'w') as fout:\n fout.write(template.render(method='all', task_id=i, labels1='Down-regulated', labels2='Up-regulated', download_zip='%s.zip' % i))\n with open(test, 'w') as jsonf:\n json.dump(json_dict, jsonf)\n continue\n if not os.path.exists(d2):\n m1 = os.path.join(bench, '%s_up.gene_symbol_motif_cauchy_combine_raw.csv' % i)\n os.system('cp %s .' %d1)\n os.system('cp %s .' %m1)\n\n os.system('zip -r %s.zip %s %s' % (i, os.path.basename(d1), os.path.basename(m1)))\n\n with open('%s_combined.html' % (i), 'w') as fout:\n fout.write(template.render(method='all', task_id=i, labels1='Down-regulated', labels2='Up-regulated', download_zip='%s.zip' % i))\n\n d1 = pd.read_csv(d1)\n print('%s not exists!' % d1)\n di1 = get_collapse_tf(d1, '%s_up' % i, 'combined', 'chipseq.')\n m1 = pd.read_csv(m1)\n mo1 = get_collapse_tf(m1, '%s_up' % i, 'combined', 'motif.')\n\n json_dict = {}\n json_dict['status'] = '100%'\n json_dict['result'] = di1\n json_dict['result_2'] = mo1\n json_dict['result_zip'] = '%s.zip' % i\n print('%s not exists!' % d1)\n test = '%s_combined.json' % (i)\n with open(test, 'w') as jsonf:\n json.dump(json_dict, jsonf)\n continue\n with open('%s_combined.html' % (i), 'w') as fout:\n fout.write(template.render(method='all', task_id=i, labels1='Down-regulated', labels2='Up-regulated', download_zip='%s.zip' % i))\n\n m1 = os.path.join(bench, '%s_down.gene_symbol_motif_cauchy_combine_raw.csv' % i)\n m2 = os.path.join(bench, '%s_up.gene_symbol_motif_cauchy_combine_raw.csv' % i)\n os.system('cp %s .' %m1)\n os.system('cp %s .' 
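# The packaging steps around here shell out to `zip -r` via os.system; the
# standard-library zipfile module builds the same archive in-process (a sketch,
# the file names are placeholders):
import zipfile

def make_zip(archive_path, members):
    with zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED) as zf:
        for path in members:
            zf.write(path)        # each file is stored under its original name

# make_zip('307.zip', ['307_down.csv', '307_up.csv'])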
%m2)\n os.system('zip -r %s.zip %s %s %s %s %s_up.combined.chipseq.csv %s_down.combined.motif.csv %s_up.combined.motif.csv %s_down.combined.motif.csv' % (i, os.path.basename(d1), os.path.basename(d2), os.path.basename(m1), os.path.basename(m2), i, i, i, i))\n\n os.system('python ../../plotly_scatter_oldoutput.py %s %s %s Combined_TR_ChIP-seq down-regulated up-regulated' % (d1, d2, '%s_combined.fig1' % i))\n\n d1 = pd.read_csv(d1)\n d2 = pd.read_csv(d2)\n di1 = get_collapse_tf(d1, '%s_down' % i, 'combined', 'chipseq.')\n di2 = get_collapse_tf(d2, '%s_up' % i, 'combined', 'chipseq.')\n\n json_dict = {}\n json_dict['status'] = '100%'\n json_dict['result'] = di1\n json_dict['result_2'] = di2\n json_dict['result1_fig'] = '%s_combined.fig1.html' % i\n json_dict['result_zip'] = '%s.zip' % i\n\n os.system('python ../../plotly_scatter_oldoutput.py %s %s %s Combined_TR_motif down-regulated up-regulated' % (m1, m2, '%s_combined.fig2' % i))\n\n m1 = pd.read_csv(m1)\n m2 = pd.read_csv(m2)\n mo1 = get_collapse_tf(m1, '%s_down' % i, 'combined', 'motif.')\n mo2 = get_collapse_tf(m2, '%s_up' % i, 'combined', 'motif.')\n json_dict['result2'] = mo1\n json_dict['result2_2'] = mo2\n json_dict['result2_fig'] = '%s_combined.fig2.html' % i\n\n test = '%s_combined.json' % (i)\n with open(test, 'w') as jsonf:\n json.dump(json_dict, jsonf)\n\ngenerate_page()\n#generate_htmls(['307'])\ngenerate_htmls(ids)\n","sub_path":"lisa_web/lisa_web/generate_combined_gallery_mm.py","file_name":"generate_combined_gallery_mm.py","file_ext":"py","file_size_in_byte":9109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"180827945","text":"# Tony Liang and hari Shanmugaraja\n# Feb 9 2020\n# read in text file for lidar and text file for car motor and steering and match them up\n# sample line\n# 1581083570.228828 b'8597 1407\\r\\n'\n#\n#\n# Ultra simple LIDAR data grabber for RPLIDAR.\n# Version: 1.10.0\n# 1582122195.06\n# RPLIDAR S/N: BE9B9AF2C1EA98D4BEEB9CF031483517\n# Firmware Ver: 1.25\n# Hardware Rev: 5\n# RPLidar health status : 0\n# theta: 0.30 Dist: 00781.00 Q: 47\n# theta: 1.05 Dist: 00780.00 Q: 47\n# theta: 1.55 Dist: 00789.00 Q: 47\n# theta: 2.17 Dist: 00782.00 Q: 47\n# theta: 3.09 Dist: 00000.00 Q: 0\n# theta: 3.55 Dist: 01044.00 Q: 47\n\n\n#output: big list: inside list: 360 (theta, dist) tuples followed by motor, steering\nimport sys\nimport random\nimport subprocess\n#import serial\nimport time\nimport os\nimport signal\nimport pickle\n\nfilename = \"steeringdata.txt\"\nprint(\"cardatafile lidardatafile offset -copy and paste: ctd39.txt ltd39.txt .03 pkl39.pkl\")\nthreeinputs = input()\nsplitupinputs = threeinputs.split(\" \")\nlidarFile = splitupinputs[1]\ncarFile = splitupinputs[0]#\"steeringdatacarsample.txt\"\npickleOut = splitupinputs[3]\narduinoTimeToLidarTime = dict()#maps arduino time to closest lidar time\nlidarTimeToArduinoTime = dict()\n\noffset = float(splitupinputs[2]) # offset accounts for time difference between pi and tony's computer - this is\n#tony's computer - pi computer. Positive if tonys computer is ahead\ntimeToValues = dict()#arduino time to steering data\ntimeList = list()#list of times from arduino\n\nlistofMaxes = list()\nlistofMins = list()\n\ntimeListLidar = list()#list of times from pi\ntimeToLidarPoints = dict()#list of times from pi + offset to lidar points\nfinalList = list()#list of lists, each sublist is length 362. 
360 (theta,distance) tuples + 2 integers at the end\nfinalListOutput = list()# # list of lists, each sublist is length 2 with outputs only\ndef findClosestTime(time, options):\n absolute_difference_function = lambda list_value: abs(list_value - time)\n\n closest_value = min(options, key=absolute_difference_function)\n\n return closest_value\n\ndef main():\n global timeList, timeToValues\n with open(carFile) as f:\n for line in f.readlines():\n splitupline = line.split(\" \")\n if len(splitupline) == 3:\n time = float(splitupline[0])\n motor = int(splitupline[1].lstrip(\"b'\"))\n steering = int(splitupline[2][0:4])\n timeList.append(time)\n valueList = [motor, steering]#changed for the neural net, all values are divided by 100\n valueTuple = tuple(valueList)\n timeToValues[time] = valueList\n f.close()\n timeList = tuple(timeList)\n #print(timeList)\n #print(timeToValues)\n\n with open(lidarFile) as fl:\n lineNumber = 0\n currentPoints = list()\n currentTime = None\n for line in fl.readlines():\n lineNumber = lineNumber + 1\n strippedLine = line.strip(\" \")\n splitWords = strippedLine.split(\" \")\n if \"theta\" in line and \"S\" not in line:# and len(splitWords)==6:\n #print(splitWords)#debug\n theta = float(splitWords[1])\n dist = float(splitWords[3])\n #pointList = [theta, dist]\n #pointTuple = tuple(pointList)\n currentPoints.append(dist)\n\n elif len(line.split(\" \")) == 1 and len(currentPoints) > 10: #this is the line where only time is printed, reset currentpoints and currenttime\n #print(currentPoints)\n #print(len(currentPoints))\n while len(currentPoints) > 361: #cut currentPoints to exactly 360 or add if youre short\n random_item = random.choice(currentPoints)#361 bc we dump the first data point\n currentPoints.remove(random_item)\n while len(currentPoints) < 361:#rare occasion that points is below 360\n random_item = random.choice(currentPoints)#duplicate some\n currentPoints.append(random_item)\n currentTime = float(line.strip(\"\\n\"))\n timeListLidar.append(currentTime + offset)\n timeToLidarPoints[currentTime + offset] = currentPoints[1:]\n listofMaxes.append(max(currentPoints))\n listofMins.append(min(currentPoints))\n currentPoints = list()\n currentTime = None\n if lineNumber % 10000 == 0 or lineNumber>4430000:\n print(lineNumber)\n#print(\" \")\n #for t in timeToLidarPoints:\n # print(t, timeToLidarPoints[t])\n #print(timeListLidar)\n #print(timeList)\n #print(timeToLidarPoints.keys())\n for t in timeList:#Do not comment out\n closestLidarTime = findClosestTime(t, timeListLidar)\n arduinoTimeToLidarTime[t] = closestLidarTime\n #lidarTimeToArduinoTime[closestLidarTime] = t\n print(\" \")\n #print(arduinoTimeToLidarTime)#continue working here\n #for t in timeToLidarPoints:\n # print (len(timeToLidarPoints[t]))\n\n kingMax = max(listofMaxes)\n kingMin = min(listofMins)\n print(\"max \" + str(kingMax) + \" min \" + str(kingMin))\n\n for t in timeList:#DO not comment out\n if abs(arduinoTimeToLidarTime[t] - t) < .5 + offset:\n #print(t)\n toAppend = timeToLidarPoints[arduinoTimeToLidarTime[t]]# + timeToValues[t]#testing this\n\n\n toAppend = [(number - kingMin) / (kingMax - kingMin) for number in toAppend]\n toAppend = toAppend + timeToValues[t]\n finalListOutput.append(timeToValues[t])#undos the add 0\n finalList.append(toAppend)\n\n\n \"\"\"for finalForm in finalList:\n print(finalForm)\n for finalOutput in finalListOutput:\n print(finalOutput)\"\"\"\n with open(pickleOut, \"wb\") as outfile:\n pickle.dump(finalList, outfile)\n pickle.dump(finalListOutput, 
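# The two pickle.dump calls above append two independent records to one file, so
# a reader must call pickle.load twice in the same order; a round-trip sketch
# using a scratch file name of my choosing:
import pickle

with open('pkl_demo.pkl', 'wb') as f:
    pickle.dump([1, 2, 3], f)          # first record: inputs
    pickle.dump([[0.5, 0.5]], f)       # second record: outputs
with open('pkl_demo.pkl', 'rb') as f:
    assert pickle.load(f) == [1, 2, 3]         # records come back in write order
    assert pickle.load(f) == [[0.5, 0.5]]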
outfile)\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"SourceCode/Training_Data/crunchdatapn.py","file_name":"crunchdatapn.py","file_ext":"py","file_size_in_byte":5968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"568736835","text":"import click\nimport os\n@click.command()\n@click.argument('subjects', nargs=-1, required=False, default=None)\n@click.option('-config_file', type=click.Path(exists=True, dir_okay=False, file_okay=True), default=None,\n help='Use a given configuration file. If left blank, uses the default config file, requiring definition of BIDS, working and output directories.')\n@click.option('-submit', is_flag=True, default=False, help='Flag to submit commands to the HPC')\n@click.option('-debug', is_flag=True, help='Flag to enable detailed error messages and traceback')\ndef test_func( config_file=None, subjects=None,submit=False, debug=False):\n ctx = click.get_current_context()\n ctx.info_name\n\n\ndef command_log(config):\n ctx = click.get_current_context()\n print(ctx.info_name)\n print(os.getlogin())\n","sub_path":"clpipe/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"607663198","text":"from Memory import Memories\nfrom Task import RTTask, NonRTTask\nimport sys\nfrom Processor import Processor\n\n\ndef get_configuration(input_file=\"input/input_configuration.txt\"):\n try:\n processor = None\n memories = None\n\n with open(input_file, \"r\", encoding='UTF-8') as f:\n while True:\n line = f.readline()\n if not line:\n break # EOF\n\n line = line.split()\n if len(line) == 0:\n continue\n if line[0] == '##':\n if line[1] == 'Memory':\n memories = get_memory(f)\n elif line[1] == 'Processor':\n processor = get_processor(f)\n elif line[1] == 'Simulation':\n sim_time, verbose = get_sim_time_and_verbose(f)\n\n assert processor, memories\n return sim_time, verbose, processor, memories\n\n except FileNotFoundError:\n print(\"Cannot find {}\".format(input_file))\n sys.exit(0)\n\n\ndef get_processor(f):\n processor = Processor(int(f.readline()))\n while True:\n line = f.readline().split()\n if len(line) == 0:\n break\n processor.insert_processor_mode(*map(float, line))\n return processor\n\n\ndef get_memory(f):\n memories = Memories()\n while True:\n line = f.readline().split()\n if len(line) == 0:\n break\n memories.insert_memory(int(line[1]), *map(float, line[2:]))\n return memories\n\n\ndef get_sim_time_and_verbose(f):\n sim_time = int(f.readline().split()[1])\n verbose = int(f.readline().split()[1])\n return sim_time, verbose\n\n\ndef get_rt_tasks(input_file=\"input_rt_tasks.txt\"):\n try:\n rt_tasks = []\n with open(input_file, \"r\", encoding='UTF-8') as f:\n for i in range(int(f.readline())):\n line = f.readline().split()\n rt_tasks.append(RTTask(i, *map(int, line[:3]), float(line[3])))\n\n return rt_tasks\n\n except FileNotFoundError:\n print(\"Cannot find {}\".format(input_file))\n sys.exit(0)\n\n\ndef get_non_rt_tasks(input_file=\"input_nonrt_tasks.txt\"):\n try:\n non_rt_tasks = []\n with open(input_file, \"r\", encoding='UTF-8') as f:\n for i in range(int(f.readline())):\n line = f.readline().split()\n non_rt_tasks.append(NonRTTask(i + 1, *map(int, line[:3]), float(line[3])))\n\n return non_rt_tasks\n\n except FileNotFoundError:\n print(\"Cannot find {}\".format(input_file))\n sys.exit(0)\n\n\ndef set_ga_results(rt_tasks, input_file=\"input_ga_result.txt\"):\n 
try:\n with open(input_file, \"r\", encoding='UTF8') as f:\n f.readline()\n max_core, min_core = tuple(map(int, f.readline().split()))\n\n for task in rt_tasks:\n task.ga_processor_modes = [0 for _ in range(max_core + 1)]\n task.ga_memory_modes = [0 for _ in range(max_core + 1)]\n\n for core in range(max_core, min_core - 1, -1):\n f.readline()\n for task in rt_tasks:\n line = list(map(int, f.readline().split()))\n task.ga_processor_modes[core] = line[0]\n task.ga_memory_modes[core] = line[1]\n\n return max_core, min_core\n\n except FileNotFoundError:\n print(\"Cannot find {}\".format(input_file))\n sys.exit(0)","sub_path":"Input.py","file_name":"Input.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"292378090","text":"# ---------------- User Configuration Settings for speed-cam.py ---------------------------------\n# Ver 7.0 speed-cam.py webcam480 Stream Variable Configuration Settings\n\n#######################################\n# speed-cam.py plugin settings\n#######################################\n\n# Calibration Settings\n# --------------------\ncal_obj_px = 180 # Length of a calibration object in pixels\ncal_obj_mm = 4330.0 # Length of the calibration object in millimetres\n\n# Crop Area for motion detection Tracking\n# ---------------------------------------\nx_left = 150 # Exclude event if x less than this px position Default=25\nx_right = 490 # Exclude event if x greater than this px position Default=295\ny_upper = 140 # Exclude event if y less that this value default=100\ny_lower = 340 # Exclude event if y greater than this value default=175\n\n# Motion Event Settings\n# ---------------------\ntrack_len_trig = 75 # Default=75 Length of track to trigger speed photo\ntrack_timeout = 1 # Number of seconds to wait after track End (prevents dual tracking)\nevent_timeout = 2 # Number of seconds to wait for next motion event before starting new track\n\n# Camera Settings\n# ---------------\nWEBCAM = True # default = False False=PiCamera True=USB WebCamera\n\n# Web Camera Settings\nWEBCAM_SRC = 0 # default = 0 USB opencv connection number\nWEBCAM_WIDTH = 640 # default = 640 USB Webcam Image width\nWEBCAM_HEIGHT = 480 # default = 480 USB Webcam Image height\n\n# Camera Image Settings\n# ---------------------\nimage_font_size = 15 # Default = 15 Font text height in px for text on images\nimage_bigger = 1.5 # Default = 1.5 Resize saved speed image by value\n\n# Motion Event Exclusion Settings\n# -------------------------------\nMIN_AREA = 170 # Exclude all contours less than or equal to this sq-px Area\nx_diff_min = 1 # Exclude if min px away exceeds last event x pos\nx_diff_max = 50 # Exclude if max px away for last motion event x pos\n\n# ---------------------------------------------- End of User Variables -----------------------------------------------------\n","sub_path":"plugins/webcam480.py","file_name":"webcam480.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"150555479","text":"from urllib.parse import urljoin\r\n\r\nfrom .core import *\r\nfrom .vparsers import *\r\nfrom .utils import attributeerror_wrapper\r\n\r\n\r\nclass AtriumSkoryszeFloorParser(ValueParser):\r\n \r\n def parse(self, text):\r\n mapper = { \r\n \"0\": 0, \"I\": 1, \"II\": 2, \"III\": 3, \"IV\": 4, \"V\": 5, \"VI\": 6,\r\n \"VII\": 7, \"VIII\": 8, \"IX\": 9, \"X\": 10\r\n }\r\n return mapper.get(text, None)\r\n \r\n 
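# modify_record in the parser below resolves each scraped plan link against the
# listing URL with urljoin; how that resolution behaves for relative and
# root-relative paths (the .pdf name is made up):
from urllib.parse import urljoin

base = "http://www.atriumskorosze.pl/index.php"
assert urljoin(base, "plany/m2.pdf") == "http://www.atriumskorosze.pl/plany/m2.pdf"
assert urljoin(base, "/plany/m2.pdf") == "http://www.atriumskorosze.pl/plany/m2.pdf"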
\r\nclass AtriumSkoryszeParser(MultipleRequestsGeneratorMixin, MultipleWebpageParser):\r\n url = \"http://www.atriumskorosze.pl/index.php\"\r\n method = \"POST\"\r\n fixed_params = { \"a\": \"wyszukaj\"}\r\n fixed_data = {\r\n \"klatka\": 0,\r\n \"cena\": 0,\r\n \"nowy\": \"Szukaj\"\r\n } \r\n var_data = [ dict(kat=x) for x in [\"M2\", \"M3\", \"M4\", \"M5\"] ]\r\n headers = {\r\n \"Host\": \"www.atriumskorosze.pl\",\r\n \"Referer\": \"http://www.atriumskorosze.pl/index.php?a=wyszukaj\",\r\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0\",\r\n \"Upgrade-Insecure-Requests\": \"1\"\r\n }\r\n middlewares = [ DecodeMiddleware(encoding=None), BeautifulSoupMiddleware() ]\r\n\r\n schema = [\r\n DataUnit(label=\"Typ\", parser=StringParser(), id=\"type\"),\r\n DataUnit(label=\"Klatka\", parser=StringParser(), id=\"entrance\"),\r\n DataUnit(label=\"Piętro\", parser=AtriumSkoryszeFloorParser(), id=\"floor\"),\r\n DataUnit(label=\"Numer\", parser=StringParser(), id=\"number\"),\r\n DataUnit(label=\"Pow. m^2\", parser=AreaParser(), id=\"area\"),\r\n DataUnit(label=\"Cena\", parser=PriceParser(), id=\"price\"),\r\n DataUnit(label=\"Cena m^2\", parser=PriceParser(), id=\"price_m2\"),\r\n DataUnit(label=\"Taras\", parser=NoneParser(), id=\"tarrace_none\"),\r\n DataUnit(label=\"Loggia\", parser=NoneParser(), id=\"loggia_none\"),\r\n DataUnit(label=\"Balkon\", parser=AreaParser(), id=\"balcony\"),\r\n DataUnit(label=\"Zielony taras\", parser=NoneParser(), id=\"garden_none\"),\r\n DataUnit(label=\"None\", parser=NoneParser(), id=\"none\"),\r\n DataUnit(label=\"Plan\", parser=LinkParser(), id=\"plan\")\r\n ]\r\n\r\n @attributeerror_wrapper(return_value=[])\r\n def find_records(self, soup):\r\n return soup.find(\"div\", {\"id\": \"cennik\"}).find(\"table\")\\\r\n .find_all(\"tr\", id=lambda x: x != \"ignore\")\r\n \r\n def split_record(self, record):\r\n return [ td.find(\"a\") or td.text for td in record.find_all(\"td\") ]\r\n \r\n def modify_record(self, record, soup=None):\r\n record[\"plan\"] = urljoin(self.url, record[\"plan\"])\r\n record[\"number\"] = \"{entrance}/{floor}/{number}\".format(**record)\r\n record[\"fid\"] = record[\"number\"]\r\n return record\r\n","sub_path":"parsers/atriumskorysze.py","file_name":"atriumskorysze.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"62"} +{"seq_id":"627028800","text":"\"\"\"\nCompute the data that is used for producing the health report.\n\n\"\"\"\nfrom collections import OrderedDict\nimport datetime\nfrom functools import partial\nimport glob\nimport json\nimport os\nimport logging\nimport shutil\nimport traceback\n\nimport fasteners\nimport tornado.ioloop\n\nimport git\nfrom github import Github\nimport plotly.graph_objs as go\nimport plotly.offline.offline as pl_offline\n\nimport repohealth\nimport repohealth.git\nimport repohealth.github.stargazers\nimport repohealth.github.issues\nimport repohealth.github.emojis\nfrom repohealth.analysis import PLOTLY_PLOTS\n\n\nCACHE_ROOT = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(repohealth.__file__))),\n 'ephemeral_storage')\n\n\nCACHE_EXCEPTION = os.path.join(CACHE_ROOT, '{}.exception.json')\nCACHE_GH = os.path.join(CACHE_ROOT, '{}.github.json')\nCACHE_COMMITS = os.path.join(CACHE_ROOT, '{}.commits.json')\nCACHE_CLONE = os.path.join(CACHE_ROOT, '{}')\nCACHE_PLOTS = os.path.join(CACHE_ROOT, '{}.plots.json')\nSTATUS_FILE = os.path.join(CACHE_ROOT, 
'{}.status.json')\nSTATUS_LOCK_FILE = os.path.join(CACHE_ROOT, '{}.status.lock.json')\n\n\ndef clear_cache(uuid):\n logging.info(\"Spoiling the cache for {}\".format(uuid))\n if os.path.exists(CACHE_EXCEPTION.format(uuid)):\n os.remove(CACHE_EXCEPTION.format(uuid))\n if os.path.exists(CACHE_GH.format(uuid)):\n os.remove(CACHE_GH.format(uuid))\n if os.path.exists(CACHE_COMMITS.format(uuid)):\n os.remove(CACHE_COMMITS.format(uuid))\n if os.path.exists(CACHE_CLONE.format(uuid)):\n shutil.rmtree(CACHE_CLONE.format(uuid))\n\n\nfrom contextlib import contextmanager\n\n\n@contextmanager\ndef no_raise(uuid):\n cache_file = CACHE_EXCEPTION.format(uuid)\n try:\n yield\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception as err:\n result = {'status': getattr(err, 'code', 500),\n 'message': str(err),\n 'traceback': traceback.format_exc()}\n with open(cache_file, 'w') as fh:\n json.dump(result, fh)\n return result\n\n\ndef cache_available(uuid):\n avail = ((os.path.exists(CACHE_GH.format(uuid)) and\n os.path.exists(CACHE_COMMITS.format(uuid))) or\n os.path.exists(CACHE_EXCEPTION.format(uuid)))\n return avail\n\n\ndef in_cache():\n \"\"\"\n Return all of the uuids of packages with sucessful & valid caches.\n\n \"\"\"\n patterns = [CACHE_GH.format('*/*'), CACHE_COMMITS.format('*/*')]\n\n gh = sorted(glob.glob(patterns[0]))\n cm = sorted(glob.glob(patterns[1]))\n\n # One particularly sneaky (and unpleasant) way of getting the uuid from the\n # filename is to inject something that shouldn't be there, and then\n # figure out the indices that we need to pick off...\n split_char = '&/&/&/&'\n gh_pick = CACHE_GH.format(split_char).split(split_char)\n gh = [path[len(gh_pick[0]) : -len(gh_pick[1])] for path in gh]\n cm_pick = CACHE_COMMITS.format(split_char).split(split_char)\n cm = [path[len(gh_pick[0]) : -len(cm_pick[1])] for path in cm]\n \n available = set(gh) & set(cm)\n return sorted(available)\n\n\ndef job_status(uuid):\n status_file = STATUS_FILE.format(uuid)\n if not os.path.exists(status_file):\n status = {}\n else:\n with fasteners.InterProcessLock(STATUS_LOCK_FILE.format(uuid)):\n with open(status_file, 'r') as fh:\n status = json.load(fh)\n return status\n\n\ndef prepare_repo_data(uuid, token):\n # A function that doesn't give you the data, it just makes\n # sure it is all available in the cache.\n result = repo_data(uuid, token)\n status = result.get('status', 200)\n return status\n\n\ndef repo_data(uuid, token):\n def update_status(message=None, clear=False, update=False):\n status_file = STATUS_FILE.format(uuid)\n status_lock = fasteners.InterProcessLock(STATUS_LOCK_FILE.format(uuid))\n\n with status_lock:\n if not os.path.exists(status_file) or clear:\n status = []\n else:\n with open(status_file, 'r') as fh:\n status = json.load(fh)\n\n if status and not update:\n # Log the last status item as complete.\n now = datetime.datetime.utcnow()\n status[-1]['end'] = now.strftime('%Y-%m-%dT%H:%M:%SZ')\n\n # Allow for the option of not adding a status message so that we can\n # call this function to close off the previous message once it is\n # complete.\n if message is not None:\n if update:\n status[-1]['status'] = message\n else:\n now = datetime.datetime.utcnow()\n status.append(dict(start=now.strftime('%Y-%m-%dT%H:%M:%SZ'),\n status=message))\n\n with open(status_file, 'w') as fh:\n json.dump(status, fh)\n\n cache_file = CACHE_EXCEPTION.format(uuid)\n if os.path.exists(cache_file):\n with open(cache_file, 'r') as fh:\n result = json.load(fh)\n return result\n\n with 
no_raise(uuid):\n cache = CACHE_GH.format(uuid)\n dirname = os.path.dirname(cache)\n # Ensure the storage location exists.\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n if os.path.exists(cache):\n update_status('Load GitHub API data from ephemeral cache', clear=True)\n with open(cache, 'r') as fh:\n report = json.load(fh)\n # We don't stop here - there is more to the report to add...\n else:\n update_status('Initial validation of repo', clear=True)\n g = Github(token)\n gh_repo = g.get_repo(uuid)\n\n # Check that this is actually a valid repository. If not, return a known\n # status so that our report can deal with it with more grace than simply\n # catching the exception.\n try:\n gh_repo.raw_data\n except Exception:\n report = {'status': 404,\n 'message': 'Repository \"{}\" not found.'.format(uuid)}\n with open(CACHE_EXCEPTION.format(uuid), 'w') as fh:\n json.dump(report, fh)\n return report\n\n report = {}\n\n loop = tornado.ioloop.IOLoop()\n\n update_status('Fetching GitHub API data')\n report['repo'] = gh_repo.raw_data\n\n update_status('Fetching GitHub issues data')\n\n issues_fn = partial(repohealth.github.issues.repo_issues, gh_repo, token)\n issues = loop.run_sync(issues_fn)\n user_keys = ['login', 'id']\n issue_keys = ['number', 'comments', 'created_at', 'state', 'closed_at']\n\n def handle_issue(issue):\n return dict(**{'user/{}'.format(key): issue['user'][key]\n for key in user_keys},\n **{key: issue[key] for key in issue_keys})\n report['issues'] = [handle_issue(issue) for issue in issues]\n\n update_status('Fetching GitHub stargazer data')\n stargazers_fn = partial(repohealth.github.stargazers.repo_stargazers,\n gh_repo, token)\n stargazers = loop.run_sync(stargazers_fn)\n\n star_keys = ['starred_at']\n\n def handle_star(star):\n return dict(**{'user/{}'.format(key): star['user'][key]\n for key in user_keys},\n **{key: star[key] for key in star_keys})\n\n report['stargazers'] = [handle_star(stargazer)\n for stargazer in stargazers\n if isinstance(stargazer, dict)]\n\n with open(cache, 'w') as fh:\n json.dump(report, fh)\n\n cache = CACHE_COMMITS.format(uuid)\n if not os.path.exists(cache):\n clone_target = CACHE_CLONE.format(uuid)\n clone_exists = os.path.exists(clone_target)\n\n if clone_exists:\n # For local dev, we just fetch anything that already sits in the ephemeral cache.\n update_status('Fetching remotes from cached clone')\n repo = git.Repo(clone_target)\n for remote in repo.remotes:\n remote.fetch()\n else:\n update_status('Cloning repo')\n\n class Progress(git.remote.RemoteProgress):\n def update(self, op_code, cur_count, max_count=None, message=''):\n if message:\n update_status('Cloning repo: {}'.format(message), update=True)\n\n repo = git.Repo.clone_from(report['repo']['clone_url'], clone_target,\n progress=Progress())\n\n update_status('Analysing commits')\n repo_data = repohealth.git.commits(repo)\n with open(cache, 'w') as fh:\n json.dump(repo_data, fh)\n\n if not clone_exists and os.path.exists(clone_target):\n # This was ours to clone, so nuke it now.\n shutil.rmtree(clone_target)\n\n else:\n update_status('Load commit from ephemeral cache')\n with open(cache, 'r') as fh:\n repo_data = json.load(fh)\n\n # Round off the status so that the last task has an end time.\n update_status()\n\n repo_data['github'] = report\n return repo_data\n\n\ndef visualisations(payload):\n def html(fig):\n config = dict(showLink=False, displaylogo=False)\n plot_html, plotdivid, w, h = pl_offline._plot_html(\n fig, config, validate=True,\n default_width='100%', 
default_height='100%',\n global_requirejs=False)\n\n script_split = plot_html.find('