diff --git "a/1330.jsonl" "b/1330.jsonl" new file mode 100644--- /dev/null +++ "b/1330.jsonl" @@ -0,0 +1,389 @@ +{"seq_id": "521117128", "text": "import numpy as np\nimport matplotlib.pyplot as plt\na=np.array([[75,89,92,78],[90,76,88,83],[55,99,81,88],[80,67,75,91]])\nb=np.array(['A','B','C','D'])\nc=np.array(['Kor', 'Math', 'Eng', 'Com'])\nA=np.sum(a, axis=1)\nx=np.divide(A,4) #학생평균\nB=np.sum(a, axis=0)\ny=np.divide(B,4) #과목평균\nplt.plot(b,x,'go--') #학생 그래프\nplt.show()\nplt.plot(c,y,'go--') #과목 그래프\nplt.show()\n\n\n", "sub_path": "L211582_실습12_1.py", "file_name": "L211582_실습12_1.py", "file_ext": "py", "file_size_in_byte": 397, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.array", "line_number": 3, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "205909887", "text": "import os\nimport setuptools\n\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, 'README.md')).read()\n\nrequires = [\n 'requests==2.20.0',\n]\n\nsetuptools.setup(name='pijaz-sdk',\n version='0.1',\n description='Pijaz Platform Software Development Kit',\n long_description=README,\n classifiers=[\n \"Programming Language :: Python\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Artistic Software\",\n \"Topic :: Multimedia :: Graphics\",\n \"Topic :: Multimedia :: Graphics :: Editors\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n author='Chad Phillips',\n author_email='chad@pijaz.com',\n url='',\n keywords='pijaz graphics sdk synthesizer platform',\n packages=setuptools.find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=requires,\n)\n\n", "sub_path": "python/pijaz-sdk/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 960, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.abspath", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 11, "usage_type": "call"}, {"api_name": 
"setuptools.find_packages", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "250486314", "text": "## various functions\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\n\n# given the ordered list of local contributions\n# returns a list of tuples (filtration, euler characteristic)\n\n\ndef euler_characteristic_list_from_all(local_contributions):\n\n euler_characteristic = []\n\n current_characteristic = 0\n old_f = 0\n\n for filtration, contribution in local_contributions:\n if filtration > old_f:\n euler_characteristic.append([old_f, current_characteristic])\n old_f = filtration\n\n current_characteristic += contribution\n\n # add last contribution\n euler_characteristic.append([filtration, current_characteristic])\n\n return np.array(euler_characteristic)\n\n\n# In[6]:\n\n\n# WARNING\n# when plotting a lot of points, drawing the lines can take some time\n\ndef plot_euler_curve(e_list, with_lines=False, title = None):\n plt.figure()\n plt.scatter([f[0] for f in e_list], [f[1] for f in e_list])\n\n # draw horizontal and vertical lines b/w points\n\n if with_lines:\n #plt.hlines(y = e_list[0][1], xmin=0, xmax=e_list[1][0])\n\n for i in range(1, len(e_list)):\n plt.vlines(x=e_list[i][0], ymin=min(e_list[i-1][1], e_list[i][1]),ymax=max(e_list[i-1][1], e_list[i][1]))\n\n plt.hlines(y=e_list[i-1][1], xmin=e_list[i-1][0], xmax=e_list[i][0])\n\n plt.xlabel(\"filtration\")\n plt.ylabel(\"euler characteristic\")\n plt.title(title)\n\n\n\n\n\n# given the list of changes to the EC and a filtration value\n# returns the EC at that filtration value\ndef EC_at_filtration(ecc_list, f):\n\n ec = ecc_list[0][1]\n\n for current_ec in ecc_list:\n if current_ec[0] > f:\n break\n ec = current_ec[1]\n\n return ec\n\n\n# computes the difference between two ECC from 0 to a max filtration value\ndef difference_ECC(ecc1, ecc2, max_f):\n # find full list of filtration points\n filtration_steps = list(set(([f[0] for f in ecc1] + [f[0] for f in ecc2] + [max_f])))\n filtration_steps.sort()\n\n difference = 0\n\n for i in range(1, len(filtration_steps)):\n if filtration_steps[i] > max_f:\n break\n\n ec_1 = EC_at_filtration(ecc1, filtration_steps[i-1])\n ec_2 = EC_at_filtration(ecc2, filtration_steps[i-1])\n\n difference += abs(ec_1 - ec_2) * (filtration_steps[i] - filtration_steps[i-1])\n\n return difference\n\n\n\n\n\n# # given a distance matrix between two pointclouds\n# # returns the matrix with the optimal 1-haussdorf matching and the H1 distance\n# def hausdorff_1_pointclouds(C):\n# h_matrix = np.zeros(C.shape)\n#\n# minInRows = np.amin(C, axis=1)\n# for i, m in enumerate(minInRows):\n# j = np.where(C[i] == np.amin(m))[0]\n# h_matrix[i, j] = 1\n#\n# return h_matrix, np.sum(ot_h1 * C)\n#\n#\n# # given a distance matrix between two pointclouds\n# # returns the matrix with the optimal 1W and the 1W distance\n# import ot # ot needed to compute wasserstein distance\n# def wasserstein_1_pointclouds(C):\n# ot_emd = ot.emd([], [], C)\n# ot_emd *= ot_emd.shape[0]\n#\n# return ot_emd, np.sum(ot_emd * C)\n", "sub_path": "ecc_utils.py", "file_name": "ecc_utils.py", "file_ext": "py", "file_size_in_byte": 3069, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 40, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hlines", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "477803654", "text": "import glob\nfrom sklearn.utils import shuffle\n\n\ndef get_vehicle_classification_data():\n \"\"\"\n Return list of file paths corresponding to vehicle and non-vehicle images.\n\n All of the images in this dataset have shape, 64 x 64 x 3.\n\n :return: (2-tuple) list of vehicle images, list of non-vehicle images\n \"\"\"\n vehicles = glob.glob(\"vehicles/*/*.png\")\n shuffle(vehicles)\n\n non_vehicles = glob.glob(\"non-vehicles/*/*.png\")\n shuffle(non_vehicles)\n\n return vehicles, non_vehicles", "sub_path": "libp5/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 502, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "glob.glob", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.utils.shuffle", "line_number": 14, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.utils.shuffle", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "615313819", "text": "import numpy as np\nfrom rdkit import Chem\nimport os\nimport pickle\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem import Draw\n\ndef get_molecules(filename) :\n if os.path.exists('data/data.dataset') :\n with open('data/data.dataset', 'rb') as f :\n data = pickle.load(f)\n return data['A'], data['X']\n data = list(filter(lambda x: x is not None and x.GetNumAtoms() > 1, Chem.SDMolSupplier(filename)))\n max_atom_nb = max(mol.GetNumAtoms() for mol in data)\n As = np.zeros((len(data), max_atom_nb, max_atom_nb), dtype=int)\n Xs = np.zeros((len(data), max_atom_nb), dtype=int)\n for i, mol in enumerate(data) :\n n = mol.GetNumAtoms()\n bonds = np.array([[bond.GetBeginAtomIdx(), bond.GetEndAtomIdx(), bond.GetBondType()] for bond in mol.GetBonds()]).reshape(-1, 3)\n As[i][bonds[:, 0], bonds[:, 1]] = bonds[:, 2]\n As[i][bonds[:, 1], bonds[:, 0]] = bonds[:, 2]\n Xs[i][:n] = [atom.GetAtomicNum() for atom in mol.GetAtoms()]\n with open('data/data.dataset', 'wb') as f : \n pickle.dump({'A' : As, 'X' : Xs}, f)\n return As, Xs\n\n\ndef matrices2mol(node_labels, edge_labels, strict=False):\n mol = Chem.RWMol()\n for node_label in node_labels:\n mol.AddAtom(Chem.Atom(int(node_label)))\n for start, end in zip(*np.nonzero(edge_labels)):\n if start > end:\n mol.AddBond(int(start), int(end), Chem.BondType.values[edge_labels[start, end]])\n if strict:\n try:\n Chem.SanitizeMol(mol)\n except:\n mol = None\n return mol\n\ndef mols2grid_image(mols, molsPerRow):\n mols = [e if e is not None else Chem.RWMol() for e in mols]\n for mol in mols:\n 
AllChem.Compute2DCoords(mol)\n return Draw.MolsToGridImage(mols, molsPerRow=molsPerRow, subImgSize=(150, 150))\n\ndef get_valid_scores(mols) :\n isValid = lambda x : x is not None and Chem.MolToSmiles(x) != ''\n return np.array(list(map(isValid, mols)), dtype=np.float32).mean()\n", "sub_path": "utils/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1993, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.exists", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 12, "usage_type": "call"}, {"api_name": "rdkit.Chem.SDMolSupplier", "line_number": 14, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 14, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 25, "usage_type": "call"}, {"api_name": "rdkit.Chem.RWMol", "line_number": 30, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 30, "usage_type": "name"}, {"api_name": "rdkit.Chem.Atom", "line_number": 32, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.nonzero", "line_number": 33, "usage_type": "call"}, {"api_name": "rdkit.Chem.BondType", "line_number": 35, "usage_type": "attribute"}, {"api_name": "rdkit.Chem", "line_number": 35, "usage_type": "name"}, {"api_name": "rdkit.Chem.SanitizeMol", "line_number": 38, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 38, "usage_type": "name"}, {"api_name": "rdkit.Chem.RWMol", "line_number": 44, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 44, "usage_type": "name"}, {"api_name": "rdkit.Chem.AllChem.Compute2DCoords", "line_number": 46, "usage_type": "call"}, {"api_name": "rdkit.Chem.AllChem", "line_number": 46, "usage_type": "name"}, {"api_name": "rdkit.Chem.Draw.MolsToGridImage", "line_number": 47, "usage_type": "call"}, {"api_name": "rdkit.Chem.Draw", "line_number": 47, "usage_type": "name"}, {"api_name": "rdkit.Chem.MolToSmiles", "line_number": 50, "usage_type": "call"}, {"api_name": "rdkit.Chem", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 51, "usage_type": "attribute"}]} +{"seq_id": "282176452", "text": "from flask import Flask\nfrom websocket_server import start_server\nimport multiprocessing as mp\n\napp = Flask(__name__)\napp.debug = True\n\n@app.route(\"/\", methods = [\"GET\", \"POST\"])\ndef root():\n with open('websocket_client.html', \"r+\") as f:\n return f.read()\n\ndef main():\n app.run(host = \"0.0.0.0\", port = 3742)\n \nif __name__ == \"__main__\":\n main()\n", "sub_path": "web_server.py", "file_name": "web_server.py", "file_ext": "py", "file_size_in_byte": 365, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "41394599", "text": "from django.test import TestCase\nfrom django.urls import reverse\nfrom smartHouseApp.models import SmartHouseUser\nfrom smartHouseApp.views import ModbusConnect, ModbusRegister, state_mask_converting, change_room_color\n\n\n# Create your tests 
here.\nclass ProjectTestCase(TestCase):\n def test_view_redirect(self):\n \"\"\"Проверяем ответ на главную страницу, если с неё идет перенаправление, то код ответа 302\"\"\"\n response = self.client.get('http://127.0.0.1:8000/')\n self.assertEqual(response.status_code, 302)\n\n def test_view_response(self):\n \"\"\"Проверяем ответ на страницу-редирект, код ответа 200\"\"\"\n response = self.client.get(reverse('login'))\n self.assertEqual(response.status_code, 200)\n\n def test_view_no_response(self):\n \"\"\"Проверяем от��ет на несуществующую страницу код ответа 404\"\"\"\n no_response = self.client.get('/post')\n self.assertEqual(no_response.status_code, 404)\n\n def test_create_user(self):\n \"\"\"Проверяем создание обЪекта модели User\"\"\"\n test_user = SmartHouseUser.objects.create_user(username=\"test_user\")\n self.assertEqual(test_user.username, \"test_user\")\n self.assertNotEqual(test_user.username, \"user\")\n\n def test_connection(self):\n \"\"\"Проверяем подключение к COM порту, is_open\n возвращает True, если подключение успешно\"\"\"\n self.modbus_master = ModbusConnect()\n self.modbus_master.connect_modbus()\n self.assertTrue(self.modbus_master.ser.is_open)\n\n def test_create_register(self):\n \"\"\"Проверяем создание объекта регистра\n переменная cmd_light1_on по умолчанию\n создаётся в булевом значении False\"\"\"\n test_register = ModbusRegister(0, \"holding_registers\")\n data = test_register.cmd_light1_on\n self.assertFalse(data)\n\n def test_read_register(self):\n \"\"\"Проверяем чтение регистра\n для этого в ПЛК в 8 регистр\n предварительно записано значение 10\"\"\"\n test_read = ModbusRegister(8, \"input_registers\")\n test_read.setDaemon(True)\n test_read.start()\n test_read.reading_register()\n data = test_read.mask\n self.assertEqual(data, 10)\n\n def test_write_register(self):\n \"\"\"Проверяем запись регистра\n для этого в ПЛК в 0 регистр\n пробуем произвести запись\"\"\"\n test_write = ModbusRegister(0, \"holding_registers\")\n test_write.setDaemon(True)\n test_write.start()\n test_write.reading_register()\n test_write.test_switch_on = True\n test_write.write_register()\n test_write.reading_register()\n data = test_write.test_switch_on\n self.assertTrue(data)\n\n def test_mask_converting(self):\n \"\"\"Проверяем работу нашего конвертора битовых масок\n Число 202 в двоичной системе выглядит как 1100 1010\n Функция возвращает нам булевые значения, поэтому\n правильный результат начиная с первого бита будет:\n False, True, False, True, False, False, True, True\"\"\"\n bit_mask = 202\n self.assertFalse(state_mask_converting(bit_mask, 1))\n self.assertTrue(state_mask_converting(bit_mask, 2))\n self.assertFalse(state_mask_converting(bit_mask, 3))\n self.assertTrue(state_mask_converting(bit_mask, 4))\n self.assertFalse(state_mask_converting(bit_mask, 5))\n self.assertFalse(state_mask_converting(bit_mask, 6))\n self.assertTrue(state_mask_converting(bit_mask, 7))\n self.assertTrue(state_mask_converting(bit_mask, 8))\n\n def test_room_color(self):\n \"\"\"Проверяем работу нашего конвертора значения температуры в RGB цвет\n окраса комнаты, для 25 градусов значение должно быть 255, 200, 0, 0.4\"\"\"\n temperature = 25\n self.assertEqual(change_room_color(temperature), \"255, 200, 0, 0.4\")\n", "sub_path": "smartHouseApp/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 4567, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.test.TestCase", "line_number": 8, 
"usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 16, "usage_type": "call"}, {"api_name": "smartHouseApp.models.SmartHouseUser.objects.create_user", "line_number": 26, "usage_type": "call"}, {"api_name": "smartHouseApp.models.SmartHouseUser.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "smartHouseApp.models.SmartHouseUser", "line_number": 26, "usage_type": "name"}, {"api_name": "smartHouseApp.views.ModbusConnect", "line_number": 33, "usage_type": "call"}, {"api_name": "smartHouseApp.views.ModbusRegister", "line_number": 41, "usage_type": "call"}, {"api_name": "smartHouseApp.views.ModbusRegister", "line_number": 49, "usage_type": "call"}, {"api_name": "smartHouseApp.views.ModbusRegister", "line_number": 60, "usage_type": "call"}, {"api_name": "smartHouseApp.views.state_mask_converting", "line_number": 77, "usage_type": "call"}, {"api_name": "smartHouseApp.views.state_mask_converting", "line_number": 78, "usage_type": "call"}, {"api_name": "smartHouseApp.views.state_mask_converting", "line_number": 79, "usage_type": "call"}, {"api_name": "smartHouseApp.views.state_mask_converting", "line_number": 80, "usage_type": "call"}, {"api_name": "smartHouseApp.views.state_mask_converting", "line_number": 81, "usage_type": "call"}, {"api_name": "smartHouseApp.views.state_mask_converting", "line_number": 82, "usage_type": "call"}, {"api_name": "smartHouseApp.views.state_mask_converting", "line_number": 83, "usage_type": "call"}, {"api_name": "smartHouseApp.views.state_mask_converting", "line_number": 84, "usage_type": "call"}, {"api_name": "smartHouseApp.views.change_room_color", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "75618524", "text": "import os\nimport sys\nimport io\nimport unittest\nfrom mock import patch, Mock\nimport tempfile\nimport shutil\n\nimport roslib.stack_manifest\n\nfrom rosdoc_rosorg.megastack import _create_megastack, _get_all_stacks, _generate_files\n\nclass MegastackTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n self.root_directory = tempfile.mkdtemp()\n self.directories = dict(setUp=self.root_directory)\n\n self.create_stack(self.root_directory, 'foo')\n\n @staticmethod\n def create_stack(root_directory, name):\n pack_path = os.path.join(root_directory, name)\n os.makedirs(pack_path)\n mani_path = os.path.join(pack_path, roslib.stack_manifest.STACK_FILE)\n content = \"\"\"\n\n\nBSD\n\nfoo.url\n\n\"\"\"\n with open(mani_path, 'w') as fhand:\n fhand.write(unicode(content))\n\n @classmethod\n def tearDownClass(self):\n for d in self.directories:\n shutil.rmtree(self.directories[d])\n\n def test_get_all_stacks_empty(self):\n repos = []\n all_pkgs = _get_all_stacks(repos, checkouts_dir=self.root_directory)\n self.assertEqual({}, all_pkgs)\n\n def test_get_all_stacks_one(self):\n repomock = Mock()\n repos = [('foo', repomock)]\n all_pkgs = _get_all_stacks(repos, checkouts_dir=self.root_directory)\n self.assertEqual({'foo': ['foo', 'foo', os.path.join(self.root_directory, 'foo')]}, all_pkgs)\n\n def test_create_megastack(self):\n all_stacks = {}\n manifests = {}\n mega_yaml = _create_megastack(all_stacks, manifests)\n self.assertEqual([], mega_yaml)\n\n def test_create_megastack(self):\n foo_attrs = ['foo', 'foorepo', os.path.join(self.root_directory, 'foo')]\n all_stacks = {'foo': foo_attrs}\n manimock = Mock()\n manifests = {'foo': manimock}\n mega_yaml = _create_megastack(all_stacks, manifests)\n self.assertEqual([{'brief': manimock.brief,\n 'description': manimock.description,\n 'name': 'foo',\n 
'repo': 'foorepo'}], mega_yaml)\n\n def test_generate_files(self):\n pkgdict = {'foo': 'bar'}\n files = _generate_files(target_dir=self.root_directory,\n mega_yaml=pkgdict)\n self.assertEqual(files, [os.path.join(self.root_directory, 'megastack.yaml')])\n self.assertTrue(os.path.isfile(os.path.join(self.root_directory,\n 'megastack.yaml')))\n", "sub_path": "rosdoc_rosorg/test/local/test_megastack.py", "file_name": "test_megastack.py", "file_ext": "py", "file_size_in_byte": 2668, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "unittest.TestCase", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tempfile.mkdtemp", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "roslib.stack_manifest.stack_manifest", "line_number": 26, "usage_type": "attribute"}, {"api_name": "roslib.stack_manifest", "line_number": 26, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 41, "usage_type": "call"}, {"api_name": "rosdoc_rosorg.megastack._get_all_stacks", "line_number": 45, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 49, "usage_type": "call"}, {"api_name": "rosdoc_rosorg.megastack._get_all_stacks", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "rosdoc_rosorg.megastack._create_megastack", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "mock.Mock", "line_number": 63, "usage_type": "call"}, {"api_name": "rosdoc_rosorg.megastack._create_megastack", "line_number": 65, "usage_type": "call"}, {"api_name": "rosdoc_rosorg.megastack._generate_files", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "124088121", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 11 15:05:17 2020\r\n\r\n@author: ANDRE BORGATO MORELLI\r\n\"\"\"\r\n\r\nimport pickle\r\nimport igraph as ig\r\nimport networkx as nx\r\nimport osmnx as ox\r\nimport os\r\nfrom shapely.geometry import Point\r\nfrom shapely.ops import cascaded_union\r\nimport pyproj\r\nfrom functools import partial\r\nfrom shapely.ops import transform, unary_union\r\nimport geopandas as gpd\r\nfrom tools.graph_operations import *\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport random\r\n\r\ndef get_edges_in_route(G, z_origin, z_destination, weight): #função para gerar as rotas de uma zona para outra\r\n origins = [n for n, zone in G.nodes(data=\"zone\") if zone==z_origin]\r\n dests = [n for n, zone in G.nodes(data=\"zone\") if zone==z_destination]\r\n if len(origins)==0 or len(dests)==0:\r\n return[]\r\n while True: # encontra uma 
rota válida\r\n o = random.choice(origins)\r\n d = random.choice(dests)\r\n \r\n if o==d:\r\n continue\r\n elif not nx.has_path(G, o, d):\r\n continue\r\n break\r\n \r\n path = nx.dijkstra_path(G, o, d, weight) #Nota p/ futuro - trocando para o iGraph isso fica mais rápido\r\n edges = [(path[i], path[i+1], 0) for i in range(len(path)-1)]\r\n return edges\r\n\r\n\r\ndef allocate_od_trips_from_bulk_data(G, od_df, origin_column, destination_column, trip_reason_column = None, \r\n trip_mode_column = None, weight=None, reasons='all', modes='all', k=1):\r\n edge_counts = {}.fromkeys(G.edges,0)\r\n for row in tqdm_notebook(df.index):\r\n \r\n if reasons=='all': # Pula a iteração se o motivo não pertence aos fornecidos\r\n pass\r\n elif (trip_reason_column is not None) and (od_df[trip_reason_column][row] not in reasons):\r\n continue\r\n if modes=='all': # Pula a iteração se o modo não pertence aos fornecidos\r\n pass\r\n elif (trip_reason_column is not None) and (od_df[trip_mode_column][row] not in modes):\r\n continue\r\n for i in range(k):\r\n try:\r\n origin = int(od_df['Zorigem'][row]) \r\n destination = int(od_df['Zdestino'][row])\r\n except ValueError: # alguns valores não podem ser convertidos p/ int como strings com \"#N/A\"\r\n break\r\n edges = get_edges_in_route(G, origin, destination, weight = weight)\r\n for edge in edges: #adiciona as arestas da rota na contagem total\r\n edge_counts[edge] += 1\r\n return edge_counts", "sub_path": "tools/od.py", "file_name": "od.py", "file_ext": "py", "file_size_in_byte": 2537, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "random.choice", "line_number": 31, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 32, "usage_type": "call"}, {"api_name": "networkx.has_path", "line_number": 36, "usage_type": "call"}, {"api_name": "networkx.dijkstra_path", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "215666081", "text": "'''\nCreated on 06-Jun-2018\n\n@author: Pritika\n'''\nimport paramiko\nfrom sys import stdin, stdout, stderr\n\nclass CustomSSHClient:\n def __init__(self,host,port=22,user=None,pwd=None):\n self.host =host\n self.user = user\n self.port =port\n self.ssh = paramiko.SSHClient()\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.ssh.connect(self.host, self.port, self.user, pwd)\n \n def check_output(self,cmd):\n stdin,stdout,stderr = self.ssh.exec_command(cmd)\n return stdout.read().decode('ascii') #convert byte string to unicode\n \n def __del__(self):\n self.ssh.close()\n\n\nif __name__ == '__main__':\n ssh = CustomSSHClient('****.info',user='training', pwd='training')\n op = ssh.check_output('lscpu')\n print(op)\n \n\n ", "sub_path": "test/objoriented/psoopsssh.py", "file_name": "psoopsssh.py", "file_ext": "py", "file_size_in_byte": 817, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "paramiko.SSHClient", "line_number": 14, "usage_type": "call"}, {"api_name": "paramiko.AutoAddPolicy", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 19, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 19, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 19, "usage_type": "name"}, {"api_name": "sys.stdout.read", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "308500645", "text": "\"\"\" This file defines utilities for 
the ROS agents. \"\"\"\nimport numpy as np\n\nimport rospy\n\nfrom gps.algorithm.policy.lin_gauss_policy import LinearGaussianPolicy\nfrom gps_agent_pkg.msg import ControllerParams, LinGaussParams, TfParams, CaffeParams, TfActionCommand\nfrom gps.sample.sample import Sample\nfrom gps.proto.gps_pb2 import LIN_GAUSS_CONTROLLER, CAFFE_CONTROLLER, TF_CONTROLLER\nimport logging\nLOGGER = logging.getLogger(__name__)\n\ndef msg_to_sample(ros_msg, agent):\n \"\"\"\n Convert a SampleResult ROS message into a Sample Python object.\n \"\"\"\n sample = Sample(agent)\n for sensor in ros_msg.sensor_data:\n sensor_id = sensor.data_type\n shape = np.array(sensor.shape)\n data = np.array(sensor.data).reshape(shape)\n sample.set(sensor_id, data)\n return sample\n\n\ndef policy_to_msg(policy, noise, use_TfController=False):\n \"\"\"\n Convert a policy object to a ROS ControllerParams message.\n \"\"\"\n msg = ControllerParams()\n if use_TfController:\n msg.controller_to_execute = TF_CONTROLLER\n msg.tf = TfParams()\n msg.tf.dU = policy.dU\n elif isinstance(policy, LinearGaussianPolicy):\n msg.controller_to_execute = LIN_GAUSS_CONTROLLER\n msg.lingauss = LinGaussParams()\n msg.lingauss.dX = policy.dX\n msg.lingauss.dU = policy.dU\n msg.lingauss.K_t = \\\n policy.K.reshape(policy.T * policy.dX * policy.dU).tolist()\n msg.lingauss.k_t = \\\n policy.fold_k(noise).reshape(policy.T * policy.dU).tolist()\n else:\n raise NotImplementedError(\"Caffe not imported or Unknown policy object: %s\" % policy)\n return msg\n\n\ndef tf_policy_to_action_msg(deg_action, action, action_id):\n \"\"\"\n Convert an action to a TFActionCommand message.\n \"\"\"\n msg = TfActionCommand()\n msg.action = action.tolist()\n msg.dU = deg_action\n msg.id = action_id\n return msg\n\n\ndef tf_obs_msg_to_numpy(obs_message):\n # ToDo: Reshape this if needed.\n return np.array(obs_message.data)\n\n\nclass TimeoutException(Exception):\n \"\"\" Exception thrown on timeouts. \"\"\"\n def __init__(self, sec_waited):\n Exception.__init__(self, \"Timed out after %f seconds\", sec_waited)\n\n\nclass ServiceEmulator(object):\n \"\"\"\n Emulates a ROS service (request-response) from a\n publisher-subscriber pair.\n Args:\n pub_topic: Publisher topic.\n pub_type: Publisher message type.\n sub_topic: Subscriber topic.\n sub_type: Subscriber message type.\n \"\"\"\n def __init__(self, pub_topic, pub_type, sub_topic, sub_type):\n self._pub = rospy.Publisher(pub_topic, pub_type)\n self._sub = rospy.Subscriber(sub_topic, sub_type, self._callback)\n\n self._waiting = False\n self._subscriber_msg = None\n\n def _callback(self, message):\n if self._waiting:\n self._subscriber_msg = message\n self._waiting = False\n\n def publish(self, pub_msg):\n \"\"\" Publish a message without waiting for response. 
\"\"\"\n self._pub.publish(pub_msg)\n\n def publish_and_wait(self, pub_msg, timeout=5.0, poll_delay=0.01,\n check_id=False):\n \"\"\"\n Publish a message and wait for the response.\n Args:\n pub_msg: Message to publish.\n timeout: Timeout in seconds.\n poll_delay: Speed of polling for the subscriber message in\n seconds.\n check_id: If enabled, will only return messages with a\n matching id field.\n Returns:\n sub_msg: Subscriber message.\n \"\"\"\n if check_id: # This is not yet implemented in C++.\n raise NotImplementedError()\n\n self._waiting = True\n self.publish(pub_msg)\n\n time_waited = 0\n while self._waiting:\n rospy.sleep(poll_delay)\n time_waited += 0.01\n if time_waited > timeout:\n raise TimeoutException(time_waited)\n return self._subscriber_msg\n", "sub_path": "python/gps/agent/ros_jaco/ros_utils.py", "file_name": "ros_utils.py", "file_ext": "py", "file_size_in_byte": 4029, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "gps.sample.sample.Sample", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "gps_agent_pkg.msg.ControllerParams", "line_number": 30, "usage_type": "call"}, {"api_name": "gps.proto.gps_pb2.TF_CONTROLLER", "line_number": 32, "usage_type": "name"}, {"api_name": "gps_agent_pkg.msg.TfParams", "line_number": 33, "usage_type": "call"}, {"api_name": "gps.algorithm.policy.lin_gauss_policy.LinearGaussianPolicy", "line_number": 35, "usage_type": "argument"}, {"api_name": "gps.proto.gps_pb2.LIN_GAUSS_CONTROLLER", "line_number": 36, "usage_type": "name"}, {"api_name": "gps_agent_pkg.msg.LinGaussParams", "line_number": 37, "usage_type": "call"}, {"api_name": "gps_agent_pkg.msg.TfActionCommand", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 62, "usage_type": "call"}, {"api_name": "rospy.Publisher", "line_number": 82, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 83, "usage_type": "call"}, {"api_name": "rospy.sleep", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "197326725", "text": "# -.- encoding: utf-8 -.-\n#\n# RESTful API\n#\n\nimport json\n\nfrom time import ctime\n\nfrom bottle import hook\nfrom bottle import route\nfrom bottle import request\nfrom bottle import response\n\nfrom db import cur\n\nAPI_VERSION = 0.1\n\n@hook('after_request')\ndef enable_cors():\n response.headers['Access-Control-Allow-Origin'] = '*'\n\n@route('/api', methohd='GET')\n@route('/api/status', method='GET')\ndef api_status():\n return {'api_version':API_VERSION, 'server_status':'online', 'server_time': ctime()}\n\n@route('/api/summary/:target/:query', method='GET')\ndef api_summary(target, query):\n responseData = gen_summary_json(target, query)\n\n response.content_type = 'application/json'\n return json.dumps(responseData)\n\ndef _summary_dev_send():\n \"\"\"統計開發單位送出審核案件數\n\n 以開發單位提列環評案件數,得知有哪些營利單位或政府單位,對環境異動有極大影響作用。\n \"\"\"\n\n sql_script = \"\"\"\n SELECT devunit, count(*) count FROM details\n WHERE devunit != \"\"\n GROUP BY devunit ORDER BY count DESC\n LIMIT 10\n \"\"\"\n cur.execute(sql_script)\n row = cur.fetchall()\n\n return row\n\ndef _summary_dev_pass():\n \"\"\"統計開發單位送出審核案件通過數\n\n 總計營利單位或政府單位通過提列環評案件。\n \"\"\"\n\n sql_script = \"\"\"\n SELECT devunit, count(*) count FROM details\n WHERE devunit != \"\" AND 
TRIM(examinestatus) in (\n '審核修正通過',\n '審核通過',\n '有條件通過環境影響評估',\n '通過環境影響評估審查'\n )\n GROUP BY devunit ORDER BY count DESC\n LIMIT 10\n \"\"\"\n\n cur.execute(sql_script)\n row = cur.fetchall()\n return row\n\ndef gen_summary_json(target,query):\n json_data = {\n 'city': {\n 'area': [\n {\n \"city\" : \"台北市\",\n \"area\" : 100,\n \"unit\" : \"公頃\"\n },\n {\n \"city\" : \"新北市\",\n \"area\" : 80,\n \"unit\" : \"公頃\"\n }\n ]\n },\n 'dev': {\n 'send' : _summary_dev_send(),\n 'pass' : _summary_dev_pass()\n }\n }\n\n if json_data.has_key(target) and json_data[target].has_key(query):\n return json_data[target][query]\n\n return {}\n\n", "sub_path": "routes/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 2474, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "bottle.response.headers", "line_number": 21, "usage_type": "attribute"}, {"api_name": "bottle.response", "line_number": 21, "usage_type": "name"}, {"api_name": "bottle.hook", "line_number": 19, "usage_type": "call"}, {"api_name": "time.ctime", "line_number": 26, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 23, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 24, "usage_type": "call"}, {"api_name": "bottle.response.content_type", "line_number": 32, "usage_type": "attribute"}, {"api_name": "bottle.response", "line_number": 32, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 33, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 28, "usage_type": "call"}, {"api_name": "db.cur.execute", "line_number": 47, "usage_type": "call"}, {"api_name": "db.cur", "line_number": 47, "usage_type": "name"}, {"api_name": "db.cur.fetchall", "line_number": 48, "usage_type": "call"}, {"api_name": "db.cur", "line_number": 48, "usage_type": "name"}, {"api_name": "db.cur.execute", "line_number": 70, "usage_type": "call"}, {"api_name": "db.cur", "line_number": 70, "usage_type": "name"}, {"api_name": "db.cur.fetchall", "line_number": 71, "usage_type": "call"}, {"api_name": "db.cur", "line_number": 71, "usage_type": "name"}]} +{"seq_id": "236053288", "text": "import torch\nfrom torch.autograd import Variable\nimport os\nimport numpy as np\nimport scipy\nimport scipy.misc\n\ndef to_var(x, volatile=False):\n if torch.cuda.is_available():\n # print('cuda')\n x = x.cuda()\n return Variable(x, volatile=volatile)\n\ndef idx2onehot(idx, n):\n\n assert idx.size(1) == 1\n assert torch.max(idx).data[0] < n\n\n onehot = torch.zeros(idx.size(0), n)\n onehot.scatter_(1, idx.cpu().data, 1)\n onehot = to_var(onehot)\n \n return onehot\n\ndef save_img(args, x, num_iter, recon=False):\n display_row = 5\n display_col = 5\n\n if not(os.path.exists(args.figroot)):\n os.mkdir(os.path.join(args.figroot))\n save_root = os.path.join(args.figroot, args.data)\n if not(os.path.exists(save_root)):\n os.mkdir(os.path.join(save_root))\n path_recon = os.path.join(save_root, 'recon')\n if not(os.path.exists(path_recon)):\n os.mkdir(path_recon)\n\n img_sz = args.img_size\n if args.img_channel == 1:\n fig_img = np.zeros((img_sz*display_row, img_sz*display_col))\n else:\n fig_img = np.zeros((img_sz*display_row, img_sz*display_col, 3))\n \n x = x.view(x.size(0), args.img_channel, img_sz, img_sz).data.cpu().detach().numpy()\n x = x.transpose(0, 2, 3, 1)\n\n for row in range(display_row):\n for col in range(display_col):\n t = row*args.num_labels + col\n if t >= x.shape[0]:\n continue\n\n if args.img_channel == 1:\n 
fig_img[row*img_sz:(row+1)*img_sz, col*img_sz:(col+1)*img_sz] = x[t,:,:,0]\n else:\n fig_img[row*img_sz:(row+1)*img_sz, col*img_sz:(col+1)*img_sz, :] = x[t,:,:,:]\n if recon == False:\n scipy.misc.imsave(os.path.join(save_root, str(num_iter)+'.jpg'), fig_img)\n else:\n scipy.misc.imsave(os.path.join(path_recon, str(num_iter)+'_r.jpg'), fig_img)\n\n", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1901, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "torch.cuda.is_available", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.misc.imsave", "line_number": 58, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "scipy.misc.imsave", "line_number": 60, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}]} +{"seq_id": "388710713", "text": "from django import template\nfrom datetime import datetime, timedelta\n\nregister = template.Library()\n\n\n@register.filter\ndef format_date(value):\n date = datetime.fromtimestamp(int(value))\n date_now = datetime.today()\n res = date_now - date\n if res.total_seconds()/60 < 10:\n return 'Только что'\n elif res.total_seconds()/60 > 10 and res.total_seconds()/3600 < 24:\n return f'{int(res.total_seconds()/3600)} часов назад'\n else:\n return date\n\n\n@register.filter\ndef score_filter(value):\n if int(value) <= -5:\n return 'Все пло��о'\n elif int(value) > -5 and int(value) <=5:\n return 'Нейтрально'\n else:\n return 'Хорошо'\n\n\n@register.filter\ndef 
format_num_comments(value):\n if int(value) == 0:\n return 'Остаьвте комментарий'\n elif int(value) > 0 and int(value) <= 50:\n return value\n else:\n return '50+'\n\n@register.filter\ndef format_selftext(value, count):\n if value == '':\n return ''\n else:\n value = value.split(' ')\n if len(value) > 2 * count:\n start = ' '.join(value[0:count])\n end = value[-count:]\n end = ' '.join(end)\n return f'{start} ... {end}'\n elif len(value) == 1:\n return f'{value[0]}'\n else:\n count = int(len(value) / 2)\n start = ' '.join(value[0:count])\n end = value[-count:]\n end = ' '.join(end)\n if count == 1:\n return f'{start} {end}'\n else:\n return f'{start} ... {end}'\n\n\n", "sub_path": "dynamic-templates/task3/app/templatetags/news_filters.py", "file_name": "news_filters.py", "file_ext": "py", "file_size_in_byte": 1628, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.template.Library", "line_number": 4, "usage_type": "call"}, {"api_name": "django.template", "line_number": 4, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "132158122", "text": "#!/usr/bin/python\n# -*-coding:Utf-8 -*\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize as opt\nimport math\nimport deformAux as aux\nimport outils.affiche as disp\nimport outils.outils as tools\n\n\"\"\"\n.. module:: deformAlgo\n\n\"\"\"\n\ndef algo(forme,cible,line,nb_iterations,mu_coef,beta_coef):\n\n \"\"\"\n Applique l'algorithme de déformation et retourne la ligne obtenue\n\n Décommenter les lignes 68 à 75 pour afficher sur l'interface les cibles des agrandissements et réductions\n \n Utiliser coef = coefmax, ligne 59, au lieu de opt.fminbound(...), ligne 58, donnera un résultat plus rapidement, la courbe sera cependant moins lisse (les déformations se font de manière plus prononcées).\n\n :param forme: Forme à envoyer sur la cible pour calculer la déformation \n :type forme: numpy.array\n :param cible: Forme cible\n :type cible: numpy.array\n :param line: Ligne à déformer\n :type line: numpy.array\n :param nb_iterations: Nombre d'itérations\n :type nb_iterations: int\n :param mu_coef: mu dans l'algorithme, l'augmenter \"aplatira\" la gaussienne\n :type mu_coef: float\n :param beta_coef: beta dans l'algorithme\n :type beta_coef: float\n :rtype: numpy.array\n \"\"\"\n\n frm = np.copy(forme)\n\n centers = np.zeros((nb_iterations,2))\n targets = np.zeros((nb_iterations,2))\n coefs = np.zeros(nb_iterations)\n\n for j in range(nb_iterations):\n\t # calcul de m (argmax)\n\t S = np.sum((forme - cible)**2, axis=1)\n\t idmax = S.argmax()\n\t themax = S[idmax]\n\t # calcul de pj, q et vj\n\t f = lambda x: np.linalg.norm(aux.func_iter(forme[idmax,:], cible[idmax,:], forme, (1.0/beta_coef), x) - cible)\n\t coefmax = mu_coef * 1.0/(np.sqrt(2) * np.exp(-0.5) * np.linalg.norm(cible[idmax,:] - forme[idmax,:]))\n\t # calcul de rhoj\n\t coef = opt.fminbound(func = f, x1 = 0.0, x2 = min(coefmax, 50), disp = 0)\n\t #coef = coefmax\n\t centers[j,:] = forme[idmax,:]\n\t targets[j,:] = cible[idmax,:]\n\t coefs[j] = coef\n\t # calcul de psi(Z)\n\t forme = aux.func_iter(forme[idmax,:], cible[idmax,:], forme, 1.0/beta_coef, coef)\n fun = lambda pt: 
aux.func_result(centers, targets, coefs, (1.0/beta_coef), nb_iterations, pt);\n fun_reverse = lambda pt: aux.func_result_reverse(centers, targets, coefs, (1.0/beta_coef), nb_iterations, pt);\n\t\t\n '''\n obs = np.array([fun(pt) for pt in frm])\n\n newobs = np.array([fun_reverse(pt) for pt in cible])\n\n plt.plot(obs[:,0],obs[:,1],'brown')\n plt.plot(newobs[:,0],newobs[:,1],'orange')\n '''\n\n PT1 = fun_reverse(line[0][:])\n PT2 = fun_reverse(line[-1][:])\n\n trans0 = [-(PT1[0]-line[0][0]),-(PT1[1]-line[0][1])]\n transN = [-(PT2[0]-line[-1][0]),-(PT2[1]-line[-1][1])]\n\n N = len(line)\n n = float(N)\n\n Z_rev = np.zeros((N,2))\n for i in range(N):\n\t Z_rev[i] = fun_reverse(line[i])\n\n res = np.array([[Z_rev[i][0]+(i/n)*transN[0]+((n-i)/n)*trans0[0],Z_rev[i][1]+(i/n)*transN[1]+((n-i)/n)*trans0[1]] for i in range(N)])\n\n return res\n", "sub_path": "source/algo/deformAlgo.py", "file_name": "deformAlgo.py", "file_ext": "py", "file_size_in_byte": 3008, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.copy", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 53, "usage_type": "attribute"}, {"api_name": "deformAux.func_iter", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 54, "usage_type": "attribute"}, {"api_name": "scipy.optimize.fminbound", "line_number": 56, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 56, "usage_type": "name"}, {"api_name": "deformAux.func_iter", "line_number": 62, "usage_type": "call"}, {"api_name": "deformAux.func_result", "line_number": 63, "usage_type": "call"}, {"api_name": "deformAux.func_result_reverse", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "560333922", "text": "\"\"\"\nQ347\nTop K Frequent Elements\nMedium\n\n\n\nGiven an integer array nums and an integer k, return the k most\nfrequent elements. 
You may return the answer in any order.\n\nConstraints:\n\n1 <= nums.legth <= 105\nk is in the range [1, the number of unique elements in the array].\nIt is guaranteed that the answer is unique.\n\n\nFollow up: Your algorithm's time complexity must be better than O(n log n),\n where n is the array's size.\n\n\n\"\"\"\n\nfrom typing import List\n\nclass Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n\n # heap using built-in\n from collections import Counter\n import heapq\n counter = Counter(nums)\n\n heap = [key for key in counter]\n for i in range(k):\n heapq.heapify(heap[i:])\n\n return heapq.nlargest(k, heap, key=lambda x: counter[x])\n\n\n\n\nnums = [3,0,1,0]\nk = 2\nnums2 = [1,1,1,2,2,3]\nk2 = 2\n\nnums3= [4,1,-1,2,-1,2,3]\nk3 = 2\n\nnums4 = [2,3,4,1,4,0,4,-1,-2,-1]\nk4 = 2\nsol = Solution()\nprint(sol.topKFrequent(nums4, k4))\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "Q347-v3.py", "file_name": "Q347-v3.py", "file_ext": "py", "file_size_in_byte": 1026, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 32, "usage_type": "call"}, {"api_name": "heapq.heapify", "line_number": 36, "usage_type": "call"}, {"api_name": "heapq.nlargest", "line_number": 38, "usage_type": "call"}, {"api_name": "{'Counter': 'collections.Counter', 'heapq': 'heapq'}", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "448932789", "text": "import abp\nimport board\nimport reflex\n\n\ndef change_turn(turn, maximizing):\n if turn == 0:\n return 1, not maximizing\n else:\n return 0, not maximizing\n\n\nif __name__ == \"__main__\":\n win = 0\n reflex_board = reflex.ChessBoard()\n reflex_agent = reflex.reflexAgent(0)\n abp_board = board.Board()\n abp_agent = abp.AlphaBeta(\"o\")\n maximizing = False\n turn = 1\n reflex_board.first_move(0)\n abp_board.placeMove((0, 0), \"x\")\n print(\"Reflex Moves:\")\n reflex_board.print_board()\n \"\"\"\"\n first_spot = abp_board.first_move(\"o\")\n reflex_board.move(first_spot, 1)\n print(\"Alpha-Beta Moves:\")\n reflex_board.print_board()\n \"\"\"\n while win == 0:\n if turn == 0:\n print(\"Reflex Moves:\")\n (win, spot) = reflex_agent.reflex_move(reflex_board)\n if win != 2:\n abp_board.placeMove(spot, \"x\")\n else:\n print(\"Alpha-Beta Moves:\")\n spot = abp_agent.alphabeta(abp_board, 3, -100000, 100000, maximizing)\n abp_board.placeMove(spot, \"o\")\n reflex_board.move(spot, 1)\n if abp_board.endGame():\n win = 1\n print(\"Alpha-Beta wins\")\n (turn, maximizing) = change_turn(turn, maximizing)\n reflex_board.print_board()\n if win == 2:\n print(\"Tie\")\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1340, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "reflex.ChessBoard", "line_number": 15, "usage_type": "call"}, {"api_name": "reflex.reflexAgent", "line_number": 16, "usage_type": "call"}, {"api_name": "board.Board", "line_number": 17, "usage_type": "call"}, {"api_name": "abp.AlphaBeta", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "572637811", "text": "# -*- coding: utf-8 -*-\n\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom nessie.items import Article\n\n\nclass ThanhnienSpider(scrapy.Spider):\n name = 'thanhnien'\n allowed_domains = ['thanhnien.com.vn']\n start_urls = ['http://www.thanhnien.com.vn']\n link_extractor = 
LinkExtractor(allow_domains=allowed_domains)\n\n def parse(self, response):\n \"\"\"Parse the front page for article URLs.\"\"\"\n for link in self.link_extractor.extract_links(response):\n yield scrapy.Request(link.url, callback=self.parse_article)\n\n def parse_article(self, response):\n article = Article()\n article['url'] = response.url\n article['title'] = response.css('title::text').extract_first()\n article['author'] = response.css('.article-ds strong::text').extract_first()\n article['date'] = response.css('.date-line::text').extract_first()\n article['content'] = response.css('.article-content div::text').extract()\n return article\n", "sub_path": "nessie/spiders/thanhnien_spider.py", "file_name": "thanhnien_spider.py", "file_ext": "py", "file_size_in_byte": 1005, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "scrapy.Spider", "line_number": 8, "usage_type": "attribute"}, {"api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 12, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 17, "usage_type": "call"}, {"api_name": "nessie.items.Article", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "348456546", "text": "import os\nimport logging\n\nimport unittest2\nfrom boto import cloudformation\nfrom boto.ec2 import autoscale\nfrom boto.exception import BotoServerError\n\nfrom cfn_sphere import StackActionHandler\nfrom cfn_sphere.stack_configuration import Config\n\nLOGGER = logging.getLogger(__name__)\nLOGGER.setLevel(logging.INFO)\nlogging.getLogger('cfn_sphere').setLevel(logging.DEBUG)\n\n\ndef get_resources_dir():\n return os.path.join(os.path.dirname(__file__), '../resources')\n\n\ndef verify_stacks_are_gone(cfn_conn, config):\n for stack_name in config.stacks.keys():\n try:\n stack = cfn_conn.describe_stacks(stack_name)[0]\n if stack.stack_status != \"DELETE_COMPLETE\":\n raise Exception(\"Stack {0} seems to exist but should not\".format(stack_name))\n except BotoServerError:\n pass\n\n\ndef get_output_dict_from_stack(stack):\n result = {}\n for output in stack.outputs:\n result[output.key] = output.value\n return result\n\n\ndef get_parameter_dict_from_stack(stack):\n result = {}\n for parameter in stack.parameters:\n result[parameter.key] = parameter.value\n return result\n\n\nclass CreateStacksTest(unittest2.TestCase):\n @classmethod\n def setUpClass(cls):\n test_resources_dir = get_resources_dir()\n cls.cfn_conn = cloudformation.connect_to_region(\"eu-west-1\")\n cls.config = Config(config_file=os.path.join(test_resources_dir, \"stacks.yml\"))\n cls.stack_handler = StackActionHandler(cls.config)\n\n LOGGER.info(\"Syncing stacks\")\n cls.stack_handler.create_or_update_stacks()\n\n @classmethod\n def tearDownClass(cls):\n LOGGER.info(\"Cleaning up\")\n cls.stack_handler.delete_stacks()\n verify_stacks_are_gone(cls.cfn_conn, cls.config)\n\n def test_stacks_are_in_create_complete_state(self):\n LOGGER.info(\"Verifying stacks are in CREATE_COMPLETE state\")\n\n for stack_name in self.config.stacks.keys():\n stack = self.cfn_conn.describe_stacks(stack_name)[0]\n self.assertEqual(\"CREATE_COMPLETE\", stack.stack_status)\n\n def test_instance_stack_uses_vpc_outputs(self):\n vpc_stack = self.cfn_conn.describe_stacks(\"cfn-sphere-test-vpc\")[0]\n instance_stack = self.cfn_conn.describe_stacks(\"cfn-sphere-test-instances\")[0]\n\n vpc_stack_outputs = get_output_dict_from_stack(vpc_stack)\n instance_stack_parameters = get_parameter_dict_from_stack(instance_stack)\n\n 
self.assertEqual(vpc_stack_outputs[\"id\"], instance_stack_parameters[\"vpcID\"])\n self.assertEqual(vpc_stack_outputs[\"subnet\"], instance_stack_parameters[\"subnetID\"])\n\n def test_userdata(self):\n autoscale_conn = autoscale.connect_to_region(\"eu-west-1\")\n instance_stack_resources = self.cfn_conn.describe_stack_resource(\"cfn-sphere-test-instances\", \"lc\")\n lc_name = \\\n instance_stack_resources[\"DescribeStackResourceResponse\"][\"DescribeStackResourceResult\"][\n \"StackResourceDetail\"][\n \"PhysicalResourceId\"]\n lc = autoscale_conn.get_all_launch_configurations(names=[lc_name])[0]\n\n user_data_lines = lc.user_data.split('\\n')\n\n self.assertEqual(\"#taupage-ami-config\", user_data_lines[0])\n\n self.assertTrue(\"application_version: 1\" in user_data_lines)\n self.assertTrue(\" stack: cfn-sphere-test-instances\" in user_data_lines)\n\n dockercfg_root_index = user_data_lines.index(\"dockercfg:\")\n self.assertEqual(\" https://my-private-registry:\", user_data_lines[dockercfg_root_index + 1])\n self.assertEqual(\" email: test@example.com\", user_data_lines[dockercfg_root_index + 2])\n self.assertEqual(\" auth: my-secret-string\", user_data_lines[dockercfg_root_index + 3])\n\n environment_root_index = user_data_lines.index(\"environment:\")\n self.assertEqual(\" DYNAMO_DB_PREFIX: cfn-sphere-test-instances\", user_data_lines[environment_root_index + 1])\n\n notify_cfn_root_index = user_data_lines.index(\"notify_cfn:\")\n self.assertEqual(\" resource: asg\", user_data_lines[notify_cfn_root_index + 1])\n self.assertEqual(\" stack: cfn-sphere-test-instances\", user_data_lines[notify_cfn_root_index + 2])\n\n ports_root_index = user_data_lines.index(\"ports:\")\n self.assertEqual(\" 8080: 9000\", user_data_lines[ports_root_index + 1])\n\n\nif __name__ == \"__main__\":\n unittest2.main()\n", "sub_path": "src/integrationtest/python/stack_management_tests.py", "file_name": "stack_management_tests.py", "file_ext": "py", "file_size_in_byte": 4388, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 13, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 18, "usage_type": "call"}, {"api_name": "boto.exception.BotoServerError", "line_number": 27, "usage_type": "name"}, {"api_name": "unittest2.TestCase", "line_number": 45, "usage_type": "attribute"}, {"api_name": "boto.cloudformation.connect_to_region", "line_number": 49, "usage_type": "call"}, {"api_name": "boto.cloudformation", "line_number": 49, "usage_type": "name"}, {"api_name": "cfn_sphere.stack_configuration.Config", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "cfn_sphere.StackActionHandler", "line_number": 51, "usage_type": "call"}, {"api_name": "boto.ec2.autoscale.connect_to_region", "line_number": 80, "usage_type": "call"}, {"api_name": "boto.ec2.autoscale", "line_number": 80, "usage_type": "name"}, {"api_name": "unittest2.main", "line_number": 112, "usage_type": "call"}]} +{"seq_id": 
"201891411", "text": "'''\nfilmlinks4u deccandelight plugin\nCopyright (C) 2016 Gujal\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see .\n'''\nfrom main import Scraper\nfrom BeautifulSoup import BeautifulSoup, SoupStrainer\nimport urllib, re, requests, xbmc\nimport HTMLParser\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nfrom requests.packages.urllib3.exceptions import SNIMissingWarning\nfrom requests.packages.urllib3.exceptions import InsecurePlatformWarning\n\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\nrequests.packages.urllib3.disable_warnings(SNIMissingWarning)\nrequests.packages.urllib3.disable_warnings(InsecurePlatformWarning)\n\nclass flinks(Scraper):\n def __init__(self):\n Scraper.__init__(self)\n self.bu = 'https://www.filmlinks4u.is/category/'\n self.icon = self.ipath + 'flinks.png'\n self.list = {'01Tamil Movies': self.bu + 'tamil',\n '02Telugu Movies': self.bu + 'telugu',\n '03Malayalam Movies': self.bu + 'malayalam',\n '04Kannada Movies': self.bu + 'kannada',\n '05Hindi Movies': self.bu + 'hindi',\n '06English Movies': self.bu + 'hollywood',\n '07Animation Movies': self.bu + 'animation',\n '08Biography Movies': self.bu + 'biography',\n '09Documentary Movies': self.bu + 'documentary',\n '10Bengali Movies': self.bu + 'bengali',\n '11Bhojpuri Movies': self.bu + 'bhojpuri',\n '12Gujarati Movies': self.bu + 'gujarati',\n '13Marathi Movies': self.bu + 'marathi',\n '14Oriya Movies': self.bu + 'oriya',\n '15Punjabi Movies': self.bu + 'punjabi',\n '16Rajasthani Movies': self.bu + 'rajasthani',\n '17Urdu Movies': self.bu + 'urdu',\n '18Nepali Movies': self.bu + 'nepali',\n '19[COLOR cyan]Hindi Adult Softcore[/COLOR]': self.bu + 'adult-hindi-short-films',\n '20[COLOR cyan]Adult Movies[/COLOR]': self.bu + 'adult',\n '21[COLOR yellow]** Search **[/COLOR]': self.bu[:-9] + '?s='}\n \n def get_menu(self):\n return (self.list,7,self.icon)\n \n def get_items(self,url):\n h = HTMLParser.HTMLParser()\n movies = []\n if url[-3:] == '?s=':\n search_text = self.get_SearchQuery('Film Links 4U')\n search_text = urllib.quote_plus(search_text)\n url = url + search_text\n\n html = requests.get(url, headers=self.hdr, verify=False).text\n #mlink = SoupStrainer('div', {'class':re.compile('content')})\n #mdiv = BeautifulSoup(html, parseOnlyThese=mlink)\n plink = SoupStrainer('div', {'class':'pagination'})\n Paginator = BeautifulSoup(html, parseOnlyThese=plink)\n items = re.findall('(.*?)<.*?post-cats\">(.*?) 
2 -----------------------')\r\nprint('\\n')\r\nprint('------------------statistics using a depth = 3 gini index -----------------')\r\nprint('\\n') \r\n###############################################################################\r\nprint('\\n')\r\nprint('\\n')\r\ndef train_using_gini_depth3(X_train,y_train): \r\n \r\n # Creating the classifier object \r\n clf_gini = DecisionTreeClassifier(criterion = \"gini\", \r\n random_state = 100,max_depth=3, min_samples_leaf=5) \r\n \r\n # Performing training \r\n clf_gini.fit(X_train, y_train) \r\n return clf_gini \r\n\r\nclf_gini = train_using_gini_depth3(X_train, y_train) \r\nprint('\\n')\r\ny_pred_gini = prediction(X_test, clf_gini) \r\nprint('\\n')\r\ncal_accuracy(y_test, y_pred_gini) \r\nprint('\\n')\r\n\r\n###visualize tree\r\nimport graphviz\r\nfrom sklearn import tree\r\ntree.export_graphviz(clf_gini,out_file='tree_depth3.dot') \r\n\r\nimport pydot\r\n(graph,) = pydot.graph_from_dot_file('tree_depth3.dot')\r\ngraph.write_png('tree_depth3.png')\r\n\r\nfrom IPython.display import Image\r\nImage(filename = 'tree_depth3.png')\r\n\r\nprint('\\n')\r\nprint('------------------statistics using a depth = 4 gini index -----------------')\r\n\r\ndef train_using_gini_depth4(X_train, X_test, y_train): \r\n \r\n # Creating the classifier object \r\n clf_gini = DecisionTreeClassifier(criterion = \"gini\", \r\n random_state = 100,max_depth=4, min_samples_leaf=5) \r\n \r\n # Performing training \r\n clf_gini.fit(X_train, y_train) \r\n return clf_gini \r\nclf_gini = train_using_gini_depth4(X_train, X_test, y_train) \r\nprint('\\n')\r\ny_pred_gini = prediction(X_test, clf_gini) \r\nprint('\\n')\r\ncal_accuracy(y_test, y_pred_gini) \r\nprint('\\n')\r\nprint('\\n')\r\n###visualize tree\r\n\r\n\r\nimport graphviz\r\nfrom sklearn import tree\r\ntree.export_graphviz(clf_gini,out_file='tree_depth4.dot') \r\n\r\nimport pydot\r\n(graph,) = pydot.graph_from_dot_file('tree_depth4.dot')\r\ngraph.write_png('tree_depth4.png')\r\n\r\nfrom IPython.display import Image\r\nImage(filename = 'tree_depth4.png')\r\n\r\n\r\nprint('------------------statistics none max_depth gini index-----------------')\r\n\r\ndef train_using_gini_no_max(X_train, X_test, y_train): \r\n \r\n # Creating the classifier object \r\n clf_gini = DecisionTreeClassifier(criterion = \"gini\")\r\n \r\n # Performing training \r\n clf_gini.fit(X_train, y_train) \r\n return clf_gini \r\nclf_gini = train_using_gini_no_max(X_train, X_test, y_train) \r\ny_pred_gini = prediction(X_test, clf_gini) \r\ncal_accuracy(y_test, y_pred_gini)\r\n\r\n\r\n###visualize tree\r\nimport graphviz\r\nfrom sklearn import tree\r\ntree.export_graphviz(clf_gini,out_file='tree_nodepth.dot') \r\n\r\nimport pydot\r\n(graph,) = pydot.graph_from_dot_file('tree_nodepth.dot')\r\ngraph.write_png('tree_nodepth.png')\r\n\r\nfrom IPython.display import Image\r\nImage(filename = 'tree_nodepth.png')\r\n\r\nprint('\\n')\r\nprint('---------------------------10_k-fold_Cross_validation----------------------')\r\nprint('\\n')\r\n\r\n\r\nimport pandas as pd\r\nfrom sklearn.tree import DecisionTreeClassifier \r\nfrom sklearn.model_selection import cross_val_score\r\nimport matplotlib.pyplot as plt \r\n\r\n#k-Fold Cross-Validation\r\n#setting n_jobs=-1 select all physical cores and maximises their usage.\r\nX = Data_train.iloc[:,:25]\r\nX_n = X.apply(lambda x:(x-x.min()) / (x.max()-x.min()))\r\n\r\ny = Data_train.iloc[:,25].values\r\ndepth_range = range(2,30)\r\ndepth = []\r\nfor i in depth_range:\r\n clf = DecisionTreeClassifier(criterion = \"gini\", 
\r\n random_state = 100,max_depth=i, min_samples_leaf=5)\r\n # Perform 10-fold cross validation \r\n scores = cross_val_score(estimator=clf, X=X_n, y=y, cv=10, n_jobs=1)\r\n depth.append((scores.mean()))\r\nprint(depth)\r\n\r\nprint('Length of list', len(depth))\r\nprint('Max Gini index', max(depth))\r\n# plot how accuracy changes as we vary depth\r\n# plot the value of depth for KNN (x-axis) versus the cross-validated accuracy (y-axis)\r\nplt.plot(depth_range, depth)\r\nplt.xlabel('Value of depth for decision tree')\r\nplt.ylabel('Cross-validated accuracy')\r\nplt.show()\r\n#We can extract the position of max with:\r\nmax_gini_position= max(enumerate(depth), key=(lambda x: x[1]))\r\n\r\nprint('using cross_validation on our decision tree the gini index maximum is reached for a depth equal to', max_gini_position[0])\r\nprint('\\n')\r\nprint( ' We can now process and get the Accuracy for this maximum')\r\n\r\nprint('----------------------statistics for maximum gini index------------------- ')\r\nprint('\\n')\r\nprint('\\n')\r\n\r\ndef train_using_gini_max(X_train, X_test, y_train): \r\n \r\n # Creating the classifier object \r\n clf_gini = DecisionTreeClassifier(criterion = \"gini\", \r\n random_state = 100,max_depth=max_gini_position[0], min_samples_leaf=5) \r\n \r\n # Performing training \r\n clf_gini.fit(X_train, y_train) \r\n return clf_gini \r\n\r\nprint('\\n')\r\n\r\nBEST_clf_gini = train_using_gini_max(X_train, X_test, y_train) \r\nprint('\\n')\r\ny_pred_gini = prediction(X_test, BEST_clf_gini) \r\nprint('\\n')\r\ncal_accuracy(y_test, y_pred_gini) \r\nprint('\\n')\r\nprint('\\n')\r\n\r\nprint('Accuracy for a 10_cross_validation correspond to ',accuracy_score(y_test, y_pred_gini))\r\nprint((accuracy_score(y_test, y_pred_gini)) * 100, '% of our data are well classified')\r\n\r\nprint('\\n')\r\nprint('\\n')\r\nprint(' ----------------------Final Prediction for Data_test ----------------------------')\r\nprint('\\n')\r\nprint('\\n')\r\n\r\nprint(\"The prediction for the unclassified data:\")\r\ny_pred = prediction(Data_test, BEST_clf_gini)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "DT.py", "file_name": "DT.py", "file_ext": "py", "file_size_in_byte": 8828, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 35, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 72, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 77, "usage_type": "call"}, {"api_name": "sklearn.tree.export_graphviz", "line_number": 102, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 102, "usage_type": "name"}, {"api_name": "pydot.graph_from_dot_file", "line_number": 105, "usage_type": "call"}, {"api_name": "IPython.display.Image", "line_number": 110, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 125, "usage_type": "call"}, {"api_name": "sklearn.tree.export_graphviz", "line_number": 142, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 142, "usage_type": "name"}, {"api_name": 
"pydot.graph_from_dot_file", "line_number": 145, "usage_type": "call"}, {"api_name": "IPython.display.Image", "line_number": 149, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 157, "usage_type": "call"}, {"api_name": "sklearn.tree.export_graphviz", "line_number": 175, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 175, "usage_type": "name"}, {"api_name": "pydot.graph_from_dot_file", "line_number": 178, "usage_type": "call"}, {"api_name": "IPython.display.Image", "line_number": 182, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 190, "usage_type": "call"}, {"api_name": "sklearn.tree.export_graphviz", "line_number": 203, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 203, "usage_type": "name"}, {"api_name": "pydot.graph_from_dot_file", "line_number": 206, "usage_type": "call"}, {"api_name": "IPython.display.Image", "line_number": 210, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 231, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 242, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 242, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 243, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 244, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 244, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 245, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 245, "usage_type": "name"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 260, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 277, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 278, "usage_type": "call"}]} +{"seq_id": "471512606", "text": "from typing import Optional\nimport sys\nsys.path.append('/Users/umeco/projects/zero_DL2/src/')\nfrom common.trainer import Trainer\nfrom common.optimizer import Adam\nfrom ch3.simple_cbow import SimpleCBOW\nfrom common.utils import preprocess, create_context_target, convert_one_hot\n\nwindow_size = 1\nhidden_size = 5\nbatch_size = 3\nmax_epoch = 1000\n\ntext = 'You say goodbye and I say hello .'\ncorpus, word_to_id, id_to_word = preprocess(text)\n\nvocab_size = len(word_to_id)\ncontexts, target = create_context_target(corpus, window_size)\ntarget = convert_one_hot(target, vocab_size)\ncontexts = convert_one_hot(contexts, vocab_size)\n\nmodel = SimpleCBOW(vocab_size, hidden_size)\noptimizer = Adam()\ntrainer = Trainer(model, optimizer)\n\ntrainer.fit(contexts, target, max_epoch, batch_size)\ntrainer.plot()", "sub_path": "src/ch3/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 791, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "common.utils.preprocess", "line_number": 15, "usage_type": "call"}, {"api_name": "common.utils.create_context_target", "line_number": 18, "usage_type": "call"}, {"api_name": "common.utils.convert_one_hot", "line_number": 19, 
"usage_type": "call"}, {"api_name": "common.utils.convert_one_hot", "line_number": 20, "usage_type": "call"}, {"api_name": "ch3.simple_cbow.SimpleCBOW", "line_number": 22, "usage_type": "call"}, {"api_name": "common.optimizer.Adam", "line_number": 23, "usage_type": "call"}, {"api_name": "common.trainer.Trainer", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "376371840", "text": "__author__ = 'http://pymotw.com/2/smtpd/'\nimport smtplib\nimport email.utils,os\nfrom email.mime.text import MIMEText\n\n# Create the message\nmsg = MIMEText('This is the body of the message.')\nmsg['To'] = email.utils.formataddr(('Recipient', 'soheil_paper@yahoo.com'))\nmsg['From'] = email.utils.formataddr(('Author', 'soheilpaper@gmail.com'))\nmsg['Subject'] = 'Simple test message'\n\ntry:\n ip=(os.environ['OPENSHIFT_DIY_IP'])\n port=int('15030')\nexcept:\n ip=('127.0.0.1')\n port=15030\nserver = smtplib.SMTP(ip, 15030)\n#server = smtplib.SMTP(ip+':15030')\nserver.set_debuglevel(True) # show communication with the server\ntry:\n # server.sendmail(From, [To], msg.as_string())\n server.sendmail('soheilpaper@gmail.com', ['soheil_paper@yahoo.com'], msg.as_string())\nfinally:\n server.quit()\n", "sub_path": "all_functions/email/smtpd_custom.py", "file_name": "smtpd_custom.py", "file_ext": "py", "file_size_in_byte": 797, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "email.mime.text.MIMEText", "line_number": 7, "usage_type": "call"}, {"api_name": "email.utils.utils.formataddr", "line_number": 8, "usage_type": "call"}, {"api_name": "email.utils.utils", "line_number": 8, "usage_type": "attribute"}, {"api_name": "email.utils", "line_number": 8, "usage_type": "name"}, {"api_name": "email.utils.utils.formataddr", "line_number": 9, "usage_type": "call"}, {"api_name": "email.utils.utils", "line_number": 9, "usage_type": "attribute"}, {"api_name": "email.utils", "line_number": 9, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "smtplib.SMTP", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "634519973", "text": "import sys\nfrom bs4 import BeautifulSoup\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\nfrom django.core.management.base import BaseCommand\nimport requests\nimport grequests\n\nfrom core.util import extract_url_from_skimlinks\n\nSITEMAP_URL = 'http://s3.amazonaws.com/pennywyse/sitemap.xml'\n\nclass Command(BaseCommand):\n urls = []\n error_urls = []\n checked_urls = 0\n\n def _check_sitemap(self, url):\n \"\"\"\n Fetching and parsing main sitemap, extracting sitemaps locations,\n then parsing them and preparing URLs for check.\n \"\"\"\n \n try:\n r = requests.get(url)\n soup = BeautifulSoup(r.text)\n for location in soup.findAll('loc'):\n if location.text.endswith('.xml'):\n self._check_sitemap(location.text)\n else:\n url = extract_url_from_skimlinks(location.text)\n self.urls.append(url)\n \n except Exception as e:\n sys.stdout.write('\\nError: %s' % str(e))\n\n def _check_urls(self):\n \"\"\"\n Concurrently checking bunch of URLs, limited by SITEMAP_CHECK_LIMIT setting,\n displaying progress in console.\n \"\"\"\n \n total = len(self.urls)\n for i in range(0, len(self.urls), settings.SITEMAP_CHECK_LIMIT):\n r = (grequests.head(u) for u in self.urls[i:i+settings.SITEMAP_CHECK_LIMIT])\n rs = grequests.map(r)\n self.error_urls += filter(lambda x: x.status_code not in [200, 301, 302], rs)\n self.checked_urls += 
len(self.urls[i:i+settings.SITEMAP_CHECK_LIMIT])\n remained = total - self.checked_urls\n progress = round(self.checked_urls / total, 2)\n sys.stdout.write('\\rChecked %.2f%% (%s URLs, %d errors, %d remained)' % (progress, self.checked_urls, \n len(self.error_urls), remained))\n sys.stdout.flush()\n\n def handle(self, *args, **options):\n sys.stdout.write('\\rChecking sitemaps.\\n')\n sys.stdout.flush()\n self._check_sitemap(SITEMAP_URL)\n sys.stdout.write('\\rChecking extracted URLs.\\n')\n sys.stdout.flush()\n self._check_urls()\n # if there any URLs that hasn't passed check - sending report by email to addresses, listed in SITEMAP_REPORT_RECIPIENTS\n if self.error_urls:\n report = '\\n'.join(error_urls)\n email_message = EmailMessage('Sitemaps report', body='Report with list of URLs which returned error response codes is attached.', \n from_email=settings.DEFAULT_FROM_EMAIL, to=settings.SITEMAP_REPORT_RECIPIENTS)\n email_message.attach('report.txt', report,'text/plain')\n email_message.send()\n else:\n sys.stdout.write('\\nAll pages returned success response codes.\\n')", "sub_path": "core/management/commands/check_sitemap.py", "file_name": "check_sitemap.py", "file_ext": "py", "file_size_in_byte": 2953, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 13, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 25, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 26, "usage_type": "call"}, {"api_name": "core.util.extract_url_from_skimlinks", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.conf.settings.SITEMAP_CHECK_LIMIT", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 44, "usage_type": "name"}, {"api_name": "grequests.head", "line_number": 45, "usage_type": "call"}, {"api_name": "django.conf.settings.SITEMAP_CHECK_LIMIT", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 45, "usage_type": "name"}, {"api_name": "grequests.map", "line_number": 46, "usage_type": "call"}, {"api_name": "django.conf.settings.SITEMAP_CHECK_LIMIT", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 48, "usage_type": "name"}, {"api_name": "sys.stdout.write", "line_number": 51, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 51, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 53, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 53, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 56, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 57, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 57, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 59, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 60, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.core.mail.EmailMessage", "line_number": 65, "usage_type": "call"}, {"api_name": 
"django.conf.settings.DEFAULT_FROM_EMAIL", "line_number": 66, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 66, "usage_type": "name"}, {"api_name": "django.conf.settings.SITEMAP_REPORT_RECIPIENTS", "line_number": 66, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 70, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 70, "usage_type": "attribute"}]} +{"seq_id": "280769912", "text": "# coding: utf8\nfrom django.urls import path\nfrom .v1 import views as views_v1\nfrom .v1.views import (\n StationModelListView,\n StationModelDetailView,\n StationModelDeleteView,\n StationModelCreateView,\n)\n\nurlpatterns_v1_locations = (\n [path(\"\", views_v1.LocationView.as_view(), name=\"v1_list_create_location\"),],\n \"locations\",\n)\n\nurls_stations = (\n [\n path(\"\", StationModelListView.as_view(), name=\"stations-all\"),\n path(\"/\", StationModelDetailView.as_view(), name=\"stations-detail\",),\n path(\"create\", StationModelCreateView.as_view(), name=\"stations-create\"),\n path(\n \"delete/\", StationModelDeleteView.as_view(), name=\"stations-delete\",\n ),\n ],\n \"stations\",\n)\n", "sub_path": "apps/stations/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 748, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "v1.views.LocationView.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "v1.views.LocationView", "line_number": 12, "usage_type": "attribute"}, {"api_name": "v1.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "v1.views.StationModelListView.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "v1.views.StationModelListView", "line_number": 18, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "v1.views.StationModelDetailView.as_view", "line_number": 19, "usage_type": "call"}, {"api_name": "v1.views.StationModelDetailView", "line_number": 19, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "v1.views.StationModelCreateView.as_view", "line_number": 20, "usage_type": "call"}, {"api_name": "v1.views.StationModelCreateView", "line_number": 20, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "v1.views.StationModelDeleteView.as_view", "line_number": 22, "usage_type": "call"}, {"api_name": "v1.views.StationModelDeleteView", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "1647127", "text": "import numpy as np\nimport math\nfrom matplotlib import pyplot as plt\nfrom numpy import linalg as LA\n \n# Parametros de la senal analiada\nf=2500.\nFsamp= 20000. 
# la frecuencia de muestreo\n# La senal discreta \nN=200\nk=90\nn=np.linspace(0,N-1,N)\nt=n/Fsamp\nsignal=np.cos(2.*math.pi*f*t)\nfourier=np.fft.fft(signal)\nfourier_mejor=np.fft.fftshift(fourier)\n\n#******\nfourier_mejor_abs=np.power(np.absolute(fourier_mejor),2.)\n\n#complex signal\nsignal_ex=np.exp(1.j*2.*math.pi*k*f*t)#verificar\nfourier_ex=np.fft.fft(signal_ex)\nfourier_mejor_ex=np.fft.fftshift(fourier_ex)\n\n#******\nfourier_mejor_ex_abs=np.power(np.absolute(fourier_mejor_ex),2.)\n \n# calculos para relacional la senal discreta con el mundo real\nFmin=-Fsamp/2.\nFresol=Fsamp/N\nFmax=-Fmin-Fresol\nf=np.linspace(Fmin,Fmax,N)\n \nplt.plot(f,fourier_mejor_abs)\n\nplt.plot(f,fourier_mejor_ex_abs)\nplt.show()\n\n", "sub_path": "lab4/lab4_7D.py", "file_name": "lab4_7D.py", "file_ext": "py", "file_size_in_byte": 848, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.linspace", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 14, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.fft.fft", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.fft.fftshift", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.power", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 22, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.fft.fft", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.fft.fftshift", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.power", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "584807730", "text": "#!/usr/bin/env python3\n\"\"\"\nUsage: by_gene.py \n\nArguments:\n Name of the isoform to be run\n Path to ctl file\n\n\"\"\"\n#imports\nimport docopt\nimport Corsair as cor\nimport sys\n\n## Initialize docopt\nif __name__ == '__main__':\n\n try:\n arguments = docopt.docopt(__doc__)\n iso_name = str(arguments[''])\n ctl_file = str(arguments[''])\n except docopt.DocoptExit as e:\n print(e)\n\n## parse the ctl file, initialize the control object\nctl = cor.load_ctl(ctl_file)\n\n## a few things rely of the gene list being here, so just make it a one item list\nctl.gene_list = [iso_name]\n\n## for the first time only - will over-write saves otherwise\ncor.corsair_initialize(ctl)\n\n# # just do blast\ncor.run_blast(ctl, iso_name)\n", "sub_path": "aws_blast.py", "file_name": "aws_blast.py", "file_ext": 
"py", "file_size_in_byte": 802, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "docopt.docopt", "line_number": 19, "usage_type": "call"}, {"api_name": "docopt.DocoptExit", "line_number": 22, "usage_type": "attribute"}, {"api_name": "Corsair.load_ctl", "line_number": 26, "usage_type": "call"}, {"api_name": "Corsair.corsair_initialize", "line_number": 32, "usage_type": "call"}, {"api_name": "Corsair.run_blast", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "232781882", "text": "# -*- coding: utf-8 -*-\nimport sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nimport qdarkstyle\nfrom PyQt5.QtSql import *\nimport time\nimport csv, sqlite3\n\nclass addUser(QDialog):\n add_user_success_signal = pyqtSignal()\n\n def __init__(self, parent=None):\n super(addUser, self).__init__(parent)\n self.setUpUI()\n self.setWindowModality(Qt.WindowModal)\n self.setWindowTitle(\"添加用户\")\n\n def setUpUI(self):\n # 书名,书号,作者,分类,添加数量.出版社,出版日期\n # 书籍分类:哲学类、社会科学类、政治类、法律类、军事类、经济类、文化类、教育类、体育类、语言文字类、艺术类、历史类��地理类、天文学类、生物学类、医学卫生类、农业类\n # BookCategory = [\"哲学\", \"社会科学\", \"政治\", \"法律\", \"军事\", \"经济\", \"文化\", \"教育\", \"体育\", \"语言文字\", \"艺术\", \"历史\"\n # , \"地理\", \"天文学\", \"生物学\", \"医学卫生\", \"农业\"]\n\n self.resize(450, 400)\n self.layout = QFormLayout()\n self.setLayout(self.layout)\n\n # Label控件\n self.titlelabel = QLabel(\" 添加用户\")\n self.listingid_Label = QLabel(\"ListingId\")\n self.jkje_Label = QLabel(\"借款金额\")\n self.jkqx_Label = QLabel(\"借款期限\")\n self.jkll_Label = QLabel(\"借款利率\")\n self.jkcgrq_Label = QLabel(\"借款成功日期\")\n self.cspj_Label = QLabel(\"初始评级\")\n self.jklx_Label = QLabel(\"借款类型\")\n\n self.sfsb_label = QLabel(\"是否首标\")\n self.nl_Label = QLabel(\"年龄\")\n self.xb_Label = QLabel(\"性别\")\n self.sjrz_Label = QLabel(\"手机认证\")\n self.hkrz_Label = QLabel(\"户口认证\")\n self.sprz_Label = QLabel(\"视频认证\")\n self.xlrz_Label = QLabel(\"学历认证\")\n self.zxrz_Label = QLabel(\"征信认证\")\n self.tbrz_label = QLabel(\"淘宝认证\")\n\n self.lscgjkcs_Label = QLabel(\"历史成功借款次数\")\n self.lscgjkje_Label = QLabel(\"历史成功借款金额\")\n self.zdhbj_Label = QLabel(\"总待还本金\")\n self.lszchkqs_Label = QLabel(\"历史正常还款期数\")\n self.lsyqhkqs_Label = QLabel(\"历史逾期还款期数\")\n self._button_Label = QLabel(\"历史逾期还款期数\")\n\n\n # edit控件\n # 初始评级\n cspj_box = [\"AAA\", \"AA\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n jklx_box = [\"应收安全标\", \"电商\", \"APP闪电\", \"普通\", \"其他\"]\n xb_box = [\"男\", \"女\"]\n yes_no_box = [\"未成功认证\", \"成功认证\"]\n shi_fo_box = [\"是\", \"否\"]\n self.jkcgrq_Time = QDateTimeEdit()\n self.jkcgrq_Time.setDisplayFormat(\"yyyy-MM-dd\")\n\n self.cspj_ComboBox = QComboBox()\n self.cspj_ComboBox.addItems(cspj_box)\n\n self.jklx_ComboBox = QComboBox()\n self.jklx_ComboBox.addItems(jklx_box)\n\n self.xb_ComboBox = QComboBox()\n self.xb_ComboBox.addItems(xb_box)\n\n self.sjrz_ComboBox = QComboBox()\n self.sjrz_ComboBox.addItems(yes_no_box)\n\n self.sjrz_ComboBox = QComboBox()\n self.sjrz_ComboBox.addItems(yes_no_box)\n\n self.hkrz_ComboBox = QComboBox()\n self.hkrz_ComboBox.addItems(yes_no_box)\n\n self.sprz_ComboBox = QComboBox()\n self.sprz_ComboBox.addItems(yes_no_box)\n\n self.xlrz_ComboBox = QComboBox()\n self.xlrz_ComboBox.addItems(yes_no_box)\n\n self.zxrz_ComboBox = QComboBox()\n self.zxrz_ComboBox.addItems(yes_no_box)\n\n self.tbrz_ComboBox = QComboBox()\n self.tbrz_ComboBox.addItems(yes_no_box)\n\n self.sfsb_ComboBox = QComboBox()\n self.sfsb_ComboBox.addItems(shi_fo_box)\n # button控件\n self.addUserButton = 
QPushButton(\"添 加\")\n self.addfromCsvButton = QPushButton(\"外部导入\")\n\n\n # lineEdit控件\n self.listingid_Edit = QLineEdit()\n self.jkje_Edit = QLineEdit()\n self.jkqx_Edit = QLineEdit()\n self.jkll_Edit = QLineEdit()\n # self.jkcgrq_Edit = QLineEdit()\n # self.cspj_Edit = QLineEdit()\n # self.jklx_Edit = QLineEdit()\n\n # self.sfsb_Edit = QLineEdit()\n self.nl_Edit = QLineEdit()\n # self.xb_Edit = QLineEdit()\n # self.sjrz_Edit = QLineEdit()\n # self.hkrz_Edit = QLineEdit()\n # self.sprz_Edit = QLineEdit()\n # self.xlrz_Edit = QLineEdit()\n # self.zxrz_Edit = QLineEdit()\n # self.tbrz_Edit = QLineEdit()\n\n self.lscgjkcs_Edit = QLineEdit()\n self.lscgjkje_Edit = QLineEdit()\n self.zdhbj_Edit = QLineEdit()\n self.lszchkqs_Edit = QLineEdit()\n self.lsyqhkqs_Edit = QLineEdit()\n\n\n # self.bookNameEdit = QLineEdit()\n # self.bookIdEdit = QLineEdit()\n # self.authNameEdit = QLineEdit()\n # self.categoryComboBox = QComboBox()\n # self.categoryComboBox.addItems(BookCategory)\n # self.publisherEdit = QLineEdit()\n # self.publishTime = QDateTimeEdit()\n # self.publishTime.setDisplayFormat(\"yyyy-MM-dd\")\n # # self.publishDateEdit = QLineEdit()\n\n\n # self.addNumEdit = QLineEdit()\n #\n # self.bookNameEdit.setMaxLength(10)\n # self.bookIdEdit.setMaxLength(6)\n # self.authNameEdit.setMaxLength(10)\n # self.publisherEdit.setMaxLength(10)\n # self.addNumEdit.setMaxLength(12)\n # self.addNumEdit.setValidator(QIntValidator())\n\n # 添加进formlayout\n self.layout.addRow(\"\", self.titlelabel)\n self.layout.addRow(self.listingid_Label, self.listingid_Edit)\n self.layout.addRow(self.jkje_Label, self.jkje_Edit)\n self.layout.addRow(self.jkqx_Label, self.jkqx_Edit)\n self.layout.addRow(self.jkll_Label, self.jkll_Edit)\n self.layout.addRow(self.jkcgrq_Label, self.jkcgrq_Time)\n self.layout.addRow(self.cspj_Label, self.cspj_ComboBox)\n self.layout.addRow(self.jklx_Label, self.jklx_ComboBox)\n self.layout.addRow(self.sfsb_label, self.sfsb_ComboBox)\n self.layout.addRow(self.nl_Label, self.nl_Edit)\n self.layout.addRow(self.xb_Label, self.xb_ComboBox)\n self.layout.addRow(self.sjrz_Label, self.sjrz_ComboBox)\n self.layout.addRow(self.hkrz_Label, self.hkrz_ComboBox)\n self.layout.addRow(self.sprz_Label, self.sprz_ComboBox)\n self.layout.addRow(self.xlrz_Label, self.xlrz_ComboBox)\n\n self.layout.addRow(self.zxrz_Label, self.zxrz_ComboBox)\n self.layout.addRow(self.tbrz_label, self.tbrz_ComboBox)\n self.layout.addRow(self.lscgjkcs_Label, self.lscgjkcs_Edit)\n self.layout.addRow(self.lscgjkje_Label, self.lscgjkje_Edit)\n self.layout.addRow(self.zdhbj_Label, self.zdhbj_Edit)\n self.layout.addRow(self.lszchkqs_Label, self.lszchkqs_Edit)\n self.layout.addRow(self.lsyqhkqs_Label, self.lsyqhkqs_Edit)\n\n self.layout.addRow(\"\", self.addUserButton)\n self.layout.addRow(\"\", self.addfromCsvButton)\n\n # # 设置字体\n # font = QFont()\n # font.setPixelSize(20)\n # self.titlelabel.setFont(font)\n # font.setPixelSize(14)\n # self.bookNameLabel.setFont(font)\n # self.bookIdLabel.setFont(font)\n # self.authNameLabel.setFont(font)\n # self.categoryLabel.setFont(font)\n # self.publisherLabel.setFont(font)\n # self.publishDateLabel.setFont(font)\n # self.addNumLabel.setFont(font)\n #\n # self.bookNameEdit.setFont(font)\n # self.bookIdEdit.setFont(font)\n # self.authNameEdit.setFont(font)\n # self.publisherEdit.setFont(font)\n # self.publishTime.setFont(font)\n # self.categoryComboBox.setFont(font)\n # self.addNumEdit.setFont(font)\n\n # # button设置\n # font.setPixelSize(16)\n # self.addBookButton.setFont(font)\n # 
self.addBookButton.setFixedHeight(32)\n # self.addBookButton.setFixedWidth(140)\n\n # 设置间距\n self.titlelabel.setMargin(8)\n self.layout.setVerticalSpacing(10)\n\n self.addUserButton.clicked.connect(self.addBookButtonCicked)\n # self.addfromCsvButton.click.connect(self.addfromCsvButtonClicked)\n\n def addBookButtonCicked(self):\n\n listingid = self.listingid_Edit.text()\n jkje = self.jkje_Edit.text()\n jkqx = self.jkqx_Edit.text()\n jkll = self.jkll_Edit.text()\n jkcgrq = self.jkcgrq_Time.text()\n cspj = self.cspj_ComboBox.currentText()\n jklx = self.jklx_ComboBox.currentText()\n sfsb = self.sfsb_ComboBox.currentText()\n nl = self.nl_Edit.text()\n xb = self.xb_ComboBox.currentText()\n sjrz = self.sjrz_ComboBox.currentText()\n hkrz = self.hkrz_ComboBox.currentText()\n sprz = self.sprz_ComboBox.currentText()\n xlrz = self.xlrz_ComboBox.currentText()\n zxrz = self.zxrz_ComboBox.currentText()\n tbrz = self.tbrz_ComboBox.currentText()\n lscgjkcs = self.lscgjkcs_Edit.text()\n lscgjkje = self.lscgjkje_Edit.text()\n zdhbj = self.zdhbj_Edit.text()\n lszchkqx = self.lszchkqs_Edit.text()\n lsyqhkqs = self.lsyqhkqs_Edit.text()\n\n if (\n listingid == \"\" or jkje == \"\" or jkqx == \"\" or jkcgrq == \"\" or\n cspj == \"\" or jklx == \"\" or sfsb == \"\" or nl == \"\" or xb == \"\" or\n sjrz == \"\" or hkrz == \"\" or sprz == \"\" or xlrz == \"\" or zxrz == \"\" or\n tbrz == \"\" or lscgjkcs == \"\" or lscgjkje == \"\" or zdhbj == \"\" or\n lszchkqx == \"\" or lsyqhkqs == \"\" or jkll == \"\"\n ):\n print(QMessageBox.warning(self, \"警告\", \"有字段为空,添加失败\", QMessageBox.Yes, QMessageBox.Yes))\n return\n else:\n # addBookNum = int(addBookNum)\n db = QSqlDatabase.addDatabase(\"QSQLITE\")\n db.setDatabaseName('/home/deng/bi_ye_she_ji/shadow/LCLP.db')\n db.open()\n query = QSqlQuery()\n # 如果已存在,则update Book表的现存量,剩余可借量,不存在,则insert Book表,同时insert buyordrop表\n sql = \"SELECT * FROM user WHERE ListingId='%s'\" % (listingid)\n query.exec_(sql)\n if (query.next()):\n # sql = \"UPDATE Book SET NumStorage=NumStorage+%d,NumCanBorrow=NumCanBorrow+%d WHERE BookId='%s'\" % (\n # addBookNum, addBookNum, bookId)\n print(QMessageBox.information(self, \"提示\", \"该用户已存在!\", QMessageBox.Yes, QMessageBox.Yes))\n else:\n print(type(listingid), jkje, jkqx,jkll,\n jkcgrq,type(cspj),jklx,sfsb,\n nl, xb, sjrz, hkrz,\n sprz,xlrz, zxrz, tbrz,\n lscgjkcs,lscgjkje, zdhbj, lszchkqx, lsyqhkqs)\n sql = \"INSERT INTO user VALUES ('%s','%s','%s','%s',\" \\\n \"'%s','%s','%s','%s',\" \\\n \"'%s','%s','%s','%s',\" \\\n \"'%s','%s','%s','%s',\" \\\n \"'%s','%s','%s','%s','%s')\"%(\n listingid, jkje, jkqx,jkll,jkcgrq,cspj,jklx,sfsb,\n nl, xb, sjrz, hkrz,\n sprz,xlrz, zxrz, tbrz,\n lscgjkcs,lscgjkje, zdhbj, lszchkqx, lsyqhkqs)\n print(sql)\n query.exec_(sql)\n db.commit()\n # 插入droporinsert表\n # timenow = time.strftime('%Y-%m-%d', time.localtime(time.time()))\n # sql = \"INSERT INTO buyordrop VALUES ('%s','%s',1,%d)\" % (bookId, timenow, addBookNum)\n # query.exec_(sql)\n # db.commit()\n print(QMessageBox.information(self, \"提示\", \"添加用户成功!\", QMessageBox.Yes, QMessageBox.Yes))\n self.add_user_success_signal.emit()\n self.close()\n self.clearEdit()\n return\n\n # def addfromCsvButtonClicked(self):\n # db = QSqlDatabase.addDatabase(\"QSQLITE\")\n # db.setDatabaseName('/home/deng/bi_ye_she_ji/shadow/LCLP.db')\n # db.open()\n # query = QSqlQuery()\n # conn = sqlite3.connect(\"dbname.db\")\n # df = pandas.read_csv('d:\\\\filefolder\\csvname.csv')\n # df.to_sql('tablename', conn, if_exists='append', index=False)\n\n\n def clearEdit(self):\n 
self.listingid_Edit.clear()\n self.jkje_Edit.clear()\n self.jkqx_Edit.clear()\n self.jkll_Edit.clear()\n self.nl_Edit.clear()\n self.lscgjkcs_Edit.clear()\n self.lscgjkje_Edit.clear()\n self.zdhbj_Edit.clear()\n self.lszchkqs_Edit.clear()\n self.lsyqhkqs_Edit.clear()\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n app.setWindowIcon(QIcon(\"/home/deng/bi_ye_she_ji/library/images/MainWindow_1.png\"))\n app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n mainMindow = addUser()\n mainMindow.show()\n sys.exit(app.exec_())\n\n", "sub_path": "addUser.py", "file_name": "addUser.py", "file_ext": "py", "file_size_in_byte": 13155, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.argv", "line_number": 311, "usage_type": "attribute"}, {"api_name": "qdarkstyle.load_stylesheet_pyqt5", "line_number": 313, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 316, "usage_type": "call"}]} +{"seq_id": "362977255", "text": "\"\"\"A job to run executable programs.\"\"\"\n\nimport json\n\nfrom ndscheduler import job\nfrom ndscheduler import utils\nfrom ndscheduler import constants\nfrom ndscheduler.core import scheduler_manager\n\n\nclass SerialExecutionException(Exception):\n pass\n\n\nclass SerialJob(job.JobBase):\n @classmethod\n def meta_info(cls):\n return {\n \"job_class_string\": \"%s.%s\" % (cls.__module__, cls.__name__),\n \"notes\": (\"This will run list of tasks consequently\"),\n \"arguments\": [{\"type\": \"dict\", \"description\": \"Job class path and it params\"}],\n \"example_arguments\":\n '[{\"class\": \"simple_scheduler.jobs.sample_job.AwesomeJob\", \"args\": {\"foo\": \"bar\", \"baz\": [1, 2, 3]}},'\n '{\"class\": \"simple_scheduler.jobs.shell_job.ShellJob\", \"args\": \"wc -l /tmp/foo.txt\"}]',\n }\n\n def run(self, *args, **kwargs):\n scheduler = scheduler_manager.SchedulerManager.get_instance()\n datastore = scheduler.get_datastore()\n\n res = []\n\n for job_no, job_info in enumerate(args):\n job = utils.import_from_path(job_info[\"class\"])\n execution_id = utils.generate_uuid()\n\n job_id = \"{}-{}\".format(self.job_id, job_no)\n datastore.add_execution(\n execution_id,\n job_id,\n constants.EXECUTION_STATUS_SCHEDULED,\n description=\"Subtask {}: {}\".format(\n job_no, job.get_scheduled_description()\n ),\n )\n\n job.run_job(self.job_id, execution_id, *job_info.get(\"args\", []))\n exec_state = datastore.get_execution(execution_id)\n\n if exec_state[\"state\"] == constants.EXECUTION_STATUS_SCHEDULED_ERROR:\n raise SerialExecutionException(\n \"Job #{} failed with result {}\".format(job_no, exec_state[\"result\"])\n )\n\n res.append(json.loads(exec_state[\"result\"]))\n\n return res\n", "sub_path": "simple_scheduler/jobs/serial_job.py", "file_name": "serial_job.py", "file_ext": "py", "file_size_in_byte": 1974, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "ndscheduler.job.JobBase", "line_number": 15, "usage_type": "attribute"}, {"api_name": "ndscheduler.job", "line_number": 15, "usage_type": "name"}, {"api_name": "ndscheduler.core.scheduler_manager.SchedulerManager.get_instance", "line_number": 28, "usage_type": "call"}, {"api_name": "ndscheduler.core.scheduler_manager.SchedulerManager", "line_number": 28, "usage_type": "attribute"}, {"api_name": "ndscheduler.core.scheduler_manager", "line_number": 28, "usage_type": "name"}, {"api_name": "ndscheduler.job", "line_number": 34, "usage_type": "name"}, 
{"api_name": "ndscheduler.utils.import_from_path", "line_number": 34, "usage_type": "call"}, {"api_name": "ndscheduler.utils", "line_number": 34, "usage_type": "name"}, {"api_name": "ndscheduler.utils.generate_uuid", "line_number": 35, "usage_type": "call"}, {"api_name": "ndscheduler.utils", "line_number": 35, "usage_type": "name"}, {"api_name": "ndscheduler.constants.EXECUTION_STATUS_SCHEDULED", "line_number": 41, "usage_type": "attribute"}, {"api_name": "ndscheduler.constants", "line_number": 41, "usage_type": "name"}, {"api_name": "ndscheduler.job.get_scheduled_description", "line_number": 43, "usage_type": "call"}, {"api_name": "ndscheduler.job", "line_number": 43, "usage_type": "name"}, {"api_name": "ndscheduler.job.run_job", "line_number": 47, "usage_type": "call"}, {"api_name": "ndscheduler.job", "line_number": 47, "usage_type": "name"}, {"api_name": "ndscheduler.constants.EXECUTION_STATUS_SCHEDULED_ERROR", "line_number": 50, "usage_type": "attribute"}, {"api_name": "ndscheduler.constants", "line_number": 50, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "63791107", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nhttp://blog.csdn.net/chenghit\n\"\"\"\n\nimport wx\nimport threading\nfrom frames import *\nfrom gui import GuiManager\nfrom util import *\nfrom core import *\n\nclass MainAPP(wx.App):\n\n\t@debug_name\n\tdef OnInit(self):\n\t\tself.manager = GuiManager(self.UpdateUI)\n\t\tself.frame = self.manager.GetFrame('initframe', 'INIT')\n\t\tself.frame.Show()\n\t\treturn True\n\n\tdef UpdateUI(self, name, typ, **kwargs):\n\t\tself.frame.Show(False)\n\t\t# self.frame.DestroyLater()\n\t\tself.frame = self.manager.GetFrame(name, typ, **kwargs)\n\t\tself.frame.Show(True)\n\t#\n\t# @debug_name\n\tdef MacOpenFile(self, filename):\n\t\tif filename.endswith('.rl'):\n\t\t\t# 获得一个db并连接\n\t\t\tfilepath = filename\n\t\t\t# parent = event.GetEventObject()\n\t\t\t# is_new = parent.GetName() == 'newdb'\n\t\t\t# if is_new:\n\t\t\t# \tif not filepath.endswith(\".rl\"):\n\t\t\t# \t\tfilepath += \".rl\"\n\t\t\t# \t# import os\n\t\t\t# \tif os.path.exists(filepath):\n\t\t\t# \t\tres = wx.MessageBox(\"已存在同名Reading List,是否覆盖?\", \"Warning\", wx.YES_NO | wx.ICON_EXCLAMATION)\n\t\t\t# \t\tif res == wx.NO:\n\t\t\t# \t\t\tevent.SkipEvent()\n\t\t\t# \t\telse:\n\t\t\t# \t\t\tos.remove(filepath)\n\t\t\t# 创建连接\n\t\t\tres = core.safe_connect(filepath)\n\t\t\tif res != None:\n\t\t\t\twx.MessageBox(u\"连接到数据库失败,原因:%s\" % res, \"Error\", wx.OK | wx.CENTRE | wx.ICON_ERROR)\n\t\t\t# 读取主界面需要的数据\n\t\t\t# OpenTable(DB_conn, '123')\n\t\t\t# 切换到主界面\n\t\t\telse:\n\t\t\t\tself.manager.UpdateUI('mainframe', 'MAIN')\n\n\napp = MainAPP()\napp.MainLoop()\n\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "wx.App", "line_number": 14, "usage_type": "attribute"}, {"api_name": "gui.GuiManager", "line_number": 18, "usage_type": "call"}, {"api_name": "core.safe_connect", "line_number": 47, "usage_type": "call"}, {"api_name": "wx.MessageBox", "line_number": 49, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 49, "usage_type": "attribute"}, {"api_name": "wx.CENTRE", "line_number": 49, "usage_type": "attribute"}, {"api_name": "wx.ICON_ERROR", "line_number": 49, "usage_type": "attribute"}]} +{"seq_id": "589489489", "text": "import gym\nimport tensorflow as tf\nimport random\nimport numpy as np\nimport 
time\nfrom statistics import mean, median\n\nenv = gym.make('CartPole-v0').env\nenv.reset()\ngoal_steps = 1000\nscore_requirement = 200\ninitial_games = 1000\nloaded_model = tf.keras.models.load_model('Cartpole.h5')\nPTAS = 0\n\ndef model_data_preparation():\n training_data = []\n scores = []\n accepted_scores = []\n for game_index in range(initial_games): \n score = 0\n game_memory = []\n previous_observation = []\n for step_index in range(goal_steps):\n #env.render() \n if len(previous_observation) == 0:\n action = random.randrange(0, 2)\n observation, reward, done, info = env.step(action)\n else:\n action = np.argmax(loaded_model.predict(observation.reshape(-1, len(observation)))[0])\n observation, reward, done, info = env.step(action)\n game_memory.append([previous_observation, action]) \n previous_observation = observation\n score += reward\n if done:\n env.reset\n break \n if score >= score_requirement:\n accepted_scores.append(score)\n for data in game_memory:\n if data[1] == 1:\n output = [0, 1]\n elif data[1] == 0:\n output = [1, 0]\n training_data.append([data[0], output]) \n env.reset()\n scores.append(score)\n print ('episode '+ str(game_index + 1) + ' of ' + str(initial_games) + '-> score: ' + str(score))\n print(accepted_scores) \n print(scores)\n PTAS = sum(scores)/len(scores)\n print('Pre Train Average Score: ', PTAS) \n return training_data\n\ndef build_model(input_size, output_size):\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Dense(128, input_dim = input_size, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dropout(0.2))\n model.add(tf.keras.layers.Dense(256, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dropout(0.2))\n model.add(tf.keras.layers.Dense(512, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dropout(0.2))\n model.add(tf.keras.layers.Dense(256, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dropout(0.2))\n model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dropout(0.2))\n model.add(tf.keras.layers.Dense(output_size, activation=tf.nn.softmax))\n model.compile(loss= tf.keras.losses.categorical_crossentropy, optimizer=tf.keras.optimizers.Adam(lr=1e-3, decay=1e-5))\n return model\n\ndef train_model(training_data):\n x = np.array([i[0] for i in training_data]).reshape(-1, len(training_data[0][0]))\n y = np.array([i[1] for i in training_data]).reshape(-1, len(training_data[0][1]))\n model = build_model(input_size=len(x[0]), output_size=len(y[0]))\n model.fit(x, y, epochs=10, callbacks=[tb])\n return model\n \ntb = tf.keras.callbacks.TensorBoard('./logs/OpenAI_Gym_Cartpole_Selftrained'+ time.strftime(\"%Y%m%d%H%M\") ) \ntraining_data = model_data_preparation()\ntrained_model = train_model(training_data)\nscores = []\nchoices = []\nenv.reset\nfor each_game in range(initial_games):\n score = 0\n prev_obs = []\n for step_index in range(goal_steps):\n #env.render()\n if len(prev_obs)==0:\n action = random.randrange(0,2)\n else:\n action = np.argmax(trained_model.predict(prev_obs.reshape(-1, len(prev_obs)))[0]) \n choices.append(action)\n new_observation, reward, done, info = env.step(action)\n prev_obs = new_observation\n score+=reward\n if done:\n break\n env.reset()\n scores.append(score)\n print ('episode '+ str(each_game + 1) + ' of ' + str(initial_games) + '-> score: ' + str(score))\nprint(scores)\nTAS = sum(scores)/len(scores)\nprint('Train Average Score:', TAS)\nprint('choice 1:{} choice 0:{}'.format(choices.count(1)/len(choices),choices.count(0)/len(choices)))\nif PTAS <= TAS:\n 
print('Training Average Score is higher than Pre Training Average Score')\n print('Saving Model...')\n tf.keras.models.save_model(trained_model, filepath='Cartpole.h5')\nelse:\n print('Training Average Score is lower than Pre Training Average Score')\n print('Not Saving Model, set score requirement equal to Pre Training Average Score')", "sub_path": "OpenAI_Gym_Cartpole_SelfTraining.py", "file_name": "OpenAI_Gym_Cartpole_SelfTraining.py", "file_ext": "py", "file_size_in_byte": 4450, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "gym.make", "line_number": 8, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 13, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 68, "usage_type": 
"attribute"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.TensorBoard", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 78, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 78, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.save_model", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 109, "usage_type": "attribute"}]} +{"seq_id": "547278292", "text": "# -*- coding: utf-8 -*-\nimport hashlib\nimport json\nimport logging\nimport time\nimport redis\nimport requests\nfrom pymongo import MongoClient\nfrom requests.auth import HTTPProxyAuth\n\n\nclass Aid(object):\n\n def __init__(self):\n self.num = 0\n self.url = 'https://do.comfire.cn/caihui/anchor/getAnchorInfoForNickName'\n self.liveId_url = 'https://do.comfire.cn/caihui/anchor/getAnchorHistoryInfo'\n self.goods_list_url = 'https://do.comfire.cn/caihui/liveInfo/getGoodsList'\n self.r = redis.StrictRedis(host='192.168.1.45', port=6379, db=0, password='admin')\n self.headers = {\n 'Content-Type': 'application/json;charset=UTF-8', 'ETag': '1387aa53', 'Referer': 'https://www.hh1024.com/',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'\n }\n self.timestamp = int(time.time()*1000)\n self.token = '5150b38f29904b4836a21c355189a43ae0a873b31c7881232a4f4730d1bc344d' # 登录用户唯一token,可能在一段时间后会改变\n\n self.mongo_port = 32766\n self.db = MongoClient('192.168.1.180', port=self.mongo_port)['pltaobao']\n self.collection = self.db['hot_anchor_info']\n # self.db = MongoClient('192.168.1.45')['pltaobao']\n # self.collection = self.db['anchor_info']\n\n def get_data(self):\n words = self.r.smembers('AnchorNickname')\n for word in words:\n self.num += 1\n print(self.num)\n param = {\n \"anchorEntry\": {\"searchWord\": str(word.decode('utf-8'))},\n \"pageEntry\": {\"pageSize\": 100, \"pageNum\": 1, \"sortBy\": 1}\n }\n params = {\n \"tenant\": \"caihui\",\n \"timestamp\": self.timestamp,\n \"token\": self.token,\n \"sign\": self.data_sha256(param),\n \"param\": param\n }\n response = requests.post(url=self.url, headers=self.headers, data=json.dumps(params)).json().get('preload').get('results')\n # print(response)\n if response:\n for index in response:\n id = index.get('anchorId')\n nickname = index.get('anchorName')\n fansCount = index.get('fansNum')\n anchorPhoto = index.get('picUrl')\n if id:\n self.save_data(id, nickname, fansCount, anchorPhoto)\n else:\n continue\n\n def get_anchor_info(self, days):\n '''\n :param days: 3,7,30\n :return:\n '''\n num = 0\n try:\n days = int(days)\n except:\n raise\n\n while 1:\n result = self.r.spop('anchorId_pop')\n if not result:\n break\n id = result.decode('utf-8').split(':')[0]\n name = result.decode('utf-8').split(':')[1]\n fansnum = result.decode('utf-8').split(':')[2]\n\n try:\n fansnum = int(fansnum)\n except:\n fansnum = 0\n\n param = {\n \"anchorLiveHistoryEntry\": {\"anchorId\": str(id), \"days\": days}\n }\n params = {\n \"tenant\": \"caihui\",\n \"timestamp\": self.timestamp,\n \"token\": self.token,\n \"sign\": self.data_sha256(param),\n 
\"param\": param\n }\n response = requests.post(url=self.liveId_url, headers=self.headers, data=json.dumps(params)).json().get('preload')\n result = response.get('result')\n\n if result:\n item = {}\n item['anchorId'] = id\n item['anchorName'] = name\n item['area'] = result.get('area')\n item['fansNum'] = fansnum\n\n d = result.get('itemCatDistributionDTOS')\n\n if not result.get('itemCatDistributionDTOS'):\n catName = None\n else:\n res = sorted(d, key=lambda x:x['itemNum'], reverse=True)\n catName = res[0].get('catName')\n item['catName'] = catName\n\n data = result.get('clickTrendDTOS')\n\n if data:\n pvQuantity_list = []\n quantity_list = []\n for i in data:\n pvQuantity_list.append(i.get('pvQuantity'))\n quantity_list.append(i.get('pvQuantity'))\n pvQuantity = sum(pvQuantity_list)/len(pvQuantity_list)\n quantity = sum(quantity_list)/len(quantity_list)\n else:\n pvQuantity = None\n quantity = None\n item['pvQuantity'] = pvQuantity\n item['quantity'] = quantity\n response = self.db['anchor_analysis_data'].find_one({\"_id\": id})\n if not response:\n self.db['anchor_analysis_data'].insert_one(item)\n num += 1\n print(num)\n\n\n\n\n\n\n def get_liveid(self, days):\n '''\n :param days: 3,7,30\n :return:\n '''\n\n try:\n days = int(days)\n except:\n raise\n\n while 1:\n id = self.r.spop('anchorId_liveid')\n if not id:\n break\n param = {\n \"anchorLiveHistoryEntry\": {\"anchorId\":str(id.decode('utf-8')), \"days\": days}\n }\n params = {\n \"tenant\": \"caihui\",\n \"timestamp\": self.timestamp,\n \"token\": self.token,\n \"sign\": self.data_sha256(param),\n \"param\": param\n }\n time.sleep(4)\n response = requests.post(url=self.liveId_url, headers=self.headers, data=json.dumps(params), proxies={'http': 'http-dyn.abuyun.com:9020'}, auth=HTTPProxyAuth('HG3T29V0U33H432D', 'CF9328D54686ED24')).json().get('preload')\n if response.get('result') and response.get('result').get('liveScene'):\n response = response.get('result')\n for index in range(response.get('liveScene')):\n self.r.sadd('anchorId:liveId_7', f'{id.decode(\"utf-8\")}:{response.get(\"liveIdList\")}')\n\n # liveid = response.get('liveIdList')[index]\n # # livetime = response.get('avgPvTrendDTOS')[index].get('liveTime')\n # r2 = redis.StrictRedis(host='192.168.1.180', port=30378)\n # r2.sadd('anchorId:liveId', f'{id.decode(\"utf-8\")}:{liveid}')\n # # self.save_liveid(liveid, str(id.decode('utf-8')), days)\n # # self.save_liveid(liveid, str(id.decode('utf-8')), response.get('liveIdList'))\n\n def get_goods_data(self):\n while 1:\n liveid = self.r.spop('liveid')\n if not liveid:\n break\n else:\n self.num += 1\n print(self.num)\n param = {\n \"itemEntry\": {\"liveIdList\": [liveid.decode('utf-8')]},\n \"pageEntry\": {\"pageNum\": 1, \"pageSize\": 500, \"sortBy\": 1}\n }\n params = {\n \"tenant\": \"caihui\",\n \"timestamp\": self.timestamp,\n \"token\": self.token,\n \"sign\": self.data_sha256(param),\n \"param\": param\n }\n response = requests.post(url=self.goods_list_url, headers=self.headers, data=json.dumps(params)).json()\n # print(response)\n if response.get('preload') and response.get('preload').get('results'):\n liveid = liveid.decode('utf-8')\n liveid_goods = response.get('preload').get('results')\n self.save_liveid_goods(liveid, liveid_goods)\n else:\n self.FailGetLiveData(liveid.decode('utf-8'))\n time.sleep(1.5)\n\n def get_goods_data_7(self):\n while 1:\n anchorId_liveid = self.r.spop('anchorId:liveId_7')\n if not anchorId_liveid:\n break\n else:\n self.num += 1\n print(self.num)\n anchorId = 
anchorId_liveid.decode('utf-8').split(\":\")[0]\n liveid = anchorId_liveid.decode('utf-8').split(\":\")[1]\n param = {\n \"itemEntry\": {\"liveIdList\": liveid},\n \"pageEntry\": {\"pageNum\": 1, \"pageSize\": 1000, \"sortBy\": 1}\n }\n params = {\n \"tenant\": \"caihui\",\n \"timestamp\": self.timestamp,\n \"token\": self.token,\n \"sign\": self.data_sha256(param),\n \"param\": param\n }\n response = requests.post(url=self.goods_list_url, headers=self.headers, data=json.dumps(params), proxies={'http': 'http-dyn.abuyun.com:9020'}, auth=HTTPProxyAuth('HG3T29V0U33H432D', 'CF9328D54686ED24')).json()\n # print(response)\n if response.get('preload') and response.get('preload').get('results'):\n liveid_goods = response.get('preload').get('results')\n\n l = []\n\n for i in liveid_goods:\n reservePrice = i.get('reservePrice')\n l.append(reservePrice)\n\n if not self.db['anchorId_reservePrice'].find_one({\"_id\":anchorId}):\n data = {\n '_id': anchorId,\n 'reservePrice': sum(l) / len(l)\n }\n self.db['anchorId_reservePrice'].insert_one(data)\n\n\n def test_get_goods_data(self):\n param = {\n \"itemEntry\": {\"liveIdList\": [231826632761]},\n \"pageEntry\": {\"pageNum\": 1, \"pageSize\": 100, \"sortBy\": 1}\n }\n params = {\n \"tenant\": \"caihui\",\n \"timestamp\": self.timestamp,\n \"token\": self.token,\n \"sign\": self.data_sha256(param),\n \"param\": param\n }\n response = requests.post(url=self.goods_list_url, headers=self.headers, data=json.dumps(params)).json()\n # print(response)\n if response.get('preload').get('results'):\n print(len(response.get('preload').get('results')))\n for d in response.get('preload').get('results'):\n print(d)\n\n def save_data(self, id, name, fansCount, anchorPhoto):\n self.r.sadd('anchorId', str(id))\n self.r.sadd('anchorName', name)\n res = self.collection.find_one({'anchorId': str(id)})\n if not res:\n data = {\n\n 'anchorId': str(id),\n 'anchorName': name,\n 'houseId': None,\n 'fansCount': int(fansCount),\n 'liveCount': None,\n 'city': None,\n 'creatorType': None,\n 'darenScore': None,\n 'descText': None,\n 'anchorPhoto': anchorPhoto,\n 'organId': None,\n 'fansFeature': None,\n 'historyData': None,\n }\n self.collection.insert_one(data) # 插入一条不存在的主播数据\n # else:\n # if res.get('fansCount') == int(fansCount) and res.get('anchorPhoto') == anchorPhoto:\n # pass\n # else:\n # self.collection.update_one({'anchorId': str(id)}, {'$set': {'fansCount': fansCount, 'anchorPhoto': anchorPhoto}}) # 更新已存在的主播数���\n\n def data_sha256(self, param):\n s = 'param'+str(param)+'×tamp='+str(self.timestamp)+'&tenant=caihui&token='+self.token\n sha = hashlib.sha256()\n sha.update(s.encode())\n sign = sha.hexdigest()\n return sign\n\n def save_liveid(self, liveid, anchorId, days):\n\n res = self.db['anchorId_liveId_{}'.format(days)].find_one({'_id': '{}:{}'.format(anchorId, liveid)})\n if not res:\n data = {\n '_id': '{}:{}'.format(anchorId, liveid),\n }\n self.db['anchorId_liveId_{}'.format(days)].insert_one(data)\n\n def save_liveid_goods(self, liveid, result):\n\n res = self.db['LiveIdData'].find_one({'_id': str(liveid)})\n if not res:\n data = {\n '_id': str(liveid),\n 'livedata': result,\n }\n self.db['LiveIdData'].insert_one(data)\n\n def FailGetLiveData(self, id):\n self.r.sadd('FailToGetLiveData', id)\n\n def run(self, days):\n # self.get_data()\n self.get_liveid(days)\n # self.get_goods_data()\n\n\nclass CopySet(object):\n\n def __init__(self, host='192.168.1.45', port=6379, password=None):\n self.host = host\n self.port = port\n self.password = password\n self.r = 
redis.StrictRedis(host=self.host, port=self.port, db=0, password=self.password)\n\n def __copy__(self):\n if self.r.type(self.oldkey).decode('utf-8') != 'set':\n raise logging.error('不是set类型数据')\n values = self.r.smembers(self.oldkey)\n if not values:\n raise logging.error('oldkey不存在')\n if self.r.smembers(self.newkey):\n raise logging.error('newkey已存在')\n\n for value in values:\n self.r.sadd(self.newkey, value)\n return logging.info('成功')\n\n def run(self,oldkey,newkey):\n self.oldkey = oldkey\n self.newkey = newkey\n self.__copy__()\n\n\n\nif __name__ == '__main__':\n\n # copyset = CopySet(password='admin')\n # copyset.run('anchorId', 'anchorId_pop')\n aid = Aid()\n aid.get_liveid(days=7)", "sub_path": "pinyou/test/live_seven_data.py", "file_name": "live_seven_data.py", "file_ext": "py", "file_size_in_byte": 14143, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "redis.StrictRedis", "line_number": 19, "usage_type": "call"}, {"api_name": "time.time", "line_number": 24, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 49, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 49, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 96, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 96, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 166, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 167, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 167, "usage_type": "call"}, {"api_name": "requests.auth.HTTPProxyAuth", "line_number": 167, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 199, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 199, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 207, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 230, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 230, "usage_type": "call"}, {"api_name": "requests.auth.HTTPProxyAuth", "line_number": 230, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 261, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 261, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 298, "usage_type": "call"}, {"api_name": "redis.StrictRedis", "line_number": 337, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 341, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 344, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 346, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 350, "usage_type": "call"}]} +{"seq_id": "70185413", "text": "# from django.shortcuts import render, get_object_or_404, get_list_or_404\nfrom django.shortcuts import render ,redirect, get_object_or_404, get_list_or_404\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import user_reg_form, UserProfileForm, UserProfileUpdateForm\nfrom .models import Profile\nfrom django.contrib.auth.models import User\n\n# Create your views here.\n\ndef register(request):\n if request.method=='POST':\n form = user_reg_form(request.POST)\n if form.is_valid():\n form.save()\n username= form.cleaned_data.get('username')\n messages.success(request,f'Your Account has been created! 
You are now able to Login')\n return redirect('profile')\n else:\n form=user_reg_form()\n\n return render(request,'user/register.html',{'form': form})\n\n#If User Add New Image to profile\n@login_required\ndef profile(request):\n flag=True\n if request.method == 'POST':\n profile_form = UserProfileForm(request.POST or None, request.FILES or None)\n if profile_form.is_valid():\n profile = profile_form.save(commit=False)\n try:\n profile.user = request.user #.profile\n except Profile.DoesNotExist:\n profile.user = Profile(user=request.user)\n if profile_form.cleaned_data['image']:\n profile.picture = profile_form.cleaned_data[\"image\"]\n else:\n messages.success(request,f'Something is wrong, try again')\n profile.save()\n else:\n print(profile_form.errors)\n else:\n profile_form = UserProfileForm()\n\n return render(request,'user/profile.html',{'form': profile_form,'flag':flag})\n\n#If User Update Profile Image\n@login_required\ndef profileUpdate(request, pk):\n flag=True\n if request.method == 'POST':\n #print(pk)\n server = get_object_or_404(Profile, pk=pk)\n form = UserProfileForm(request.POST or None, request.FILES or None, instance=server)\n if form.is_valid():\n edit = form.save(commit=False)\n edit.save()\n return redirect('profile')\n form = UserProfileForm()\n return render(request,'user/profile.html',{'form': form,'flag':flag})\n\n@login_required\ndef editProfile(request):\n flag=True\n if request.method == 'POST':\n form = UserProfileUpdateForm(request.POST, instance=request.user)\n if form.is_valid():\n form.save()\n messages.success(request, f'Your profile information updated successfully')\n return redirect(profile)\n form = UserProfileUpdateForm(instance = request.user)\n return render(request, 'user/editProfile.html', {'form': form, 'flag':flag} )\n ", "sub_path": "mysite/users/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2772, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "forms.user_reg_form", "line_number": 13, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 17, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 18, "usage_type": "call"}, {"api_name": "forms.user_reg_form", "line_number": 20, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "forms.UserProfileForm", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Profile.DoesNotExist", "line_number": 34, "usage_type": "attribute"}, {"api_name": "models.Profile", "line_number": 34, "usage_type": "name"}, {"api_name": "models.Profile", "line_number": 35, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 39, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 39, "usage_type": "name"}, {"api_name": "forms.UserProfileForm", "line_number": 44, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 46, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 25, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Profile", "line_number": 54, "usage_type": "argument"}, {"api_name": "forms.UserProfileForm", "line_number": 55, "usage_type": "call"}, {"api_name": 
"django.shortcuts.redirect", "line_number": 59, "usage_type": "call"}, {"api_name": "forms.UserProfileForm", "line_number": 60, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 61, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 49, "usage_type": "name"}, {"api_name": "forms.UserProfileUpdateForm", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 70, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 70, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 71, "usage_type": "call"}, {"api_name": "forms.UserProfileUpdateForm", "line_number": 72, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 73, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 63, "usage_type": "name"}]} +{"seq_id": "567267817", "text": "from flask import Flask, render_template\nfrom num2words import num2words #num2words external package\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n return '
This is First Page.
'\n\n@app.route(\"/\")\ndef sample(num):\n num = num2words(num)\n return render_template('input.html', num=num)\n\nif __name__ == \"__main__\":\n app.run()", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 352, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "num2words.num2words", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "602907552", "text": "\"\"\"adding new kmeans parameters\n\nRevision ID: 5bf9db6d7909\nRevises: a13c4b5cc25f\nCreate Date: 2020-10-15 10:01:50.734058\n\n\"\"\"\nfrom alembic import context, op\nfrom sqlalchemy import String, Integer, Text\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.sql import table, column\nimport json\n\n# revision identifiers, used by Alembic.\nrevision = '5bf9db6d7909'\ndown_revision = 'a13c4b5cc25f'\nbranch_labels = None\ndepends_on = None\n\nOFFSET_FIELD = 582\nFORM_KMEANS = 27\nFORM_KMODES = 152\n\n\ndef _insert_operation_form_field():\n tb = table(\n 'operation_form_field',\n column('id', Integer),\n column('name', String),\n column('type', String),\n column('required', Integer),\n column('order', Integer),\n column('default', Text),\n column('suggested_widget', String),\n column('values_url', String),\n column('values', String),\n column('scope', String),\n column('form_id', Integer),\n column('enable_conditions', String),\n )\n data = [\n [OFFSET_FIELD, 'distance', 'TEXT', 0, 9, 'euclidean',\n \"dropdown\", None, json.dumps([\n {'key': 'euclidean', 'value': 'Euclidean'},\n {'key': 'cosine', 'value': 'Cosine'},\n ]), \"EXECUTION\", FORM_KMEANS, None],\n\n [OFFSET_FIELD+1, 'fragmentation', 'INTEGER', 0, 9, None, 'checkbox',\n None, None, 'EXECUTION', FORM_KMODES, None],\n ]\n columns = [c.name for c in tb.columns]\n rows = [dict(list(zip(columns, row))) for row in data]\n op.bulk_insert(tb, rows)\n\n\ndef _insert_operation_form_field_translation():\n tb = table(\n 'operation_form_field_translation',\n column('id', Integer),\n column('locale', String),\n column('label', String),\n column('help', String), )\n\n columns = [c.name for c in tb.columns]\n data = [\n [OFFSET_FIELD, \"en\", \"Distance Measure\", \"The distance measure\"],\n [OFFSET_FIELD, \"pt\", \"Medida de distância\", \"A medida de distância\"],\n\n [OFFSET_FIELD+1, \"en\", \"Reduce fragmentation\",\n \"If enabled, it will reduce the parallelization in favor of the \"\n \"ability to handle small databases.\"],\n [OFFSET_FIELD+1, \"pt\", \"Reduzir a fragmentação\",\n \"Se ativado, irá reduzir a paralelização em favor da capacidade de \"\n \"lidar com pequenas bases\"],\n ]\n rows = [dict(list(zip(columns, row))) for row in data]\n op.bulk_insert(tb, rows)\n\n\nall_commands = [\n\n (_insert_operation_form_field, \"\"\"DELETE FROM operation_form_field\n WHERE id BETWEEN {} AND {}\"\"\".format(OFFSET_FIELD, OFFSET_FIELD+1)),\n\n (_insert_operation_form_field_translation,\n 'DELETE FROM operation_form_field_translation WHERE id BETWEEN {} AND {}'\n .format(OFFSET_FIELD, OFFSET_FIELD+1)),\n]\n\n\ndef upgrade():\n ctx = context.get_context()\n session = sessionmaker(bind=ctx.bind)()\n connection = session.connection()\n\n try:\n for cmd in all_commands:\n if isinstance(cmd[0], str):\n connection.execute(cmd[0])\n elif isinstance(cmd[0], list):\n for row in cmd[0]:\n connection.execute(row)\n else:\n 
cmd[0]()\n except:\n session.rollback()\n raise\n session.commit()\n\n\ndef downgrade():\n ctx = context.get_context()\n session = sessionmaker(bind=ctx.bind)()\n connection = session.connection()\n\n try:\n connection.execute('SET FOREIGN_KEY_CHECKS=0;')\n for cmd in reversed(all_commands):\n if isinstance(cmd[1], str):\n connection.execute(cmd[1])\n elif isinstance(cmd[1], list):\n for row in cmd[1]:\n connection.execute(row)\n else:\n cmd[1]()\n connection.execute('SET FOREIGN_KEY_CHECKS=1;')\n except:\n session.rollback()\n raise\n session.commit()\n", "sub_path": "migrations/versions/5bf9db6d7909_adding_new_kmeans_parameters.py", "file_name": "5bf9db6d7909_adding_new_kmeans_parameters.py", "file_ext": "py", "file_size_in_byte": 3982, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sqlalchemy.sql.table", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 28, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 29, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 30, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 31, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.column", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 32, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 33, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.column", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 34, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.column", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 35, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.column", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 36, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.column", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 37, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.column", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 38, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.column", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 39, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 43, "usage_type": "call"}, {"api_name": "alembic.op.bulk_insert", "line_number": 53, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 53, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.table", "line_number": 57, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.column", "line_number": 59, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 59, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.column", "line_number": 60, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 60, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.column", "line_number": 61, "usage_type": 
"call"}, {"api_name": "sqlalchemy.String", "line_number": 61, "usage_type": "argument"}, {"api_name": "sqlalchemy.sql.column", "line_number": 62, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 62, "usage_type": "argument"}, {"api_name": "alembic.op.bulk_insert", "line_number": 77, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 77, "usage_type": "name"}, {"api_name": "alembic.context.get_context", "line_number": 92, "usage_type": "call"}, {"api_name": "alembic.context", "line_number": 92, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 93, "usage_type": "call"}, {"api_name": "alembic.context.get_context", "line_number": 112, "usage_type": "call"}, {"api_name": "alembic.context", "line_number": 112, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "33064254", "text": "import pandas as pd\nimport numpy as np\n\nimport abc\n\nfrom Protocols import Layer4Protocol\nfrom ExfilData import DataTextureEnum\nfrom NetworkIO.BaseNetworkIO import BaseNetworkIO\n\nfrom typing import Optional, List, Tuple\n\n\nclass BaseEnsembleNetworkIO(BaseNetworkIO):\n \"\"\"\n A base class for a NetworkIO containing other NetworkIOs\n \"\"\"\n def __init__(self, network_ios: List[BaseNetworkIO], baseline_data: Optional[pd.DataFrame] = None):\n \"\"\"\n :param network_ios: a list of the ensembles' NetworkIOs\n :param baseline_data: the baseline data of the communication on each protocol\n \"\"\"\n super().__init__(baseline_data)\n self.network_ios: List[BaseNetworkIO] = network_ios\n\n def mask_ios(self, mask: List[bool]) -> List[BaseNetworkIO]:\n \"\"\"\n Masks the network ios by a list of booleans - returns the ones whose positions had a value of `True`\n\n :param mask: a list of booleans to mask the NetworkIOs\n :return: a list of the networkIOs that had the masking value of `True`\n \"\"\"\n assert len(mask) == len(self.network_ios), \"Mask must be as long as number of network ios\"\n assert any(mask), \"At least one network io must be enabled in the mask\"\n return [network_io for network_io, mask in zip(self.network_ios, mask) if mask]\n\n def ios_subset(self, mask: List[bool]) -> 'BaseEnsembleNetworkIO':\n \"\"\"\n returns a new ensemble of the same type with only the network ios that had `True` in their mask\n :param mask: a list of booleans, a value of `True` means that the io of the same index will be in the subset\n :return: a new ensemble of the same type with only the network ios that had `True` in their mask\n \"\"\"\n masked_ios = self.mask_ios(mask)\n return type(self)(masked_ios, self.baseline_data)\n\n def calc_network_ios_decisions(self, data: bytes, proto: Layer4Protocol,\n data_texture: DataTextureEnum) -> Tuple[bool, ...]:\n \"\"\"\n calculates the decision of each network io in the ensemble over the `send` parameters\n :param data: the data to send in bytes\n :param proto: the protocol over which the data will be sent\n :param data_texture: the data's texture\n :return: a tuple of the NetworkIOs decisions\n \"\"\"\n return tuple([network_io.send(data, proto, data_texture) for network_io in self.network_ios])\n\n def set_baseline_data(self, baseline_data: pd.DataFrame):\n self.baseline_data = baseline_data\n\n for network_io in self.network_ios:\n network_io.set_baseline_data(baseline_data)\n\n def __str__(self) -> str:\n network_ios_names: List[str] = [str(network_io) for network_io in self.network_ios]\n return 
f\"{type(self).__name__}({','.join(network_ios_names)})\"\n\n @abc.abstractmethod\n def send(self, data: bytes, proto: Layer4Protocol, data_texture: DataTextureEnum) -> bool:\n pass\n\n def reset(self):\n for network_io in self.network_ios:\n network_io.reset()\n\n\nclass FullConsensusEnsembleNetworkIO(BaseEnsembleNetworkIO):\n \"\"\"\n Lets data be sent only if all of it's NetworkIOs agreed that it should be sent\n \"\"\"\n def enforce_on_data(self, baseline_data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n enforces all of the contained NetworkIOs on the baseline data\n \"\"\"\n enforced_data = baseline_data.copy()\n\n for network_io in self.network_ios:\n enforced_data = network_io.enforce_on_data(enforced_data)\n\n return enforced_data\n\n def send(self, data: bytes, proto: Layer4Protocol, data_texture: DataTextureEnum) -> bool:\n return all(self.calc_network_ios_decisions(data, proto, data_texture))\n\n\nclass VotingEnsembleNetworkIO(BaseEnsembleNetworkIO):\n \"\"\"\n Decides the send result via a vote between it's NetworkIos\n Each NetworkIO has a voting weight (not all voters must have the same weight)\n \"\"\"\n def __init__(self, network_ios: List[BaseNetworkIO], voting_weights: Optional[List[float]] = None,\n baseline_data: Optional[pd.DataFrame] = None, tie_breaker_result: Optional[bool] = True):\n \"\"\"\n :param network_ios: a list of the ensembles' NetworkIOs\n :param voting_weights: the weight of each IO. defaults to 1/num_of_ios each and normalized to have a sum of 1\n :param baseline_data: the baseline data of the communication on each protocol\n :param tie_breaker_result: the value to return in the case of a voting tie. defaults to `True`\n \"\"\"\n super().__init__(network_ios, baseline_data)\n self.tie_breaker_result: bool = tie_breaker_result\n\n if voting_weights is None:\n voting_weights = [1 / len(network_ios)] * len(network_ios)\n\n if len(voting_weights) != len(network_ios):\n exception_msg = \"Invalid init values for VotingEnsembleNetworkIO, len(voting_weights) != len(network_ios)\"\n exception_msg = f\"{exception_msg} ({len(voting_weights)} != {len(network_ios)})\"\n raise ValueError(exception_msg)\n\n self.voting_weights = np.array(voting_weights)\n # normalize to make the sum 1\n self.voting_weights = self.voting_weights / self.voting_weights.sum()\n\n def ios_subset(self, mask: List[bool]) -> 'BaseEnsembleNetworkIO':\n \"\"\"\n Implemented here because the constructor has different params\n \"\"\"\n masked_ios = self.mask_ios(mask)\n return type(self)(masked_ios, self.voting_weights, self.baseline_data, self.tie_breaker_result)\n\n def send(self, data: bytes, proto: Layer4Protocol, data_texture: DataTextureEnum) -> bool:\n network_ios_votes: Tuple[bool, ...] 
= self.calc_network_ios_decisions(data, proto, data_texture)\n\n vote_values = np.array([int(vote) for vote in network_ios_votes])\n weighted_votes_sum: float = vote_values @ self.voting_weights\n\n if weighted_votes_sum == 0.5:\n return self.tie_breaker_result\n\n return weighted_votes_sum > 0.5\n\n\n\n", "sub_path": "NetworkIO/EnsembleNetworkIO.py", "file_name": "EnsembleNetworkIO.py", "file_ext": "py", "file_size_in_byte": 6085, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "NetworkIO.BaseNetworkIO.BaseNetworkIO", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "NetworkIO.BaseNetworkIO.BaseNetworkIO", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 17, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 17, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 23, "usage_type": "name"}, {"api_name": "NetworkIO.BaseNetworkIO.BaseNetworkIO", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 25, "usage_type": "name"}, {"api_name": "NetworkIO.BaseNetworkIO.BaseNetworkIO", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 36, "usage_type": "name"}, {"api_name": "Protocols.Layer4Protocol", "line_number": 45, "usage_type": "name"}, {"api_name": "ExfilData.DataTextureEnum", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 46, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 56, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 63, "usage_type": "name"}, {"api_name": "Protocols.Layer4Protocol", "line_number": 67, "usage_type": "name"}, {"api_name": "ExfilData.DataTextureEnum", "line_number": 67, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 79, "usage_type": "attribute"}, {"api_name": "Protocols.Layer4Protocol", "line_number": 90, "usage_type": "name"}, {"api_name": "ExfilData.DataTextureEnum", "line_number": 90, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 99, "usage_type": "name"}, {"api_name": "NetworkIO.BaseNetworkIO.BaseNetworkIO", "line_number": 99, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 99, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 100, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 100, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 118, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 122, "usage_type": "name"}, {"api_name": "Protocols.Layer4Protocol", "line_number": 129, "usage_type": "name"}, {"api_name": "ExfilData.DataTextureEnum", "line_number": 129, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 130, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "121664886", "text": "\"\"\"\nMiguel Capllonch Juan\n30 September 2018\nDraw together all the LFPs computed with the different methods:\n - Results from the simulations (RN model)\n - Using the VC conductor theory\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\n\n\n\n# Recording electrodes\nrec_els = {\n\t'E': {'pos': (250, 0, 19000), \n\t\t'lfp_indiv_ly1': {}, 'lfp_indiv_ly2': {}, \n\t\t'lfp_total_ly1': None, 
'lfp_total_ly2': None, \n\t\t'color': 'r'},\n\t'S': {'pos': (0, -250, 19000), \n\t\t'lfp_indiv_ly1': {}, 'lfp_indiv_ly2': {}, \n\t\t'lfp_total_ly1': None, 'lfp_total_ly2': None, \n\t\t'color': 'g'},\n\t'W': {'pos': (-250, 0, 19000), \n\t\t'lfp_indiv_ly1': {}, 'lfp_indiv_ly2': {}, \n\t\t'lfp_total_ly1': None, 'lfp_total_ly2': None, \n\t\t'color': 'b'},\n\t'N': {'pos': (0, 250, 19000), \n\t\t'lfp_indiv_ly1': {}, 'lfp_indiv_ly2': {}, \n\t\t'lfp_total_ly1': None, 'lfp_total_ly2': None, \n\t\t'color': 'k'}\n}\n\n# Stimulating electrode (amp in mA)\ndelay = 0.1\ndur = 0.2\namp = -15.e-3\nstimcurrent = None\nstimelpos = (250, 0, 100)\n\n# Medium conductivity tensor (1/(Ohm*um))\nsigma_x = 1. / (1.211e3 * 1.e-6)\nsigma_y = 1. / (1.211e3 * 1.e-6)\nsigma_z = 1. / (0.175e3 * 1.e-6)\n\n# Functions\ndef compute_lfp(currents, elpos):\n\t\"\"\" Compute the LFP from a time series of currents as recorded by \n\ta point electrode situated at elpos\n\tThe equation is taken from:\n\tNicholson & Freeman (1975) \n\tThe current sources are all the segments in the axon \"\"\"\n\tdx = x - elpos[0]\n\tdy = y - elpos[1]\n\tdz = z - elpos[2]\n\tdenominator = 4. * np.pi * np.sqrt\\\n\t\t(\\\n\t\t\tsigma_y * sigma_z * dx ** 2 + \\\n\t\t\tsigma_z * sigma_x * dy ** 2 + \\\n\t\t\tsigma_x * sigma_y * dz ** 2\\\n\t\t)\n\t# denominator = np.tile(denominator, nt).reshape(currents.shape)\n\tdenominator = denominator.repeat(nt).reshape(currents.shape)\n\t# print dz.shape, (dz ** 2).shape, currents.shape\n\treturn (currents / denominator).sum(axis=0)\n\ndef compute_lfp_fromtimeseries(currents, srcpos, elpos):\n\t\"\"\" Compute the LFP from a time series of currents as recorded by \n\ta point electrode situated at elpos\n\tThe equation is taken from:\n\tNicholson & Freeman (1975) \n\tThis time, there is only one current point source \"\"\"\n\tdx = srcpos[0] - elpos[0]\n\tdy = srcpos[1] - elpos[1]\n\tdz = srcpos[2] - elpos[2]\n\tdenominator = 4. 
* np.pi * np.sqrt\\\n\t\t(\\\n\t\t\tsigma_y * sigma_z * dx ** 2 + \\\n\t\t\tsigma_z * sigma_x * dy ** 2 + \\\n\t\t\tsigma_x * sigma_y * dz ** 2\\\n\t\t)\n\t# denominator = np.tile(denominator, nt).reshape(currents.shape)\n\t# denominator = denominator.repeat(nt).reshape(currents.shape)\n\t# denominator = denominator.repeat(nt)\n\t# print dz.shape, (dz ** 2).shape, currents.shape\n\treturn currents / denominator\n\n# Declare arrays\nnames = {}\nily1 = {}\nily2 = {}\nbalancely1 = {}\nbalancely2 = {}\nlfp_indiv_ly1 = {}\nlfp_indiv_ly2 = {}\nx = []\ny = []\nz = []\nii_fibs = []\n\n# Other parameters\ndt = 0.005\n\n\n\n# Get recordings from file\nrecs = []\n# with open('./recordings_R0P0_noartefacts.csv', 'r') as f:\nwith open('./recordings_R0P0_withartefacts.csv', 'r') as f:\n\tfrl = list(csv.reader(f))\n\tfor row in frl[1:]:\n\t\trecs.append(float(row[1]))\nrecs = np.array(recs)\n\n# Get currents from file\nwith open('./membranecurrents.csv', 'r') as f:\n\tfrl = list(csv.reader(f))\n\tfor i, row in enumerate(frl[1:]):\n\t\tifib = int(row[0])\n\t\tii_fibs.append(ifib)\n\t\tname = row[1]\n\t\tdata = row[5:]\n\t\tndata = len(data) / 2\n\t\tdataly1 = np.array([float(item) for item in data[:ndata]])\n\t\tdataly2 = np.array([float(item) for item in data[ndata:]])\n\t\ttry:\n\t\t\tily1[ifib].append(dataly1.copy())\n\t\t\tily2[ifib].append(dataly2.copy())\n\t\t\tnames[ifib].append(name)\n\t\texcept KeyError:\n\t\t\tnames[ifib] = [name]\n\t\t\tily1[ifib] = [dataly1]\n\t\t\tily2[ifib] = [dataly2]\n\t\tx.append(float(row[2]))\n\t\ty.append(float(row[3]))\n\t\tz.append(float(row[4]))\n\n# Positions from lists to arrays\nx = np.array(x)\ny = np.array(y)\nz = np.array(z)\n\n# Finish setting parameters that depend on the data\ntarray = np.arange(0, dt * ndata, dt)\nnt = len(tarray)\nnsegstotal = len(z)\nstimcurrent = amp * np.ones_like(tarray)\n# stimcurrent[np.where(tarray < delay + dur)] = amp\nstimcurrent[np.where(tarray < delay)] = 0.\nstimcurrent[np.where(tarray > delay + dur)] = 0.\n# stimcurrent = stimcurrent()\n# stimcurrent[np.where(delay < tarray < delay + dur)] = amp\n\n# Positions of the nodes of Ranvier\nzRN = {}\n# and indices corresponding to them\nindsRN = {}\nfor k, v in names.items():\n\tzRN[k] = []\n\tindsRN[k] = []\n\tfor i, vv in enumerate(v):\n\t\tif 'node' in vv:\n\t\t\tzRN[k].append(z[i])\n\t\t\tindsRN[k].append(i)\n\nfor ifib in ii_fibs:\n\n\t# Current balances\n\tily1[ifib] = np.array(ily1[ifib])\n\tily2[ifib] = np.array(ily2[ifib])\n\tbalancely1[ifib] = np.zeros(nt)\n\tbalancely2[ifib] = np.zeros(nt)\n\tfor i_t, t in enumerate(tarray):\n\t\tbalancely1[ifib][i_t] = ily1[ifib][:, i_t].sum()\n\t\tbalancely2[ifib][i_t] = ily2[ifib][:, i_t].sum()\n\n\t# Individual LFPs\n\tfor k, re in rec_els.items():\n\t\tre['lfp_indiv_ly1'][ifib] = compute_lfp(ily1[ifib], re['pos'])\n\t\tre['lfp_indiv_ly2'][ifib] = compute_lfp(ily2[ifib], re['pos'])\n\n# Finally, sum up the individual LFPs of the fibers into a total LFP\n# for each electrode\nfor k, re in rec_els.items():\n\tre['lfp_total_ly1'] = np.zeros(nt)\n\tre['lfp_total_ly2'] = np.zeros(nt)\n\tfor ifib in ii_fibs:\n\t\tre['lfp_total_ly1'] += re['lfp_indiv_ly1'][ifib]\n\t\tre['lfp_total_ly2'] += re['lfp_indiv_ly2'][ifib]\n\t# Add the contribution of the stimulating electrode\n\tre['lfp_total_ly1'] += compute_lfp_fromtimeseries(stimcurrent, \n\t\tstimelpos, re['pos'])\n\tre['lfp_total_ly2'] += compute_lfp_fromtimeseries(stimcurrent, \n\t\tstimelpos, re['pos'])\n\n# What if I sum them?\nresum = re['lfp_total_ly1'] + re['lfp_total_ly2']\n\n# Now 
compare the two curves\nresum_norm = resum / np.abs(resum.max() - resum.min())\nrecs_norm = recs / np.abs(recs.max() - recs.min())\n\n###############################################################################\n# Figures\n\n# Time evolution at some point\nfig, ax = plt.subplots()\nax.plot(tarray, recs, lw=3, label='RN model')\nfor k, re in rec_els.items():\n\tax.plot(tarray, re['lfp_total_ly1'], c=re['color'] , ls='-', \n\t\tlabel=k + '. Layer 1')\n\tax.plot(tarray, re['lfp_total_ly2'], c=re['color'] , ls='--', \n\t\tlabel=k + '. Layer 2')\nax.plot(tarray, resum, 'r', lw=3, label='Sum VC model')\nax.set_xlabel('Time (ms)')\nax.set_ylabel('Extracellular recordings (mV)')\nax.set_title('Extracellular recordings')\nax.legend()\nfig.tight_layout()\n# plt.show()\nfig.savefig('recordings_all.png')\n\nfig, ax = plt.subplots()\nax.plot(tarray, recs_norm, lw=3, label='RN model')\nax.plot(tarray, resum_norm, 'r', lw=3, label='Sum VC model')\nax.set_xlabel('Time (ms)')\nax.set_ylabel('Extracellular recordings (mV)')\nax.set_title('Extracellular recordings (normalised)')\nax.legend()\nfig.tight_layout()\n# plt.show()\nfig.savefig('recordings_all_compare_RN_VC.png')\n", "sub_path": "dataset_04__eph_stim_vs_dist/fig9b/code/data/results/all_lfps.py", "file_name": "all_lfps.py", "file_ext": "py", "file_size_in_byte": 6650, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.pi", "line_number": 57, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 77, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 114, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 224, "usage_type": "name"}]} +{"seq_id": "65438661", "text": "from typing import List, Optional\n\nfrom 
channels.layers import get_channel_layer\nfrom django.http.response import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom ninja import Router, Schema\nfrom pydantic import HttpUrl\n\nchannel_layer = get_channel_layer()\nrouter = Router()\n\n\nclass ConsumerRequest(Schema):\n name: str\n topics: List[str]\n format: str = \"json\"\n auto_offset_reset: str = \"earliest\"\n webhook_url: Optional[HttpUrl]\n\n\n@router.post(\"/\")\nasync def create_consumer(request, consumer: ConsumerRequest):\n await channel_layer.send(\n \"kafka-consume\",\n {\n \"type\": \"kafka.consume\",\n \"topics\": consumer.topics,\n \"name\": consumer.name,\n \"format\": consumer.format,\n \"auto.offset.reset\": consumer.auto_offset_reset,\n \"webhook\": consumer.webhook_url,\n },\n )\n", "sub_path": "consumers/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 887, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "channels.layers.get_channel_layer", "line_number": 9, "usage_type": "call"}, {"api_name": "ninja.Router", "line_number": 10, "usage_type": "call"}, {"api_name": "ninja.Schema", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 18, "usage_type": "name"}, {"api_name": "pydantic.HttpUrl", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "552915144", "text": "import random\nimport string\nfrom django.utils.text import slugify\n\n\ndef random_number_generator():\n main_num = random.randint(1066545465, 6454635465) + random.randint(3066445465, 9786287465)\n print(main_num)\n return main_num\n\n\ndef random_string_generator(size=10, chars=string.ascii_lowercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef unique_slug_generator(instance, new_slug=None):\n \"\"\"\n This is for generating a unique slug for the model and it assumes your instance \n has a model with a slug field and a title character (char) field.\n \"\"\"\n if new_slug is not None:\n slug = new_slug\n else:\n slug = slugify(instance.name)\n\n Klass = instance.__class__\n qs_exists = Klass.objects.filter(slug=slug).exists()\n if qs_exists:\n new_slug = \"{slug}-{randstr}\".format(\n slug=slug,\n randstr=random_string_generator(size=4)\n )\n return unique_slug_generator(instance, new_slug=new_slug)\n print(slug)\n return slug\n\n\ndef valid_username(username):\n \"\"\"\n To validate the Username and Clean it for authentication purpose\n \"\"\"\n # from django.contrib import messages\n special = \"`~!@#$%^&*()_-+=\\{\\}[];':\\\",<.>/? 
\"\n\n for i in username:\n if i in special:\n messages.error(request, 'Username can only contain alphabets')\n return False\n\n return True", "sub_path": "eepicjobs/eepicjobs/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1443, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "random.randint", "line_number": 7, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 12, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 12, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 13, "usage_type": "call"}, {"api_name": "django.utils.text.slugify", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "603152510", "text": "## Package for Web App\nimport streamlit as st\nimport joblib, os\n\n## Package for Classification\nimport string, re\nfrom nltk.tokenize import word_tokenize\nfrom Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory, StopWordRemover, ArrayDictionary\nfrom Sastrawi.Stemmer.StemmerFactory import StemmerFactory\n\n## Package for WordCloud\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\n\n# Add More Stopwords\nstop = ['rp','bj','aku','radar','habibie','indonesia','jokowi','polisi','anak']\n\n# Customized Preprocessing Functions\ndef preprocess_title(titles,more_stop=stop):\n\n # 1. Lowercase and Remove Punctuation\n lowercase_titles = titles.lower()\n no_pun = re.sub(r'[>)}:{\",?+!\\[\\].(<;1234567890]','',lowercase_titles)\n no_pun = re.sub('\\n','',no_pun)\n \n # 2. Remove Stopwords\n stopwords = StopWordRemoverFactory().get_stop_words() + more_stop\n data_stop = ArrayDictionary(stopwords)\n srf = StopWordRemover(data_stop)\n passed = srf.remove(no_pun)\n\n # 3. Stemming Process\n factory = StemmerFactory()\n stemmer = factory.create_stemmer()\n stemmed = stemmer.stem(passed)\n \n return stemmed\n\n## Load Vectorizer\nvectorizer = open('classification models/cv.pkl', 'rb')\ncv = joblib.load(vectorizer)\n\n## Prediction Function\ndef load_model(model,vect_text):\n fixed_model = joblib.load(open(os.getcwd() + '\\\\classification models\\\\' + model, 'rb'))\n proba = fixed_model.predict_proba(vect_text)\n if proba[0][1] > proba[0][0]:\n st.success('Judul Berita Clickbait! Nilai Probabilitas hingga {:.2f}%'.format(proba[0][1]*100))\n else:\n st.success('Judul Berita Clickbait! 
Nilai Probabilitas hingga {:.2f}%'.format(proba[0][0]*100))\n return fixed_model\n\ndef main():\n # Clickbait News App\n st.title('Aplikasi NLP dan ML untuk Bahasa Indonesia')\n st.subheader('Pilih aktivitas yang ingin dilakukan pada sidebar')\n\n activity = ['Klasifikasi Judul Clickbait', 'Visualisasi Word Cloud']\n task = st.sidebar.selectbox('Pilih Aktivitas', activity)\n st.sidebar.subheader('Tentang Aplikasi')\n st.sidebar.markdown('Aplikasi ini dibuat sebagai contoh penerapan NLP dan ML untuk Bahasa Indonesia menggunakan Python')\n st.sidebar.markdown('''Dibuat Oleh: \n **_Raka Andriawan_** \n **_Reynaldy Aries Ariyanto_**''')\n \n if task == 'Klasifikasi Judul Clickbait':\n st.info('Klasifikasi Judul Berita Clickbait dengan akurasi hingga 83%')\n title = st.text_area('Tulis Judul Berita','Ketik Disini')\n model_ml = ['Naive Bayes','Random Forest','Support Vector Machine']\n model_chosen = st.selectbox('Pilih Model ML', model_ml)\n \n if st.button('Klasifikasi'):\n vect_title = cv.transform([title])\n if model_chosen == 'Naive Bayes':\n estimator = load_model('mnb_model.pkl',vect_title)\n if model_chosen == 'Random Forest':\n estimator = load_model('rf_model.pkl',vect_title)\n if model_chosen == 'Support Vector Machine':\n estimator = load_model('svm_model.pkl',vect_title)\n \n if task == 'Visualisasi Word Cloud':\n st.info('Word Cloud dengan Stopword dan Stemmer untuk Bahasa Indonesia')\n article = st.text_area('Tulis Teks','Ketik Disini')\n stopword = st.text_area('Tambah Kata Stop','Ketik Disini')\n \n if st.button('Visualisasi'):\n stopword = stopword.split(' ')\n clean_article = preprocess_title(article,stopword)\n text_cloud = WordCloud(background_color='white',colormap='plasma').generate(clean_article)\n plt.imshow(text_cloud, interpolation='bilinear')\n plt.axis('off')\n st.pyplot(plt)\n \nif __name__ == '__main__':\n main()", "sub_path": "nlp_app.py", "file_name": "nlp_app.py", "file_ext": "py", "file_size_in_byte": 3775, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "re.sub", "line_number": 23, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 24, "usage_type": "call"}, {"api_name": "Sastrawi.StopWordRemover.StopWordRemoverFactory.StopWordRemoverFactory", "line_number": 27, "usage_type": "call"}, {"api_name": "Sastrawi.StopWordRemover.StopWordRemoverFactory.ArrayDictionary", "line_number": 28, "usage_type": "call"}, {"api_name": "Sastrawi.StopWordRemover.StopWordRemoverFactory.StopWordRemover", "line_number": 29, "usage_type": "call"}, {"api_name": "Sastrawi.Stemmer.StemmerFactory.StemmerFactory", "line_number": 33, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 41, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 45, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 45, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 48, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 50, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 55, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 56, "usage_type": "call"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 59, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 59, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.subheader", "line_number": 60, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 60, "usage_type": "attribute"}, {"api_name": 
"streamlit.sidebar.markdown", "line_number": 61, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 61, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.markdown", "line_number": 62, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 62, "usage_type": "attribute"}, {"api_name": "streamlit.info", "line_number": 67, "usage_type": "call"}, {"api_name": "streamlit.text_area", "line_number": 68, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 70, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 72, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 82, "usage_type": "call"}, {"api_name": "streamlit.text_area", "line_number": 83, "usage_type": "call"}, {"api_name": "streamlit.text_area", "line_number": 84, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 86, "usage_type": "call"}, {"api_name": "wordcloud.WordCloud", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "argument"}]} +{"seq_id": "211832512", "text": "# Import the Libraries\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\n# Import the Dataset\r\ntrain_data = pd.read_csv(\"train.csv\")\r\ntest_data = pd.read_csv(\"test.csv\")\r\nsample = pd.read_csv(\"sample_submission.csv\")\r\n\r\n# Explarotory Data Analysis\r\ndef eda(input_data):\r\n print(\"The shape of this data = \", input_data.shape)\r\n print(\"The detail information of this data\", input_data.describe())\r\n type(input_data)\r\n print(\"The 5 top : \", input_data.head(5))\r\n print(\"Information: \", input_data.info)\r\n# Create the Values\r\neda(train_data)\r\neda(test_data)\r\neda(sample)\r\n\r\n# Printing the Value Counts\r\nprint(train_data.target.value_counts())\r\n\r\n# Visualization of the dataset\r\ndef visualize(input_data, id):\r\n # Reading the Dataset\r\n true = 0 \r\n false = 0 \r\n for i in input_data.iloc[:,id]:\r\n if(i==1):\r\n true +=1\r\n else:\r\n false +=1\r\n \r\n X = [\"True\",\"False\"]\r\n y = [true, false]\r\n # Analysis information\r\n chart = sns.barplot(x = X, y = y)\r\n chart.set_xticklabels(labels = X, rotation=45)\r\n chart.set_title(\"The Training Data : Total Number of True & False \")\r\n chart.set(ylabel = \"The total number of true and false\")\r\n chart.set(xlabel = \"The X-Axis\")\r\n# Create the Values\r\nvisualize(train_data, 4)\r\n\r\n# Text Mining in Python\r\nj = \"\"\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem.porter import PorterStemmer\r\nfor i in train_data[\"text\"]:\r\n j = j + i\r\n\r\n# Data Cleansing & Normalizing\r\ntoken = word_tokenize(j)\r\ntoken = [x.lower() for x in token] # Using sequential python for \r\nstopwords = set(stopwords.words('english'))\r\nfiltered = [x for x in token if not x in stopwords] # Remove Stopwords\r\nfiltered = [x for x in filtered if x.isalpha()] # Remove Punctuation\r\n\r\n# Avoiding the Stemming\r\nporter = PorterStemmer()\r\nstemmed = [porter.stem(word) for word in filtered]\r\n\r\n# Lemmatization\r\nfrom nltk.stem import 
WordNetLemmatizer\r\nwordnet_lemmatizer = WordNetLemmatizer()\r\n\r\nlemmatize = [wordnet_lemmatizer.lemmatize(x) for x in stemmed]\r\n\r\n# Importing FreqDist library from nltk and passing token into FreqDist\r\nfrom nltk.probability import FreqDist\r\nfdist = FreqDist(lemmatize)\r\ncheck_data = fdist.most_common(100)[1:10]\r\n\r\n# Convert the data into x and y\r\nx, y = zip(*check_data)\r\nprint(x)\r\nprint(y)\r\n\r\n# Passing the string text into word tokenize \r\nx = list(x)\r\ny = list(y)\r\nsns.barplot(x,y)\r\nplt.title(\"The Most Frequent words\")\r\nplt.show()\r\n\r\n# WordCloud Generation\r\nfrom wordcloud import WordCloud\r\n# Display the generated image:\r\nstr1 = \" \"\r\nstr2 = str1.join(x)\r\nwordcloud = WordCloud().generate(str2)\r\nplt.imshow(wordcloud, interpolation='bilinear')\r\nplt.axis(\"off\")\r\nplt.show()\r\n\r\n\r\n# Using Count Vectorizer\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom nltk.tokenize import RegexpTokenizer\r\ntoken = RegexpTokenizer(r'[a-zA-Z0-9]+')\r\ncv = CountVectorizer(lowercase=True,stop_words='english',ngram_range = (1,1),tokenizer = token.tokenize)\r\n\r\n# Implementing the Feature Extraction to both Training Data and Test Data\r\ntext_counts= cv.fit_transform(train_data['text']) # Obtained from the training data\r\ntesting= cv.transform(test_data['text']) # Obtained from the dataset\r\n\r\n# Differentiate the Training and Testing\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(\r\n text_counts, train_data['target'], test_size=0.25, random_state=1)\r\n\r\n\r\n\r\n# Fitting SVM to the Training set\r\nfrom sklearn.svm import SVC\r\nfrom sklearn import metrics\r\nclassifier = SVC(kernel = 'linear', random_state = 0)\r\nclassifier.fit(X_train, y_train)\r\npredicted = classifier.predict(X_test)\r\nprint(\"SVC Accuracy:\",metrics.accuracy_score(y_test, predicted))\r\n\r\n\r\n # To do Grid Search \r\nparameters = [{'C': [1, 10, 100], 'kernel': ['linear']},\r\n {'C': [1, 10, 100], 'kernel': ['rbf']}]\r\n\r\n # Applying Grid Search to find the best model and the best parameters\r\nfrom sklearn.model_selection import GridSearchCV\r\ngrid_search = GridSearchCV(estimator = classifier, param_grid = parameters, scoring ='accuracy', cv = 10, n_jobs=-1 )\r\ngrid_search = grid_search.fit(X_train, y_train)\r\n\r\n # Finding the best SVC\r\nprint(grid_search.best_estimator_)\r\nprint(grid_search.best_params_)\r\nprint(grid_search.best_score_)\r\n\r\n# Implementing the Evalulation Method\r\nfrom sklearn.metrics import f1_score\r\nf1 = f1_score(y_test, predicted)\r\nprint(\"The f1 score is : \", f1)\r\n\r\n\r\n# finding the best prediction\r\npredicted = classifier.predict(testing)\r\nprint(predicted)\r\nprint(type(predicted))\r\n\r\npredicted = pd.DataFrame({'target' : predicted})\r\n# Generate the values\r\nprint(predicted.target.value_counts())\r\n\r\nX = [\"True\",\"False\"]\r\ny = [predicted.target.value_counts()[0], predicted.target.value_counts()[1]]\r\n\r\n# Analysis information\r\nchart = sns.barplot(x = X, y = y)\r\nchart.set_xticklabels(labels = X, rotation=45)\r\nchart.set_title(\"The Testing Data : Total Number of True & False \")\r\nchart.set(ylabel = \"The total number of true and false\")\r\nchart.set(xlabel = \"True vs False in Testing Data\")\r\n\r\n# Deploy into the sample submission\r\nprint(predicted)\r\nsample = pd.read_csv(\"sample_submission.csv\")\r\n\r\nsample.target = predicted\r\nprint(sample)", "sub_path": "NLP_Started_Final.py", "file_name": 
"NLP_Started_Final.py", "file_ext": "py", "file_size_in_byte": 5316, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "seaborn.barplot", "line_number": 39, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 56, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 58, "usage_type": "name"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 58, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 59, "usage_type": "name"}, {"api_name": "nltk.stem.porter.PorterStemmer", "line_number": 63, "usage_type": "call"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 68, "usage_type": "call"}, {"api_name": "nltk.probability.FreqDist", "line_number": 74, "usage_type": "call"}, {"api_name": "seaborn.barplot", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "wordcloud.WordCloud", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "nltk.tokenize.RegexpTokenizer", "line_number": 103, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 104, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 112, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 120, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 123, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 123, "usage_type": "name"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 132, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 142, "usage_type": "call"}, {"api_name": "seaborn.barplot", "line_number": 159, "usage_type": "call"}]} +{"seq_id": "405310564", "text": "import os\nimport sys\nimport uuid\nimport base64\nimport shutil\nimport hashlib\nimport zipfile\nimport tempfile\n\nimport requests\nfrom Algorithmia.errors import AlgorithmException\n\n# Add this directory to the PYTHONPATH to make demucs lib importable\nthis_dir = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(this_dir)\n\ntry:\n from . import algorithmia_utils\n from . 
import wrapper\nexcept Exception:\n import algorithmia_utils\n import wrapper\n\n\nclass DemucsAPI(algorithmia_utils.BaseAPI):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n this_dir = os.path.dirname(os.path.realpath(__file__))\n self.output_dir = os.path.join(this_dir, \"separated\")\n\n def load_model(self):\n if algorithmia_utils.in_algorithmia:\n model_fpath = algorithmia_utils.get_file(\n \"data://danielfrg/demucs/demucs_extra.th\"\n )\n else:\n model_fpath = \"models/demucs_extra.th\"\n\n if algorithmia_utils.in_algorithmia:\n self.download_ffmpeg()\n\n return wrapper.Demucs(model_fpath)\n\n def download_ffmpeg(self):\n print(\"Downloading ffmpeg and ffprobe\")\n\n import subprocess\n\n output_dir = (\n \"/home/algo/.local/bin\"\n if algorithmia_utils.in_algorithmia\n else \"/Users/danielfrg/Downloads\"\n )\n ffmpeg_url = \"https://github.com/vot/ffbinaries-prebuilt/releases/download/v4.2.1/ffmpeg-4.2.1-linux-64.zip\"\n ffprobe_url = \"https://github.com/vot/ffbinaries-prebuilt/releases/download/v4.2.1/ffprobe-4.2.1-linux-64.zip\"\n\n download_and_unzip(ffmpeg_url, output_dir=output_dir)\n download_and_unzip(ffprobe_url, output_dir=output_dir)\n\n subprocess.check_output([\"chmod\", \"+x\", os.path.join(output_dir, \"ffmpeg\")])\n subprocess.check_output([\"chmod\", \"+x\", os.path.join(output_dir, \"ffprobe\")])\n\n def debug_info(self):\n import subprocess\n import torch as th\n\n bin_dir = (\n \"/home/algo/.local/bin\"\n if algorithmia_utils.in_algorithmia\n else \"/Users/danielfrg/Downloads\"\n )\n\n return {\n \"pytorch_device\": \"cuda\" if th.cuda.is_available() else \"cpu\",\n \"which_ffmpeg\": shutil.which(\"ffmpeg\"),\n \"which_ffprobe\": shutil.which(\"ffprobe\"),\n \"ls_bin\": subprocess.check_output([\"ls\", \"-la\", bin_dir]).decode(\"utf-8\"),\n }\n\n def cached(self, fpath):\n \"\"\"\n Checks if this file has already been processed\n and if it has returns the same format as self.predict()\n\n The output changes if this is run on algorithmia or not\n\n Returns\n -------\n tuple of (unique_id, generated_files)\n \"\"\"\n unique_id = hash_file(fpath)\n sources = [\"bass\", \"drums\", \"other\", \"vocals\"]\n source_exists = []\n output = {}\n\n for source in sources:\n output[source] = f\"{source}.mp3\"\n\n if algorithmia_utils.in_algorithmia:\n username = \"danielfrg\"\n collection = \"demucs_output\"\n fname = f\"{unique_id}-{source}.mp3\"\n file_exists = algorithmia_utils.exists(\n username=username, collection=collection, fname=fname,\n )\n source_exists.append(file_exists)\n\n # Change output if in algorithmia\n output[source] = fname\n else:\n source_exists.append(os.path.exists(fpath))\n fpath = os.path.join(self.output_dir, f\"{unique_id}/{source}.mp3\")\n output[source] = fpath\n\n if not all(source_exists):\n output = None\n\n return unique_id, output\n\n def predict(self, predict):\n if \"fpath\" in predict:\n fpath = predict[\"fpath\"]\n elif \"base64\" in predict:\n tempfile_ = base64_to_file(predict[\"base64\"])\n fpath = tempfile_.name\n else:\n raise AlgorithmException(\"Invalid input json format\")\n\n unique_id, generated_files = self.cached(fpath)\n output_dir = os.path.join(self.output_dir, unique_id)\n\n if not generated_files:\n os.makedirs(output_dir, exist_ok=True)\n generated_files = self.model.separate(fpath, output_dir=output_dir)\n\n if algorithmia_utils.in_algorithmia:\n for source_name, file in generated_files.items():\n fname = os.path.basename(file)\n key = f\"{unique_id}-{fname}\"\n 
algorithmia_utils.upload_file(\n file,\n username=\"danielfrg\",\n collection=\"demucs_output\",\n fname=key,\n )\n\n # Change output if in algorithmia\n generated_files[source_name] = key\n\n generated_files[\"id\"] = unique_id\n return generated_files\n\n\ndef hash_file(target):\n hasher = hashlib.sha256()\n\n with open(target, \"rb\") as f:\n while True:\n data = f.read(65536)\n if not data:\n break\n hasher.update(data)\n\n signature = hasher.hexdigest()\n return signature\n\n\ndef base64_to_file(base64str):\n \"\"\"\n Takes a base64 enconded file and saves it to a temp file\n Returns the NamedTemporaryFile object\n \"\"\"\n decoded = base64.decodebytes(bytearray(base64str, \"utf8\"))\n fp = tempfile.NamedTemporaryFile()\n fp.write(decoded)\n fp.flush()\n return fp\n\n\ndef download_and_unzip(url, output_dir=None):\n local_filename = url.split(\"/\")[-1]\n with requests.get(url, stream=True) as r:\n r.raise_for_status()\n\n fpath = local_filename\n if output_dir:\n fpath = os.path.join(output_dir, local_filename)\n\n with open(fpath, \"wb\") as f:\n for chunk in r.iter_content(chunk_size=8192):\n # If you have chunk encoded response uncomment if\n # and set chunk_size parameter to None.\n # if chunk:\n f.write(chunk)\n\n with zipfile.ZipFile(fpath, \"r\") as zip_ref:\n zip_ref.extractall(output_dir)\n\n return fpath\n", "sub_path": "src/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 6225, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.dirname", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "algorithmia_utils.BaseAPI", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "algorithmia_utils.in_algorithmia", "line_number": 32, "usage_type": "attribute"}, {"api_name": "algorithmia_utils.get_file", "line_number": 33, "usage_type": "call"}, {"api_name": "algorithmia_utils.in_algorithmia", "line_number": 39, "usage_type": "attribute"}, {"api_name": "wrapper.Demucs", "line_number": 42, "usage_type": "call"}, {"api_name": "algorithmia_utils.in_algorithmia", "line_number": 51, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "algorithmia_utils.in_algorithmia", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 74, "usage_type": "attribute"}, {"api_name": "shutil.which", 
"line_number": 75, "usage_type": "call"}, {"api_name": "shutil.which", "line_number": 76, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 77, "usage_type": "call"}, {"api_name": "algorithmia_utils.in_algorithmia", "line_number": 99, "usage_type": "attribute"}, {"api_name": "algorithmia_utils.exists", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "Algorithmia.errors.AlgorithmException", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 133, "usage_type": "call"}, {"api_name": "algorithmia_utils.in_algorithmia", "line_number": 136, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "algorithmia_utils.upload_file", "line_number": 140, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 155, "usage_type": "call"}, {"api_name": "base64.decodebytes", "line_number": 173, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 174, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path", "line_number": 187, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 196, "usage_type": "call"}]} +{"seq_id": "50971040", "text": "import json\nfrom datetime import datetime, timedelta\n\nfrom django.core.urlresolvers import reverse\nfrom django.core.cache import get_cache\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import RedirectView, TemplateView\nfrom django.views.generic.base import ContextMixin\nfrom django.db import models\nfrom django.conf import settings\n\nfrom pokeradio.models import Message\nfrom pokeradio.history.models import ArchiveTrack\nfrom pokeradio.scoring.models import Point\nfrom pokeradio.utils import current_playlist\nfrom django.utils.safestring import mark_safe\n\n\ncache = get_cache('object_cache')\n\n\nclass HomeView(TemplateView, ContextMixin):\n template_name = 'home/index.html'\n\n def get_leaderboard(self):\n cached = cache.get('hp_leaderboard')\n if cached:\n return cached\n\n today = datetime.today().date()\n td = timedelta(days=1)\n period = [today, today + td]\n\n \"\"\" \n Return id with a point instead of \n User.objects.all()\n\n \"\"\"\n ids = Point.objects.values('user_id').filter(created__range=period)\n qs = User.objects.filter(pk__in=ids)\n\n object_list = []\n\n for i in qs:\n points = i.point_set.filter(created__range=period)\n \"\"\" if no points move prevent wasted cycle\"\"\"\n if not points.exists():\n continue\n likes = i.point_set\\\n .filter(action=Point.TRACK_LIKED, created__range=period)\\\n .aggregate(models.Sum('value'))['value__sum']\n dislikes = i.point_set\\\n .filter(action=Point.TRACK_DISLIKED, created__range=period)\\\n .aggregate(models.Sum('value'))['value__sum']\n\n likes = likes if likes else 0\n dislikes = dislikes if dislikes 
else 0\n\n net = likes + dislikes\n\n if likes < 1 or net < 1:\n continue\n\n object_list.append({'user': i,'net': likes + dislikes})\n\n if not len(object_list):\n return None\n\n # Get the highest value\n max_value = float(max(object_list, key=lambda i: i['net'])['net'])\n\n # Find percentage of max value for the week\n for k, v in enumerate(object_list):\n # max is 90 % to give room for arrows\n object_list[k]['net_percent'] = v['net'] / max_value * 90\n\n items = sorted(object_list, key=lambda i: i['net'])\n items.reverse()\n\n cache.set('hp_leaderboard', items[:5], settings.CACHE_LEADERBOARD_HP)\n\n return items[:5]\n\n\n def get_messages(self):\n # General messages for everyone that this user hasn't seen\n messages = Message.objects.exclude(seenby=self.request.user)\\\n .filter(target_to_individuals=False)\n for m in messages:\n m.seenby.add(self.request.user)\n\n data = [m.to_dict() for m in messages]\n\n # Specific messages for this user\n messages = Message.objects.filter(target_to_individuals=True,\n to_be_seen_by=self.request.user)\n\n for m in messages:\n m.to_be_seen_by.remove(self.request.user)\n\n data += [m.to_dict() for m in messages]\n\n return json.dumps(data)\n\n def get_context_data(self, **kwargs):\n c = super(HomeView, self).get_context_data(**kwargs)\n\n blacklist = ArchiveTrack.blacklist.all().values_list('spotify_href',\n flat=True)\n initial_playlist = current_playlist()\n if len(blacklist) == 1:\n blacklist = [blacklist[0], ]\n\n c['initial_playlist'] = mark_safe(json.dumps(initial_playlist))\n c['blacklist'] = json.dumps(map(str, blacklist))\n c['leaderboard'] = self.get_leaderboard()\n c['alerts'] = self.get_messages()\n return c\n\n\nclass WeekArchiveRedirect(RedirectView):\n\n \"\"\" Redirect to a WeekArchiveView for the current week\n \"\"\"\n permanent = False\n pattern = 'scoring:statement_week'\n who = 'me'\n\n def get_redirect_url(self, **kwargs):\n now = datetime.now()\n url_params = {'year': now.year, 'week': now.strftime('%U')}\n if 'who' in kwargs:\n url_params['who'] = kwargs.get('who')\n return reverse(self.pattern, kwargs=url_params)\n\nhome = login_required(HomeView.as_view())\n", "sub_path": "web/pokeradio/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4429, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.core.cache.get_cache", "line_number": 20, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 23, "usage_type": "name"}, {"api_name": "django.views.generic.base.ContextMixin", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 32, "usage_type": "call"}, {"api_name": "pokeradio.scoring.models.Point.objects.values", "line_number": 40, "usage_type": "call"}, {"api_name": "pokeradio.scoring.models.Point.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pokeradio.scoring.models.Point", "line_number": 40, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 41, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 41, "usage_type": "name"}, {"api_name": "pokeradio.scoring.models.Point.TRACK_LIKED", 
"line_number": 51, "usage_type": "attribute"}, {"api_name": "pokeradio.scoring.models.Point", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.Sum", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "pokeradio.scoring.models.Point.TRACK_DISLIKED", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pokeradio.scoring.models.Point", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.Sum", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.conf.settings.CACHE_LEADERBOARD_HP", "line_number": 81, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 81, "usage_type": "name"}, {"api_name": "pokeradio.models.Message.objects.exclude", "line_number": 88, "usage_type": "call"}, {"api_name": "pokeradio.models.Message.objects", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pokeradio.models.Message", "line_number": 88, "usage_type": "name"}, {"api_name": "pokeradio.models.Message.objects.filter", "line_number": 96, "usage_type": "call"}, {"api_name": "pokeradio.models.Message.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pokeradio.models.Message", "line_number": 96, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 104, "usage_type": "call"}, {"api_name": "pokeradio.history.models.ArchiveTrack.blacklist.all", "line_number": 109, "usage_type": "call"}, {"api_name": "pokeradio.history.models.ArchiveTrack.blacklist", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pokeradio.history.models.ArchiveTrack", "line_number": 109, "usage_type": "name"}, {"api_name": "pokeradio.utils.current_playlist", "line_number": 111, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 115, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 115, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 116, "usage_type": "call"}, {"api_name": "django.views.generic.RedirectView", "line_number": 122, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 131, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 131, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 135, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "319989885", "text": "import sys, random\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef main():\n if len(sys.argv) < 2:\n print(\"Use: python3 {} [outputFile2]\".format(sys.argv[0]))\n s = \"When the second output file is included, the ratio between the\"\n s += \" two solutions is shown.\"\n print(s)\n return 1\n \n # Get input file\n inputFile = sys.argv[1]\n if len(sys.argv) > 2:\n inputFile2 = sys.argv[2]\n isSynchronous = input(\"Synchronous division? 
(y/n): \")\n isSynchronous = True if isSynchronous == \"y\" else False\n else:\n inputFile2 = None\n isSynchronous = None\n \n # Read the runs\n with open(inputFile, \"r\") as fread:\n tArray = [float(x) for x in fread.readline().split()]\n \n if inputFile2 is not None:\n fread2 = open(inputFile2, \"r\")\n if isSynchronous:\n fread2.readline() # Remove the tArray from this one\n \n # Read each run\n results = []\n for line in fread:\n if inputFile2 is not None:\n line2 = fread2.readline().split()\n results.append([float(x)/(1e-200 + float(y)) for x, y in\n zip(line.split(), line2)])\n else:\n results.append([float(x) for x in line.split()])\n \n if inputFile2 is not None:\n fread2.close()\n # If not synchronous, remove first result (divided by tArray!)\n if not isSynchronous:\n results.pop(1)\n \n # Take one random example for the plot\n oneExample = results[np.random.choice(len(results))]\n \n # Transpose the results for calculations\n results = np.transpose(results)\n \n # Calculate statistics\n median = [np.median(x) for x in results]\n sig1p = [np.percentile(x, 50 + 34) for x in results]\n sig1n = [np.percentile(x, 50 - 34) for x in results]\n sig2p = [np.percentile(x, 50 + 47.5) for x in results]\n sig2n = [np.percentile(x, 50 - 47.5) for x in results]\n maxNum = [max(x) for x in results]\n minNum = [min(x) for x in results]\n \n # Choose RGB colors for:\n # 1 sigma\n innerColor = (0.1, 0.1, 0.1)\n \n # 2 Sigma\n outerColor = (0.4, 0.4, 0.4)\n \n # Extremes\n gray = (0.8, 0.8, 0.8)\n \n # Get minimum time for plotting\n lowTime = 2000\n for ii in range(len(tArray)):\n if tArray[ii] >= lowTime:\n break\n \n # Plot outside in\n plt.fill_between(tArray[ii:], minNum[ii:], maxNum[ii:], color = gray,\n label = \"Full\")\n plt.fill_between(tArray[ii:], sig2n[ii:], sig2p[ii:], color = outerColor,\n label = \"95%\")\n plt.fill_between(tArray[ii:], sig1n[ii:], sig1p[ii:], color = innerColor,\n label = \"68%\")\n plt.plot(tArray[ii:], median[ii:], \"y-\", label = \"Median\")\n plt.plot(tArray[ii:], oneExample[ii:], \"b-\", label = \"One run\")\n \n # Plot log only if there is more than one order of magnitude\n if min(sig2n[ii:]) < 0.1*max(sig2p[ii:]):\n plt.yscale(\"log\")\n \n plt.xlabel(\"time [Myr]\")\n plt.ylabel(\"Mass (arbitrary)\")\n plt.legend()\n plt.show()\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "output/visualizeOutput.py", "file_name": "visualizeOutput.py", "file_ext": "py", "file_size_in_byte": 3240, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.transpose", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 59, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.fill_between", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}]} +{"seq_id": "146452487", "text": "import torch\r\nfrom EfficientNet.EfficientNet import EfficientNet,MBConvBlock\r\nfrom torch import nn\r\nfrom torchvision import transforms\r\nfrom EfficientNet.utils import (\r\n round_filters,\r\n round_repeats,\r\n drop_connect,\r\n get_same_padding_conv2d,\r\n get_model_params,\r\n efficientnet_params,\r\n load_pretrained_weights,\r\n Swish,\r\n MemoryEfficientSwish,\r\n calculate_output_image_size\r\n)\r\n\r\nclass LYCNet(nn.Module):\r\n def __init__(self,block_args=None,global_params = None):\r\n super().__init__()\r\n self._block_args = block_args\r\n self._global_params = global_params\r\n\r\n #BN的两个参数\r\n self.bn_mom = 1 - self._global_params.batch_norm_momentum\r\n self.bn_eps = self._global_params.batch_norm_epsilon\r\n\r\n image_size = global_params.image_size\r\n Conv2d = get_same_padding_conv2d(image_size=image_size)\r\n self.scales = [[16,112],[24,56],[40,28],[112,14],[1280,7]]\r\n\r\n in_channels = 3\r\n out_channels = round_filters(32,self._global_params)\r\n self._conv_stem = Conv2d(in_channels,out_channels,kernel_size=3,stride=2, bias=False)\r\n self._bn0 = nn.BatchNorm2d(num_features=out_channels,eps=self.bn_eps,momentum=self.bn_mom)\r\n #计算输出图像大小 使用Conv2dSamePadding with a stride 2\r\n image_size = calculate_output_image_size(image_size,2)\r\n\r\n #Build blocks\r\n self._blocks = nn.ModuleList([])\r\n #第一个for循环是多少个BLOCK,第二个num_repeat是指一个block中是否重复添加MBConv\r\n for block_args in self._block_args:\r\n block_args = block_args._replace(\r\n input_filters=round_filters(block_args.input_filters, self._global_params),\r\n output_filters=round_filters(block_args.output_filters, self._global_params),\r\n num_repeat=round_repeats(block_args.num_repeat, self._global_params)\r\n )\r\n\r\n self._blocks.append(MBConvBlock(block_args,self._global_params,image_size=image_size))\r\n image_size = calculate_output_image_size(image_size,block_args.stride)\r\n if block_args.num_repeat > 1:\r\n 
block_args = block_args._replace(input_filters = block_args.output_filters, stride =1)\r\n for _ in range(block_args.num_repeat -1):\r\n self._blocks.append(MBConvBlock(block_args, self._global_params, image_size=image_size))\r\n\r\n # head\r\n in_channels = block_args.output_filters\r\n out_channels = round_filters(1280, self._global_params)\r\n Conv2d = get_same_padding_conv2d(image_size=image_size)\r\n self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)\r\n self._bn1 = nn.BatchNorm2d(num_features=out_channels,momentum=self.bn_mom,eps=self.bn_eps)\r\n\r\n # Final linear layer\r\n self._avg_pooling = nn.AdaptiveAvgPool2d(1)\r\n self._dropout = nn.Dropout(self._global_params.dropout_rate)\r\n self._fc = nn.Linear(out_channels, self._global_params.num_classes)\r\n self._swish = MemoryEfficientSwish()\r\n naive_downsample_layers = self.add_adaptive_layers(self.scales)\r\n self.downsample_layers1 = naive_downsample_layers['reduction_1']\r\n self.downsample_layers2 = naive_downsample_layers['reduction_2']\r\n self.downsample_layers3 = naive_downsample_layers['reduction_3']\r\n self.downsample_layers4 = naive_downsample_layers['reduction_4']\r\n\r\n\r\n\r\n def set_swish(self, memory_efficient=True):\r\n \"\"\"Sets swish function as memory efficient (for training) or standard (for export).\r\n Args:\r\n memory_efficient (bool): Whether to use memory-efficient version of swish.\r\n \"\"\"\r\n self._swish = MemoryEfficientSwish() if memory_efficient else Swish()\r\n for block in self._blocks:\r\n block.set_swish(memory_efficient)\r\n\r\n #这里是找到每次block后的那个特征,比如endponits['reduction_1']就是第一个block后提取到的特征\r\n def extract_endpoints(self, inputs):\r\n \"\"\"Use convolution layer to extract features\r\n from reduction levels i in [1, 2, 3, 4, 5].\r\n Args:\r\n inputs (tensor): Input tensor.\r\n Returns:\r\n Dictionary of last intermediate features\r\n with reduction levels i in [1, 2, 3, 4, 5].\r\n Example:\r\n >>> import torch\r\n >>> from efficientnet.model import EfficientNet\r\n >>> inputs = torch.rand(1, 3, 224, 224)\r\n >>> model = EfficientNet.from_pretrained('efficientnet-b0')\r\n >>> endpoints = model.extract_endpoints(inputs)\r\n >>> print(endpoints['reduction_1'].shape) # torch.Size([1, 16, 112, 112])\r\n >>> print(endpoints['reduction_2'].shape) # torch.Size([1, 24, 56, 56])\r\n >>> print(endpoints['reduction_3'].shape) # torch.Size([1, 40, 28, 28])\r\n >>> print(endpoints['reduction_4'].shape) # torch.Size([1, 112, 14, 14])\r\n >>> print(endpoints['reduction_5'].shape) # torch.Size([1, 1280, 7, 7])\r\n \"\"\"\r\n endpoints = dict()\r\n\r\n # Stem\r\n x = self._swish(self._bn0(self._conv_stem(inputs)))\r\n prev_x = x\r\n\r\n # Blocks\r\n for idx, block in enumerate(self._blocks):\r\n drop_connect_rate = self._global_params.drop_connect_rate\r\n if drop_connect_rate:\r\n drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate\r\n x = block(x, drop_connect_rate=drop_connect_rate)\r\n if prev_x.size(2) > x.size(2):\r\n endpoints['reduction_{}'.format(len(endpoints) + 1)] = prev_x\r\n prev_x = x\r\n\r\n # Head\r\n x = self._swish(self._bn1(self._conv_head(x)))\r\n endpoints['reduction_{}'.format(len(endpoints) + 1)] = x\r\n\r\n return endpoints\r\n def extract_features(self, inputs):\r\n \"\"\"use convolution layer to extract feature .\r\n Args:\r\n inputs (tensor): Input tensor.\r\n Returns:\r\n Output of the final convolution\r\n layer in the efficientnet model.\r\n \"\"\"\r\n # Stem\r\n x = self._swish(self._bn0(self._conv_stem(inputs)))\r\n\r\n 
# Blocks\r\n for idx, block in enumerate(self._blocks):\r\n drop_connect_rate = self._global_params.drop_connect_rate\r\n if drop_connect_rate:\r\n drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate\r\n x = block(x, drop_connect_rate=drop_connect_rate)\r\n\r\n # Head\r\n x = self._swish(self._bn1(self._conv_head(x)))\r\n\r\n return x\r\n\r\n def add_adaptive_layers(self,scales):\r\n \"\"\"\r\n 主要用于增加第一层到第四层的适应层,该卷积核会将不同大小的feature map转换到\r\n 同样大小[1280,7,7]的大小\r\n :param scales: [[16,112],[24,56],[40,28],[112,14],[1280,7]]\r\n :return: {'reduction_1':nn.ModuleList(),'reduction_2':nn.ModuleList()}\r\n \"\"\"\r\n new_scales = list()\r\n for i in range(len(scales)-1):\r\n y = [x for x in scales[i:]]\r\n new_scales.append(y)\r\n blocks = {}\r\n blocks['reduction_1'] = NaiveDownSampleBlock(new_scales[0],global_params=self._global_params)\r\n blocks['reduction_2'] = NaiveDownSampleBlock(new_scales[1],global_params=self._global_params)\r\n blocks['reduction_3'] = NaiveDownSampleBlock(new_scales[2], global_params=self._global_params)\r\n blocks['reduction_4'] = NaiveDownSampleBlock(new_scales[3], global_params=self._global_params)\r\n return blocks\r\n\r\n \"\"\"\r\n >>> print(endpoints['reduction_1'].shape) # torch.Size([1, 16, 112, 112])\r\n >>> print(endpoints['reduction_2'].shape) # torch.Size([1, 24, 56, 56])\r\n >>> print(endpoints['reduction_3'].shape) # torch.Size([1, 40, 28, 28])\r\n >>> print(endpoints['reduction_4'].shape) # torch.Size([1, 112, 14, 14])\r\n >>> print(endpoints['reduction_5'].shape) # torch.Size([1, 1280, 7, 7])\r\n \"\"\"\r\n def forward(self,inputs):\r\n endpoints = self.extract_endpoints(inputs)\r\n feature1,output1 = self.downsample_layers1(endpoints['reduction_1'])\r\n feature2,output2 = self.downsample_layers2(endpoints['reduction_2'])\r\n feature3,output3 = self.downsample_layers3(endpoints['reduction_3'])\r\n feature4,output4 = self.downsample_layers4(endpoints['reduction_4'])\r\n feature5 = endpoints['reduction_5']\r\n x = self._avg_pooling(feature5)\r\n x = x.flatten(start_dim=1)\r\n x = self._dropout(x)\r\n output5 = self._fc(x)\r\n features = [feature1,feature2,feature3,feature4,feature5]\r\n outputs = [output1,output2,output3,output4,output5]\r\n return features,outputs\r\n\r\n\r\n\r\nclass NaiveDownSampleBlock(nn.Module):\r\n \"\"\"\r\n 要用于增加第一层到第四层的适应层,该卷积核会将不同大小的feature map转换到\r\n 同样大小[1280,7,7]的大小\r\n :param scales: [[16,112],[24,56],[40,28],[112,14],[1280,7]]\r\n :return: {'reduction_1':nn.ModuleList(),'reduction_2':nn.ModuleList()}\r\n \"\"\"\r\n def __init__(self,scales,global_params):\r\n super().__init__()\r\n self._global_params = global_params\r\n bn_mom = 1 - self._global_params.batch_norm_momentum\r\n bn_eps = self._global_params.batch_norm_epsilon\r\n self.block = nn.ModuleList()\r\n for i,scale in enumerate(scales):\r\n if i == len(scales)-1:\r\n break\r\n Conv2d = get_same_padding_conv2d(scale[1])\r\n depthwise_conv = Conv2d(in_channels=scale[0], out_channels=scale[0], groups=scale[0],\r\n kernel_size=3, stride=2, bias=False)\r\n bn1 = nn.BatchNorm2d(num_features=scale[0], momentum=bn_mom,\r\n eps=bn_eps)\r\n swish = MemoryEfficientSwish()\r\n project_conv = Conv2d(in_channels=scale[0], out_channels=scales[i+1][0], kernel_size=1, bias=False)\r\n bn2 = nn.BatchNorm2d(num_features=scales[i+1][0], momentum=bn_mom,\r\n eps=bn_eps)\r\n self.block.append(depthwise_conv)\r\n self.block.append(bn1)\r\n self.block.append(swish)\r\n self.block.append(project_conv)\r\n self.block.append(bn2)\r\n self.bn3 = 
nn.BatchNorm2d(num_features=scales[-1][0],momentum=bn_mom,eps=bn_eps)\r\n self.swish = MemoryEfficientSwish()\r\n self.avg_pooling = nn.AdaptiveAvgPool2d(1)\r\n self.dropout = nn.Dropout(self._global_params.dropout_rate)\r\n self.fc = nn.Linear(1280,self._global_params.num_classes)\r\n\r\n def forward(self,inputs):\r\n x = inputs\r\n for idx,layer in enumerate(self.block):\r\n x = layer(x)\r\n feature_map = self.swish(self.bn3(x))\r\n x = self.avg_pooling(feature_map)\r\n x = x.flatten(start_dim = 1)\r\n x = self.dropout(x)\r\n x = self.fc(x)\r\n return feature_map,x\r\n", "sub_path": "LYCNet.py", "file_name": "LYCNet.py", "file_ext": "py", "file_size_in_byte": 11075, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "torch.nn.Module", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "EfficientNet.utils.get_same_padding_conv2d", "line_number": 29, "usage_type": "call"}, {"api_name": "EfficientNet.utils.round_filters", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "EfficientNet.utils.calculate_output_image_size", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn.ModuleList", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "EfficientNet.utils.round_filters", "line_number": 44, "usage_type": "call"}, {"api_name": "EfficientNet.utils.round_filters", "line_number": 45, "usage_type": "call"}, {"api_name": "EfficientNet.utils.round_repeats", "line_number": 46, "usage_type": "call"}, {"api_name": "EfficientNet.EfficientNet.MBConvBlock", "line_number": 49, "usage_type": "call"}, {"api_name": "EfficientNet.utils.calculate_output_image_size", "line_number": 50, "usage_type": "call"}, {"api_name": "EfficientNet.EfficientNet.MBConvBlock", "line_number": 54, "usage_type": "call"}, {"api_name": "EfficientNet.utils.round_filters", "line_number": 58, "usage_type": "call"}, {"api_name": "EfficientNet.utils.get_same_padding_conv2d", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "EfficientNet.utils.MemoryEfficientSwish", "line_number": 67, "usage_type": "call"}, {"api_name": "EfficientNet.utils.MemoryEfficientSwish", "line_number": 81, "usage_type": "call"}, {"api_name": "EfficientNet.utils.Swish", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 192, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 192, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 204, "usage_type": "name"}, {"api_name": "EfficientNet.utils.get_same_padding_conv2d", "line_number": 208, "usage_type": "call"}, {"api_name": 
"torch.nn.BatchNorm2d", "line_number": 211, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 211, "usage_type": "name"}, {"api_name": "EfficientNet.utils.MemoryEfficientSwish", "line_number": 213, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 215, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 215, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 222, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 222, "usage_type": "name"}, {"api_name": "EfficientNet.utils.MemoryEfficientSwish", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 224, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 224, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 225, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 226, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 226, "usage_type": "name"}]} +{"seq_id": "185759725", "text": "import os\nimport sys\nsys.path.append('../')\nimport random\nimport torch\nimport argparse\nimport pickle\nimport numpy as np\nfrom torch import optim\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torch.utils.data as tdata\nimport data_utils as du\nfrom torchtext.data import Iterator as BatchIter\nfrom s2sa import S2SWithA\nfrom utils import variable\nfrom masked_cross_entropy import masked_cross_entropy\n\ndef get_data_loader(inp_vocab, out_vocab):\n \"\"\"\n train and test loaders are each dict with key as the task_num. Total key length as num_task.\n \"\"\" \n train_dataset = du.S2SSentenceDataset(args.train_data, inp_vocab, out_vocab) \n train_loader = BatchIter(train_dataset, args.batch_size, sort_key=lambda x:len(x.text), train=True, sort_within_batch=True, device=-1)\n \n test_loader = None\n \n return train_loader, test_loader\n\n\ndef train(args):\n\n # Vocabulary will be created once for all the tasks. 
Must include tokens from all the tasks.\n print(\"Loading vocabulary.\")\n inp_vocab = du.sentiment_label_vocab()\n out_vocab = du.load_vocab(args.out_vocab)\n \n print(\"Preparing the data loader.\")\n train_loader, test_loader = get_data_loader(inp_vocab, out_vocab)\n \n if args.load_model:\n print(\"Loading the model\")\n model = torch.load(args.load_model)\n else:\n print(\"Creating the model\")\n model = S2SWithA(args.emb_size, args.enc_hid_size, args.dec_hid_size, inp_vocab, out_vocab, layers=args.nlayers, use_cuda=args.cuda, bidir=args.bidir, dropout=args.dropout)\n\n # do cuda transfer before constructing the optimizer\n if torch.cuda.is_available() and args.cuda:\n print(\"Transferring the model to CUDA.\")\n model.cuda()\n\n # set to train mode\n model.train()\n\n if args.load_opt:\n print(\"Loading the optimizer\")\n optimizer = torch.load(args.load_opt)\n else:\n print(\"Creating the optimizer\")\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n\n\n c_epoch = args.start_epoch\n train_loss = 0.0\n iters_per_epoch = int(np.ceil(len(train_loader.dataset) / float(args.batch_size)))\n for batch_iter, batch in enumerate(train_loader): # continues forever (shuffling every epoch) till args.epochs finished\n \n input, input_lens = batch.text\n target, target_lens = batch.target\n input, input_lens, target, target_lens = variable(input), variable(input_lens), variable(target), variable(target_lens)\n \n #print(\"input {} and input_lens {}, input {}\".format(input, input_lens, du.transform(input.tolist()[0], inp_vocab.itos))) \n #print(\"target {} and target_lens {}, target {}\".format(target, target_lens, du.transform(target.tolist()[0], out_vocab.itos)))\n\n optimizer.zero_grad()\n\n logits = model(input, input_lens, target)\n \n loss = masked_cross_entropy(logits, target, target_lens)\n\n train_loss += loss.item()\n\n loss.backward()\n # gradient clipping\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)\n optimizer.step()\n\n if (batch_iter + 1) % args.log_after == 0:\n avg_train_loss = train_loss / (batch_iter + 1) \n print(\"Iteration {} train_loss {:.4f} **\".format(batch_iter, avg_train_loss))\n\n # end of epoch / after intervals\n if (batch_iter+1) % iters_per_epoch == 0 or (batch_iter + 1) % args.validate_after == 0:\n\n avg_train_loss = train_loss / (batch_iter + 1) \n\n print(\"**Epoch {} iteration {} train_loss {:.4f} **\".format(c_epoch, batch_iter, avg_train_loss)) \n \n if (batch_iter+1) % iters_per_epoch == 0:\n print(\"Saving checkpoint.\\n\") \n torch.save(model, \"models/{}_e{}_itr{}_l{:.4f}\".format(args.expt_name, c_epoch, batch_iter, avg_train_loss))\n torch.save(optimizer, \"models/{}_{}_e{}_itr{}_l{:.4f}\".format(\"optimizer\", args.expt_name, c_epoch, batch_iter, avg_train_loss))\n\n if (batch_iter+1) % iters_per_epoch == 0:\n c_epoch += 1\n\n if c_epoch >= args.epochs:\n print(\"Max epoch {}/{} reached. 
Break\\n\".format(c_epoch, args.epochs))\n break\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='S2SA') \n parser.add_argument('--train_data', type=str, help=\"Pass train files for all the tasks.\")\n #parser.add_argument('--test_data', type=str, help=\"Pass test files for all the tasks\")\n parser.add_argument('--out_vocab', type=str, default=\"\", help='the output vocabulary pickle file')\n parser.add_argument('--emb_size', type=int, default=300, help='size of word embeddings')\n parser.add_argument('--enc_hid_size', type=int, default=512, help='size of encoder hidden')\n parser.add_argument('--dec_hid_size', type=int, default=512, help='size of encoder hidden')\n parser.add_argument('--nlayers', type=int, default=2, help='number of layers')\n parser.add_argument('--optimizer', type=str, default=\"adam\", help='Optimizer to be used.')\n parser.add_argument('--lr', type=float, default=0.0005, help='initial learning rate')\n parser.add_argument('--dropout', type=float, default=0.1, help='dropout')\n parser.add_argument('--bidir', type=bool, default=False, help='Use bidirectional encoder') \n parser.add_argument('--batch_size', type=int, default=32, metavar='N', help='batch size')\n parser.add_argument('--epochs', type=int, default=200)\n parser.add_argument('--validate_after', type=int, default=2000)\n parser.add_argument('--log_after', type=int, default=100)\n parser.add_argument('--clip', type=float, default=5.0, help='gradient clipping')\n parser.add_argument('--seed', type=int, default=11, help='random seed') \n parser.add_argument('--cuda', action='store_true', help='use CUDA')\n #parser.add_argument('--src_seq_len', type=int, default=50, help=\"Maximum source sequence length\")\n #parser.add_argument('--max_decode_len', type=int, default=50, help='Maximum prediction length.')\n parser.add_argument('--expt_name', type=str, default=\"new_expt\", help='Parent folder under which all files will be created fo rthe expt..')\n parser.add_argument('--load_model', type=str)\n parser.add_argument('--load_opt', type=str) \n parser.add_argument('--start_epoch', type=int, default=0)\n args = parser.parse_args()\n print(\"\\nAll args: {}\\n\".format(args))\n\n # Set all the seeds\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n if torch.cuda.is_available() and args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n\n train(args)\n\n", "sub_path": "S2S/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 6686, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "data_utils.S2SSentenceDataset", "line_number": 23, "usage_type": "call"}, {"api_name": "torchtext.data.Iterator", "line_number": 24, "usage_type": "call"}, {"api_name": "data_utils.sentiment_label_vocab", "line_number": 35, "usage_type": "call"}, {"api_name": "data_utils.load_vocab", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 43, "usage_type": "call"}, {"api_name": "s2sa.S2SWithA", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 61, 
"usage_type": "call"}, {"api_name": "torch.optim", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.ceil", "line_number": 66, "usage_type": "call"}, {"api_name": "utils.variable", "line_number": 71, "usage_type": "call"}, {"api_name": "masked_cross_entropy.masked_cross_entropy", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 103, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 145, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 147, "usage_type": "attribute"}, {"api_name": "torch.cuda.manual_seed", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 148, "usage_type": "attribute"}]} +{"seq_id": "454964883", "text": "basePath = '/snfs7/goes/'\nsatellite = 'goes13'\nyear = 2017\nstartDay = 1\nendDay = 10\nfilterBand = 'BAND_01'\nS3InputDir = 'Input/area/'\n\nimport os\n\nimport boto3\n\n\ndef removePrefix(text, prefix):\n return text[text.startswith(prefix) and len(prefix):]\n\n\ndirNames = (basePath + satellite + '/' + str(year) + '/' + \"%03d\" % day for day in range(startDay, endDay + 1))\n\nfullPath = lambda path: [os.path.join(path, fn) for fn in next(os.walk(path))[2]]\nfileNames = map(lambda d: fullPath(d), dirNames)\nfileNames = [item for sublist in fileNames for item in sublist]\nfileNames = list(filter(lambda f: f.endswith(filterBand), fileNames))\n\nbucketName = 'org.cicsnc.albedo'\ns3 = boto3.resource('s3')\n\nfor fullName in fileNames:\n fileName = os.path.basename(fullName)\n print(fileName)\n s3.Bucket(bucketName).upload_file(fullName, S3InputDir + fileName)\n\nprint(len(fileNames))\n", "sub_path": "Albedo/LoadS3.py", "file_name": "LoadS3.py", "file_ext": "py", "file_size_in_byte": 875, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 20, "usage_type": "call"}, {"api_name": "boto3.resource", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}]} +{"seq_id": "418070968", "text": "import sys\nfrom typing import List, Iterable, Dict\n\nfrom list_lib import flatten\nfrom trec.trec_parse import load_ranked_list_grouped, write_trec_ranked_list_entry\nfrom trec.types import TrecRankedListEntry\n\n\ndef main():\n first_list_path = sys.argv[1]\n second_list_path = sys.argv[2]\n save_path = sys.argv[3]\n print(\"Use {} if available, if not use {}\".format(first_list_path, second_list_path))\n l1: Dict[str, List[TrecRankedListEntry]] = load_ranked_list_grouped(first_list_path)\n l2: Dict[str, List[TrecRankedListEntry]] = load_ranked_list_grouped(second_list_path)\n\n new_entries: 
Dict[str, List[TrecRankedListEntry]] = l1\n\n for qid in l2:\n if qid not in l1:\n new_entries[qid] = l2[qid]\n\n flat_entries: Iterable[TrecRankedListEntry] = flatten(new_entries.values())\n write_trec_ranked_list_entry(flat_entries, save_path)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "src/runnable/trec/combine_two_ranked_list.py", "file_name": "combine_two_ranked_list.py", "file_ext": "py", "file_size_in_byte": 911, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 14, "usage_type": "name"}, {"api_name": "trec.types.TrecRankedListEntry", "line_number": 14, "usage_type": "name"}, {"api_name": "trec.trec_parse.load_ranked_list_grouped", "line_number": 14, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 15, "usage_type": "name"}, {"api_name": "trec.types.TrecRankedListEntry", "line_number": 15, "usage_type": "name"}, {"api_name": "trec.trec_parse.load_ranked_list_grouped", "line_number": 15, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "trec.types.TrecRankedListEntry", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 23, "usage_type": "name"}, {"api_name": "trec.types.TrecRankedListEntry", "line_number": 23, "usage_type": "name"}, {"api_name": "list_lib.flatten", "line_number": 23, "usage_type": "call"}, {"api_name": "trec.trec_parse.write_trec_ranked_list_entry", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "113553659", "text": "import argparse\nimport os\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom rllab.envs.normalized_env import normalize\nfrom rllab.misc.instrument import VariantGenerator\nfrom rllab import config\n\nfrom sac.algos import SAC\n\nfrom sac.misc.instrument import run_sac_experiment\nfrom sac.misc.utils import timestamp, unflatten\nfrom sac.policies import GaussianPolicy, LatentSpacePolicy, GMMPolicy, UniformPolicy\nfrom sac.misc.sampler import SimpleSampler\nfrom sac.replay_buffers import SimpleReplayBuffer\nfrom sac.value_functions import NNQFunction, NNVFunction\nfrom sac.preprocessors import MLPPreprocessor\nfrom examples.variants import parse_domain_and_task, get_variants\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--exp_name', type=str, default='DRL')\nparser.add_argument('--mode', type=str, default='train')\nparser.add_argument('--scale_reward', type=float, default=1)\nparser.add_argument('--epoch', type=int, default=20)\nparser.add_argument('--seed', type=int, default=0)\nparser.add_argument('--log_dir', type=str, default='SAC_LSP')\nparser.add_argument('--args_data', type=str, default=None)\nparser.add_argument('--snapshot_mode', type=str, default=\"gap\")\nparser.add_argument('--snapshot_gap', type=int, default=10)\nargs = parser.parse_args()\n\nfrom rllab.misc import logger\nimport os.path as osp\npre_dir = './Data/'+args.exp_name\nmain_dir = args.log_dir+'rs'+str(args.scale_reward)\nlog_dir = osp.join(pre_dir,main_dir,'seed'+str(args.seed))\n\nseed = 
args.seed\nnp.random.seed(seed)\ntf.set_random_seed(seed)\n\nstates = np.array([[0.5]])\n\nif args.mode == 'train':\n tabular_log_file = osp.join(log_dir, 'process.csv')\n text_log_file = osp.join(log_dir, 'text.csv')\n params_log_file = osp.join(log_dir, 'args.txt')\n logger.log_parameters_lite(params_log_file, args)\n logger.add_text_output(text_log_file)\n logger.add_tabular_output(tabular_log_file)\n prev_snapshot_dir = logger.get_snapshot_dir()\n prev_mode = logger.get_snapshot_mode()\n logger.set_snapshot_dir(log_dir)\n logger.set_snapshot_mode(args.snapshot_mode)\n logger.set_snapshot_gap(args.snapshot_gap)\n logger.set_log_tabular_only(False)\n logger.push_prefix(\"[%s] \" % args.exp_name)\n\n policy_params = {\n 'preprocessing_hidden_sizes': (256, 256, 4),\n 's_t_units': 2, # num of units of the realNVP inner mlp\n 'coupling_layers': 2,\n 's_t_layers': 1, # num of layers of the realNVP inner mlp\n 'action_prior': 'uniform', # this is a prior for action distribution, not latent distribution\n 'preprocessing_output_nonlinearity': 'relu',\n 'reparameterize': True,\n 'squash': True, # Ture to add tanh on the output\n }\n value_fn_params = {'layer_size': 256}\n algorithm_params = { \n 'lr': 3e-4,\n 'discount': 0.0, # use 0.0 for deterministic transition\n 'target_update_interval': 1,\n 'tau': 0.005,\n 'reparameterize': True,\n 'scale_reward': args.scale_reward,\n 'base_kwargs': {\n 'n_epochs': args.epoch+1,\n 'epoch_length': 1000, # number of sample() and training done in one epoch\n 'n_train_repeat': 1,\n 'n_initial_exploration_steps': 1000,\n 'eval_render': False,\n 'eval_n_episodes': 10,\n 'eval_deterministic': False, # True would set policy to be deterministic at evaluation\n }\n }\n replay_buffer_params = {'max_replay_buffer_size': 1e6}\n sampler_params = {\n 'max_path_length': 1,\n 'min_pool_size': 1000,\n 'batch_size': 256,\n }\n\n from deterministic_rl import DeterministicRLEnv\n env = DeterministicRLEnv(states=states)\n\n pool = SimpleReplayBuffer(env_spec=env.spec, **replay_buffer_params)\n\n sampler = SimpleSampler(**sampler_params)\n\n base_kwargs = dict(algorithm_params['base_kwargs'], sampler=sampler)\n\n M = value_fn_params['layer_size']\n qf1 = NNQFunction(env_spec=env.spec, hidden_layer_sizes=(M, M), name='qf1')\n qf2 = NNQFunction(env_spec=env.spec, hidden_layer_sizes=(M, M), name='qf2')\n vf = NNVFunction(env_spec=env.spec, hidden_layer_sizes=(M, M))\n\n initial_exploration_policy = UniformPolicy(env_spec=env.spec)\n\n\n nonlinearity = {\n None: None,\n 'relu': tf.nn.relu,\n 'tanh': tf.nn.tanh\n }[policy_params['preprocessing_output_nonlinearity']]\n\n preprocessing_hidden_sizes = policy_params.get('preprocessing_hidden_sizes')\n if preprocessing_hidden_sizes is not None:\n observations_preprocessor = MLPPreprocessor(\n env_spec=env.spec,\n layer_sizes=preprocessing_hidden_sizes,\n output_nonlinearity=nonlinearity)\n else:\n observations_preprocessor = None\n\n policy_s_t_layers = policy_params['s_t_layers']\n policy_s_t_units = policy_params['s_t_units']\n s_t_hidden_sizes = [policy_s_t_units] * policy_s_t_layers\n\n bijector_config = {\n 'num_coupling_layers': policy_params['coupling_layers'],\n 'translation_hidden_sizes': s_t_hidden_sizes,\n 'scale_hidden_sizes': s_t_hidden_sizes,\n }\n\n policy = LatentSpacePolicy(\n env_spec=env.spec,\n squash=policy_params['squash'],\n bijector_config=bijector_config,\n reparameterize=policy_params['reparameterize'],\n q_function=qf1,\n observations_preprocessor=observations_preprocessor)\n\n algorithm = SAC(\n 
base_kwargs=base_kwargs,\n env=env,\n policy=policy,\n initial_exploration_policy=initial_exploration_policy,\n pool=pool,\n qf1=qf1,\n qf2=qf2,\n vf=vf,\n lr=algorithm_params['lr'],\n scale_reward=algorithm_params['scale_reward'],\n discount=algorithm_params['discount'],\n tau=algorithm_params['tau'],\n reparameterize=algorithm_params['reparameterize'],\n target_update_interval=algorithm_params['target_update_interval'],\n action_prior=policy_params['action_prior'],\n save_full_state=False,\n )\n\n algorithm._sess.run(tf.global_variables_initializer())\n\n algorithm.train()\n\nelif args.mode == 'plot':\n import matplotlib.pyplot as plt\n if not os.path.isdir(log_dir + '/Plots'):\n os.mkdir(log_dir + '/Plots')\n with tf.Session() as sess:\n epoch = args.epoch\n import joblib\n data = joblib.load(log_dir + '/' + 'itr_' + str(epoch) + '.pkl')\n sample_num = 1000\n\n policy = data['policy']\n from deterministic_rl import DeterministicRLEnv\n env = DeterministicRLEnv(states=states)\n qf1 = data['qf1']\n qf2 = data['qf2']\n vf = data['vf']\n # param1 = policy.get_param_values(trainable=True)\n for state in states:\n plt.figure()\n print('state: ', state)\n actions = policy.get_actions(\n np.repeat(np.array(state)[None, :], sample_num, axis=0))\n ax = plt.subplot(2, 1, 1)\n ax.set_title('a0')\n plt.hist(actions[:, 0], bins=20)\n ax = plt.subplot(2, 1, 2)\n ax.set_title('a1')\n plt.hist(actions[:, 1], bins=20)\n plt.savefig(log_dir + '/Plots/' + 'epoch_' + str(epoch) + 's' + str(state) + '_action_hist.png')\n plt.close()\n\n plt.figure()\n x = np.linspace(-1, 1, 100)\n y = np.linspace(-1, 1, 100)\n z = np.zeros((100,100))\n for i in range(len(x)):\n for j in range(len(y)):\n z[j,i] = (env._get_reward([x[i], y[j]]))\n cs = plt.contourf(x, y, z)\n plt.colorbar(cs)\n plt.scatter(actions[:, 0], actions[:, 1])\n plt.savefig(log_dir + '/Plots/' + 'epoch_' + str(epoch) + 's' + str(state) + '_action_scatter.png')\n\n\n plt.figure()\n x = np.linspace(-0.99, 0.99, 100)\n y = np.linspace(-0.99, 0.99, 100)\n z1 = np.zeros((100,100))\n z2 = np.zeros((100,100))\n for i in range(len(x)):\n for j in range(len(y)):\n actions = np.array([[x[i], y[j]]])\n raw_actions = np.arctanh(actions)\n z1[j,i] = qf1.eval(np.reshape(state,(1,-1)),actions)[0]\n z2[j,i] = policy.get_log_pis(np.reshape(state,(1,-1)),actions)[0]\n ax = plt.subplot(2,1,1)\n ax.set_title('qval')\n plt.contourf(x, y, z1)\n ax = plt.subplot(2,1,2)\n ax.set_title('logpi')\n plt.contourf(x, y, z2)\n plt.savefig(log_dir+'/Plots/'+'epoch_'+str(epoch)+'s'+str(state)+'_contour.png')\n plt.close()\n", "sub_path": "tests/DeterministicRL/SAC_LSP.py", "file_name": "SAC_LSP.py", "file_ext": "py", "file_size_in_byte": 8591, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.set_random_seed", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 48, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "name"}, {"api_name": "rllab.misc.logger.log_parameters_lite", "line_number": 50, "usage_type": "call"}, {"api_name": "rllab.misc.logger", "line_number": 50, "usage_type": "name"}, {"api_name": "rllab.misc.logger.add_text_output", "line_number": 51, "usage_type": "call"}, {"api_name": "rllab.misc.logger", "line_number": 51, "usage_type": "name"}, {"api_name": "rllab.misc.logger.add_tabular_output", "line_number": 52, "usage_type": "call"}, {"api_name": "rllab.misc.logger", "line_number": 52, "usage_type": "name"}, {"api_name": "rllab.misc.logger.get_snapshot_dir", "line_number": 53, "usage_type": "call"}, {"api_name": "rllab.misc.logger", "line_number": 53, "usage_type": "name"}, {"api_name": "rllab.misc.logger.get_snapshot_mode", "line_number": 54, "usage_type": "call"}, {"api_name": "rllab.misc.logger", "line_number": 54, "usage_type": "name"}, {"api_name": "rllab.misc.logger.set_snapshot_dir", "line_number": 55, "usage_type": "call"}, {"api_name": "rllab.misc.logger", "line_number": 55, "usage_type": "name"}, {"api_name": "rllab.misc.logger.set_snapshot_mode", "line_number": 56, "usage_type": "call"}, {"api_name": "rllab.misc.logger", "line_number": 56, "usage_type": "name"}, {"api_name": "rllab.misc.logger.set_snapshot_gap", "line_number": 57, "usage_type": "call"}, {"api_name": "rllab.misc.logger", "line_number": 57, "usage_type": "name"}, {"api_name": "rllab.misc.logger.set_log_tabular_only", "line_number": 58, "usage_type": "call"}, {"api_name": "rllab.misc.logger", "line_number": 58, "usage_type": "name"}, {"api_name": "rllab.misc.logger.push_prefix", "line_number": 59, "usage_type": "call"}, {"api_name": "rllab.misc.logger", "line_number": 59, "usage_type": "name"}, {"api_name": "deterministic_rl.DeterministicRLEnv", "line_number": 97, "usage_type": "call"}, {"api_name": "sac.replay_buffers.SimpleReplayBuffer", "line_number": 99, "usage_type": "call"}, {"api_name": "sac.misc.sampler.SimpleSampler", "line_number": 101, "usage_type": "call"}, {"api_name": "sac.value_functions.NNQFunction", "line_number": 106, "usage_type": "call"}, {"api_name": "sac.value_functions.NNQFunction", "line_number": 107, "usage_type": "call"}, {"api_name": "sac.value_functions.NNVFunction", "line_number": 108, "usage_type": "call"}, {"api_name": "sac.policies.UniformPolicy", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 115, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 116, "usage_type": "attribute"}, {"api_name": "sac.preprocessors.MLPPreprocessor", "line_number": 121, "usage_type": "call"}, {"api_name": "sac.policies.LatentSpacePolicy", "line_number": 138, "usage_type": "call"}, {"api_name": "sac.algos.SAC", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 173, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 176, "usage_type": "call"}, {"api_name": "deterministic_rl.DeterministicRLEnv", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 187, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "numpy.repeat", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.arctanh", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 224, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 229, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 230, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}]} +{"seq_id": "180605139", "text": "#!/usr/bin/env python\n\nimport os\nimport json\nimport logging\nimport requests\nimport utm\nimport time\n\nimport datetime\nfrom dateutil.parser import parse\nfrom influxdb import InfluxDBClient, SeriesHelper\n\nfrom pyclowder.extractors import Extractor\nfrom pyclowder.utils import CheckMessage\nimport pyclowder.files\nimport pyclowder.datasets\nimport pyclowder.geostreams\n\nimport canopyCover as ccCore\nimport plotid_by_latlon\n\n\ndef determineOutputDirectory(outputRoot, dsname):\n if dsname.find(\" - \") > -1:\n timestamp = dsname.split(\" - \")[1]\n else:\n timestamp = \"dsname\"\n if timestamp.find(\"__\") > -1:\n datestamp = timestamp.split(\"__\")[0]\n else:\n datestamp = \"\"\n\n return os.path.join(outputRoot, datestamp, timestamp)\n\ndef load_json(meta_path):\n try:\n with open(meta_path, 'r') as fin:\n return json.load(fin)\n except Exception as ex:\n logging.error('Corrupt metadata file, ' + str(ex))\n\n# Try several variations on each position field to get all required information\ndef fetch_md_parts(metadata):\n gantry_x, gantry_y = None, None\n loc_cambox_x, loc_cambox_y = None, None\n fov_x, fov_y = None, None\n ctime = None\n\n \"\"\"\n Due to observed differences in metadata field names over time, this method is\n flexible with respect to finding fields. By default each entry for each field\n is checked with both a lowercase and uppercase leading character.\n \"\"\"\n\n if 'lemnatec_measurement_metadata' in metadata:\n lem_md = metadata['lemnatec_measurement_metadata']\n if 'gantry_system_variable_metadata' in lem_md and 'sensor_fixed_metadata' in lem_md:\n gantry_meta = lem_md['gantry_system_variable_metadata']\n sensor_meta = lem_md['sensor_fixed_metadata']\n\n # X and Y position of gantry\n x_positions = ['position x [m]', 'position X [m]']\n for variant in x_positions:\n val = check_field_variants(gantry_meta, variant)\n if val:\n gantry_x = parse_as_float(val)\n break\n y_positions = ['position y [m]', 'position Y [m]']\n for variant in y_positions:\n val = check_field_variants(gantry_meta, variant)\n if val:\n gantry_y = parse_as_float(val)\n break\n\n # Sensor location within camera box\n cbx_locations = ['location in camera box x [m]', 'location in camera box X [m]']\n for variant in cbx_locations:\n val = check_field_variants(sensor_meta, variant)\n if val:\n loc_cambox_x = parse_as_float(val)\n break\n cby_locations = ['location in camera box y [m]', 'location in camera box Y [m]']\n for variant in cby_locations:\n val = check_field_variants(sensor_meta, variant)\n if val:\n loc_cambox_y = parse_as_float(val)\n break\n\n # Field of view\n x_fovs = ['field of view x [m]', 'field of view X [m]']\n for variant in x_fovs:\n val = check_field_variants(sensor_meta, variant)\n if val:\n fov_x = parse_as_float(val)\n break\n y_fovs = ['field of view y [m]', 'field of view Y [m]']\n for variant in y_fovs:\n val = check_field_variants(sensor_meta, variant)\n if val:\n fov_y = parse_as_float(val)\n break\n if not (fov_x and fov_y):\n val = check_field_variants(sensor_meta, 'field of view at 2m in X- Y- direction [m]')\n if val:\n vals = 
val.replace('[','').replace(']','').split(' ')\n if not fov_x:\n fov_x = parse_as_float(vals[0])\n if not fov_y:\n fov_y = parse_as_float(vals[1])\n\n # TODO: Find a better solution once metadata files are fixed\n # TODO: These values from https://github.com/terraref/computing-pipeline/issues/126#issuecomment-292027575\n fov_x = 1.015\n fov_y = 0.749\n\n # timestamp, e.g. \"2016-05-15T00:30:00-05:00\"\n val = check_field_variants(gantry_meta, 'time')\n if val:\n ctime = val.encode(\"utf-8\")\n else:\n ctime = \"unknown\"\n\n return gantry_x, gantry_y, loc_cambox_x, loc_cambox_y, fov_x, fov_y, ctime\n\n# Check for fieldname in dict, including capitalization changes\ndef check_field_variants(dict, key):\n if key in dict:\n return dict[key]\n elif key.capitalize() in dict:\n return dict[key.capitalize()]\n else:\n return False\n\n# Try to convert val to float, return val on Exception\ndef parse_as_float(val):\n try:\n return float(val.encode(\"utf-8\"))\n except AttributeError:\n return val\n\nclass CanopyCoverHeight(Extractor):\n def __init__(self):\n Extractor.__init__(self)\n\n # add any additional arguments to parser\n # self.parser.add_argument('--max', '-m', type=int, nargs='?', default=-1,\n # help='maximum number (default=-1)')\n self.parser.add_argument('--output', '-o', dest=\"output_dir\", type=str, nargs='?',\n default=\"/home/extractor/sites/ua-mac/Level_1/stereoTop_canopyCover\",\n help=\"root directory where timestamp & output directories will be created\")\n self.parser.add_argument('--overwrite', dest=\"force_overwrite\", type=bool, nargs='?', default=False,\n help=\"whether to overwrite output file if it already exists in output directory\")\n self.parser.add_argument('--betyURL', dest=\"bety_url\", type=str, nargs='?',\n default=\"https://terraref.ncsa.illinois.edu/bety/api/beta/traits.csv\",\n help=\"traits API endpoint of BETY instance that outputs should be posted to\")\n self.parser.add_argument('--betyKey', dest=\"bety_key\", type=str, nargs='?', default=False,\n help=\"API key for BETY instance specified by betyURL\")\n self.parser.add_argument('--plots', dest=\"plots_shp\", type=str, nargs='?',\n default=\"/home/extractor/extractors-metadata/sensorposition/shp/sorghumexpfall2016v5/sorghumexpfall2016v5_lblentry_1to7.shp\",\n help=\".shp file containing plots\")\n self.parser.add_argument('--influxHost', dest=\"influx_host\", type=str, nargs='?',\n default=\"terra-logging.ncsa.illinois.edu\", help=\"InfluxDB URL for logging\")\n self.parser.add_argument('--influxPort', dest=\"influx_port\", type=int, nargs='?',\n default=8086, help=\"InfluxDB port\")\n self.parser.add_argument('--influxUser', dest=\"influx_user\", type=str, nargs='?',\n default=\"terra\", help=\"InfluxDB username\")\n self.parser.add_argument('--influxPass', dest=\"influx_pass\", type=str, nargs='?',\n default=\"\", help=\"InfluxDB password\")\n self.parser.add_argument('--influxDB', dest=\"influx_db\", type=str, nargs='?',\n default=\"extractor_db\", help=\"InfluxDB databast\")\n\n # parse command line and load default logging configuration\n self.setup()\n\n # setup logging for the exctractor\n logging.getLogger('pyclowder').setLevel(logging.DEBUG)\n logging.getLogger('__main__').setLevel(logging.DEBUG)\n\n # assign other arguments\n self.output_dir = self.args.output_dir\n self.force_overwrite = self.args.force_overwrite\n self.bety_url = self.args.bety_url\n self.bety_key = self.args.bety_key\n self.plots_shp = self.args.plots_shp\n self.influx_host = self.args.influx_host\n self.influx_port = 
self.args.influx_port\n self.influx_user = self.args.influx_user\n self.influx_pass = self.args.influx_pass\n self.influx_db = self.args.influx_db\n\n def check_message(self, connector, host, secret_key, resource, parameters):\n # Most basic check - is this most recent file for this dataset?\n if resource['latest_file']:\n latest_file = \"\"\n latest_time = \"Sun Jan 01 00:00:01 CDT 1920\"\n for f in resource['files']:\n create_time = datetime.datetime.strptime(f['date-created'].replace(\" CDT\",\"\"), \"%c\")\n if create_time > datetime.datetime.strptime(latest_time.replace(\" CDT\",\"\"), \"%c\"):\n latest_time = f['date-created']\n latest_file = f['filename']\n if latest_file != resource['latest_file']:\n # This message is not for most recently added file; skip dataset for now\n return CheckMessage.ignore\n\n # Check for a left and right file before beginning processing\n found_left = False\n found_right = False\n\n for f in resource['files']:\n if 'filename' in f and f['filename'].endswith('_left.bin'):\n found_left = True\n elif 'filename' in f and f['filename'].endswith('_right.bin'):\n found_right = True\n if not (found_left and found_right):\n return CheckMessage.ignore\n\n # Check if output already exists\n out_dir = determineOutputDirectory(self.output_dir, resource['dataset_info']['name'])\n if not self.force_overwrite:\n outfile = os.path.join(out_dir, 'CanopyCoverTraits.csv')\n if os.path.isfile(outfile):\n logging.info(\"skipping dataset %s, output already exists\" % resource['id'])\n return CheckMessage.ignore\n\n # fetch metadata from dataset to check if we should remove existing entry for this extractor first\n md = pyclowder.datasets.download_metadata(connector, host, secret_key,\n resource['id'])\n found_meta = False\n for m in md:\n if 'agent' in m and 'name' in m['agent']:\n if m['agent']['name'].find(self.extractor_info['name']) > -1:\n logging.info(\"skipping dataset %s, metadata already exists\" % resource['id'])\n return CheckMessage.ignore\n # Check for required metadata before beginning processing\n if 'content' in m and 'lemnatec_measurement_metadata' in m['content']:\n found_meta = True\n\n if found_left and found_right and found_meta:\n return CheckMessage.download\n else:\n return CheckMessage.ignore\n\n def process_message(self, connector, host, secret_key, resource, parameters):\n starttime = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n created_count = 0\n bytes = 0\n\n # Get left/right files and metadata\n metafile, img_left, img_right, metadata = None, None, None, None\n for fname in resource['local_paths']:\n # First check metadata attached to dataset in Clowder for item of interest\n if fname.endswith('_dataset_metadata.json'):\n all_dsmd = load_json(fname)\n for curr_dsmd in all_dsmd:\n if 'content' in curr_dsmd and 'lemnatec_measurement_metadata' in curr_dsmd['content']:\n metafile = fname\n metadata = curr_dsmd['content']\n # Otherwise, check if metadata was uploaded as a .json file\n elif fname.endswith('_metadata.json') and fname.find('/_metadata.json') == -1 and metafile is None:\n metafile = fname\n metadata = load_json(metafile)\n elif fname.endswith('_left.bin'):\n img_left = fname\n elif fname.endswith('_right.bin'):\n img_right = fname\n if None in [metafile, img_left, img_right, metadata]:\n logging.error('could not find all 3 of left/right/metadata')\n return\n\n # Determine output directory\n out_dir = determineOutputDirectory(self.output_dir, resource['dataset_info']['name'])\n logging.info(\"...writing outputs to: %s\" % 
out_dir)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n outfile = os.path.join(out_dir, 'CanopyCoverTraits.csv')\n\n if (not os.path.isfile(outfile)) or self.force_overwrite:\n # Get information from input data\n metadata = ccCore.lower_keys(metadata)\n plotNum = ccCore.get_plot_num(metadata)\n ccVal = ccCore.get_CC_from_bin(img_left)\n\n # get traits and values & generate output CSV\n (fields, traits) = ccCore.get_traits_table()\n str_time = str(ccCore.get_localdatetime(metadata))\n str_date = str_time[6:10]+'-'+str_time[:5]+'T'+str_time[11:]\n traits['local_datetime'] = str_date.replace(\"/\", '-')\n traits['canopy_cover'] = str(ccVal)\n traits['site'] = 'MAC Field Scanner Field Plot '+ str(plotNum)+' Season 2'\n trait_list = ccCore.generate_traits_list(traits)\n ccCore.generate_cc_csv(outfile, fields, trait_list)\n\n created_count += 1\n bytes += os.path.getsize(outfile)\n\n # Only upload the newly generated CSV to Clowder if it isn't already in dataset\n if outfile not in resource['local_paths']:\n csv_id = pyclowder.files.upload_to_dataset(connector, host, secret_key, resource['id'], outfile)\n else:\n csv_id = \"\"\n\n # submit CSV to BETY\n self.submitToBety(outfile)\n\n # generate datapoint for geostreams\n self.submitDatapoint(connector, host, secret_key, resource, metadata, fields, trait_list)\n\n # Tell Clowder this is completed so subsequent file updates don't daisy-chain\n metadata = {\n # TODO: Generate JSON-LD context for additional fields\n \"@context\": [\"https://clowder.ncsa.illinois.edu/contexts/metadata.jsonld\"],\n \"dataset_id\": resource['id'],\n \"content\": {\n \"files_created\": [csv_id]\n },\n \"agent\": {\n \"@type\": \"cat:extractor\",\n \"extractor_id\": host + \"/api/extractors/\" + self.extractor_info['name']\n }\n }\n pyclowder.datasets.upload_metadata(connector, host, secret_key, resource['id'], metadata)\n\n endtime = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n self.logToInfluxDB(starttime, endtime, created_count, bytes)\n\n def submitToBety(self, csvfile):\n if self.bety_url != \"\":\n sess = requests.Session()\n\n r = sess.post(\"%s?key=%s\" % (self.bety_url, self.bety_key),\n data=file(csvfile, 'rb').read(),\n headers={'Content-type': 'text/csv'})\n\n if r.status_code == 200 or r.status_code == 201:\n logging.info(\"...CSV successfully uploaded to BETYdb.\")\n else:\n print(\"Error uploading CSV to BETYdb %s\" % r.status_code)\n print(r.text)\n\n def submitDatapoint(self, connector, host, secret_key, resource, metadata, trait_names, trait_values):\n logging.info(\"...submitting datapoint to geostreams\")\n\n # Pull positional information from metadata\n gantry_x, gantry_y, loc_cambox_x, loc_cambox_y, fov_x, fov_y, ctime = fetch_md_parts(metadata)\n\n # Convert positional information; see terra.sensorposition extractor for more details\n SE_latlon = (33.0745, -111.97475)\n SE_utm = utm.from_latlon(SE_latlon[0], SE_latlon[1])\n SE_offset_x = 3.8\n SE_offset_y = 0\n\n # Determine sensor position relative to origin and get lat/lon\n gantry_utm_x = SE_utm[0] - (gantry_y - SE_offset_y)\n gantry_utm_y = SE_utm[1] + (gantry_x - SE_offset_x)\n sensor_utm_x = gantry_utm_x - loc_cambox_y\n sensor_utm_y = gantry_utm_y + loc_cambox_x\n sensor_latlon = utm.to_latlon(sensor_utm_x, sensor_utm_y, SE_utm[2], SE_utm[3])\n logging.info(\"sensor lat/lon: %s\" % str(sensor_latlon))\n\n # Upload data into Geostreams API -----------------------------------------------------\n fileIdList = []\n for f in resource['files']:\n 
fileIdList.append(f['id'])\n\n # SENSOR is the plot - try by location first\n sensor_data = pyclowder.geostreams.get_sensors_by_circle(connector, host, secret_key, sensor_latlon[1], sensor_latlon[0], 0.01)\n if not sensor_data:\n plot_info = plotid_by_latlon.plotQuery(self.plots_shp, sensor_latlon[1], sensor_latlon[0])\n plot_name = \"Range \"+plot_info['plot'].replace(\"-\", \" Pass \")\n logging.info(\"...found plot: \"+str(plot_info))\n sensor_data = pyclowder.geostreams.get_sensor_by_name(connector, host, secret_key, plot_name)\n if not sensor_data:\n sensor_id = pyclowder.geostreams.create_sensor(connector, host, secret_key, plot_name, {\n \"type\": \"Point\",\n \"coordinates\": [plot_info['point'][1], plot_info['point'][0], plot_info['point'][2]]\n }, {\n \"id\": \"MAC Field Scanner\",\n \"title\": \"MAC Field Scanner\",\n \"sensorType\": 4\n }, \"Maricopa\")\n else:\n sensor_id = sensor_data['id']\n else:\n if len(sensor_data) > 1:\n sensor_id = sensor_data[0]['id']\n plot_name = sensor_data[0]['name']\n else:\n sensor_id = sensor_data['id']\n plot_name = sensor_data['name']\n\n # STREAM is plot x instrument\n stream_name = \"Canopy Cover\" + \" - \" + plot_name\n stream_data = pyclowder.geostreams.get_stream_by_name(connector, host, secret_key, stream_name)\n if not stream_data:\n stream_id = pyclowder.geostreams.create_stream(connector, host, secret_key, stream_name, sensor_id, {\n \"type\": \"Point\",\n \"coordinates\": [sensor_latlon[1], sensor_latlon[0], 0]\n })\n else:\n stream_id = stream_data['id']\n\n logging.info(\"posting datapoint to stream %s\" % stream_id)\n metadata[\"source\"] = host+\"datasets/\"+resource['id']\n metadata[\"file_ids\"] = \",\".join(fileIdList)\n\n # Format time properly, adding UTC if missing from Danforth timestamp\n time_obj = time.strptime(ctime, \"%m/%d/%Y %H:%M:%S\")\n time_fmt = time.strftime('%Y-%m-%dT%H:%M:%S', time_obj)\n if len(time_fmt) == 19:\n time_fmt += \"-06:00\"\n\n pyclowder.geostreams.create_datapoint(connector, host, secret_key, stream_id, {\n \"type\": \"Point\",\n \"coordinates\": [sensor_latlon[1], sensor_latlon[0], 0]\n }, time_fmt, time_fmt, metadata)\n\n def logToInfluxDB(self, starttime, endtime, filecount, bytecount):\n # Time of the format \"2017-02-10T16:09:57+00:00\"\n f_completed_ts = int(parse(endtime).strftime('%s'))*1000000000\n f_duration = f_completed_ts - int(parse(starttime).strftime('%s'))*1000000000\n\n client = InfluxDBClient(self.influx_host, self.influx_port, self.influx_user, self.influx_pass, self.influx_db)\n client.write_points([{\n \"measurement\": \"file_processed\",\n \"time\": f_completed_ts,\n \"fields\": {\"value\": f_duration}\n }], tags={\"extractor\": self.extractor_info['name'], \"type\": \"duration\"})\n client.write_points([{\n \"measurement\": \"file_processed\",\n \"time\": f_completed_ts,\n \"fields\": {\"value\": int(filecount)}\n }], tags={\"extractor\": self.extractor_info['name'], \"type\": \"filecount\"})\n client.write_points([{\n \"measurement\": \"file_processed\",\n \"time\": f_completed_ts,\n \"fields\": {\"value\": int(bytecount)}\n }], tags={\"extractor\": self.extractor_info['name'], \"type\": \"bytes\"})\n\n\nif __name__ == \"__main__\":\n extractor = CanopyCoverHeight()\n extractor.start()\n", "sub_path": "canopycover/terra_canopycover.py", "file_name": "terra_canopycover.py", "file_ext": "py", "file_size_in_byte": 20287, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.join", "line_number": 
34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 41, "usage_type": "call"}, {"api_name": "pyclowder.extractors.Extractor", "line_number": 142, "usage_type": "name"}, {"api_name": "pyclowder.extractors.Extractor.__init__", "line_number": 144, "usage_type": "call"}, {"api_name": "pyclowder.extractors.Extractor", "line_number": 144, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 177, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 177, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 178, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 178, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 198, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 198, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 199, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 199, "usage_type": "attribute"}, {"api_name": "pyclowder.utils.CheckMessage.ignore", "line_number": 204, "usage_type": "attribute"}, {"api_name": "pyclowder.utils.CheckMessage", "line_number": 204, "usage_type": "name"}, {"api_name": "pyclowder.utils.CheckMessage.ignore", "line_number": 216, "usage_type": "attribute"}, {"api_name": "pyclowder.utils.CheckMessage", "line_number": 216, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 221, "usage_type": "call"}, {"api_name": "os.path", "line_number": 221, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 222, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 223, "usage_type": "call"}, {"api_name": "pyclowder.utils.CheckMessage.ignore", "line_number": 224, "usage_type": "attribute"}, {"api_name": "pyclowder.utils.CheckMessage", "line_number": 224, "usage_type": "name"}, {"api_name": "pyclowder.extractors.datasets.download_metadata", "line_number": 227, "usage_type": "call"}, {"api_name": "pyclowder.extractors.datasets", "line_number": 227, "usage_type": "attribute"}, {"api_name": "pyclowder.extractors", "line_number": 227, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 233, "usage_type": "call"}, {"api_name": "pyclowder.utils.CheckMessage.ignore", "line_number": 234, "usage_type": "attribute"}, {"api_name": "pyclowder.utils.CheckMessage", "line_number": 234, "usage_type": "name"}, {"api_name": "pyclowder.utils.CheckMessage.download", "line_number": 240, "usage_type": "attribute"}, {"api_name": "pyclowder.utils.CheckMessage", "line_number": 240, "usage_type": "name"}, {"api_name": "pyclowder.utils.CheckMessage.ignore", "line_number": 242, "usage_type": "attribute"}, {"api_name": "pyclowder.utils.CheckMessage", "line_number": 242, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 245, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 245, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 268, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 273, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 274, "usage_type": "call"}, {"api_name": "os.path", "line_number": 274, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 275, "usage_type": "call"}, {"api_name": 
"os.path.join", "line_number": 276, "usage_type": "call"}, {"api_name": "os.path", "line_number": 276, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 278, "usage_type": "call"}, {"api_name": "os.path", "line_number": 278, "usage_type": "attribute"}, {"api_name": "canopyCover.lower_keys", "line_number": 280, "usage_type": "call"}, {"api_name": "canopyCover.get_plot_num", "line_number": 281, "usage_type": "call"}, {"api_name": "canopyCover.get_CC_from_bin", "line_number": 282, "usage_type": "call"}, {"api_name": "canopyCover.get_traits_table", "line_number": 285, "usage_type": "call"}, {"api_name": "canopyCover.get_localdatetime", "line_number": 286, "usage_type": "call"}, {"api_name": "canopyCover.generate_traits_list", "line_number": 291, "usage_type": "call"}, {"api_name": "canopyCover.generate_cc_csv", "line_number": 292, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 295, "usage_type": "call"}, {"api_name": "os.path", "line_number": 295, "usage_type": "attribute"}, {"api_name": "pyclowder.extractors.files.upload_to_dataset", "line_number": 299, "usage_type": "call"}, {"api_name": "pyclowder.extractors.files", "line_number": 299, "usage_type": "attribute"}, {"api_name": "pyclowder.extractors", "line_number": 299, "usage_type": "name"}, {"api_name": "pyclowder.extractors.datasets.upload_metadata", "line_number": 322, "usage_type": "call"}, {"api_name": "pyclowder.extractors.datasets", "line_number": 322, "usage_type": "attribute"}, {"api_name": "pyclowder.extractors", "line_number": 322, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 324, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 324, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 329, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 336, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 342, "usage_type": "call"}, {"api_name": "utm.from_latlon", "line_number": 349, "usage_type": "call"}, {"api_name": "utm.to_latlon", "line_number": 358, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 359, "usage_type": "call"}, {"api_name": "pyclowder.extractors.geostreams.get_sensors_by_circle", "line_number": 367, "usage_type": "call"}, {"api_name": "pyclowder.extractors.geostreams", "line_number": 367, "usage_type": "attribute"}, {"api_name": "pyclowder.extractors", "line_number": 367, "usage_type": "name"}, {"api_name": "plotid_by_latlon.plotQuery", "line_number": 369, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 371, "usage_type": "call"}, {"api_name": "pyclowder.extractors.geostreams.get_sensor_by_name", "line_number": 372, "usage_type": "call"}, {"api_name": "pyclowder.extractors.geostreams", "line_number": 372, "usage_type": "attribute"}, {"api_name": "pyclowder.extractors", "line_number": 372, "usage_type": "name"}, {"api_name": "pyclowder.extractors.geostreams.create_sensor", "line_number": 374, "usage_type": "call"}, {"api_name": "pyclowder.extractors.geostreams", "line_number": 374, "usage_type": "attribute"}, {"api_name": "pyclowder.extractors", "line_number": 374, "usage_type": "name"}, {"api_name": "pyclowder.extractors.geostreams.get_stream_by_name", "line_number": 394, "usage_type": "call"}, {"api_name": "pyclowder.extractors.geostreams", "line_number": 394, "usage_type": "attribute"}, {"api_name": "pyclowder.extractors", "line_number": 394, "usage_type": "name"}, {"api_name": "pyclowder.extractors.geostreams.create_stream", 
"line_number": 396, "usage_type": "call"}, {"api_name": "pyclowder.extractors.geostreams", "line_number": 396, "usage_type": "attribute"}, {"api_name": "pyclowder.extractors", "line_number": 396, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 403, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 408, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 409, "usage_type": "call"}, {"api_name": "pyclowder.extractors.geostreams.create_datapoint", "line_number": 413, "usage_type": "call"}, {"api_name": "pyclowder.extractors.geostreams", "line_number": 413, "usage_type": "attribute"}, {"api_name": "pyclowder.extractors", "line_number": 413, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", "line_number": 420, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 421, "usage_type": "call"}, {"api_name": "influxdb.InfluxDBClient", "line_number": 423, "usage_type": "call"}]} +{"seq_id": "537668864", "text": "'''\n\nDescription:\n\nGiven a string, sort it in decreasing order based on the frequency of characters.\n\nExample 1:\n\nInput:\n\"tree\"\n\nOutput:\n\"eert\"\n\nExplanation:\n'e' appears twice while 'r' and 't' both appear once.\nSo 'e' must appear before both 'r' and 't'. Therefore \"eetr\" is also a valid answer.\n\n\n\nExample 2:\n\nInput:\n\"cccaaa\"\n\nOutput:\n\"cccaaa\"\n\nExplanation:\nBoth 'c' and 'a' appear three times, so \"aaaccc\" is also a valid answer.\nNote that \"cacaca\" is incorrect, as the same characters must be together.\n\n\n\nExample 3:\n\nInput:\n\"Aabb\"\n\nOutput:\n\"bbAa\"\n\nExplanation:\n\"bbaA\" is also a valid answer, but \"Aabb\" is incorrect.\nNote that 'A' and 'a' are treated as two different characters.\n\n'''\n\n\n\nfrom collections import Counter\n\nclass Solution:\n def frequencySort(self, s: str) -> str:\n \n ## dictionary:\n # key: character\n # value: occurrence of character\n\n char_occ_dict = Counter( s )\n size = len(char_occ_dict)\n\n \n output_list = []\n char_occ_pair = char_occ_dict.most_common(size)\n \n # output character with occurrence, from high frequency to low frequency\n for char, occ in char_occ_pair:\n output_list += [ char ]*occ\n \n \n return ''.join( output_list )\n\n\n\n# n : the length of input string\n\n## Time Complexity: O( n log n )\n#\n# The overhead in time is the cost of occurrence sorting, which is of O( n log n )\n\n## Space Complexity: O( n )\n#\n# The overhead in space is the storage for dictionary and char_occ_pair, which are of O( n )\n\nfrom collections import namedtuple\nTestEntry = namedtuple('TestEntry', 'string')\n\ndef test_bench():\n\n test_data = [\n TestEntry( string = \"tree\" ), \n TestEntry( string = \"cccaaa\" ),\n TestEntry( string = \"Aabb\"),\n ]\n\n # expected output:\n '''\n eetr\n cccaaa\n bbAa\n '''\n\n for t in test_data:\n\n print( Solution().frequencySort( s = t.string ) )\n \n return\n\n\n\nif __name__ == '__main__':\n\n test_bench()\n", "sub_path": "No_0451_Sort Characters By Frequency/by_dictionary.py", "file_name": "by_dictionary.py", "file_ext": "py", "file_size_in_byte": 2070, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "collections.Counter", "line_number": 60, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "565544986", "text": "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nclass GANLoss(nn.Module):\n \"\"\" \n Reward-Refined 
NLLLoss Function for adversarial reinforcement training of generator\n \"\"\"\n def __init__(self, use_cuda, **kwargs):\n self.use_cuda = use_cuda\n super(GANLoss, self).__init__(**kwargs)\n\n def forward(self, probs, targets, rewards):\n \"\"\"\n Args:\n probs: (seq_len, vocab_size) - torch Variable\n targets: (seq_len) - torch Variable\n rewards: (seq_len) - torch Variable\n \"\"\"\n _, _, vocab_size = probs.size()\n probs = probs.view((-1, vocab_size))\n one_hot = torch.zeros(probs.size())\n indices = targets.data.view((-1, 1))\n # rewards = rewards.data.view((-1, 1))\n rewards = rewards.data.view((-1))\n if self.use_cuda and torch.cuda.is_available(): \n one_hot = one_hot.cuda() \n indices = indices.cuda()\n # write 1 into all positions specified by targets in the 1st dim\n one_hot.scatter_(1, indices, 1) \n one_hot = Variable(one_hot.type(torch.ByteTensor)) # sets the type, so it can be used in masked_select\n if self.use_cuda and torch.cuda.is_available():\n one_hot = one_hot.cuda()\n # import pdb\n # pdb.set_trace()\n loss = torch.masked_select(probs, one_hot)\n # loss = loss * rewards # why does a greater rewards = greater loss? This should be opposite the case.\n # loss = -torch.sum(loss)\n # import pdb\n # pdb.set_trace()\n loss = -torch.dot(loss, rewards)\n return loss\n", "sub_path": "src/models/nottingham/gan_loss.py", "file_name": "gan_loss.py", "file_ext": "py", "file_size_in_byte": 1635, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.ByteTensor", "line_number": 31, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.masked_select", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.dot", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "583376404", "text": "# Copyright 2016 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities for dealing with ML jobs API.\"\"\"\nimport copy\nimport datetime\nimport time\n\nfrom apitools.base.py import encoding\nfrom apitools.base.py import list_pager\nfrom googlecloudsdk.api_lib.logging import common as logging_common\nfrom googlecloudsdk.core import apis\nfrom googlecloudsdk.core import properties\nfrom googlecloudsdk.core import resources\nfrom googlecloudsdk.core.util import files\nfrom googlecloudsdk.core.util import times\nimport yaml\n\n\ndef Cancel(job):\n \"\"\"Cancels given job.\"\"\"\n client = apis.GetClientInstance('ml', 'v1beta1')\n res = resources.REGISTRY.Parse(job, collection='ml.projects.jobs')\n req = client.MESSAGES_MODULE.MlProjectsJobsCancelRequest(\n projectsId=res.projectsId, jobsId=res.Name())\n resp = client.projects_jobs.Cancel(req)\n return resp\n\n\ndef Get(job):\n client = apis.GetClientInstance('ml', 'v1beta1')\n ref = resources.REGISTRY.Parse(job, collection='ml.projects.jobs')\n req = client.MESSAGES_MODULE.MlProjectsJobsGetRequest(\n projectsId=ref.projectsId, jobsId=ref.jobsId)\n return client.projects_jobs.Get(req)\n\n\nclass LogPosition(object):\n \"\"\"Tracks a position in the log.\n\n Log messages are sorted by timestamp. Within a given timestamp, logs will be\n returned in order of insert_id.\n \"\"\"\n\n def __init__(self):\n self.timestamp = '1970-01-01T01:00:00.000000000Z'\n self.insert_id = ''\n self.need_insert_id_in_lb_filter = False\n\n def Update(self, timestamp, insert_id):\n \"\"\"Update the log position based on new log entry data.\n\n Args:\n timestamp: the timestamp of the message we just read, as an RFC3339\n string.\n insert_id: the insert id of the message we just read.\n\n Returns:\n True if the position was updated; False if not.\n \"\"\"\n if timestamp < self.timestamp:\n # The message is behind this LogPosition. 
No update required.\n return False\n elif timestamp == self.timestamp:\n # When the timestamp is the same, we need to move forward the insert id.\n if insert_id > self.insert_id:\n self.insert_id = insert_id\n self.need_insert_id_in_lb_filter = True\n return True\n return False\n else:\n # Once we see a new timestamp, move forward the minimum time that we're\n # willing to accept.\n self.need_insert_id_in_lb_filter = False\n self.insert_id = insert_id\n self.timestamp = timestamp\n return True\n\n def GetFilterLowerBound(self):\n \"\"\"The log message filter which keeps out messages which are too old.\n\n Returns:\n The lower bound filter text that we should use.\n \"\"\"\n\n if self.need_insert_id_in_lb_filter:\n return '((timestamp=\"{0}\" AND insertId>\"{1}\") OR timestamp>\"{2}\")'.format(\n self.timestamp, self.insert_id, self.timestamp)\n else:\n return 'timestamp>=\"{0}\"'.format(self.timestamp)\n\n def GetFilterUpperBound(self, now):\n \"\"\"The log message filter which keeps out messages which are too new.\n\n Args:\n now: The current time, as a datetime object.\n\n Returns:\n The upper bound filter text that we should use.\n \"\"\"\n\n tzinfo = times.ParseDateTime(self.timestamp).tzinfo\n now = now.replace(tzinfo=tzinfo)\n upper_bound = now - datetime.timedelta(seconds=5)\n return 'timestamp<\"{0}\"'.format(\n times.FormatDateTime(upper_bound, '%Y-%m-%dT%H:%M:%S.%6f%Ez'))\n\n\nclass LogFetcher(object):\n \"\"\"A class which fetches job logs.\"\"\"\n\n LOG_BATCH_SIZE = 5000\n\n def __init__(self, job_id, polling_interval, allow_multiline_logs):\n self.job_id = job_id\n self.polling_interval = polling_interval\n self.log_position = LogPosition()\n self.allow_multiline_logs = allow_multiline_logs\n self.client = apis.GetClientInstance('ml', 'v1beta1')\n\n def GetLogs(self, log_position):\n \"\"\"Retrieve a batch of logs.\"\"\"\n filters = ['resource.type=\"ml_job\"',\n 'resource.labels.job_id=\"{0}\"'.format(self.job_id),\n log_position.GetFilterLowerBound(),\n log_position.GetFilterUpperBound(datetime.datetime.utcnow())]\n return logging_common.FetchLogs(\n log_filter=' AND '.join(filters),\n order_by='ASC',\n limit=self.LOG_BATCH_SIZE)\n\n def CheckJobFinished(self):\n \"\"\"Returns True if the job is finished.\"\"\"\n res = resources.REGISTRY.Parse(self.job_id, collection='ml.projects.jobs')\n req = self.client.MESSAGES_MODULE.MlProjectsJobsGetRequest(\n projectsId=res.projectsId, jobsId=res.jobsId)\n resp = self.client.projects_jobs.Get(req)\n return resp.endTime is not None\n\n def YieldLogs(self):\n \"\"\"Return log messages from the given job.\n\n YieldLogs returns messages from the given job, in time order. If the job\n is still running when we finish printing all the logs that exist, we will\n go into a polling loop where we check periodically for new logs. On the\n other hand, if the job has ended, YieldLogs will raise StopException when\n there are no more logs to display.\n\n The log message storage system is optimized for throughput, not for\n immediate, in-order visibility. It may take several seconds for a new log\n message to become visible. To work around this limitation, we refrain from\n printing out log messages which are newer than 5 seconds old.\n\n Log messages are sorted by timestamp. Log messages with the same timestamp\n are further sorted by their unique insertId. 
By using the combination of\n timestamp and insertId, we can ensure that each query returns a set of log\n messages that we haven't seen before.\n\n Yields:\n A dictionary containing the fields of the log message.\n \"\"\"\n\n periods_without_progress = 0\n last_progress_time = datetime.datetime.utcnow()\n while True:\n log_retriever = self.GetLogs(self.log_position)\n made_progress = False\n while True:\n try:\n log_entry = log_retriever.next()\n except StopIteration:\n break\n if not self.log_position.Update(log_entry.timestamp,\n log_entry.insertId):\n continue\n made_progress = True\n last_progress_time = datetime.datetime.utcnow()\n multiline_log_dict = self.EntryToDict(log_entry)\n if self.allow_multiline_logs:\n yield multiline_log_dict\n else:\n message_lines = multiline_log_dict['message'].splitlines()\n if not message_lines:\n message_lines = ['']\n for message_line in message_lines:\n single_line_dict = copy.deepcopy(multiline_log_dict)\n single_line_dict['message'] = message_line\n yield single_line_dict\n if made_progress:\n periods_without_progress = 0\n else:\n # If our last log query was the second in a row to make no progress,\n # and the last progress was more than 5 seconds ago, check the job\n # status to make sure that it's still running.\n # If it is not, terminate the stream-logs command.\n periods_without_progress += 1\n if periods_without_progress > 1:\n if last_progress_time + datetime.timedelta(\n seconds=5) <= datetime.datetime.utcnow():\n if self.CheckJobFinished():\n raise StopIteration\n time.sleep(self.polling_interval)\n\n def EntryToDict(self, log_entry):\n \"\"\"Convert a log entry to a dictionary.\"\"\"\n output = {}\n output['severity'] = log_entry.severity.name\n output['timestamp'] = log_entry.timestamp\n label_attributes = self.GetLabelAttributes(log_entry)\n output['task_name'] = label_attributes['task_name']\n if 'trial_id' in label_attributes:\n output['trial_id'] = label_attributes['trial_id']\n output['message'] = ''\n if log_entry.jsonPayload is not None:\n json_data = ToDict(log_entry.jsonPayload)\n # 'message' contains a free-text message that we want to pull out of the\n # JSON.\n if 'message' in json_data:\n if json_data['message']:\n output['message'] += json_data['message']\n del json_data['message']\n # Don't put 'levelname' in the JSON, since it duplicates the\n # information in log_entry.severity.name\n if 'levelname' in json_data:\n del json_data['levelname']\n output['json'] = json_data\n elif log_entry.textPayload is not None:\n output['message'] += str(log_entry.textPayload)\n elif log_entry.protoPayload is not None:\n output['json'] = encoding.MessageToDict(log_entry.protoPayload)\n return output\n\n def GetLabelAttributes(self, log_entry):\n \"\"\"Read the label attributes of the given log entry.\"\"\"\n label_attributes = {'task_name': 'unknown_task'}\n if not hasattr(log_entry, 'labels'):\n return label_attributes\n labels = ToDict(log_entry.labels)\n if labels.get('ml.googleapis.com/task_name') is not None:\n label_attributes['task_name'] = labels['ml.googleapis.com/task_name']\n if labels.get('ml.googleapis.com/trial_id') is not None:\n label_attributes['trial_id'] = labels['ml.googleapis.com/trial_id']\n return label_attributes\n\n\ndef ToDict(message):\n if not message:\n return {}\n if isinstance(message, dict):\n return message\n else:\n return encoding.MessageToDict(message)\n\n\ndef List():\n client = apis.GetClientInstance('ml', 'v1beta1')\n msgs = apis.GetMessagesModule('ml', 'v1beta1')\n req = 
msgs.MlProjectsJobsListRequest(\n projectsId=properties.VALUES.core.project.Get())\n return list_pager.YieldFromList(\n client.projects_jobs, req, field='jobs', batch_size_attribute='pageSize')\n\n\ndef Create(job):\n client = apis.GetClientInstance('ml', 'v1beta1')\n msgs = apis.GetMessagesModule('ml', 'v1beta1')\n req = msgs.MlProjectsJobsCreateRequest(\n projectsId=properties.VALUES.core.project.Get(),\n googleCloudMlV1beta1Job=job)\n resp = client.projects_jobs.Create(req)\n return resp\n\n\ndef BuildTrainingJob(path=None,\n module_name=None,\n job_name=None,\n trainer_uri=None,\n region=None,\n user_args=None):\n \"\"\"Builds a GoogleCloudMlV1beta1Job from a config file and/or flag values.\n\n Args:\n path: path to a yaml configuration file\n module_name: value to set for moduleName field (overrides yaml file)\n job_name: value to set for jobName field (overrides yaml file)\n trainer_uri: List of values to set for trainerUri field (overrides yaml\n file)\n region: compute region in which to run the job (overrides yaml file)\n user_args: [str]. A list of arguments to pass through to the job.\n (overrides yaml file)\n Returns:\n A constructed GoogleCloudMlV1beta1Job object.\n \"\"\"\n msgs = apis.GetMessagesModule('ml', 'v1beta1')\n request_class = msgs.GoogleCloudMlV1beta1Job\n obj = request_class()\n if path:\n with files.Context(open(path)) as config_file:\n data = yaml.load(config_file)\n if data:\n obj = encoding.DictToMessage(data, request_class)\n if not obj.trainingInput:\n obj.trainingInput = msgs.GoogleCloudMlV1beta1TrainingInput()\n if module_name:\n obj.trainingInput.pythonModule = module_name\n if user_args:\n obj.trainingInput.args = user_args\n if job_name:\n obj.jobId = job_name\n if trainer_uri:\n obj.trainingInput.packageUris = trainer_uri\n if region:\n obj.trainingInput.region = region\n return obj\n\n\ndef BuildBatchPredictionJob(job_name=None,\n model_name=None,\n version_name=None,\n input_paths=None,\n data_format=None,\n output_path=None,\n region=None):\n \"\"\"Builds a GoogleCloudMlV1beta1Job for batch prediction from flag values.\n\n Args:\n job_name: value to set for jobName field\n model_name: value to set for modelName field\n version_name: value to set for versionName field\n input_paths: list of input files\n data_format: format of the input files\n output_path: single value for the output location\n region: compute region in which to run the job\n Returns:\n A constructed GoogleCloudMlV1beta1Job object.\n \"\"\"\n msgs = apis.GetMessagesModule('ml', 'v1beta1')\n request_class = msgs.GoogleCloudMlV1beta1Job\n obj = request_class()\n obj.predictionInput = msgs.GoogleCloudMlV1beta1PredictionInput()\n\n obj.jobId = job_name\n project_id = properties.VALUES.core.project.Get()\n if version_name:\n # pylint: disable=g-backslash-continuation\n obj.predictionInput.versionName = 'projects/{0}/models/{1}/versions/{2}'. 
\\\n format(project_id, model_name, version_name)\n else:\n # pylint: disable=g-backslash-continuation\n obj.predictionInput.modelName = \\\n 'projects/{0}/models/{1}'.format(project_id, model_name)\n obj.predictionInput.inputPaths = input_paths\n data_format_dict = {'TEXT': msgs.GoogleCloudMlV1beta1PredictionInput.\n DataFormatValueValuesEnum.TEXT,\n 'TF_RECORD': msgs.GoogleCloudMlV1beta1PredictionInput.\n DataFormatValueValuesEnum.TF_RECORD}\n obj.predictionInput.dataFormat = data_format_dict[data_format]\n obj.predictionInput.outputPath = output_path\n obj.predictionInput.region = region\n return obj\n", "sub_path": "files/home/gcloud/google-cloud-sdk/lib/googlecloudsdk/api_lib/ml/jobs.py", "file_name": "jobs.py", "file_ext": "py", "file_size_in_byte": 13867, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "googlecloudsdk.core.apis.GetClientInstance", "line_number": 32, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.apis", "line_number": 32, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.resources.REGISTRY.Parse", "line_number": 33, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.resources.REGISTRY", "line_number": 33, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.core.resources", "line_number": 33, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.apis.GetClientInstance", "line_number": 41, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.apis", "line_number": 41, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.resources.REGISTRY.Parse", "line_number": 42, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.resources.REGISTRY", "line_number": 42, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.core.resources", "line_number": 42, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.util.times.ParseDateTime", "line_number": 112, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.util.times", "line_number": 112, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 114, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.util.times.FormatDateTime", "line_number": 116, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.util.times", "line_number": 116, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.apis.GetClientInstance", "line_number": 129, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.apis", "line_number": 129, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 136, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.api_lib.logging.common.FetchLogs", "line_number": 137, "usage_type": "call"}, {"api_name": "googlecloudsdk.api_lib.logging.common", "line_number": 137, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.resources.REGISTRY.Parse", "line_number": 144, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.resources.REGISTRY", "line_number": 144, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.core.resources", "line_number": 144, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 174, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 174, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 187, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 187, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 196, "usage_type": 
"call"}, {"api_name": "datetime.timedelta", "line_number": 208, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 209, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 209, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 212, "usage_type": "call"}, {"api_name": "apitools.base.py.encoding.MessageToDict", "line_number": 240, "usage_type": "call"}, {"api_name": "apitools.base.py.encoding", "line_number": 240, "usage_type": "name"}, {"api_name": "apitools.base.py.encoding.MessageToDict", "line_number": 262, "usage_type": "call"}, {"api_name": "apitools.base.py.encoding", "line_number": 262, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.apis.GetClientInstance", "line_number": 266, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.apis", "line_number": 266, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.apis.GetMessagesModule", "line_number": 267, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.apis", "line_number": 267, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.properties.VALUES.core.project.Get", "line_number": 269, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.properties.VALUES", "line_number": 269, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.core.properties", "line_number": 269, "usage_type": "name"}, {"api_name": "apitools.base.py.list_pager.YieldFromList", "line_number": 270, "usage_type": "call"}, {"api_name": "apitools.base.py.list_pager", "line_number": 270, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.apis.GetClientInstance", "line_number": 275, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.apis", "line_number": 275, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.apis.GetMessagesModule", "line_number": 276, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.apis", "line_number": 276, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.properties.VALUES.core.project.Get", "line_number": 278, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.properties.VALUES", "line_number": 278, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.core.properties", "line_number": 278, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.apis.GetMessagesModule", "line_number": 304, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.apis", "line_number": 304, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.util.files.Context", "line_number": 308, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.util.files", "line_number": 308, "usage_type": "name"}, {"api_name": "yaml.load", "line_number": 309, "usage_type": "call"}, {"api_name": "apitools.base.py.encoding.DictToMessage", "line_number": 311, "usage_type": "call"}, {"api_name": "apitools.base.py.encoding", "line_number": 311, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.apis.GetMessagesModule", "line_number": 347, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.apis", "line_number": 347, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.properties.VALUES.core.project.Get", "line_number": 353, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.properties.VALUES", "line_number": 353, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.core.properties", "line_number": 353, "usage_type": "name"}]} +{"seq_id": "558239964", "text": "from django.conf import settings\r\nfrom django.conf.urls.static import static\r\nfrom django.conf.urls import url\r\nfrom django.contrib import admin\r\nfrom 
dapp.views import *\r\n\r\nurlpatterns = [\r\n url(r'^admin/', admin.site.urls),\r\n url(r'^$', home, name='home'),\r\n url(r'delete/(\\d+)/$', delete, name='del'),\r\n url(r'edit/(\\d+)/$', edit, name='edit'),\r\n url(r'profile1/(\\d+)/$', profile1, name='profile1'),\r\n url(r'^search/$', search, name='search'),\r\n url(r'^friends/$', friends, name='friend'),\r\n url(r'^profile/$', profile, name='profile'),\r\n url(r'^password/$', password, name='pass'),\r\n url(r'^register/$', register, name='register'),\r\n url(r'^check/$', check, name='check'),\r\n url(r'^index/$', index,name='index'),\r\n url(r'^logout/$', logout, name='logout'),\r\n\r\n]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\r\n", "sub_path": "dk_loginn/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 869, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.static.static", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 22, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 22, "usage_type": "attribute"}]} +{"seq_id": "268530997", "text": "# 导入库\nimport requests\nimport parsel\nimport os\nfrom multiprocessing.dummy import Pool\n\n\nclass JinyongSpider(object):\n\n def __init__(self):\n self.headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'\n }\n # 保存子页面的url\n self.titles = []\n self.chapter_links = []\n\n # 创建一个文件夹\n if not os.path.exists('../鹿鼎记'):\n os.mkdir('../鹿鼎记')\n\n\n def parse_home_page(self, url='http://jinyong.zuopinj.com/3/'):\n # 发起请求\n response = requests.get(url, headers=self.headers)\n # 修改编码\n response.encoding = response.apparent_encoding\n # 解析数据\n selector = parsel.Selector(response.text)\n # 获取数据\n title = selector.xpath('//div[@class=\"book_list\"]/ul/li/a/@title').extract()\n chapter_link = selector.xpath('//div[@class=\"book_list\"]/ul/li/a/@href').extract()\n\n # 追加数据\n self.titles.extend(title)\n self.chapter_links.extend(chapter_link)\n\n\n def parse_detail_page(self, zip_list):\n print(f'正在爬取{zip_list[0]}章节的小说!')\n\n # 发起请求\n response = 
requests.get(url=zip_list[1])\n # 修改编码\n response.encoding = response.apparent_encoding\n # 解析数据\n selector = parsel.Selector(response.text)\n # 获取数据\n noval_text = selector.xpath('//div[@id=\"htmlContent\"]//text()').extract()\n noval_text = '\\n'.join(noval_text)\n\n # 写出数据\n with open(f'../鹿鼎记/{zip_list[0]}.txt', 'w', encoding='utf-8') as fp:\n print(f'正在写入{zip_list[0]}章')\n fp.write(noval_text)\n fp.close()\n print('写入完毕,关闭文件!')\n\n\n def multiprocees_function(self):\n # 实例化线程,一个进程开启多个线程\n pool = Pool(10)\n zip_list = list(zip(self.titles, self.chapter_links))\n # map操作(将zip_list中的每一个列表元素map到get_video_data的函数中,parse_detail_page这个函数接收的是列表元素)\n pool.map(self.parse_detail_page, zip_list)\n # 关闭线程池\n pool.close()\n # 主线程等待子线程结束之后再结束\n pool.join()\n\n\nif __name__ == '__main__':\n # 实例化对象\n jinyongspider = JinyongSpider()\n # 先获取章节页面链接\n jinyongspider.parse_home_page(url='http://jinyong.zuopinj.com/3/')\n # 通过线程池运行爬虫\n jinyongspider.multiprocees_function()", "sub_path": "python/jinyongSpider.py", "file_name": "jinyongSpider.py", "file_ext": "py", "file_size_in_byte": 2658, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.exists", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 20, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 25, "usage_type": "call"}, {"api_name": "parsel.Selector", "line_number": 29, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 43, "usage_type": "call"}, {"api_name": "parsel.Selector", "line_number": 47, "usage_type": "call"}, {"api_name": "multiprocessing.dummy.Pool", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "161464967", "text": "import sys\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom dust_blorentz_ode import streamline\n\nfigfile = sys.argv[0].replace('.py', '.pdf')\n\n# Impact parameter\nY0 = 0.00\nZ0 = 0.00\nthB_degrees = 25\nMACH_ALFVEN = 4.0\nstream = streamline(Y0=Y0, Z0=Z0, thB=np.radians(thB_degrees), tstop=150, X0=10., n=2001, LFAC=10, ALPHA_DRAG=0.5, V_TURB_0=1.0/MACH_ALFVEN)\nsns.set_style('white')\nsns.set_color_codes()\nfig, (ax, axp) = plt.subplots(2, 1, figsize=(4, 6))\nax.plot(stream['t'], stream['u'], label='$U$')\nax.plot(stream['t'], stream['v'], label='$V$')\nax.plot(stream['t'], stream['w'], label='$W$')\nax.plot(stream['t'], stream['x'], label='$X$')\nax.plot(stream['t'], stream['y'], label='$Y$')\nax.plot(stream['t'], stream['z'], label='$Z$')\nax.axhspan(0.0, 1.0, color='k', alpha=0.1)\nlabel = fr\"$\\theta_B = {thB_degrees:.1f}^\\circ$, \"\nlabel += f\"$y_0 = {Y0:.2f}$, $z_0 = {Z0:.2f}$\"\nax.legend(title=label, ncol=2)\nax.set(xlabel='Time', ylim=[-2, 3], xlim=[0, 50])\n\naxp.plot(stream['x'], stream['u'], label='$(X, U)$')\naxp.plot(stream['y'], stream['v'], label='$(Y, V)$')\naxp.plot(stream['z'], stream['w'], label='$(Z, W)$')\naxp.axhline(0, color='k', lw=0.5)\naxp.legend(title='Phase space')\naxp.set(xlabel='$X$, $Y$, $Z$', ylabel='$U$, $V$, $W$',\n xlim=[-3, 3], ylim=[-1.5, 1.5])\n\nsns.despine(trim=True)\nfig.tight_layout()\nfig.savefig(figfile)\nprint(figfile, end='')\n", "sub_path": "Dust-wave/dust-lorentz-3d.py", "file_name": "dust-lorentz-3d.py", "file_ext": "py", "file_size_in_byte": 1402, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.argv", "line_number": 7, 
"usage_type": "attribute"}, {"api_name": "dust_blorentz_ode.streamline", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 14, "usage_type": "call"}, {"api_name": "seaborn.set_style", "line_number": 15, "usage_type": "call"}, {"api_name": "seaborn.set_color_codes", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "seaborn.despine", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "461835402", "text": "# -*- coding: utf-8 -*-\n'''\nCreated on Aug 12, 2017\n\n@author: tom\n'''\nfrom __future__ import absolute_import, division, unicode_literals, print_function\nimport logging\nimport random\nimport os\nimport ujson\nfrom second_hand_cars.dataset.data_collection.crawler_status import CrawlerStatus\n\n\ndef main():\n crawler_status_folder = '/home/tom/workspace/SecondHandCars/data/meta'\n meta_folder = '/home/tom/workspace/SecondHandCars/data/meta'\n train_test_ratio = 4.\n\n random.seed(3)\n\n status = CrawlerStatus(crawler_status_folder)\n status.load()\n\n num_samples = status.size\n trainset_size = int(float(num_samples) * train_test_ratio / (train_test_ratio + 1))\n sample_pairs = status.samples.items()\n random.shuffle(sample_pairs)\n trainset = dict(sample_pairs[:trainset_size])\n testset = dict(sample_pairs[trainset_size:])\n\n trainset_path = os.path.join(meta_folder, 'trainset.json')\n with open(trainset_path, 'w') as f:\n ujson.dump(trainset, f, indent=4)\n\n testset_path = os.path.join(meta_folder, 'testset.json')\n with open(testset_path, 'w') as f:\n ujson.dump(testset, f, indent=4)\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n main()\n\n", "sub_path": "second_hand_cars/dataset/dataset_generation/dataset_generator.py", "file_name": "dataset_generator.py", "file_ext": "py", "file_size_in_byte": 1204, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "random.seed", "line_number": 20, "usage_type": "call"}, {"api_name": "second_hand_cars.dataset.data_collection.crawler_status.CrawlerStatus", "line_number": 22, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "ujson.dump", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "ujson.dump", "line_number": 38, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "486190154", "text": "from pathlib import Path\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nimport fiftyone\nfrom PIL import Image\n\nNUM_WORKERS = 4\n\n\nclass TensorImgSet(Dataset):\n \"\"\"TensorDataset with support of transforms.\n \"\"\"\n\n def __init__(self, tensors, transform=None):\n self.imgs = tensors[0]\n self.targets = tensors[1]\n self.tensors = tensors\n self.transform = transform\n self.len = len(self.imgs)\n\n def __getitem__(self, index):\n x = self.imgs[index]\n if self.transform:\n x = self.transform(x)\n y = self.targets[index]\n return x, y\n\n def __len__(self):\n return 
self.len\n\nclass FiftyOneTorchDataset(Dataset):\n \"\"\"A class to construct a PyTorch dataset from a FiftyOne dataset.\n \n Args:\n fiftyone_dataset: a FiftyOne dataset or view that will be used for \n training or testing\n transforms (None): a list of PyTorch transforms to apply to images \n and targets when loading\n gt_field (\"ground_truth\"): the name of the field in fiftyone_dataset \n that contains the desired labels to load\n classes (None): a list of class strings that are used to define the \n mapping between class names and indices. If None, it will use \n all classes present in the given fiftyone_dataset.\n \"\"\"\n\n def __init__(\n self,\n fiftyone_dataset,\n transforms=None,\n gt_field=\"positive_labels\",\n classes=None,\n ):\n self.samples = fiftyone_dataset\n self.transforms = transforms\n self.gt_field = gt_field\n\n self.img_paths = self.samples.values(\"filepath\")\n\n self.classes = classes\n if not self.classes:\n # Get list of distinct labels that exist in the view\n self.classes = self.samples.default_classes\n\n self.labels_map_rev = {c: i for i, c in enumerate(self.classes)}\n \n def __getitem__(self, idx):\n img_path = self.img_paths[idx]\n sample = self.samples[img_path]\n img = Image.open(img_path).convert(\"RGB\")\n\n try:\n labels = sample[self.gt_field].classifications\n print(f'{len(labels)}')\n except:\n labels = sample['negative_labels'].classifications\n print(f'only negative {len(labels)}')\n \n label = labels[0].label\n target = self.labels_map_rev[label]\n\n if self.transforms is not None:\n img = self.transforms(img)\n\n return img, target\n\n def __len__(self):\n return len(self.img_paths)\n\n def get_classes(self):\n return self.classes\n\ndef get_open_images(num_classes=100, dataset_dir=\"./data\", batch_size=128):\n\n train_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(\n (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ])\n\n fo_trainset = fiftyone.zoo.load_zoo_dataset(\"open-images-v7\", split=\"train\", label_types=[\"classifications\"],max_samples=100)\n trainset = FiftyOneTorchDataset(fo_trainset,train_transform)\n\n train_loader = torch.utils.data.DataLoader(trainset,\n batch_size=batch_size,\n num_workers=NUM_WORKERS,\n pin_memory=True, shuffle=True)\n\n fo_testset = fiftyone.zoo.load_zoo_dataset(\"open-images-v7\", split=\"validation\", label_types=[\"classifications\"],max_samples=100)\n test_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(\n (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ])\n testset = FiftyOneTorchDataset(fo_testset,test_transform)\n test_loader = torch.utils.data.DataLoader(testset,\n batch_size=batch_size,\n num_workers=NUM_WORKERS,\n pin_memory=True, shuffle=False)\n \n return train_loader, test_loader\n\ndef load_cifar_10_1():\n # @article{recht2018cifar10.1,\n # author = {Benjamin Recht and Rebecca Roelofs and Ludwig Schmidt\n # and Vaishaal Shankar},\n # title = {Do CIFAR-10 Classifiers Generalize to CIFAR-10?},\n # year = {2018},\n # note = {\\url{https://arxiv.org/abs/1806.00451}},\n # }\n # Original Repo: https://github.com/modestyachts/CIFAR-10.1\n data_path = Path(__file__).parent.joinpath(\"cifar10_1\")\n label_filename = data_path.joinpath(\"v6_labels.npy\").resolve()\n imagedata_filename = data_path.joinpath(\"v6_data.npy\").resolve()\n print(f\"Loading labels from file {label_filename}\")\n labels = np.load(label_filename)\n print(f\"Loading image 
data from file {imagedata_filename}\")\n imagedata = np.load(imagedata_filename)\n return imagedata, torch.Tensor(labels).long()\n\n\ndef get_cifar(num_classes=100, dataset_dir=\"./data\", batch_size=128,\n use_cifar_10_1=False):\n\n if num_classes == 10:\n print(\"Loading CIFAR10...\")\n dataset = torchvision.datasets.CIFAR10\n normalize = transforms.Normalize(\n (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n else:\n print(\"Loading CIFAR100...\")\n dataset = torchvision.datasets.CIFAR100\n normalize = transforms.Normalize(\n mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])\n\n train_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n\n trainset = dataset(root=dataset_dir, train=True,\n download=True, transform=train_transform)\n\n test_transform = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n\n # Use the normal cifar 10 testset or a new one to test true generalization\n if use_cifar_10_1 and num_classes == 10:\n imagedata, labels = load_cifar_10_1()\n testset = TensorImgSet((imagedata, labels), transform=test_transform)\n else:\n testset = dataset(root=dataset_dir, train=False,\n download=True,\n transform=test_transform)\n\n train_loader = torch.utils.data.DataLoader(trainset,\n batch_size=batch_size,\n num_workers=NUM_WORKERS,\n pin_memory=True, shuffle=True)\n test_loader = torch.utils.data.DataLoader(testset,\n batch_size=batch_size,\n num_workers=NUM_WORKERS,\n pin_memory=True, shuffle=False)\n return train_loader, test_loader\n", "sub_path": "data_loader.py", "file_name": "data_loader.py", "file_ext": "py", "file_size_in_byte": 6984, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 35, "usage_type": "name"}, {"api_name": "torchvision.transforms", "line_number": 58, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 73, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 73, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 98, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 98, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomCrop", "line_number": 99, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 99, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 100, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 100, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 101, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 101, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 102, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 102, "usage_type": "name"}, {"api_name": "fiftyone.zoo.load_zoo_dataset", "line_number": 106, "usage_type": "call"}, {"api_name": "fiftyone.zoo", "line_number": 106, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 109, "usage_type": "attribute"}, {"api_name": "fiftyone.zoo.load_zoo_dataset", "line_number": 114, "usage_type": "call"}, {"api_name": "fiftyone.zoo", "line_number": 114, 
"usage_type": "attribute"}, {"api_name": "torchvision.transforms.Compose", "line_number": 115, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 115, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 116, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 116, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 117, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 117, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 121, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 144, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 152, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 153, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 153, "usage_type": "name"}, {"api_name": "torchvision.datasets", "line_number": 157, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 158, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 158, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 161, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 161, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomCrop", "line_number": 162, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 162, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 163, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 163, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 164, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 164, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 171, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 171, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 172, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 172, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 185, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 189, "usage_type": "attribute"}]} +{"seq_id": "600930981", "text": "#!/usr/bin/env python\n\nimport copy\nimport json\nimport os\n\n__all__ = ['ValueBag']\n\ndef realvalue(k, v):\n if k == k.upper() and isinstance(v, (str, unicode)):\n v = os.path.expanduser(v)\n v = os.path.expandvars(v)\n \n return v\n\nclass xdict(dict):\n def __init__(self, d = {}, **kwargs):\n dict.__init__(self)\n \n if d:\n self.update(d, **kwargs)\n \n def __str__(self):\n return dict.__str__(self.toDict()) if self else ''\n \n def __unicode__(self):\n return self.__str__()\n \n #def __repr__(self):\n # return '%s(%s)' % (self.__class__.__name__, dict.__repr__(self))\n \n def __getitem__(self, k):\n if isinstance(k, 
str) and '.' in k:\n k, rest = k.split('.', 1)\n return self[k][rest]\n \n try:\n return dict.__getitem__(self, k)\n except KeyError as e:\n try:\n return object.__getattribute__(self, k)\n except AttributeError:\n raise e\n #v = self[k] = type(self)()\n #return v\n \n def __getattr__(self, k):\n try:\n return object.__getattribute__(self, k)\n except AttributeError as e:\n if k.startswith('__'):\n raise e\n try:\n return self[k]\n except KeyError:\n raise e\n \n def get(self, k, default = None):\n try:\n return self[k]\n except:\n return default\n \n def __setitem__(self, k, v):\n if isinstance(k, str) and '.' in k:\n k, rest = k.split('.', 1)\n try:\n child = self[k]\n except KeyError:\n self[k] = child = type(self)()\n child[rest] = v\n else:\n if isinstance(v, dict) and not isinstance(v, xdict):\n v = xdict(v)\n else:\n v = realvalue(k, v)\n dict.__setitem__(self, k, v)\n \n def __setattr__(self, k, v):\n try:\n object.__getattribute__(self, k)\n except AttributeError:\n self[k] = v\n else:\n object.__setattr__(self, k, realvalue(k, v))\n \n def update(self, E, **F):\n for k in F:\n self[k] = F[k]\n \n if hasattr(E, 'keys'):\n for k in E:\n self[k] = E[k]\n else:\n for (k, v) in E:\n self[k] = v\n\n def __copy__(self):\n return xdict(super(xdict, self).copy())\n \n def __deepcopy__(self, memo = None):\n return xdict(copy.deepcopy(super(xdict, self), memo))\n \n def __contains__(self, k):\n try:\n return self.has_key(k) or hasattr(self, k)\n except:\n return False\n \n def has_key(self, k):\n if isinstance(k, str) and '.' in k:\n k, rest = k.split('.', 1)\n if dict.has_key(self, k):\n child = self[k]\n if not isinstance(child, dict):\n return False\n return child.has_key(rest)\n else:\n return False\n \n return dict.has_key(self, k)\n \n def __delattr__(self, k):\n if isinstance(k, (str, unicode)) and '.' in k:\n k, rest = k.split('.', 1)\n child = self[k]\n xdict.__delattr__(child, rest)\n else:\n try:\n object.__getattribute__(self, k)\n except AttributeError:\n try:\n del self[k]\n except KeyError:\n raise AttributeError(k)\n else:\n object.__delattr__(self, k)\n \n def toDict(self):\n return to_dict(self)\n \n @staticmethod\n def fromDict(d):\n return xdict(d)\n \ndef to_dict(x):\n if isinstance(x, xdict):\n return dict((k, to_dict(v)) for k, v in x.items())\n elif isinstance(x, (tuple, list)):\n return type(x)(to_dict(v) for v in x)\n else:\n return x\n \ntry:\n try:\n import json\n except ImportError:\n import simplejson as json\n \n class _JSONEncoder(json.encoder.JSONEncoder):\n def default(self, o):\n return str(o)\n \n def toJSON(self, **kwargs):\n return json.dumps(self, cls = _JSONEncoder, **kwargs)\n\n def fromJSON(s, **kwargs):\n return xdict(json.loads(s))\n \n xdict.toJSON = toJSON\n xdict.fromJSON = staticmethod(fromJSON)\n \n \nexcept ImportError:\n pass\n\n# class ValueBag(dict):\n# def __init__(self, d = {}):\n# super(ValueBag, self).__init__()\n# \n# self.update(d)\n# \n# def __setattr__(self, name, value):\n# if name.startswith('__'):\n# self.__dict__[name] = value\n# else:\n# self[name] = value\n# \n# def __setitem__(self, name, value):\n# if '.' 
in name:\n# names = name.split('.')\n# s = self\n# for name in names[:-1]:\n# s = s[name]\n# name = names[-1]\n# else:\n# s = self\n# \n# if name == name.upper():\n# if isinstance(value, (str, unicode)):\n# value = os.path.expanduser(value)\n# value = os.path.expandvars(value)\n# \n# if isinstance(value, dict):\n# value = ValueBag(value)\n# \n# super(ValueBag, s).__setitem__(name, value)\n# \n# def __getattr__(self, name):\n# return self[name]\n# \n# def __getitem__(self, name):\n# if '.' in name:\n# s = self\n# for name in name.split('.'):\n# s = s[name]\n# return s\n# \n# \n# if self.has_key(name):\n# return super(ValueBag, self).__getitem__(name)\n# \n# if not name.startswith('__'):\n# setattr(self, name, {})\n# return super(ValueBag, self).__getitem__(name)\n# \n# def __copy__(self):\n# return ValueBag(super(ValueBag, self).copy())\n# \n# def __deepcopy__(self, memo = None):\n# return ValueBag(copy.deepcopy(dict(self), memo))\n# \n# def update(self, E, **F):\n# for k in F:\n# self[k] = F[k]\n# \n# if hasattr(E, 'keys'):\n# for k in E:\n# self[k] = E[k]\n# else:\n# for (k, v) in E:\n# self[k] = v\n# \n# def get(self, key, default = None):\n# ret = self[key]\n# return ret if ret else default\n# \n# def has_key(self, key):\n# if '.' in key:\n# keys = key.split('.')\n# s = self\n# for key in keys:\n# if not isinstance(s, dict) or not super(ValueBag, s).has_key(key):\n# return False\n# s = s[key]\n# return True\n# \n# return super(ValueBag, self).has_key(key)\n\nValueBag = xdict\n\nclass _JSONEncoder(json.encoder.JSONEncoder):\n def default(self, o):\n return str(o)\n \ndef dumps(value, indent = 4):\n return json.dumps(value, indent = indent, cls = _JSONEncoder)\n\ndef dump(value, fp, indent = 4):\n fp.write(dumps(value, indent = indent))\n\ndef loads(s):\n return ValueBag(json.loads(s))\n\ndef load(fp):\n return ValueBag(json.load(fp))\n \nif __name__ == '__main__':\n import unittest\n \n class ValueBagTest(unittest.TestCase):\n def setUp(self):\n pass\n \n def tearDown(self):\n pass\n \n def test_init(self):\n obj = ValueBag({'a':{'b':{'c':1, 'd':2}}})\n self.assertTrue(isinstance(obj.a, ValueBag))\n self.assertTrue(isinstance(obj.a.b, ValueBag))\n self.assertTrue(isinstance(obj.a.b.c, int))\n #self.assertEqual(obj.a.b.root(), obj)\n \n def test_setattr(self):\n obj = ValueBag()\n obj.abc = 'abc'\n obj.ABC = '~/abc'\n obj.__abc = 'abc'\n self.assertEqual('abc', obj.abc)\n self.assertNotEquals('~/abc', obj.ABC)\n \n obj.dict = {'a':1, 'rec': {'z': 2}}\n self.assertEqual(obj.dict.a, 1)\n self.assertEqual(obj.dict.rec.z, 2)\n \n \n def test_recursive(self):\n obj = ValueBag()\n setattr(obj, \"a.b.c\", 1)\n self.assertEqual(obj.a.b.c, 1)\n self.assertEqual(obj['a.b.c'], 1)\n self.assertEqual(obj.get('a.b.c'), 1)\n\n obj['foo.bar'] = 2\n self.assertEqual(obj.foo.bar, 2)\n self.assertEqual(obj['foo.bar'], 2)\n self.assertEqual(obj.get('foo.bar'), 2)\n \n def test_has_key(self):\n obj = ValueBag()\n setattr(obj, \"a.b.c\", 1)\n\n self.assertEqual(1, obj.a.b.c)\n self.assertTrue(obj.has_key('a'))\n self.assertTrue(obj.has_key('a.b'))\n self.assertTrue(obj.has_key('a.b.c'))\n self.assertFalse(obj.has_key('d'))\n self.assertFalse(obj.has_key('d.d'))\n \n unittest.main()\n \n", "sub_path": "Pyscript/utils/valuebag.py", "file_name": "valuebag.py", "file_ext": "py", "file_size_in_byte": 9203, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.expanduser", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.expandvars", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 102, "usage_type": "call"}, {"api_name": "simplejson.encoder", "line_number": 160, "usage_type": "attribute"}, {"api_name": "simplejson.dumps", "line_number": 165, "usage_type": "call"}, {"api_name": "simplejson.loads", "line_number": 168, "usage_type": "call"}, {"api_name": "simplejson.encoder", "line_number": 262, "usage_type": "attribute"}, {"api_name": "simplejson.dumps", "line_number": 267, "usage_type": "call"}, {"api_name": "simplejson.loads", "line_number": 273, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 276, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 281, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 331, "usage_type": "call"}]} +{"seq_id": "16586838", "text": "'''\nSI 507 F19, homework 11: Basic SQL statements - EC 2\nDeveloped by Gui Ruggiero\n'''\n\nimport sqlite3 as sqlite\nimport sys\n\ninitial_shipcity = sys.argv[1]\nchars_employee_name = sys.argv[2]\n\nconn = sqlite.connect(\"Northwind_small.sqlite\")\ncur = conn.cursor()\n\nprint(\"\\nSearched for\")\nprint(\"1) ShipCity starts with:\", initial_shipcity)\nprint(\"2) The number of characters in employee's first name:\", chars_employee_name)\n\nstatement = \"SELECT COUNT([Order].Id) \"\nstatement += \"FROM [Order] \"\nstatement += \"JOIN Employee ON [Order].EmployeeId = Employee.Id \"\nstatement += \"WHERE ShipCity LIKE '\" + initial_shipcity + \"%' \"\nstatement += \"AND LENGTH(FirstName) = \" + str(chars_employee_name)\ncur.execute(statement)\n\nfor row in cur:\n orders = row[0]\nprint(\"The number of orders:\", orders)\n\nprint(\"\\n*Reference: the\", orders, \"orders captured by the query are as follows.\")\n\nstatement = \"SELECT ShipCity, FirstName \"\nstatement += \"FROM [Order] \"\nstatement += \"JOIN Employee ON [Order].EmployeeId = Employee.Id \"\nstatement += \"WHERE ShipCity LIKE '\" + initial_shipcity + \"%' \"\nstatement += \"AND LENGTH(FirstName) = \" + str(chars_employee_name)\ncur.execute(statement)\n\nfor row in cur:\n print(row)\n\nprint(\"\\n\")\n\nconn.close()", "sub_path": "week11/hw10_ec2.py", "file_name": "hw10_ec2.py", "file_ext": "py", "file_size_in_byte": 1220, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "573698553", "text": "from fastapi import APIRouter, Response, status\nfrom pydantic import BaseModel\nfrom io import StringIO\nfrom app.db import get_conn\nimport psycopg2\nimport traceback\nimport requests\nimport csv\nimport json\n\n\nurl = \"https://covid.ourworldindata.org/data/owid-covid-data.csv\"\n\n\nclass Payload(BaseModel):\n action: str\n\n\nclass Args(BaseModel):\n payload: Payload\n\n\nrefresh_owid = APIRouter()\n\n\n@refresh_owid.post(\"/\", response_model=None)\nasync def handle_refresh_request(args: Args, response: Response) -> None:\n assert args.payload.action == \"refresh\"\n\n r = requests.get(url)\n r.raise_for_status()\n f = StringIO(r.text)\n\n # iso_code\n # continent\n # location\n # date\n # total_cases\n # new_cases\n # new_cases_smoothed\n # total_deaths\n # 
new_deaths\n # new_deaths_smoothed\n # total_cases_per_million\n # new_cases_per_million\n # new_cases_smoothed_per_million\n # total_deaths_per_million\n # new_deaths_per_million\n # new_deaths_smoothed_per_million\n # reproduction_rate\n # icu_patients\n # icu_patients_per_million\n # hosp_patients\n # hosp_patients_per_million\n # weekly_icu_admissions\n # weekly_icu_admissions_per_million\n # weekly_hosp_admissions\n # weekly_hosp_admissions_per_million\n # new_tests\n # total_tests\n # total_tests_per_thousand\n # new_tests_per_thousand\n # new_tests_smoothed\n # new_tests_smoothed_per_thousand\n # positive_rate\n # tests_per_case\n # tests_units\n # total_vaccinations\n # new_vaccinations\n # total_vaccinations_per_hundred\n # new_vaccinations_per_million\n # stringency_index\n # population\n # population_density\n # median_age\n # aged_65_older\n # aged_70_older\n # gdp_per_capita\n # extreme_poverty\n # cardiovasc_death_rate\n # diabetes_prevalence\n # female_smokers\n # male_smokers\n # handwashing_facilities\n # hospital_beds_per_thousand\n # life_expectancy\n # human_development_index\n\n lookup = {}\n\n csv_reader = csv.DictReader(f)\n for row in csv_reader:\n\n iso_code = row[\"iso_code\"]\n vaccinations = row[\"total_vaccinations\"]\n hospitalisations = row[\"hosp_patients\"]\n # icu_patients = row[\"icu_patients\"]\n # total_tests = row[\"total_tests\"]\n # positive_rate = row[\"positive_rate\"]\n\n if not vaccinations and not hospitalisations:\n continue\n\n if iso_code not in lookup:\n lookup[iso_code] = []\n\n entry = {\n \"date\": row[\"date\"]\n }\n\n if vaccinations:\n entry[\"vaccinations\"] = vaccinations\n\n if hospitalisations:\n entry[\"hospitalisations\"] = hospitalisations\n\n # if icu_patients:\n # entry[\"icu_patients\"] = icu_patients\n #\n # if total_tests:\n # entry[\"total_tests\"] = total_tests\n #\n # if positive_rate:\n # entry[\"positive_rate\"] = positive_rate\n\n lookup[iso_code].append(entry)\n\n # Connect to database.\n conn = None\n try:\n conn = get_conn()\n cur = conn.cursor()\n\n for iso_code in lookup:\n time_series = lookup[iso_code]\n\n # Split the time-series for two db fields.\n vaccinations_time_series = []\n hospitalisations_time_series = []\n for item in time_series:\n if \"vaccinations\" in item:\n vaccinations_time_series.append({\n \"date\": item[\"date\"],\n \"count\": int(float(item[\"vaccinations\"]))\n })\n if \"hospitalisations\" in item:\n hospitalisations_time_series.append({\n \"date\": item[\"date\"],\n \"count\": int(float(item[\"hospitalisations\"]))\n })\n\n # Update vaccinations.\n if len(vaccinations_time_series) > 0:\n cur.execute(\"\"\"\n UPDATE country SET\n covid_vaccinations = %s,\n covid_vaccinations_time_series = %s\n WHERE iso_alpha3 = %s\"\"\", (vaccinations_time_series[-1][\"count\"],\n json.dumps(vaccinations_time_series),\n iso_code))\n\n # Update hospitalisations.\n if len(hospitalisations_time_series) > 0:\n cur.execute(\"\"\"\n UPDATE country SET\n covid_hospitalisations = %s,\n covid_hospitalisations_time_series = %s\n WHERE iso_alpha3 = %s\"\"\", (int(hospitalisations_time_series[-1][\"count\"]),\n json.dumps(hospitalisations_time_series),\n iso_code))\n\n conn.commit()\n cur.close()\n return None\n\n except psycopg2.DatabaseError:\n print(f\"EXCEPTION\\n{traceback.format_exc()}\")\n\n except:\n print(f\"EXCEPTION\\n{traceback.format_exc()}\")\n\n finally:\n if conn is not None:\n conn.close()\n\n response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR\n return None\n", "sub_path": 
"app/routes/refresh_owid.py", "file_name": "refresh_owid.py", "file_ext": "py", "file_size_in_byte": 5218, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pydantic.BaseModel", "line_number": 15, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 19, "usage_type": "name"}, {"api_name": "fastapi.APIRouter", "line_number": 23, "usage_type": "call"}, {"api_name": "fastapi.Response", "line_number": 27, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 30, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 32, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 91, "usage_type": "call"}, {"api_name": "app.db.get_conn", "line_number": 131, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 159, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 169, "usage_type": "call"}, {"api_name": "psycopg2.DatabaseError", "line_number": 176, "usage_type": "attribute"}, {"api_name": "traceback.format_exc", "line_number": 177, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 180, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 186, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 186, "usage_type": "name"}]} +{"seq_id": "33995800", "text": "\"\"\"\nModels for saving EventData queries\n\"\"\"\nimport json, jsonfield\nimport decimal\nimport hashlib\nfrom collections import OrderedDict\nfrom datetime import datetime\n\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom django.utils.text import slugify\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.utils.timesince import timesince as timesince_\n\nfrom model_utils.models import TimeStampedModel\n\nfrom tworaven_apps.raven_auth.models import User\nfrom tworaven_apps.utils.basic_response import (ok_resp,\n err_resp,\n err_resp_with_data)\n\n\nIN_PROCESS = u'PENDING'\nERROR = u'FAILURE'\nCOMPLETE = u'SUCCESS'\nSTATUS_STATES = (IN_PROCESS, ERROR, COMPLETE)\nSUBSET = u'subset'\nAGGREGATE = u'aggregate'\nTYPE_OPTIONS = (SUBSET, AGGREGATE)\nTYPE_CHOICES = [(x, x) for x in TYPE_OPTIONS]\nSTATUS_CHOICES = [(x, x) for x in STATUS_STATES]\nMETHOD_CHOICES = (u'find', u'aggregate', u'count') # the valid mongodb collection methods\nEXPORT_CHOICES = (u'csv', u'dataset')\nHOST_CHOICES = (u'TwoRavens', 'UTDallas')\nDATA_PARTITIONS = (u'TEST', u'TRAIN')\n\n# Create your models here.\nSEARCH_KEY_NAME = u'name'\nSEARCH_KEY_DESCRIPTION = u'description'\n#SEARCH_KEY_USERNAME = u'username'\n\nSEARCH_PARAMETERS = (SEARCH_KEY_NAME,\n SEARCH_KEY_DESCRIPTION,) # USERNAME)\n\n\nclass MongoDataset(TimeStampedModel):\n name = models.CharField(primary_key=True, blank=False, max_length=128)\n loading = models.BooleanField(blank=True, default=True)\n\n class Meta:\n ordering = ['created']\n\n\nclass EventDataSavedQuery(TimeStampedModel):\n \"\"\" Model to store queries\"\"\"\n name = models.CharField(blank=False,\n max_length=255)\n\n description = models.TextField(blank=True)\n\n user = models.ForeignKey(User,\n db_index=True,\n on_delete=models.PROTECT)\n\n query = jsonfield.JSONField(blank=False,\n load_kwargs=dict(object_pairs_hook=OrderedDict))\n\n result_count = models.IntegerField(default=-1)\n\n collection_type = models.CharField(blank=False,\n max_length=255,\n choices=TYPE_CHOICES,\n default=SUBSET)\n\n collection_name = 
models.CharField(blank=False,\n max_length=255,\n default=\"mongo dataset\")\n\n save_to_dataverse = models.BooleanField(blank=True,\n default=False)\n\n\n hash_id = models.CharField(help_text='(auto-generated)',\n max_length=255,\n blank=True)\n\n\n class Meta:\n \"\"\"order by creation date\"\"\"\n ordering = ('-created',)\n verbose_name_plural = 'Event data saved queries'\n\n def __str__(self):\n \"\"\"object repr\"\"\"\n # query_str = json.dumps(self.query, indent=4)\n return self.name\n\n def get_query_id(self):\n \"\"\"return id\"\"\"\n if self.id:\n return self.id\n\n return None\n\n\n def save(self, *args, **kwargs):\n \"\"\"For any auto-created fields\"\"\"\n if not self.id:\n super(EventDataSavedQuery, self).save(*args, **kwargs)\n\n if not self.hash_id:\n hash_str = '%s %s' % (self.id, self.created)\n self.hash_id = hashlib.sha224(hash_str.encode('utf-8')).hexdigest()\n\n super(EventDataSavedQuery, self).save(*args, **kwargs)\n\n def as_dict(self):\n \"\"\"return info dict\"\"\"\n od = OrderedDict()\n\n for attr_name in self.__dict__.keys():\n\n # check for attributes to skip...\n if attr_name.startswith('_') or attr_name == 'query':\n continue\n\n val = self.__dict__[attr_name]\n if isinstance(val, models.fields.files.FieldFile):\n # this is a file field...\n #\n val = str(val) # file path or empty string\n if not val:\n val = None\n od[attr_name] = val\n else:\n od[attr_name] = val\n\n od['query'] = self.query\n\n return od\n\n @staticmethod\n def get_all_objects():\n \"\"\"return all objects\"\"\"\n result = EventDataSavedQuery.objects.all()\n\n if not result:\n return err_resp('could not get the object list as %s' % result)\n else:\n return ok_resp(result)\n\n @staticmethod\n def get_objects_by_id(job_id):\n \"\"\"return object by id\"\"\"\n result = EventDataSavedQuery.objects.filter(id=job_id).first()\n\n if not result:\n return err_resp('could not get the object for id %s' % job_id)\n\n else:\n return ok_resp(result)\n\n @staticmethod\n def get_field_list_for_values():\n \"\"\"List of fields used for a queryset 'values' function\"\"\"\n return ['id', 'name', 'user__username',\n 'description', 'result_count',\n 'collection_name', 'collection_type',\n 'hash_id',\n 'created', 'modified',]\n\n\n def get_filtered_objects(self, **kwargs):\n \"\"\"get all the filtered objects\"\"\"\n arguments = {}\n for k, v in kwargs.items():\n if v:\n arguments[k] = v\n\n result = EventDataSavedQuery.objects.values(\\\n *EventDataSavedQuery.get_field_list_for_values()\\\n ).filter(**arguments).all()\n\n if result.count() == 0:\n return err_resp('could not get the object for the inputs')\n\n return ok_resp(result)\n\n\n @staticmethod\n def get_query_list_for_user(user, **additional_filters):\n \"\"\" get all fields expect query\"\"\"\n if not isinstance(user, User):\n user_msg = 'A user must be specified.'\n return err_resp(user_msg)\n\n if not user.is_active:\n user_msg = 'The user is no longer active.'\n return err_resp(user_msg)\n\n orig_list = EventDataSavedQuery.objects.filter(\\\n user=user\\\n )\n if additional_filters:\n orig_list = orig_list.filter(**additional_filters)\n\n orig_list = orig_list.values(\\\n *EventDataSavedQuery.get_field_list_for_values())\n\n if orig_list.count() == 0:\n err_msg = ('No saved queries found.')\n return err_resp(err_msg)\n\n # Format the list including adding a detail url\n #\n fmt_list = []\n for item in orig_list:\n item['username'] = item['user__username']\n del item['user__username']\n item['detail_url'] = reverse('api_retrieve_event_data_query',\n 
kwargs=dict(query_id=item['id']))\n fmt_list.append(item)\n\n final_results = OrderedDict()\n final_results['count'] = len(fmt_list)\n final_results['query_list'] = fmt_list\n\n return ok_resp(final_results)\n\n\n def queries_to_dataverse(self):\n \"\"\" get list of all the queries to be saved to dataverse\"\"\"\n result = EventDataSavedQuery.objects.filter(save_to_dataverse=True)\n\n if result.count() == 0:\n user_msg = ('No EventDataSavedQuery objects found that'\n ' have been saved to Dataverse')\n return err_resp(user_msg)\n\n return ok_resp(result)\n\n\n\nclass ArchiveQueryJob(TimeStampedModel):\n \"\"\"archive query job\"\"\"\n datafile_id = models.IntegerField(\\\n \"Datverse file id\",\n default=-1,\n unique=True)\n\n saved_query = models.ForeignKey(EventDataSavedQuery,\n on_delete=models.PROTECT)\n\n status = models.CharField(max_length=100,\n choices=STATUS_CHOICES,\n default=IN_PROCESS)\n\n is_finished = models.BooleanField(default=False)\n\n is_success = models.BooleanField(default=False)\n\n message = models.TextField(default=None)\n\n dataverse_response = jsonfield.JSONField(\\\n blank=True,\n load_kwargs=dict(object_pairs_hook=OrderedDict))\n\n archive_url = models.URLField(blank=True)\n\n hash_id = models.CharField(help_text='(auto-generated)',\n max_length=255,\n blank=True)\n\n class Meta:\n ordering = ('-created',)\n\n def __str__(self):\n return '%s' % self.saved_query\n\n def get_archive_id(self):\n \"\"\"return id\"\"\"\n if self.id:\n return self.id\n\n return None\n\n def get_datafile_id(self):\n \"\"\"return datafile id\"\"\"\n\n if self.datafile_id:\n return self.datafile_id\n\n return None\n\n def save(self, *args, **kwargs):\n\n if not self.id:\n super(ArchiveQueryJob, self).save(*args, **kwargs)\n\n if not self.hash_id:\n hash_str = '%s %s' % (self.id, self.created)\n self.hash_id = hashlib.sha224(hash_str.encode('utf-8')).hexdigest()\n\n super(ArchiveQueryJob, self).save(*args, **kwargs)\n\n def as_dict(self):\n \"\"\"return info dict\"\"\"\n od = OrderedDict()\n\n for attr_name in self.__dict__.keys():\n\n # check for attributes to skip...\n if attr_name.startswith('_'):\n continue\n\n val = self.__dict__[attr_name]\n if isinstance(val, models.fields.files.FieldFile):\n # this is a file field...\n #\n val = str(val) # file path or empty string\n if not val:\n val = None\n od[attr_name] = val\n else:\n od[attr_name] = val\n\n\n return od\n\n @staticmethod\n def get_all_objects():\n \"\"\"return all objects\"\"\"\n result = ArchiveQueryJob.objects.all()\n\n if result.count() == 0:\n user_msg = 'No ArchiveQueryJob objects found in the database.'\n return err_resp(user_msg)\n\n return ok_resp(result)\n\n @staticmethod\n def get_objects_by_id(datafile_id):\n \"\"\"return object by id\"\"\"\n result = ArchiveQueryJob.objects.filter(datafile_id=datafile_id).first()\n\n if not result:\n user_msg = 'No ArchiveQueryJob for Datafile id: %s' % datafile_id\n return err_resp(user_msg)\n\n return ok_resp(result)\n\n\n def get_filtered_objects(self, **kwargs):\n \"\"\"get all the filtered objects\"\"\"\n arguments = {}\n for k, v in kwargs.items():\n if v:\n arguments[k] = v\n\n result = ArchiveQueryJob.objects.filter(**arguments).all()\n\n if result.count() == 0:\n user_msg = 'No ArchiveQueryJob objects found for this query'\n return err_resp(user_msg)\n\n else:\n return ok_resp(result)\n\n\nclass UserNotification(TimeStampedModel):\n \"\"\"\"it is to store all the notifications sent to user\"\"\"\n\n recipient = models.ForeignKey(User,\n on_delete=models.CASCADE)\n\n 
unread = models.BooleanField(default=True,\n db_index=True)\n\n emailed = models.BooleanField(default=False, db_index=True)\n\n message = models.TextField()\n\n is_read = models.BooleanField(default=False)\n\n archived_query = jsonfield.JSONField(\\\n blank=False,\n load_kwargs=dict(object_pairs_hook=OrderedDict))\n\n hash_id = models.CharField(help_text='(auto-generated)',\n max_length=255,\n blank=True)\n class Meta:\n ordering = ('-created',)\n\n def __str__(self):\n return '%s' % self.user.username\n\n\n def timesince(self, current_time=None):\n \"\"\"\n src: https://github.com/django-notifications/django-notifications/blob/master/notifications/models.py\n Shortcut for the ``django.utils.timesince.timesince`` function of the\n current timestamp.\n \"\"\"\n if now is None:\n current_time = datetime.now()\n return timesince_(self.created, current_time)\n\n\n def save(self, *args, **kwargs):\n \"\"\"Create auto-populated fields\"\"\"\n if not self.id:\n super(UserNotification, self).save(*args, **kwargs)\n\n if not self.hash_id:\n hash_str = '%s %s' % (self.id, self.created)\n self.hash_id = hashlib.sha224(hash_str.encode('utf-8')).hexdigest()\n\n super(UserNotification, self).save(*args, **kwargs)\n\n def as_dict(self):\n \"\"\"convert into orederd dict\"\"\"\n\n od = OrderedDict()\n\n for attr_name in self.__dict__.keys():\n\n # check for attributes to skip...\n if attr_name.startswith('_'):\n continue\n\n val = self.__dict__[attr_name]\n if isinstance(val, models.fields.files.FieldFile):\n # this is a file field...\n #\n val = str(val) # file path or empty string\n if not val:\n val = None\n od[attr_name] = val\n else:\n od[attr_name] = val\n\n return od\n\n @staticmethod\n def get_all_objects():\n \"\"\"return all objects\"\"\"\n result = UserNotification.objects.all()\n\n if result.count() == 0:\n return err_resp('No UserNotification results found')\n else:\n return ok_resp(result)\n\n\n @staticmethod\n def get_objects_by_id(user_key):\n \"\"\"return object by id\"\"\"\n result = UserNotification.objects.filter(user=user_key).all()\n\n if not result:\n return err_resp('could not get the object for id %s' % user_key)\n\n else:\n return ok_resp(result)\n", "sub_path": "tworaven_apps/eventdata_queries/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 13995, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "model_utils.models.TimeStampedModel", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "model_utils.models.TimeStampedModel", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 64, "usage_type": "call"}, {"api_name": "tworaven_apps.raven_auth.models.User", "line_number": 64, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": 
"django.db.models.PROTECT", "line_number": 66, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 66, "usage_type": "name"}, {"api_name": "jsonfield.JSONField", "line_number": 68, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 71, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 71, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 73, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 73, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 78, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 78, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 82, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 82, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 86, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 86, "usage_type": "name"}, {"api_name": "hashlib.sha224", "line_number": 116, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 122, "usage_type": "call"}, {"api_name": "django.db.models.fields", "line_number": 131, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 131, "usage_type": "name"}, {"api_name": "tworaven_apps.utils.basic_response.err_resp", "line_number": 151, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.ok_resp", "line_number": 153, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.err_resp", "line_number": 161, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.ok_resp", "line_number": 164, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.err_resp", "line_number": 188, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.ok_resp", "line_number": 190, "usage_type": "call"}, {"api_name": "tworaven_apps.raven_auth.models.User", "line_number": 196, "usage_type": "argument"}, {"api_name": "tworaven_apps.utils.basic_response.err_resp", "line_number": 198, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.err_resp", "line_number": 202, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.err_resp", "line_number": 215, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 223, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 227, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.ok_resp", "line_number": 231, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.err_resp", "line_number": 241, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.ok_resp", "line_number": 243, "usage_type": "call"}, {"api_name": "model_utils.models.TimeStampedModel", "line_number": 247, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 249, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 249, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 254, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 254, "usage_type": "name"}, {"api_name": "django.db.models.PROTECT", "line_number": 255, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 255, "usage_type": "name"}, {"api_name": 
"django.db.models.CharField", "line_number": 257, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 257, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 261, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 261, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 263, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 263, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 265, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 265, "usage_type": "name"}, {"api_name": "jsonfield.JSONField", "line_number": 267, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 269, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 271, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 271, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 273, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 273, "usage_type": "name"}, {"api_name": "hashlib.sha224", "line_number": 305, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 311, "usage_type": "call"}, {"api_name": "django.db.models.fields", "line_number": 320, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 320, "usage_type": "name"}, {"api_name": "tworaven_apps.utils.basic_response.err_resp", "line_number": 340, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.ok_resp", "line_number": 342, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.err_resp", "line_number": 351, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.ok_resp", "line_number": 353, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.err_resp", "line_number": 367, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.ok_resp", "line_number": 370, "usage_type": "call"}, {"api_name": "model_utils.models.TimeStampedModel", "line_number": 373, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 376, "usage_type": "call"}, {"api_name": "tworaven_apps.raven_auth.models.User", "line_number": 376, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 376, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 377, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 377, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 379, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 379, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 382, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 382, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 384, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 384, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 386, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 386, "usage_type": "name"}, {"api_name": "jsonfield.JSONField", "line_number": 388, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 390, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 392, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 392, 
"usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 409, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 409, "usage_type": "name"}, {"api_name": "django.utils.timesince.timesince", "line_number": 410, "usage_type": "call"}, {"api_name": "hashlib.sha224", "line_number": 420, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 427, "usage_type": "call"}, {"api_name": "django.db.models.fields", "line_number": 436, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 436, "usage_type": "name"}, {"api_name": "tworaven_apps.utils.basic_response.err_resp", "line_number": 454, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.ok_resp", "line_number": 456, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.err_resp", "line_number": 465, "usage_type": "call"}, {"api_name": "tworaven_apps.utils.basic_response.ok_resp", "line_number": 468, "usage_type": "call"}]} +{"seq_id": "536695470", "text": "import base64\nimport hashlib\nimport hmac\nimport simplejson\nimport time\n\nLOGORA_SECRET_KEY = '123456'\n\ndef get_logora_sso(user):\n # create a JSON packet of our data attributes\n data = simplejson.dumps({\n 'uid': user['uid'],\n 'first_name': user['first_name'],\n 'last_name': user['last_name'],\n 'email': user['email'],\n })\n # encode the data to base64\n message = base64.b64encode(data)\n # generate a timestamp for signing the message\n timestamp = int(time.time())\n # generate our hmac signature\n sig = hmac.HMAC(LOGORA_SECRET_KEY, '%s %s' % (message, timestamp), hashlib.sha1).hexdigest()\n\n# return a script tag to insert the sso message\n return \"\"\"\"\"\" % dict(\n message=message,\n timestamp=timestamp,\n sig=sig,\n )\n", "sub_path": "python/sso.py", "file_name": "sso.py", "file_ext": "py", "file_size_in_byte": 928, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "simplejson.dumps", "line_number": 11, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 18, "usage_type": "call"}, {"api_name": "time.time", "line_number": 20, "usage_type": "call"}, {"api_name": "hmac.HMAC", "line_number": 22, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 22, "usage_type": "attribute"}]} +{"seq_id": "237380473", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# A simple function that plots the percentage of NA values in each column in a horizontal bar plot\ndef plotNAs(data, figsize=(15,15), color='red'):\n \n try:\n num_nulls = data.isna().sum()\n nulls_pct = pd.DataFrame({'NA_pct': [(x / 1460) * 100 for x in num_nulls], 'colnames': data.columns.values})\n nulls_pct.sort_values(by='NA_pct', inplace=True)\n except TypeError:\n print(\"TypeError: Input must be a pandas DataFrame.\")\n return\n \n ax, fig = plt.subplots(figsize=figsize)\n sns.barplot(data=nulls_pct, x='NA_pct', y='colnames', color='red')\n plt.title(\"Plot of the percentage of NA values in each column\")\n plt.xlabel(\"NA Percentage\")\n plt.ylabel(\"Feature\")\n\n\n \n\n\n\n", "sub_path": "Kaggle/utilities/.ipynb_checkpoints/functions-checkpoint.py", "file_name": "functions-checkpoint.py", "file_ext": "py", "file_size_in_byte": 824, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pandas.DataFrame", "line_number": 12, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.subplots", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "seaborn.barplot", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "296268491", "text": "from django.conf.urls import include, url\nfrom . import views\n\nurlpatterns = [\n url(r'^$',views.Accueil,name=\"Accueil\"),\n url(r'^Connection$',views.ConnectionV,name=\"Connection\"),\n url(r'^Inscription/(?P\\d+)$',views.InscriptionV,name=\"Inscription\"),\n url(r'^VerifMail$', views.VerificationEmailV, name='VerifMail'),\n url(r'^Deconnection$',views.DeconnectionV,name=\"deconnection\"),\n url(r'validationEmail/(?P\\d+)$',views.ValidationCompte,name=\"validation\"),\n url(r'ProfilEtudiant$',views.DashbordAdmin,name=\"dashboard\"),\n url(r'monprofiletu$',views.monProfiletuV,name=\"monprofil\"),\n url(r'dashboardetu$',views.DashBoardEtu,name=\"dashetu\"),\n url(r'conseils_notif$',views.GestionNotificationV),\n url(r'recapprofil/(?P\\d+)$', views.RecapV),\n url(r'mescompetences$',views.CompetenceV,name=\"Competence\")\n\n]", "sub_path": "Projet/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 880, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "521226980", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport sqlite3 as lite\nimport config as c\nimport cherrypy\n\nint_none = None\n\n\nclass Easy:\n \"\"\"\n SQLite class.\n \"\"\"\n\n def __init__(self):\n self._conn = None\n connect = None\n try:\n try:\n connect = cherrypy.thread_data.cursor\n except AttributeError:\n cherrypy.log('Cant get cursor in Easy __init__')\n try:\n cherrypy.thread_data.cursor = lite.connect(c.DATABASEPATH)\n connect = cherrypy.thread_data.cursor\n except AttributeError:\n cherrypy.log('Can\\'t get cursor second time!')\n\n #TODO Restore Database func\n\n cur = connect.cursor()\n # Creating all tables if they not exist already.\n with connect:\n cur.execute(\"\"\"CREATE TABLE IF NOT EXISTS PiFaceDevices\n (Id INTEGER PRIMARY KEY NOT NULL ,\n Pin INT NOT NULL 
,\n TPin INT,\n Room INT,\n Status INT,\n Name TEXT NOT NULL,\n Type TEXT NOT NULL,\n Time INT );\"\"\")\n\n cur.execute('CREATE TABLE IF NOT EXISTS Rooms('\n 'Id INTEGER PRIMARY KEY NOT NULL,'\n 'Name TEXT NOT NULL)')\n\n cur.execute('CREATE TABLE IF NOT EXISTS Users('\n 'Id INTEGER PRIMARY KEY NOT NULL,'\n 'Name TEXT NOT NULL UNIQUE,'\n 'Login TEXT NOT NULL UNIQUE,'\n 'Password TEXT NOT NULL,'\n 'Group_ INT NOT NULL)')\n\n cur.execute('CREATE TABLE IF NOT EXISTS Activity('\n 'Id INTEGER PRIMARY KEY NOT NULL ,'\n 'Event TEXT,'\n 'Type INT,'\n 'A_Id INT)')\n\n cur.execute('CREATE TABLE IF NOT EXISTS ChildrensRooms('\n 'Id INTEGER PRIMARY KEY NOT NULL ,'\n 'Room_Id INT UNIQUE)')\n\n if not self.get_users_admin():\n meta = [('Aq', 'admin', 'admin', c.ADMIN), ]\n cur.execute('INSERT INTO Users VALUES '\n '(NULL, ?, ?, ?, ?)', *meta)\n\n except lite.OperationalError:\n cherrypy.log(\"Problems with database tables init().\")\n\n def add_dev(self, pin, name, dev_type, tpin=None, room=None, status=228, time=None):\n \"\"\"Add new device, need following's.\n\n :param pin: Pin at PiFace. Not NULL\n :type pin: int\n :param tpin: Target pin used by some devices.\n :type tpin: int\n :param room: Current position of device.\n :type room: int\n :param status: Current status of device. DO NOT change, it's for id.\n :type status: int\n :param name: Device name. Not NULL\n :type name: str\n :param dev_type: Device type. Not NULL\n :type dev_type: str\n :param time: Device time.\n :type time: int\n \"\"\"\n # Check for existing such room.\n if room and not self.check_room(room):\n room = None # Just make it None\n self._conn = cherrypy.thread_data.cursor\n meta = [(pin, tpin, room, status, name, dev_type, time), ]\n try:\n with self._conn:\n self._conn.execute('INSERT INTO PiFaceDevices VALUES '\n '(NULL, ?, ?, ?, ?, ?, ?, ?)', *meta)\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return None\n return self.get_last_id()\n\n def get_last_id(self):\n \"\"\"Using for get last row id...\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n c = self._conn.execute('SELECT Id FROM PiFaceDevices WHERE Status=228')\n newid = int(c.fetchone()[0])\n self.change_status(None, newid)\n return newid\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return None\n\n def del_dev(self, dev_id):\n \"\"\"Delete row from a table.\n\n :param dev_id: id of device in table\n :type dev_id: int\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n self._conn.execute('DELETE FROM PiFaceDevices WHERE Id=?', (dev_id,))\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n return True\n\n def select_all_device(self):\n \"\"\"Get all devices.\n \"\"\"\n try:\n self._conn = cherrypy.thread_data.cursor\n c = self._conn.execute('SELECT * FROM PiFaceDevices')\n return c.fetchall()\n except lite.OperationalError:\n cherrypy.log('', traceback=True)\n return False\n\n def select_device_with_type(self, type):\n \"\"\"Select all device with specified type.\n\n :param type: specified dev type\n :type type: str\n \"\"\"\n try:\n self._conn = cherrypy.thread_data.cursor\n c = self._conn.execute('SELECT * FROM PiFaceDevices WHERE Type=?', (type,))\n return c.fetchall()\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n\n def select_specified_device(self, id):\n \"\"\"Select specified device.\n\n :param id: device id\n :type id: int\n \"\"\"\n try:\n self._conn = cherrypy.thread_data.cursor\n c = 
self._conn.execute('SELECT * FROM PiFaceDevices WHERE Id=?', (id,))\n return c.fetchone()\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n\n def select_all_rooms(self):\n \"\"\"Select all rooms\n \"\"\"\n try:\n self._conn = cherrypy.thread_data.cursor\n c = self._conn.execute('SELECT * FROM Rooms')\n return c.fetchall()\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n\n def select_specified_room(self, id):\n \"\"\"Select specified room.\n\n :param id: room id\n :type id: int\n \"\"\"\n try:\n self._conn = cherrypy.thread_data.cursor\n c = self._conn.execute('SELECT * FROM Rooms WHERE Id=?', (id,))\n return c.fetchone()\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n\n def _get_all_rooms_id(self):\n \"\"\"\n Returning all rooms ID.\n :return: dict\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n c = self._conn.execute('SELECT Id From Rooms')\n return c.fetchall()\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n\n def add_room(self, name):\n \"\"\"Add new Room\n\n :param name: Room name\n :type name: str\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n self._conn.execute('INSERT INTO Rooms VALUES (NULL, ?)', (name,))\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n return True\n\n def del_room(self, id):\n \"\"\"Delete row from a table\n\n :param id: id of room in table\n :type id: int\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n self._conn.execute('DELETE FROM Rooms WHERE Id=?', (id,))\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n return True\n\n def check_room(self, id):\n \"\"\"Check Room for existing\n\n :param id: room id\n :type id: int\n \"\"\"\n try:\n self._conn = cherrypy.thread_data.cursor\n return True if self._conn.execute('SELECT * FROM Rooms WHERE Id=?',(id,)).fetchone() or id == int_none else False\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n\n def select_all_dev_from_room(self, id):\n \"\"\"Select all devices from a specified room\n\n :param id: Room id\n :type id: int\n \"\"\"\n try:\n if self.check_room(id):\n self._conn = cherrypy.thread_data.cursor\n if id == int_none:\n return self._conn.execute('SELECT * FROM PiFaceDevices '\n 'WHERE Room ISNULL').fetchall()\n else:\n return self._conn.execute('SELECT * FROM PiFaceDevices '\n 'WHERE Room=?', (id,)).fetchall()\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n\n def change_pin(self, pin, id):\n \"\"\"Change device pin\n\n :param pin: Device pin\n :type pin: int\n :param id: Device id\n :type id: int\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n self._conn.execute(\"UPDATE PiFaceDevices set Pin=? WHERE Id==?\", (pin, id,))\n return self.select_specified_device(id)[1] == pin # If dev pin change, return True\n except (lite.DatabaseError, TypeError):\n cherrypy.log('', traceback=True)\n return False\n\n def change_tpin(self, tpin, id):\n \"\"\"Change device pin\n\n :param tpin: Device tpin\n :type tpin: int\n :param id: Device id\n :type id: int\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n self._conn.execute(\"UPDATE PiFaceDevices set TPin=? 
WHERE Id==?\", (tpin, id,))\n return self.select_specified_device(id)[2] == tpin # If dev tpin change, return True\n except (lite.DatabaseError, TypeError):\n cherrypy.log('', traceback=True)\n return False\n\n def change_room(self, room_num, id):\n \"\"\"Change device room\n\n :param room_num: New Dev Room if int_none - out of any room\n :type room_num: int\n :param id: Device id\n :type id: int\n \"\"\"\n if not self.check_room(room_num):\n return False\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n self._conn.execute(\"UPDATE PiFaceDevices set Room=? WHERE Id==?\", (room_num, id,))\n return self.select_specified_device(id)[3] == room_num # If dev room change, return True\n except (lite.DatabaseError, TypeError):\n cherrypy.log('', traceback=True)\n return False\n\n def change_status(self, status, id):\n \"\"\"Change device status\n\n :param status: Device status\n :type status: int\n :param id: Device id\n :type id: int\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n self._conn.execute(\"UPDATE PiFaceDevices set Status=? WHERE Id==?\", (status, id,))\n return self.select_specified_device(id)[4] == status # If dev status change, return True\n except (lite.DatabaseError, TypeError):\n cherrypy.log('', traceback=True)\n return False\n\n def change_name(self, name, id):\n \"\"\"Change device name\n\n :param name: Device new name\n :type name: str\n :param id: Device id\n :type id: int\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n self._conn.execute(\"UPDATE PiFaceDevices set Name=? WHERE Id==?\", (name, id,))\n return self.select_specified_device(id)[5] == name # If dev name change, return True\n except (lite.DatabaseError, TypeError):\n cherrypy.log('', traceback=True)\n return False\n\n def change_time(self, time, devid):\n \"\"\"Change device pin\n\n :param time: Device time\n :type time: int\n :param devid: Device id\n :type devid: int\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n self._conn.execute(\"UPDATE PiFaceDevices set Time=? WHERE Id==?\", (time, devid,))\n return self.select_specified_device(devid)[7] == time # If dev status change, return True\n except (lite.DatabaseError, TypeError):\n cherrypy.log('', traceback=True)\n return False\n\n def rename_room(self, name, id):\n \"\"\"Change room name\n\n :param name: Room new name\n :type name: str\n :param id: Room id\n :type id: int\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n self._conn.execute(\"UPDATE Rooms set Name=? 
WHERE Id==?\", (name, id,))\n return self.select_specified_room(id)[1] == name # If room name change, return True\n except (lite.DatabaseError, TypeError):\n cherrypy.log('', traceback=True)\n return False\n\n def create_user(self, name, login, password, group):\n \"\"\"Create new user\n\n :param name: user name\n :type name: str\n :param login: user login\n :type login: str\n :param password: user password\n :type password: str\n :param group: user permissions group\n :type group: int\n \"\"\"\n if login.lower() == 'admin':\n return None\n self._conn = cherrypy.thread_data.cursor\n group = int(group)\n meta = [(name, login, password, group), ]\n try:\n with self._conn:\n self._conn.execute('INSERT INTO Users VALUES '\n '(NULL, ?, ?, ?, ?)', *meta)\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return None\n\n def get_user(self, user_id):\n \"\"\"Get user info by user id\n\n :param user_id: user id in Users table\n :type user_id: int\n \"\"\"\n try:\n self._conn = cherrypy.thread_data.cursor\n c = self._conn.execute('SELECT Name, Group_ FROM Users WHERE Id=?', (user_id,))\n return c.fetchone()\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n\n def get_users(self):\n \"\"\"Get all users\n \"\"\"\n try:\n self._conn = cherrypy.thread_data.cursor\n c = self._conn.execute('SELECT Name, Group_ FROM Users')\n return c.fetchall()\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n\n def get_users_admin(self):\n \"\"\"Get all users with group admin.\n \"\"\"\n try:\n self._conn = cherrypy.thread_data.cursor\n r = self._conn.execute('SELECT Name, Group_ FROM Users WHERE Group_=?', (c.ADMIN,))\n return r.fetchall()\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n\n def get_users_id(self):\n \"\"\"Get all users with id\n \"\"\"\n try:\n self._conn = cherrypy.thread_data.cursor\n c = self._conn.execute('SELECT Id, Name, Group_ FROM Users')\n return c.fetchall()\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n\n def get_user_gropup(self, user_id):\n \"\"\"Get user group.\n\n :param user_id: user id.\n :type user_id: id\n \"\"\"\n try:\n self._conn = cherrypy.thread_data.cursor\n c = self._conn.execute('SELECT Group_ FROM Users WHERE Id=?', (user_id,))\n return c.fetchone()[0] # Get first [*,] list.\n except (lite.DatabaseError, TypeError):\n cherrypy.log('', traceback=True)\n return False\n\n def get_user_password(self, user_id):\n \"\"\"Get user password (only for private use)\n\n :param user_id: user id\n :type user_id: int\n \"\"\"\n try:\n self._conn = cherrypy.thread_data.cursor\n c = self._conn.execute('SELECT Name, Group_ FROM Users WHERE Id=?', (user_id,))\n return c.fetchone()\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n\n def change_user_name(self, user_id, user_name):\n \"\"\"Change user name\n\n :param user_id: user id in Users table\n :type user_id: int\n :param user_name: new user name\n :type user_name: str\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n self._conn.execute(\"UPDATE Users set Name=? 
WHERE Id==?\", (user_name, user_id,))\n return self.get_user(user_id)[0] == user_name # If user name change, return True\n except (lite.DatabaseError, TypeError):\n cherrypy.log('', traceback=True)\n return False\n\n def change_user_password(self, user_id, user_password):\n \"\"\"Change user name\n\n :param user_id: user id in Users table\n :type user_id: int\n :param user_password: new user password\n :type user_password: str\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n self._conn.execute(\"UPDATE Users set Password=? WHERE Id==?\", (user_password, user_id,))\n return self.get_user_password(user_id)[0] == user_password # If user password change, return True\n except (lite.DatabaseError, TypeError):\n cherrypy.log('', traceback=True)\n return False\n\n def change_user_group(self, user_id, user_group):\n \"\"\"Change user group\n\n :param user_id: user id in Users table\n :type user_id: int\n :param user_group: new user group\n :type user_group: int\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n self._conn.execute(\"UPDATE Users set Group_=? WHERE Id==?\", (user_group, user_id,))\n return self.get_user(user_id)[1] == user_group # If user name group_, return True\n except (lite.DatabaseError, TypeError):\n cherrypy.log('', traceback=True)\n return False\n\n def check_login_pass(self, login, password):\n \"\"\"Return True if Login and password exist, False otherwise\n\n :param login: User login\n :type login: str\n :param password: User Password\n :type password: str\n \"\"\"\n try:\n self._conn = cherrypy.thread_data.cursor\n c = self._conn.execute('SELECT Id FROM Users WHERE Login=? AND Password=?', (login, password,))\n tmp = c.fetchone()\n if tmp:\n return tmp[0]\n else:\n return False\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n\n def delete_user(self, user_id):\n \"\"\"Delete user\n\n :param user_id: user id to delete\n :type user_id: int\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n cur = self._conn.execute('DELETE FROM Users WHERE Id=?', (user_id,))\n if not self.get_users_admin():\n meta = [('Aq', 'admin', 'admin', c.ADMIN), ]\n cur.execute('INSERT INTO Users VALUES '\n '(NULL, ?, ?, ?, ?)', *meta)\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n return True\n\n def cr_add_room(self, room_id):\n \"\"\"Open new room for Child\n\n :param room_id: new room for child\n :type room_id: int\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n self._conn.execute('INSERT INTO ChildrensRooms VALUES (NULL, ?)', (room_id,))\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n return True\n\n def cr_get_all_rooms(self):\n \"\"\"Get all rooms opened for child\n \"\"\"\n try:\n self._conn = cherrypy.thread_data.cursor\n c = self._conn.execute('SELECT * FROM ChildrensRooms')\n return c.fetchall()\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n\n def cr_check_room(self, room_id):\n \"\"\"Check room for child.\n\n :param room_id: rooms id, which will be checked\n :type room_id: int\n \"\"\"\n try:\n self._conn = cherrypy.thread_data.cursor\n c = self._conn.execute('SELECT * FROM ChildrensRooms WHERE Room_Id=?', (room_id,))\n return True if c.fetchone() else False\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n\n def cr_delet_room(self, room_id):\n \"\"\"Delete room from opened for child\n\n :param room_id: rooms id, 
which will be deleted\n :type room_id: int\n \"\"\"\n self._conn = cherrypy.thread_data.cursor\n try:\n with self._conn:\n self._conn.execute('DELETE FROM ChildrensRooms WHERE Room_Id=?', (room_id,))\n except lite.DatabaseError:\n cherrypy.log('', traceback=True)\n return False\n return True\n", "sub_path": "sqlite.py", "file_name": "sqlite.py", "file_ext": "py", "file_size_in_byte": 21792, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "cherrypy.thread_data", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 22, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 24, "usage_type": "call"}, {"api_name": "config.DATABASEPATH", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cherrypy.thread_data", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 27, "usage_type": "call"}, {"api_name": "config.ADMIN", "line_number": 66, "usage_type": "attribute"}, {"api_name": "sqlite3.OperationalError", "line_number": 70, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 71, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 100, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 101, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 108, "usage_type": "attribute"}, {"api_name": "config.fetchone", "line_number": 112, "usage_type": "call"}, {"api_name": "sqlite3.DatabaseError", "line_number": 115, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 116, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 125, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 129, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 130, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 138, "usage_type": "attribute"}, {"api_name": "config.fetchall", "line_number": 140, "usage_type": "call"}, {"api_name": "sqlite3.OperationalError", "line_number": 141, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 142, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 152, "usage_type": "attribute"}, {"api_name": "config.fetchall", "line_number": 154, "usage_type": "call"}, {"api_name": "sqlite3.DatabaseError", "line_number": 155, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 156, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 166, "usage_type": "attribute"}, {"api_name": "config.fetchone", "line_number": 168, "usage_type": "call"}, {"api_name": "sqlite3.DatabaseError", "line_number": 169, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 170, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 177, "usage_type": "attribute"}, {"api_name": "config.fetchall", "line_number": 179, "usage_type": "call"}, {"api_name": "sqlite3.DatabaseError", "line_number": 180, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 181, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 191, "usage_type": "attribute"}, {"api_name": "config.fetchone", "line_number": 193, "usage_type": "call"}, {"api_name": 
"sqlite3.DatabaseError", "line_number": 194, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 195, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 203, "usage_type": "attribute"}, {"api_name": "config.fetchall", "line_number": 207, "usage_type": "call"}, {"api_name": "sqlite3.DatabaseError", "line_number": 208, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 209, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 218, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 222, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 223, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 233, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 237, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 238, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 249, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 251, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 252, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 263, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 270, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 271, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 282, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 287, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 288, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 299, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 304, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 305, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 318, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 323, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 324, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 335, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 340, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 341, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 352, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 357, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 358, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 369, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 374, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 375, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 386, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 391, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 392, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 409, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 416, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 417, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 427, "usage_type": "attribute"}, {"api_name": "config.fetchone", "line_number": 429, "usage_type": "call"}, {"api_name": 
"sqlite3.DatabaseError", "line_number": 430, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 431, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 438, "usage_type": "attribute"}, {"api_name": "config.fetchall", "line_number": 440, "usage_type": "call"}, {"api_name": "sqlite3.DatabaseError", "line_number": 441, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 442, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 449, "usage_type": "attribute"}, {"api_name": "config.ADMIN", "line_number": 450, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 452, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 453, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 460, "usage_type": "attribute"}, {"api_name": "config.fetchall", "line_number": 462, "usage_type": "call"}, {"api_name": "sqlite3.DatabaseError", "line_number": 463, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 464, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 474, "usage_type": "attribute"}, {"api_name": "config.fetchone", "line_number": 476, "usage_type": "call"}, {"api_name": "sqlite3.DatabaseError", "line_number": 477, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 478, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 488, "usage_type": "attribute"}, {"api_name": "config.fetchone", "line_number": 490, "usage_type": "call"}, {"api_name": "sqlite3.DatabaseError", "line_number": 491, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 492, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 503, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 508, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 509, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 520, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 525, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 526, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 537, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 542, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 543, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 555, "usage_type": "attribute"}, {"api_name": "config.fetchone", "line_number": 557, "usage_type": "call"}, {"api_name": "sqlite3.DatabaseError", "line_number": 562, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 563, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 572, "usage_type": "attribute"}, {"api_name": "config.ADMIN", "line_number": 577, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 580, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 581, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 591, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 595, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 596, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 604, "usage_type": "attribute"}, {"api_name": "config.fetchall", "line_number": 606, "usage_type": "call"}, {"api_name": "sqlite3.DatabaseError", 
"line_number": 607, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 608, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 618, "usage_type": "attribute"}, {"api_name": "config.fetchone", "line_number": 620, "usage_type": "call"}, {"api_name": "sqlite3.DatabaseError", "line_number": 621, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 622, "usage_type": "call"}, {"api_name": "cherrypy.thread_data", "line_number": 631, "usage_type": "attribute"}, {"api_name": "sqlite3.DatabaseError", "line_number": 635, "usage_type": "attribute"}, {"api_name": "cherrypy.log", "line_number": 636, "usage_type": "call"}]} +{"seq_id": "198912136", "text": "\nfrom markdown import markdown\nfrom itertools import count\nfrom django.utils.safestring import mark_safe\nfrom django.forms.renderers import get_default_renderer\n\n\nclass InfoButton(object):\n \"\"\"\n Clickable icon which opens reveal window showing info text\n\n Displays icon. Hovering show tooltip, clicking on will open \"reveal\" window\n showing info text (markdown supported).\n \"\"\"\n\n template_name = 'widgets/info_button.html'\n counter = count()\n\n def __init__(\n self,\n text='',\n tooltip='',\n is_markdown=False,\n ionicon_type='ion-information-circled',\n ionicon_size='small',\n ionicon_color=None\n ):\n \"\"\"\n\n Parameters\n ----------\n text : str\n Info text will appear in window. If text is written in markdown,\n parameter `is_markdown` must be set to True.\n tooltip : str\n Text shown when hovering question mark icon\n is_markdown : bool\n If set, text will be rendered as markdown into html\n ionicon_type : str\n Defines type of ionicon, for v2 refer to https://ionicons.com/v2/\n ionicon_size : str\n Sets size of icon, possible values: small, medium, large, xlarge,\n xxlarge\n ionicon_color : str\n Sets color of icon in hex color code, e.g. '#ff0000'\n \"\"\"\n self.id = next(self.counter)\n self.text = markdown(text) if is_markdown else text\n self.tooltip = tooltip\n self.ionicon_type = ionicon_type\n self.ionicon_size = ionicon_size\n self.ionicon_color = ionicon_color\n\n def __str__(self):\n return self.render()\n\n def get_context(self):\n return {\n 'info_id': f'info_{self.id}',\n 'text': self.text,\n 'tooltip': self.tooltip,\n 'ionicon_type': self.ionicon_type,\n 'ionicon_size': self.ionicon_size,\n 'ionicon_color': self.ionicon_color\n }\n\n def render(self):\n \"\"\"Render the widget as an HTML string.\"\"\"\n context = self.get_context()\n return self._render(self.template_name, context)\n\n @staticmethod\n def _render(template_name, context):\n renderer = get_default_renderer()\n return mark_safe(renderer.render(template_name, context))\n", "sub_path": "utils/widgets.py", "file_name": "widgets.py", "file_ext": "py", "file_size_in_byte": 2357, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "itertools.count", "line_number": 17, "usage_type": "call"}, {"api_name": "markdown.markdown", "line_number": 48, "usage_type": "call"}, {"api_name": "django.forms.renderers.get_default_renderer", "line_number": 74, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "258167068", "text": "# This script reads in the csv file of the combined data from the DE-STRESS output,\n# normalises the data with min max scaling and reduces the dimensionality with PCA.\n\n# 0. 
Loading the relevant packages-------------------------------------------\n\n# Loading relevant packages\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn import decomposition\nfrom scipy.stats import multivariate_normal\nfrom helper_functions import *\nfrom scipy import stats\n\n\n# 1. Reading in the data and preprocessing-----------------------------------\n\n# Setting the file paths\ndestress_output = \"de-stress_output/\"\nanalysis_output = \"analysis/\"\n\n# Loading in the combined_data\ncombined_data = pd.read_csv(destress_output + \"combined_data.csv\")\n\n# Extracting the labels\ndecoy_or_native = combined_data[\"decoy or native\"]\npdb_id = combined_data[\"pdb id\"].str.slice(start=0, stop=4)\nstructure_group = combined_data[\"structure group\"].str.slice(start=0, stop=4)\n# extra_native_flag = combined_data[\"extra native flag\"]\n\n# Dividing energy values by number of residues\ncombined_data.loc[\n :,\n [\n \"hydrophobic fitness\",\n \"budeff: total\",\n \"budeff: steric\",\n \"budeff: desolvation\",\n \"budeff: charge\",\n \"evoef2: total\",\n \"evoef2: ref total\",\n \"evoef2: intraR total\",\n \"evoef2: interS total\",\n \"evoef2 - interD total\",\n \"dfire2 - total\",\n \"rosetta - total\",\n \"rosetta - fa_atr\",\n \"rosetta - fa_rep\",\n \"rosetta - fa_intra_rep\",\n \"rosetta - fa_elec\",\n \"rosetta - fa_sol\",\n \"rosetta - lk_ball_wtd\",\n \"rosetta - fa_intra_sol_xover4\",\n \"rosetta - hbond_lr_bb\",\n \"rosetta - hbond_sr_bb\",\n \"rosetta - hbond_bb_sc\",\n \"rosetta - hbond_sc\",\n \"rosetta - dslf_fa13\",\n \"rosetta - rama_prepro\",\n \"rosetta - p_aa_pp\",\n \"rosetta - fa_dun\",\n \"rosetta - omega\",\n \"rosetta - pro_close\",\n \"rosetta - yhh_planarity\",\n ],\n] = combined_data.loc[\n :,\n [\n \"hydrophobic fitness\",\n \"budeff: total\",\n \"budeff: steric\",\n \"budeff: desolvation\",\n \"budeff: charge\",\n \"evoef2: total\",\n \"evoef2: ref total\",\n \"evoef2: intraR total\",\n \"evoef2: interS total\",\n \"evoef2 - interD total\",\n \"dfire2 - total\",\n \"rosetta - total\",\n \"rosetta - fa_atr\",\n \"rosetta - fa_rep\",\n \"rosetta - fa_intra_rep\",\n \"rosetta - fa_elec\",\n \"rosetta - fa_sol\",\n \"rosetta - lk_ball_wtd\",\n \"rosetta - fa_intra_sol_xover4\",\n \"rosetta - hbond_lr_bb\",\n \"rosetta - hbond_sr_bb\",\n \"rosetta - hbond_bb_sc\",\n \"rosetta - hbond_sc\",\n \"rosetta - dslf_fa13\",\n \"rosetta - rama_prepro\",\n \"rosetta - p_aa_pp\",\n \"rosetta - fa_dun\",\n \"rosetta - omega\",\n \"rosetta - pro_close\",\n \"rosetta - yhh_planarity\",\n ],\n].div(\n combined_data[\"number of residues\"], axis=0\n)\n\n\n# Dropping some columns\ncombined_data.drop(\n [\n \"design name\",\n \"number of residues\",\n \"mass (da)\",\n \"decoy or native\",\n \"evoef2 - interD total\",\n \"rosetta - yhh_planarity\",\n \"aggrescan3d: total_value\",\n \"isoelectric point (pH)\",\n \"pdb id\",\n \"structure group\",\n \"evoef2: ref total\",\n \"rosetta - dslf_fa13\",\n ],\n axis=1,\n inplace=True,\n)\n\n\n# Dropping composition metrics as these only change across different pdb ids\n# and not natives vs decoy structures. This is important as we are interested\n# in looking at metrics that separate out decoys vs native structures. 
If these\n# values are included they dominate the variance in the data set as each structure\n# has a different amino acid composition.\ncombined_data = combined_data.loc[\n :, ~combined_data.columns.str.startswith(\"composition\")\n]\n\n# Saving a list of the features used\nprint(combined_data.columns)\n\n\n# 2. Histograms of the features------------------------------------------------------------------------\ncombined_data[\"rosetta - pro_close\"].plot.hist(\n grid=True, bins=50, rwidth=0.9, color=\"#87ceeb\"\n)\nplt.title(\"Histogram of the rosetta - pro_close metric values\")\nplt.xlabel(\"rosetta - pro_close\")\nplt.ylabel(\"counts\")\nplt.savefig(\n analysis_output + \"hist_rosetta_pro_close.png\",\n bbox_inches=\"tight\",\n)\nplt.close()\n\n\n# 3. Performing PCA and plotting components\n# against variance with two different scaling methods---------------------------------------------------\nn_components = [2, 3, 4, 5, 6, 7, 8, 9, 10]\nscaling_method = [\"minmax\"]\n\n# Creating a data frame to collect results\nvar_explained_df = pd.DataFrame(\n columns=[\"n_components\", \"scaling_method\", \"var_explained\"]\n)\n\n# Looping through different scaling methods and components\nfor i in n_components:\n for j in scaling_method:\n\n # Performing the scaling method\n if j == \"stand\":\n scaled_data = StandardScaler().fit_transform(combined_data)\n\n elif j == \"minmax\":\n scaled_data = MinMaxScaler(feature_range=(0, 1)).fit_transform(\n combined_data\n )\n\n # Scaling the data\n scaled_data_df = pd.DataFrame(scaled_data, columns=combined_data.columns)\n\n # Performing PCA with the specified components\n pca = decomposition.PCA(n_components=i)\n pca.fit(scaled_data_df)\n\n # Calculating the variance explained\n var_explained = np.sum(pca.explained_variance_ratio_)\n\n # Appending to the data frame\n var_explained_df = var_explained_df.append(\n {\"n_components\": i, \"scaling_method\": j, \"var_explained\": var_explained},\n ignore_index=True,\n )\n\n# Saving as a csv file\nvar_explained_df.to_csv(analysis_output + \"var_explained.csv\", index=False)\n\n# Plotting the data and saving\nvar_plot = sns.lineplot(\n x=\"n_components\",\n y=\"var_explained\",\n data=var_explained_df,\n hue=\"scaling_method\",\n)\nplt.title(\"\"\"Variance explained by number of pca components and scaling method.\"\"\")\nplt.xlabel(\"Number of components\")\nplt.legend(loc=\"upper right\", ncol=2, handletextpad=0.1)\nplt.savefig(analysis_output + \"var_explained.png\")\nplt.close()\n\n\nfig, (ax1, ax2) = plt.subplots(2, 1)\nfig.set_size_inches(6, 7)\n\ncombined_data[\"dfire2 - total\"].plot.hist(\n grid=True, bins=50, rwidth=0.9, color=\"#87ceeb\", ax=ax1\n)\nax1.set_xlabel(\"DFIRE2 - Total\")\nax1.set_ylabel(\"Counts\")\n\nvar_plot = sns.lineplot(\n x=\"n_components\",\n y=\"var_explained\",\n data=var_explained_df,\n hue=\"scaling_method\",\n ax=ax2,\n legend=False,\n)\nax2.set_xlabel(\"Number of Components\")\nax2.set_ylabel(\"Variance Explained\")\nplt.savefig(\n analysis_output + \"hist_dfire2_var_explained.png\",\n dpi=600,\n)\nplt.close()\n\n\n# 4. 
Performing PCA with 2 components and plotting against different labels\n\n# Normalising the data with min max transform as not all the\n# features will have a gaussian distribution\nscaled_data = MinMaxScaler(feature_range=(0, 1)).fit_transform(combined_data)\nscaled_data_df = pd.DataFrame(scaled_data, columns=combined_data.columns)\n\n# Checking normality of the features\nfor col in scaled_data_df.columns:\n scaled_data_df[col].plot.hist(grid=True, bins=50, rwidth=0.9, color=\"#87ceeb\")\n plt.title(col)\n plt.xlabel(\"col\")\n plt.ylabel(\"counts\")\n plt.savefig(\n analysis_output + \"hist_transf_\" + col.replace(\" \", \"\") + \".png\",\n bbox_inches=\"tight\",\n )\n plt.close()\n\n# Outputting the pandas data frame as a csv file\nscaled_data_df.to_csv(analysis_output + \"scaled_data.csv\", index=False)\n\n# Performing PCA\npca = decomposition.PCA(n_components=2)\npca.fit(scaled_data_df)\n\n# Saving contributions of the features to the principal components\nfeat_contr_to_cmpts = pd.DataFrame(\n np.round(abs(pca.components_), 4), columns=combined_data.columns\n)\nfeat_contr_to_cmpts.to_csv(analysis_output + \"feat_contr_to_cmpts.csv\", index=True)\n\n# Selecting the 10 largest contributers to pca component 1\ncomp_1_contr = feat_contr_to_cmpts.iloc[0].nlargest(10, keep=\"first\")\ncomp_1_contr.to_csv(analysis_output + \"comp_1_contr.csv\", index=True)\n\n# Selecting the 10 largest contributers to pca component 2\ncomp_2_contr = feat_contr_to_cmpts.iloc[1].nlargest(10, keep=\"first\")\ncomp_2_contr.to_csv(analysis_output + \"comp_2_contr.csv\", index=True)\n\n# Transforming the data\ntransformed_data = pca.transform(scaled_data_df)\n\n# Converting to data frame and renaming columns\ntransformed_data = pd.DataFrame(transformed_data).rename(\n columns={0: \"pca_dim0\", 1: \"pca_dim1\"}\n)\n\n# Adding the labels back\ntransformed_data = pd.concat(\n [transformed_data, decoy_or_native, pdb_id, structure_group],\n axis=1,\n)\n\n# 5. Fitting 2d Gaussians to each of the sets of decoy structures by pdb id----------------------------\n\n# 6. 
Various different plots for paper and report---------------------------------------\n\n# Scatter plot of all PCA component 1 and PCA component 2 for all structures\nplot = sns.scatterplot(\n x=\"pca_dim0\",\n y=\"pca_dim1\",\n data=transformed_data,\n hue=\"structure group\",\n hue_order=[\n \"1N8V\",\n \"1ZI8\",\n \"2HS1\",\n \"2XOD\",\n \"3CHB\",\n \"3LDC\",\n \"3NJN\",\n \"3WCQ\",\n \"3WDC\",\n ],\n style=\"decoy or native\",\n size=\"decoy or native\",\n markers=[\"o\", \"s\", \"*\"],\n alpha=0.6,\n edgecolor=\"black\",\n sizes=[50, 50, 150],\n)\nplt.xlabel(\"Principal Component 1\")\nplt.ylabel(\"Principal Component 2\")\nh, l = plot.get_legend_handles_labels()\nplt.legend(\n h[1:10],\n l[1:10],\n bbox_to_anchor=(1.05, 1),\n loc=\"upper left\",\n title=\"PDB ID\",\n)\nplot.add_artist(\n plt.legend(\n h[11:15],\n l[11:15],\n bbox_to_anchor=(1.05, 0.3),\n # loc=\"upper left\",\n title=\"Decoy or Native\",\n )\n)\nplot.add_artist(\n plt.legend(\n h[1:10],\n l[1:10],\n bbox_to_anchor=(1.08, 1),\n loc=\"upper left\",\n title=\"PDB ID\",\n )\n)\nplt.savefig(analysis_output + \"pca_2dproj.png\", bbox_inches=\"tight\", dpi=600)\nplt.savefig(analysis_output + \"pca_2dproj.svg\", bbox_inches=\"tight\", dpi=600)\nplt.close()\n\n\n# Individual scatter plots for each structure\nsns.set(font_scale=1.5)\nrel_plot = sns.relplot(\n data=transformed_data,\n x=\"pca_dim0\",\n y=\"pca_dim1\",\n col=\"structure group\",\n col_wrap=3,\n hue=\"decoy or native\",\n facet_kws={\n \"sharey\": False,\n \"sharex\": False,\n },\n s=200,\n legend=False,\n style=\"decoy or native\",\n size=\"decoy or native\",\n markers=[\"o\", \"s\", \"*\"],\n alpha=0.6,\n edgecolor=\"black\",\n sizes=[300, 300, 450],\n)\nrel_plot.set_axis_labels(\"PCA Component 1\", \"PCA Component 2\")\nplt.savefig(\n analysis_output + \"pca_2dproj_subplots.png\",\n bbox_inches=\"tight\",\n dpi=600,\n)\nplt.savefig(\n analysis_output + \"pca_2dproj_subplots.svg\",\n bbox_inches=\"tight\",\n dpi=600,\n)\nplt.close()\n\n# Strip plot of PCA component 2 for all structures\n# split out by decoy or native structure\nsns.set(style=\"white\")\nstrip_plot1 = sns.stripplot(\n x=\"structure group\",\n y=\"pca_dim1\",\n data=transformed_data[transformed_data[\"decoy or native\"] == \"decoy\"],\n hue=decoy_or_native,\n edgecolor=\"black\",\n alpha=0.7,\n jitter=True,\n linewidth=1,\n marker=\"o\",\n s=6,\n)\nstrip_plot1.legend_.remove()\nstrip_plot2 = sns.stripplot(\n x=\"structure group\",\n y=\"pca_dim1\",\n data=transformed_data[transformed_data[\"decoy or native\"] == \"native\"],\n hue=decoy_or_native,\n edgecolor=\"black\",\n alpha=0.7,\n jitter=True,\n linewidth=1,\n marker=\"*\",\n s=10,\n)\nstrip_plot2.legend_.remove()\nplt.xlabel(\"PDB ID\")\nplt.ylabel(\"Principal Component 2\")\nplt.savefig(analysis_output + \"strip_plot.svg\", bbox_inches=\"tight\", dpi=600)\nplt.savefig(analysis_output + \"strip_plot.png\", bbox_inches=\"tight\", dpi=600)\nplt.close()\n\n\nfig, (ax1, ax2) = plt.subplots(2, 1)\nfig.set_size_inches(6, 8)\n\nplot = sns.scatterplot(\n x=\"pca_dim0\",\n y=\"pca_dim1\",\n data=transformed_data,\n hue=\"structure group\",\n hue_order=[\n \"1N8V\",\n \"1ZI8\",\n \"2HS1\",\n \"2XOD\",\n \"3CHB\",\n \"3LDC\",\n \"3NJN\",\n \"3WCQ\",\n \"3WDC\",\n ],\n style=\"decoy or native\",\n size=\"decoy or native\",\n markers=[\"o\", \"s\", \"*\"],\n alpha=0.7,\n edgecolor=\"black\",\n sizes=[50, 50, 150],\n ax=ax1,\n)\nax1.set_xlabel(\"Principal Component 1\")\nax1.set_ylabel(\"Principal Component 2\")\nh, l = 
plot.get_legend_handles_labels()\nax1.legend(h[1:10], l[1:10], bbox_to_anchor=(1.05, 1), loc=\"upper left\")\n\n\n# Strip plot of PCA component 2 for all structures\n# split out by decoy or native structure\nstrip_plot1 = sns.stripplot(\n x=\"structure group\",\n y=\"pca_dim1\",\n data=transformed_data[transformed_data[\"decoy or native\"] == \"decoy\"],\n hue=decoy_or_native,\n edgecolor=\"black\",\n alpha=0.7,\n jitter=True,\n linewidth=1,\n marker=\"o\",\n s=6,\n ax=ax2,\n)\nstrip_plot1.legend_.remove()\nstrip_plot2 = sns.stripplot(\n x=\"structure group\",\n y=\"pca_dim1\",\n data=transformed_data[transformed_data[\"decoy or native\"] == \"native\"],\n hue=decoy_or_native,\n edgecolor=\"black\",\n alpha=0.7,\n jitter=True,\n linewidth=1,\n marker=\"*\",\n s=10,\n ax=ax2,\n)\nstrip_plot2.legend_.remove()\nax2.set_xlabel(\"PDB ID\")\nax2.set_ylabel(\"Principal Component 2\")\nplt.savefig(analysis_output + \"strip_plot_combined.png\", bbox_inches=\"tight\", dpi=600)\nplt.close()\n", "sub_path": "pca_analysis.py", "file_name": "pca_analysis.py", "file_ext": "py", "file_size_in_byte": 13540, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pandas.read_csv", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 161, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 171, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 174, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 179, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 182, "usage_type": "call"}, {"api_name": "sklearn.decomposition", "line_number": 182, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 186, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.subplots", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 230, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 241, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 242, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 247, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 247, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 248, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 248, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 249, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 249, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 250, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 250, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 260, "usage_type": "call"}, {"api_name": "sklearn.decomposition", "line_number": 260, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 265, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 281, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 286, "usage_type": "call"}, {"api_name": "seaborn.scatterplot", "line_number": 296, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 319, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 319, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 320, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 320, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 322, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 322, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 330, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 330, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 339, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 339, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 347, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 347, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 348, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 348, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 349, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 349, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 353, "usage_type": "call"}, {"api_name": "seaborn.relplot", "line_number": 354, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", 
"line_number": 375, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 375, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 380, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 380, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 385, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 385, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 389, "usage_type": "call"}, {"api_name": "seaborn.stripplot", "line_number": 390, "usage_type": "call"}, {"api_name": "seaborn.stripplot", "line_number": 403, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 416, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 416, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 417, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 417, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 418, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 418, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 419, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 419, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 420, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 420, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 423, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 423, "usage_type": "name"}, {"api_name": "seaborn.scatterplot", "line_number": 426, "usage_type": "call"}, {"api_name": "seaborn.stripplot", "line_number": 458, "usage_type": "call"}, {"api_name": "seaborn.stripplot", "line_number": 472, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 488, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 488, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 489, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 489, "usage_type": "name"}]} +{"seq_id": "149986803", "text": "#!/usr/local/bin/python\n\nimport os\nimport errno\nimport sys\n\nsys.path.extend([\n '/usr/local/www',\n '/usr/local/www/freenasUI'\n])\n\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"freenasUI.settings\"\n\nfrom django.db.models.loading import cache\ncache.get_apps()\n\nRESOLV_CONF_PATH = \"/etc/resolv.conf\"\n\nfrom freenasUI.common.system import domaincontroller_enabled\nfrom freenasUI.network.models import (\n GlobalConfiguration,\n Interfaces\n)\nfrom freenasUI.services.models import (\n CIFS,\n DomainController\n)\n\n\ndef main():\n\n domain = None\n nameservers = []\n\n if domaincontroller_enabled():\n try:\n cifs = CIFS.objects.all()[0]\n dc = DomainController.objects.all()[0]\n\n domain = dc.dc_realm\n if cifs.cifs_srv_bindip:\n for ip in cifs.cifs_srv_bindip:\n nameservers.append(ip)\n else:\n nameservers.append(\"127.0.0.1\")\n\n except Exception as e:\n print >> sys.stderr, \"ix-resolv: ERROR: %s\" % e\n sys.exit(1)\n\n else:\n try:\n gc = GlobalConfiguration.objects.all()[0]\n if gc.gc_domain:\n domain = gc.gc_domain\n if gc.gc_nameserver1:\n nameservers.append(gc.gc_nameserver1)\n if gc.gc_nameserver2:\n nameservers.append(gc.gc_nameserver2)\n if gc.gc_nameserver3:\n nameservers.append(gc.gc_nameserver3)\n\n except Exception as e:\n print >> sys.stderr, \"ix-resolv: ERROR: %s\" % e\n 
sys.exit(1)\n\n if (\n not nameservers and\n (Interfaces.objects.count() == 0 or Interfaces.objects.filter(int_dhcp=True))\n ):\n # since we have set a dhclient hook that disables dhclient from writing to /etc/resolv.conf\n # we should remove it now\n try:\n os.remove(\"/etc/dhclient-enter-hooks\")\n except OSError as e:\n # if this error is not due to the file not existing then we have a problem\n if e.errno != errno.ENOENT:\n raise\n # else we never wrote that file so....moving on\n pass\n sys.exit(0)\n\n try:\n fd = os.open(RESOLV_CONF_PATH, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0x0644)\n if domain:\n os.write(fd, \"search %s\\n\" % domain)\n for ns in nameservers:\n os.write(fd, \"nameserver %s\\n\" % ns)\n os.close(fd)\n with open(\"/etc/dhclient-enter-hooks\", 'w') as f:\n f.write(\n \"\"\"\n add_new_resolv_conf() {\n # We don't want /etc/resolv.conf changed\n # So this is an empty function\n return 0\n }\n \"\"\"\n )\n os.chmod(\"/etc/dhclient-enter-hooks\", 0x0744)\n\n except Exception as e:\n print >> sys.stderr, \"can't create %s: %s\" % (RESOLV_CONF_PATH, e)\n sys.exit(1)\n\nif __name__ == '__main__':\n main()\n", "sub_path": "src/freenas/usr/local/libexec/nas/generate_resolv_conf.py", "file_name": "generate_resolv_conf.py", "file_ext": "py", "file_size_in_byte": 2995, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.path.extend", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.db.models.loading.cache.get_apps", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models.loading.cache", "line_number": 15, "usage_type": "name"}, {"api_name": "freenasUI.common.system.domaincontroller_enabled", "line_number": 35, "usage_type": "call"}, {"api_name": "freenasUI.services.models.CIFS.objects.all", "line_number": 37, "usage_type": "call"}, {"api_name": "freenasUI.services.models.CIFS.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "freenasUI.services.models.CIFS", "line_number": 37, "usage_type": "name"}, {"api_name": "freenasUI.services.models.DomainController.objects.all", "line_number": 38, "usage_type": "call"}, {"api_name": "freenasUI.services.models.DomainController.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "freenasUI.services.models.DomainController", "line_number": 38, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 48, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 49, "usage_type": "call"}, {"api_name": "freenasUI.network.models.GlobalConfiguration.objects.all", "line_number": 53, "usage_type": "call"}, {"api_name": "freenasUI.network.models.GlobalConfiguration.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "freenasUI.network.models.GlobalConfiguration", "line_number": 53, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 65, "usage_type": "call"}, {"api_name": "freenasUI.network.models.Interfaces.objects.count", "line_number": 69, "usage_type": "call"}, {"api_name": "freenasUI.network.models.Interfaces.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "freenasUI.network.models.Interfaces", "line_number": 69, "usage_type": "name"}, {"api_name": "freenasUI.network.models.Interfaces.objects.filter", "line_number": 69, 
"usage_type": "call"}, {"api_name": "os.remove", "line_number": 74, "usage_type": "call"}, {"api_name": "errno.ENOENT", "line_number": 77, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 81, "usage_type": "call"}, {"api_name": "os.open", "line_number": 84, "usage_type": "call"}, {"api_name": "os.O_WRONLY", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.O_CREAT", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.O_TRUNC", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.write", "line_number": 86, "usage_type": "call"}, {"api_name": "os.write", "line_number": 88, "usage_type": "call"}, {"api_name": "os.close", "line_number": 89, "usage_type": "call"}, {"api_name": "os.chmod", "line_number": 100, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 103, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "358405253", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom . import streams\nimport json\nfrom django.db import connections\nimport numpy as np\nfrom datetime import datetime\nfrom .Registro import Registro\nimport cv2\nimport sys\nfrom EmoPy.src.fermodel import FERModel\nfrom pkg_resources import resource_filename\nimport os\nfrom PIL import Image, ImageDraw, ImageFont\nfrom keras import backend as K\nglobal map\n\ndef index(request):\n template='index.html'\n map = streams.otherMain()\n a = ''\n for x, y in map.items():\n a += '{}:{}\\n'.format(x, y)\n # return HttpResponse(a)\n print(map)\n #return HttpResponse(json.dumps(map), content_type='application/json')\n return render(request, template)\n\n\ndef voy(request, pk):\n template='base.html'\n datos=[]\n y=[]\n z=[]\n with connections['default2'].cursor() as cursor:\n cursor.execute(\"SELECT * FROM lecturas \")\n row = cursor.fetchone()\n\n print(type(row))\n\n while row is not None:\n print(row)\n datos.append(list(row))\n m= str(datetime.fromtimestamp(row[0]))\n y.append(m)\n print(type(row))\n\n row = cursor.fetchone()\n print(\"callo\")\n inputs = np.array(datos)\n print(inputs[:,0])\n x=range(len(inputs))\n print(datetime.fromtimestamp(inputs[0,0]))\n va=list(inputs[:,int(pk)])\n cont=0\n for w in va:\n p= Registro(y[cont],pk,w)\n z.append(p)\n cont=cont+1\n context = {\"categories\": list(y), 'values': va, 'table_data':inputs, 'tamx':z}\n return render(request, template, context=context)\ndef sensores(request):\n template='basesenso.html'\n datos=[]\n z=[]\n x1=[]\n y1=[]\n tam=30\n with connections['default2'].cursor() as cursor:\n cursor.execute(\"SELECT * FROM lecturas \")\n row = cursor.fetchone()\n cont=0\n while row is not None:\n datos.append(list(row))\n m= str(datetime.fromtimestamp(row[0]))\n for x in range(1, len(row)):\n y= Registro(m,x,row[x])\n z.append(y)\n if(cont==0):\n u=0\n x1=range(len(row)-1)\n x1=list(range(len(row)-1))\n\n for t in row:\n if(u!=0):\n y1.append(t)\n u=1\n cont=cont+1\n\n row = cursor.fetchone()\n inputs = np.array(datos)\n tamx=[]\n print(len(z))\n if(len(z)>tam-1):\n for x in range(len(z)-tam-1, len(z)):\n tamx.append(z[x])\n context = {'query': datos, 'table_data':inputs, 'tam':tam, 'tamx':tamx, 'x1':x1,'y1':y1}\n return render(request, template, context=context)\n\ndef redNeuronal(request):\n template='basered.html'\n datos=[]\n z=[]\n x1=[]\n y1=[]\n tam=30\n with connections['default2'].cursor() as cursor:\n cursor.execute(\"SELECT * FROM pred \")\n row = cursor.fetchone()\n cont=0\n while row is not None:\n 
datos.append(list(row))\n m= str(datetime.fromtimestamp(row[0]))\n for x in range(1, len(row)):\n y= Registro(m,x,row[x])\n z.append(y)\n if(cont==0):\n u=0\n x1=range(len(row)-1)\n x1=list(range(len(row)-1))\n\n for t in row:\n if(u!=0):\n y1.append(t)\n u=1\n cont=cont+1\n\n row = cursor.fetchone()\n inputs = np.array(datos)\n tamx=[]\n print(len(z))\n if(len(z)>tam-1):\n for x in range(len(z)-tam-1, len(z)):\n tamx.append(z[x])\n context = {'query': datos, 'table_data':inputs, 'tam':tam, 'tamx':tamx, 'x1':x1,'y1':y1}\n return render(request, template, context=context)\n\ndef emopy(request):\n template='video.html'\n fontFace = cv2.FONT_HERSHEY_SIMPLEX;\n fontScale = 1;\n thickness = 2;\n\n #Specify the camera which you want to use. The default argument is '0'\n video_capture = cv2.VideoCapture(0)\n #Capturing a smaller image fçor speed purposes\n video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)\n video_capture.set(cv2.CAP_PROP_FPS, 15)\n\n #Can choose other target emotions from the emotion subset defined in fermodel.py in src directory. The function\n # defined as `def _check_emotion_set_is_supported(self):`\n target_emotions = ['happiness','disgust','surprise']\n model = FERModel(target_emotions, verbose=True)\n\n\n ret, frame = video_capture.read()\n r=os.path.dirname(os.path.abspath(__file__))\n r=r.replace(\"monitoring\",\"IoTransit_web\")\n file = r+'/static/image_data/image.jpg'\n cv2.imwrite(file,frame)\n\n frameString = model.predict(file)\n m=frameString\n image = Image.open(file)\n draw = ImageDraw.Draw(image)\n # O bien /usr/share/fonts/truetype/ttf-dejavu/DejaVuSerif.ttf.\n font = ImageFont.load_default()\n color = 'rgb(255, 255, 255)'\n draw.text((50, 50), frameString, font=font, fill=\"black\")\n image.save(file)\n K.clear_session()\n context = {'image': image, 'estado':m}\n return render(request, template, context=context)\n", "sub_path": "IoTransit_web/monitoring/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5315, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.connections", "line_number": 35, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "name"}, {"api_name": "Registro.Registro", "line_number": 57, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 61, "usage_type": "call"}, {"api_name": "django.db.connections", "line_number": 69, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "name"}, {"api_name": "Registro.Registro", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 91, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 98, "usage_type": "call"}, {"api_name": "django.db.connections", "line_number": 107, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 113, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 113, "usage_type": "name"}, {"api_name": "Registro.Registro", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 129, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 136, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 140, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 145, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 147, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 148, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 149, "usage_type": "attribute"}, {"api_name": "EmoPy.src.fermodel.FERModel", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 158, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 161, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 165, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 165, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 166, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 166, "usage_type": "name"}, {"api_name": "PIL.ImageFont.load_default", "line_number": 168, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 168, "usage_type": "name"}, {"api_name": "keras.backend.clear_session", "line_number": 172, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 172, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 174, "usage_type": "call"}]} +{"seq_id": "11663967", "text": "import cv2\nimport os\nfrom config import *\n\ndef draw_points(lands, img, img_name, save_dir):\n for i in range(0, len(lands), 2):\n point = (lands[i], lands[i+1])\n cv2.circle(img, point, 1, (255, 0, 0))\n result_name = \"result_\"+img_name\n print(result_name)\n save_path = os.path.join(save_dir, result_name)\n print(save_path)\n cv2.imwrite(save_path, img)\n print('done')\n\n\nif __name__ == '__main__':\n\n read_from_file = False\n img_path = './test_images/webwxgetmsgimg.png'\n image_txt = \"./image_names.txt\"\n image_dir = \"./data/images/\"\n resize_save_dir = \"./results/\"\n\n if read_from_file == True:\n with open(image_txt, 'r') as fin:\n for line in fin:\n line = line.strip.split(\" \")\n image_name = line[0]\n land_marks = line[-1]\n img_path = os.path.join(image_dir, image_name)\n img = cv2.imread(img_path)\n img_resize = cv2.resize(img, args.height, args.width)\n draw_points(land_marks, img_resize, image_name)\n\n else:\n img = cv2.imread(img_path)\n img = cv2.resize(img, (300, 300), interpolation=cv2.INTER_AREA)\n #lands = [47, 30, 65, 31, 58, 38, 46, 43, 64, 43]\n #location :(140.268341,202.364410,161.242096,261.564728),\n #[Point(144, 100), Point(173, 100), Point(158, 112), Point(144, 119), Point(173, 117)]\n #Point(113, 151), Point(131, 151), Point(124, 169), Point(115, 179), Point(131, 179)]\n\n lands = [113,151,131,151,124,169,115,179,131,179]\n\n lands = [int(i) for i in lands]\n print('lands', lands)\n image_name = img_path.split(\"/\")[-1]\n print(type(image_name))\n draw_points(lands, img, image_name, resize_save_dir)\n\n", "sub_path": "landmark/draw_points.py", "file_name": "draw_points.py", "file_ext": "py", "file_size_in_byte": 1773, "program_lang": "python", "lang": 
"en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "cv2.circle", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 38, "usage_type": "attribute"}]} +{"seq_id": "163037838", "text": "import os\nimport sys\nimport argparse\nimport glob\n\ndef _check_and_get_args(args=None):\n parser = argparse.ArgumentParser(description='Histograms generator')\n parser.add_argument('-ET', '--encryptiontype', help='encryption type (EC/AES)', default='EC', required=True, type=_restricted_encryption_types)\n results = parser.parse_args(args)\n return results.encryptiontype\n\ndef _restricted_encryption_types(enc_type):\n if not (enc_type == 'AES' or enc_type == 'CE'):\n raise argparse.ArgumentTypeError(\"Chosen encryption type is correct.\")\n return enc_type\n\n# -------------------------------------------------------\n\nif not \"MSC_HOME\" in os.environ:\n print(\"Environment variables like $MSC_HOME are not set.\")\n exit(1)\n\nencryption_type = _check_and_get_args(sys.argv[1:]) # AES/CE\n\nos.system(f\"rm -rf {encryption_type}\")\nos.system(f\"mkdir {encryption_type}\")\nos.system(f\"mkdir {encryption_type}/test_images\")\n\nsample_paths = os.environ['MSC_SAMPLES'].replace('\\n', '').split(',')\n\nfor sample_path in sample_paths:\n#01 kluczkluczkluczkluczkluczkluczkl\n#02 abcdefghijklmnopqrstuwxyz1234567\n#03 syf1W7dTap3NxJlNy1b09sotPn4DxSzH\n\n#04 U9Dm6dPvda7HDvxwqeS5B5qzFrJySOXN\n#05 jpDwN5bYOos8UXH45JqkkteY37f8xdMu\n#06 8iEr95Odq4rFYB6OomAgHRwK3zowbFqI\n#07 G7WKTR67SSEGL4jDgg8H230wJs5hl9xm\n#08 UDvFCd3ySr0INlJw2odpAfpEaSzycW7s\n#09 HZ3csaIJS5cgeSx5kOLrsQi1B9C0BxGL\n#10 gWt7kvXsM0zsPFdnWANiTRm0ehK7Ofm8\n#11 2D7dS5AacFUacFuI1ZuHs9c4NseELNWm\n#12 8CiP9eDBjPkcPJuUfWdtf4VTf3NcDxdm\n#13 4aAhuKrXnmDRjUCy2CEc37pBlJRlqf3j\n#14 7anDe0VfGSpisz3CnRaITIBWUnOvxNgh\n#15 MsLqyR4Pd41x5vNj0RSAhYXAAqPttkby\n#16 XtACMWr0fWw808mpN35JtZUXEZsO4NUp\n#17 BhTAZkIZt6uDmvoivyOizMRum9W357Be\n#18 tyLkLSHyCMgWq2jVry8VT45Y922UeZXs\n#19 s9iMBD3Rhor9FCVhEeeHPRVo61GRrvMa\n#20 4L62RKcuF8OLC3XlpPzFVE4YEqTtkKgo\n os.system(f\"{encryption_type}.py --source {sample_path} --destination {encryption_type}/test_images --key syf1W7dTap3NxJlNy1b09sotPn4DxSzH\")\n\nencrypted_images = glob.glob(f\"{encryption_type}/test_images/*.png\")\n\nfor encrypted_image in encrypted_images:\n os.system(f\"python ./sp800_22_tests/sp800_22_tests.py {encrypted_image} > {encryption_type}/{os.path.basename(encrypted_image)}.txt\")\n\n\n\n\n\n", "sub_path": "research_programs/NIST/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 2171, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 19, 
"usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 25, "usage_type": "call"}, {"api_name": "os.system", "line_number": 26, "usage_type": "call"}, {"api_name": "os.system", "line_number": 27, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 53, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 55, "usage_type": "call"}, {"api_name": "os.system", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}]} +{"seq_id": "392899700", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n Carga la configuración de Domus.\n Busca el fichero domus.conf en la misma ruta que domus.py.\n De no existir el archivo .conf, hay un archivo de ejemplo.\n\"\"\"\n\nimport configparser\nimport os\nimport __main__\n\n\ndef print_error():\n print(\"\\n\\tNo se encuentra el archivo domus.conf\")\n print(\"\\tPor favor, configure la aplicación antes de iniciar.\")\n # print(\"Exception Config.py\")\n raise SystemExit(1)\n\ntry:\n config = configparser.ConfigParser()\n # config.read('/home/pi/Proyectos/domus/app/domus.conf')\n path = os.path.join(os.path.abspath(os.path.dirname(__main__.__file__)),\n 'domus.conf')\n dataset = config.read(path)\n\nexcept:\n print_error()\n\nelse:\n # Separamos secciones de la configuración\n # para cargarlo individualmente en sus respectivos módulos\n if not len(dataset) > 0:\n print_error()\n\n config_logger = config['Logger']\n config_db = config['Database']\n config_telegram = config['Telegram']\n", "sub_path": "app/core/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 1036, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "configparser.ConfigParser", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 24, "usage_type": "call"}, {"api_name": "__main__.__file__", "line_number": 24, "usage_type": "attribute"}]} +{"seq_id": "502638201", "text": "from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom .models import Quotes\nfrom .form import Quotes_form\nfrom django.contrib.auth.models import User\nfrom django.http import Http404\n\n# Create your views here.\ndef home(request):\n \"\"\"\n return views at / url\n \"\"\"\n if not request.user.is_authenticated():\n return redirect('/login')\n quotes = Quotes.objects.all()\n user = request.user\n return render(request, \"home.html\", {\"quotes\": quotes, \"user\": user})\n\ndef user_profile(request, username):\n \"\"\"\n views for user profile\n \"\"\"\n try:\n user = User.objects.get(username= username).username\n except User.DoesNotExist:\n raise Http404\n return render(request, \"user_profile.html\", {\"username\" : user})\n\ndef add_quote(request):\n if not request.user.is_authenticated():\n return redirect('/login')\n\n form_class = Quotes_form\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n quote = form.cleaned_data['quotes']\n author = form.cleaned_data['author']\n user = 
request.user\n obj = Quotes.objects.create(quotes=quote, author=author, user=user)\n return redirect('home')\n else:\n form = form_class()\n return render(request, 'add_quote.html',\n {'form':form,\n\n })", "sub_path": "project/QuoteApp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1369, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.shortcuts.redirect", "line_number": 14, "usage_type": "call"}, {"api_name": "models.Quotes.objects.all", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Quotes.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "models.Quotes", "line_number": 15, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 17, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 24, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.DoesNotExist", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 25, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 26, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "form.Quotes_form", "line_number": 33, "usage_type": "name"}, {"api_name": "form.is_valid", "line_number": 36, "usage_type": "call"}, {"api_name": "form.cleaned_data", "line_number": 37, "usage_type": "attribute"}, {"api_name": "form.cleaned_data", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.Quotes.objects.create", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Quotes.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.Quotes", "line_number": 40, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 41, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "474903817", "text": "#===============================================================================\n# Copyright 2012 Jake Ross\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#===============================================================================\n\n#============= enthought library imports =======================\nfrom traits.api import HasTraits, Property, Instance, on_trait_change, Any, \\\n cached_property, Int, Button, Event, DelegatesTo\nfrom traitsui.api import View, Item, HGroup, Group, TabularEditor\nfrom traitsui.tabular_adapter import TabularAdapter\n#============= standard library imports ========================\n# import numpy as np\nfrom numpy import Inf, array, random\nimport re\n#============= local 
library imports ==========================\nfrom src.database.isotope_analysis.summary import Summary\nfrom src.graph.graph import Graph\nfrom src.database.orms.isotope_orm import proc_SelectedHistoriesTable\nfrom pyface.timer.do_later import do_later\nfrom src.constants import PLUSMINUS\nimport time\n\n\nclass HistoryView(HasTraits):\n summary = Any\n apply = Button\n applied_history = Event\n\n selected_history = Any\n\n def _apply_fired(self):\n summary = self.summary\n dbhist = summary.selected_history.history\n record = summary.record\n selhistory = record.selected_histories\n if not selhistory:\n selhistory = proc_SelectedHistoriesTable(analysis=record)\n\n setattr(selhistory, summary.apply_name, dbhist)\n self.summary.oselected_history = summary.selected_history\n self.applied_history = summary.selected_history\n\n def traits_view(self):\n v = View(Group(\n Item('object.summary.histories', show_label=False,\n editor=TabularEditor(\n adapter=HistoryTabularAdapter(),\n editable=False,\n operations=[],\n auto_update=True,\n horizontal_lines=True,\n selected='object.selected_history')),\n Item('apply',\n enabled_when='summary.selected_history!=summary.oselected_history',\n show_label=False),\n show_border=True,\n label='histories',\n )\n )\n return v\n\nclass HistoryTabularAdapter(TabularAdapter):\n columns = [('User', 'user'), ('Date', 'create_date')]\n\n user_text = Property\n user_width = Int(50)\n create_date_width = Int(120)\n\n def get_font(self, obj, trait, row):\n import wx\n s = 9\n f = wx.FONTFAMILY_DEFAULT\n st = wx.FONTSTYLE_NORMAL\n# w = wx.FONTWEIGHT_BOLD\n w = wx.FONTWEIGHT_NORMAL\n name = 'Bitstream Vera Sans Mono'\n return wx.Font(s, f, st, w, False, name)\n\n def _get_user_text(self):\n u = self.item.user\n return u if u is not None else '---'\n\nclass History(HasTraits):\n history = Any\n\n @cached_property\n def _get_user(self):\n return self.summary.user\n\n @cached_property\n def _get_create_date(self):\n return self.summary.create_date\n\n def __getattr__(self, attr):\n return getattr(self.history, attr)\n\nclass HistorySummary(Summary):\n histories = Property\n graph = Instance(Graph)\n history_view = Instance(HistoryView)\n selected_history = DelegatesTo('history_view')\n history_name = ''\n def _graph_default(self):\n g = Graph(container_dict=dict(padding=5, stack_order='top_to_bottom'))\n g.width = self.record.item_width * 0.73\n return g\n\n def _history_view_default(self):\n return HistoryView(summary=self)\n\n def refresh(self):\n hist = None\n if self.histories:\n selh = self.record._dbrecord.selected_histories\n hist = getattr(selh, self.apply_name)\n\n sh = next((hi for hi in self.histories if hi.history == hist), None)\n def up():\n self.oselected_history = sh\n self.selected_history = None\n self.selected_history = sh\n\n super(HistorySummary, self).build_summary(history=hist)\n do_later(up)\n\n def _get_isotope_keys(self, history, name):\n isokeys = sorted([bi.isotope for bi in getattr(history, name)\n# if bi.use_set\n ],\n key=lambda x:re.sub('\\D', '', x),\n reverse=True)\n return isokeys\n\n @on_trait_change('selected_history')\n def _update_summary(self):\n if self.selected_history:\n self._build_summary()\n\n @cached_property\n def _get_histories(self):\n hn = self.history_name\n dbr = self.record._dbrecord\n return [History(history=hii) for hii in getattr(dbr, '{}_histories'.format(hn))]\n\n def _build_summary(self, history=None):\n if self.histories:\n if history is None:\n if self.selected_history:\n history = self.selected_history\n\n if 
history:\n self._build_graph(history)\n\n def _build_graph(self, hi):\n hn = self.history_name\n dbr = self.record\n#\n g = self.graph\n g.clear()\n# self.graph = g\n isokeys = self._get_isotope_keys(hi, hn)\n xma = -Inf\n xmi = Inf\n\n for i, iso in enumerate(isokeys):\n bi = next((bii for bii in getattr(hi, hn)\n if bii.isotope == iso), None)\n\n g.new_plot(padding=[50, 5, 30, 5],\n title=iso\n\n )\n if bi.use_set:\n# xs = [dbr.make_timestamp(str(bs.analysis.rundate),\n# str(bs.analysis.runtime)) for bs in bi.sets]\n xs = [time.mktime(bs.analysis.analysis_timestamp.timetuple()) for bs in bi.sets ]\n xs = array(xs)\n if xs.shape[0]:\n xs = xs - min(xs)\n ys = random.random(xs.shape[0])\n g.new_series(xs, ys, type='scatter')\n xma = max(xma, max(xs))\n xmi = min(xmi, min(xs))\n else:\n uv = bi.user_value\n ue = bi.user_error\n g.set_plot_title('{} {:0.5f} {}{:0.6f}'.format(iso, uv, PLUSMINUS, ue), plotid=i)\n\n kw = dict(plotid=i, color=(1, 0, 0))\n g.add_horizontal_rule(uv, line_style='solid',\n **kw)\n kw = dict(plotid=i, color=(0, 0, 0))\n g.add_horizontal_rule(uv + ue, **kw)\n g.add_horizontal_rule(uv - ue, **kw)\n g.set_y_limits(min=uv - ue,\n max=uv + ue, pad='0.1', plotid=i)\n\n\n def traits_view(self):\n v = View(HGroup(\n Item('history_view', style='custom',\n show_label=False,\n width=0.27),\n Item('graph', show_label=False,\n style='custom',\n width=0.73\n )\n )\n )\n\n return v\n#============= EOF =============================================\n", "sub_path": "src/database/isotope_analysis/history_summary.py", "file_name": "history_summary.py", "file_ext": "py", "file_size_in_byte": 7941, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "traits.api.HasTraits", "line_number": 35, "usage_type": "name"}, {"api_name": "traits.api.Any", "line_number": 36, "usage_type": "name"}, {"api_name": "traits.api.Button", "line_number": 37, "usage_type": "name"}, {"api_name": "traits.api.Event", "line_number": 38, "usage_type": "name"}, {"api_name": "traits.api.Any", "line_number": 40, "usage_type": "name"}, {"api_name": "src.database.orms.isotope_orm.proc_SelectedHistoriesTable", "line_number": 48, "usage_type": "call"}, {"api_name": "traitsui.api.View", "line_number": 55, "usage_type": "call"}, {"api_name": "traitsui.api.Group", "line_number": 55, "usage_type": "call"}, {"api_name": "traitsui.api.Item", "line_number": 56, "usage_type": "call"}, {"api_name": "traitsui.api.TabularEditor", "line_number": 57, "usage_type": "call"}, {"api_name": "traitsui.api.Item", "line_number": 64, "usage_type": "call"}, {"api_name": "traitsui.tabular_adapter.TabularAdapter", "line_number": 73, "usage_type": "name"}, {"api_name": "traits.api.Property", "line_number": 76, "usage_type": "name"}, {"api_name": "traits.api.Int", "line_number": 77, "usage_type": "call"}, {"api_name": "traits.api.Int", "line_number": 78, "usage_type": "call"}, {"api_name": "wx.FONTFAMILY_DEFAULT", "line_number": 83, "usage_type": "attribute"}, {"api_name": "wx.FONTSTYLE_NORMAL", "line_number": 84, "usage_type": "attribute"}, {"api_name": "wx.FONTWEIGHT_NORMAL", "line_number": 86, "usage_type": "attribute"}, {"api_name": "wx.Font", "line_number": 88, "usage_type": "call"}, {"api_name": "traits.api.HasTraits", "line_number": 94, "usage_type": "name"}, {"api_name": "traits.api.Any", "line_number": 95, "usage_type": "name"}, {"api_name": "traits.api.cached_property", "line_number": 97, "usage_type": "name"}, {"api_name": "traits.api.cached_property", "line_number": 
101, "usage_type": "name"}, {"api_name": "src.database.isotope_analysis.summary.Summary", "line_number": 108, "usage_type": "name"}, {"api_name": "traits.api.Property", "line_number": 109, "usage_type": "name"}, {"api_name": "traits.api.Instance", "line_number": 110, "usage_type": "call"}, {"api_name": "src.graph.graph.Graph", "line_number": 110, "usage_type": "argument"}, {"api_name": "traits.api.Instance", "line_number": 111, "usage_type": "call"}, {"api_name": "traits.api.DelegatesTo", "line_number": 112, "usage_type": "call"}, {"api_name": "src.graph.graph.Graph", "line_number": 115, "usage_type": "call"}, {"api_name": "pyface.timer.do_later.do_later", "line_number": 135, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 141, "usage_type": "call"}, {"api_name": "traits.api.on_trait_change", "line_number": 145, "usage_type": "call"}, {"api_name": "traits.api.cached_property", "line_number": 150, "usage_type": "name"}, {"api_name": "numpy.Inf", "line_number": 173, "usage_type": "name"}, {"api_name": "numpy.Inf", "line_number": 174, "usage_type": "name"}, {"api_name": "time.mktime", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 191, "usage_type": "name"}, {"api_name": "src.constants.PLUSMINUS", "line_number": 198, "usage_type": "argument"}, {"api_name": "traitsui.api.View", "line_number": 211, "usage_type": "call"}, {"api_name": "traitsui.api.HGroup", "line_number": 211, "usage_type": "call"}, {"api_name": "traitsui.api.Item", "line_number": 212, "usage_type": "call"}, {"api_name": "traitsui.api.Item", "line_number": 215, "usage_type": "call"}]} +{"seq_id": "456054954", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#------------------------------------------------------------------------------\n__author__ = 'Patrice Carbonneau'\n__contact__ = 'patrice.carbonneau@durham.ac.uk'\n__copyright__ = '(c) Patrice Carbonneau'\n__license__ = 'MIT'\n\n\n'''\n\nThis script will attempt to train a fuzzy classifier with endmembers infered from manual digitisation.\nIt is assumed that the image interpretation process will lead the user to digitise pure classes.\nThe pure class rasters are then transformed to categorical (1-hot encoding) which in effect means\nthat the digitised class pixels will be assigned a pure membership.\n\nThis script will use tis information in a DNN. The script will only train and save the model.\nUse UAV2SEN_GetErrorsDNN to estiate associated errors.\n\n\n\n'''\n###############################################################################\n\"\"\" Libraries\"\"\"\n\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras import optimizers\nimport numpy as np\nimport pandas as pd\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, BatchNormalization\nfrom tensorflow.keras.utils import to_categorical\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom skimage.transform import downscale_local_mean, resize\nfrom skimage import io\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport matplotlib.patches as mpatches\nimport os\n\n\n\n\n##########################################################################################\n\"\"\"User data input. 
Use the site template and list training and validation choices\"\"\"\n#########################################################################################\n'''Folder Settgings'''\nMainData = 'EMPTY' #main data output from UAV2SEN_MakeCrispTensor.py. no extensions, will be fleshed out below\nSiteList = 'EMPTY'#this has the lists of sites with name, month, year and 1s and 0s to identify training and validation sites\nDataFolder = 'EMPTY' #location of processed tif files\nModelName = 'EMPTY' #Name and location of the final model to be saved in DataFolder. Add .h5 extension\n\n'''Model Features and Labels'''\nUAVtrain = False #if true use the UAV class data to train the model, if false use desk-based data for training\nMajType= 'Pure' #Majority type. only used if UAVtrain or valid is true. The options are RelMaj (relative majority class), Maj (majority) and Pure (95% unanimous).\nFeatureSet = ['B2','B3','B4','B5','B6','B7','B8','B9','B11','B12'] # pick predictor bands from: ['B1','B2','B3','B4','B5','B6','B7','B8','B9','B10','B11','B12']\n\n'''CNN parameters'''\nTrainingEpochs = 200#Use model tuning to adjust this and prevent overfitting\nsize=3#size of the tensor tiles\nKernelSize=3 # size of the convolution kernels\nLearningRate = 0.0005\nChatty = 1 # set the verbosity of the model training. \nNAF = 'relu' #NN activation function\nModelTuning = False #Plot the history of the training losses. Increase the TrainingEpochs if doing this.\n\n'''Validation Settings'''\nUAVvalid = True #if true use the UAV class data to validate. If false, use desk-based polygons\nShowValidation = False #if true will show predicted class rasters for validation images from the site list\n\n\n\n\n\n\n#################################################################################\n'''Function definitions'''\ndef slide_raster_to_tiles(im, size):\n h=im.shape[0]\n w=im.shape[1]\n di=im.shape[2]\n TileTensor = np.zeros(((h-size)*(w-size), size,size,di))\n\n \n B=0\n for y in range(0, h-size):\n for x in range(0, w-size):\n\n TileTensor[B,:,:,:] = im[y:y+size,x:x+size,:]\n B+=1\n\n return TileTensor\n######################################################################################\n \n '''Check that the specified folders and files exist before processing starts'''\nif not(os.path.isfile(MainData+'_fuzzy_'+str(size)+'_T.npy')):\n raise Exception('Main data file does not exist')\nelif not(os.path.isfile(SiteList)):\n raise Exception('Site list csv file does not exist')\nelif not(os.path.isdir(DataFolder)):\n raise Exception('Data folder with pre-processed data not defined')\n\n\n\n'''Load the tensors and filter out the required training and validation data.'''\nTensorFileName = MainData+'_crisp_'+str(size)+'_T.npy'\nLabelFileName = MainData+'_crisp_'+str(size)+'_L.csv'\n\nSiteDF = pd.read_csv(SiteList)\nMasterTensor = np.load(TensorFileName)\nMasterLabelDF=pd.read_csv(LabelFileName)\n\n#Select the features in the tensor\n\nValid=np.zeros(12)\nfor n in range(1,13):\n if ('B'+str(n)) in FeatureSet:\n Valid[n-1]=1\n \nMasterTensor = np.compress(Valid, MasterTensor, axis=3)\n\n\n#Start the filter process to isolate training and validation data\nTrainingSites = SiteDF[SiteDF.Training == 1]\nValidationSites = SiteDF[SiteDF.Validation == 1]\nTrainingSites.index = range(0,len(TrainingSites.Year))\nValidationSites.index = range(0,len(ValidationSites.Year))\n#initialise the training and validation DFs to the master\nTrainingDF = MasterLabelDF\nTrainingTensor = MasterTensor\nValidationDF = MasterLabelDF\nValidationTensor = 
MasterTensor\n\n#isolate the sites, months and year and isolate the associated tensor values\nMasterValid = (np.zeros(len(MasterLabelDF.index)))==1\nfor s in range(len(TrainingSites.Site)):\n Valid = (TrainingDF.Site == TrainingSites.Abbrev[s])&(TrainingDF.Year==TrainingSites.Year[s])&(TrainingDF.Month==TrainingSites.Month[s])\n MasterValid = MasterValid | Valid\n \nTrainingDF = TrainingDF.loc[MasterValid]\nTrainingTensor=np.compress(MasterValid,TrainingTensor, axis=0)#will delete where valid is false\n\nMasterValid = (np.zeros(len(MasterLabelDF.index)))==1\nfor s in range(len(ValidationSites.Site)):\n Valid = (ValidationDF.Site == ValidationSites.Abbrev[s])&(ValidationDF.Year==ValidationSites.Year[s])&(ValidationDF.Month==ValidationSites.Month[s])\n MasterValid = MasterValid | Valid\n \nValidationDF = ValidationDF.loc[MasterValid]\nValidationTensor = np.compress(MasterValid,ValidationTensor, axis=0)#will delete where valid is false \n\n \nMajType=MajType+'Class'\n\n\n#select desk-based or UAV-based for training and validation, if using UAV data, select the majority type\nif UAVtrain & UAVvalid:\n TrainLabels= TrainingDF[MajType]\n ValidationLabels = ValidationDF[MajType]\n TrainingTensor = np.compress(TrainLabels>0, TrainingTensor, axis=0)\n ValidationTensor = np.compress(ValidationLabels>0,ValidationTensor, axis=0)\n TrainLabels=TrainLabels.loc[TrainLabels>0]\n ValidationLabels=ValidationLabels.loc[ValidationLabels>0]\n \n \nelif UAVtrain and ~(UAVvalid):\n TrainLabels= TrainingDF[MajType]\n ValidationLabels = ValidationDF.PolyClass\n TrainingTensor = np.compress(TrainLabels>0, TrainingTensor, axis=0)\n ValidationTensor = np.compress(ValidationLabels>0,ValidationTensor, axis=0)\n TrainLabels=TrainLabels.loc[TrainLabels>0]\n ValidationLabels=ValidationLabels.loc[ValidationLabels>0]\n \nelif ~(UAVtrain) & UAVvalid:\n TrainLabels= TrainingDF.PolyClass\n ValidationLabels = ValidationDF[MajType]\n TrainingTensor = np.compress(TrainLabels>0, TrainingTensor, axis=0)\n ValidationTensor = np.compress(ValidationLabels>0,ValidationTensor, axis=0)\n TrainLabels=TrainLabels.loc[TrainLabels>0]\n ValidationLabels=ValidationLabels.loc[ValidationLabels>0]\n \n#\nelse:\n TrainLabels= TrainingDF.PolyClass\n ValidationLabels = ValidationDF.PolyClass\n TrainingTensor = np.compress(TrainLabels>0, TrainingTensor, axis=0)\n ValidationTensor = np.compress(ValidationLabels>0,ValidationTensor, axis=0)\n TrainLabels=TrainLabels.loc[TrainLabels>0]\n ValidationLabels=ValidationLabels.loc[ValidationLabels>0]\n \n#Select the central pixel in each tensor tile and make a table for non-convolutional NN classification\nTrainingFeatures = np.squeeze(TrainingTensor[:,size//2,size//2,:])\nValidationFeatures = np.squeeze(ValidationTensor[:,size//2, size//2,:]) \n \n#check for empty dataframes and raise an error if found\n\nif (len(TrainingDF.index)==0):\n raise Exception('There is an empty dataframe for training')\n \nif (len(ValidationDF.index)==0):\n raise Exception('There is an empty dataframe for validation')\n \n#Check that tensor lengths match label lengths\n\nif (len(TrainLabels.index)) != TrainingTensor.shape[0]:\n raise Exception('Sample number mismatch for TRAINING tensor and labels')\n \nif (len(ValidationLabels.index)) != ValidationTensor.shape[0]:\n raise Exception('Sample number mismatch for VALIDATION tensor and labels')\n \n \n\n \n\n\n\n\n\n##############################################################################\n\"\"\"Instantiate the Neural Network pixel-based classifier\"\"\" \nNdims = 
TrainingFeatures.shape[1] # Feature Dimensions. \nNClasses = len(np.unique(TrainLabels)) #The number of classes in the data. This MUST be the same as the classes used to retrain the model\n\n\n \t# create model\n \nEstimator = Sequential()\nEstimator.add(Dense(64, kernel_regularizer= regularizers.l2(0.001),input_dim=Ndims, kernel_initializer='normal', activation=NAF))\nEstimator.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None))\nEstimator.add(Dense(32, kernel_regularizer= regularizers.l2(0.001), kernel_initializer='normal', activation=NAF))\nEstimator.add(Dense(16, kernel_regularizer= regularizers.l2(0.001), kernel_initializer='normal', activation=NAF))\nEstimator.add(Dense(NClasses+1, kernel_initializer='normal', activation='linear')) \n\n\n\n#Tune an optimiser\nOptim = optimizers.Adam(lr=LearningRate, beta_1=0.9, beta_2=0.999, decay=0.0, amsgrad=True)\n\n# Compile model\nEstimator.compile(loss='mean_squared_error', optimizer=Optim, metrics = ['accuracy'])\nEstimator.summary()\n \n###############################################################################\n\"\"\"Data Splitting\"\"\"\nTrainLabels1Hot = to_categorical(TrainLabels)\nValidationLabels1Hot = to_categorical(ValidationLabels)\nX_train, X_test, y_train, y_test = train_test_split(TrainingFeatures, TrainLabels1Hot, test_size=0.2, random_state=42)\n\n\n\n\"\"\"Data Fitting\"\"\"\nprint('Fitting CNN Classifier on ' + str(len(X_train)) + ' pixels')\nEstimator.fit(X_train, y_train, batch_size=1000, epochs=TrainingEpochs, verbose=Chatty)\n\n\n\n'''Save model'''\nModelName=os.path.join(DataFolder,ModelName+'.h5')\nEstimator.save(ModelName,save_format='h5')\n", "sub_path": "code/UAV2SEN_CrispendMemberDNN.py", "file_name": "UAV2SEN_CrispendMemberDNN.py", "file_ext": "py", "file_size_in_byte": 10621, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.zeros", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.compress", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.compress", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.compress", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.compress", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.compress", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.compress", "line_number": 171, "usage_type": "call"}, {"api_name": 
"numpy.compress", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.compress", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.compress", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.compress", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.compress", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 224, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 229, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 230, "usage_type": "call"}, {"api_name": "tensorflow.keras.regularizers.l2", "line_number": 230, "usage_type": "call"}, {"api_name": "tensorflow.keras.regularizers", "line_number": 230, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.BatchNormalization", "line_number": 231, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 232, "usage_type": "call"}, {"api_name": "tensorflow.keras.regularizers.l2", "line_number": 232, "usage_type": "call"}, {"api_name": "tensorflow.keras.regularizers", "line_number": 232, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 233, "usage_type": "call"}, {"api_name": "tensorflow.keras.regularizers.l2", "line_number": 233, "usage_type": "call"}, {"api_name": "tensorflow.keras.regularizers", "line_number": 233, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 234, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 239, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers", "line_number": 239, "usage_type": "name"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 247, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 248, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 249, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 260, "usage_type": "call"}, {"api_name": "os.path", "line_number": 260, "usage_type": "attribute"}]} +{"seq_id": "110165780", "text": "#相同的`con_deal_id`,`con_shop_id`,`dt`求和并且一些文字信息使用的是\n\nimport sqlalchemy\nimport sql_name\nimport datetime\nimport pymysql\nfrom sqlalchemy.dialects.mysql import DATE\nimport arg_del\nimport pandas as pd\nimport numpy as np\n#每周六运行\n\nstart_time,end_time,fdatabase,tdatabase=arg_del.get_attr()\nstart_time,end_time,fdatabase,tdatabase=arg_del.set_attr_def_value(start_time,end_time,fdatabase,tdatabase)\nprint('start_time=%s***********************,end_time=%s*****************'%(start_time,end_time))\nfor now_time in arg_del.week_alter(start_time,end_time):\n day={}\n day['start_time']=now_time.strftime('%Y-%m-%d')\n day['end_time']=(now_time+datetime.timedelta(7)).strftime('%Y-%m-%d')\n print('************************day=%s*************************' % day['start_time'])\n localhost_conn = pymysql.connect(host=fdatabase['ip'], user=fdatabase['user'], passwd=fdatabase['password'],\n db='o2o', charset='utf8',\n connect_timeout=7200 * 3, cursorclass=pymysql.cursors.DictCursor)\n localhost_cur = localhost_conn.cursor()\n localhost_cur.execute(sql_name.sql_source_meituan_and_dianping %day)\n\n datas=localhost_cur.fetchall()\n localhost_cur.close()\n localhost_conn.close()\n datas=pd.DataFrame(datas)\n 
datas.drop(['index','check_url'], axis=1, inplace=True)\n datas.rename(columns={'ccc.check_url':'check_url'},inplace=True)\n datas=datas.groupby(['deal_id','platform'],as_index=False).fillna(method='ffill')\n datas = datas.groupby(['deal_id','platform'],as_index=False).fillna(method='bfill')\n datas['sales']=datas['sales'].astype(float)\n datas['sales_meitun']=datas['sales_meitun'].astype(float)\n datas['sales']=datas['sales']+datas['sales_meitun']\n datas['pv']=datas['pv'].astype(float)\n datas['pv_meitun']=datas['pv_meitun'].astype(float)\n datas['pv'] = datas['pv'] + datas['pv_meitun']\n datas['today_hits']=datas['today_hits'].astype(float)\n datas['today_hits_meitun']=datas['today_hits_meitun'].astype(float)\n datas['today_hits'] = datas['today_hits'] + datas['today_hits_meitun']\n datas['hits']=datas['hits'].astype(float)\n datas['hits_meitun']=datas['hits_meitun'].astype(float)\n datas['hits'] = datas['hits'] + datas['hits_meitun']\n datas['click_count']=datas['click_count'].astype(float)\n datas['click_count_meitun']=datas['click_count_meitun'].astype(float)\n datas['hits'] = datas['hits'] + datas['hits_meitun']\n\n datas['start_time']=pd.to_datetime(datas['start_time'])\n datas['start_time_meitun'] = pd.to_datetime(datas['start_time_meitun'])\n\n #pandas 不能直接比较两列。\n\n datas['start_time'] = datas[['start_time', 'start_time_meitun']].max(axis=1)\n datas['shop_num']=datas['shop_num'].astype(float)\n datas['shop_num_meitun'] = datas['shop_num_meitun'].astype(float)\n datas['shop_num'] = datas[['shop_num','shop_num_meitun']].max(axis=1)\n # print(datas[datas['con_deal_id'] == '27253461'][['deal_id', 'shop_id', 'platform', 'dt', 'sales','pv']])\n conn = sqlalchemy.create_engine('mysql+pymysql://%(user)s:%(password)s@%(ip)s:%(port)s/o2o?charset=utf8' %tdatabase,\n connect_args={'charset': 'utf8'})\n datas = datas.reset_index()\n datas.drop(['index','click_count_meitun','start_time_meitun','hits_meitun','sales_meitun','shop_num_meitun','today_hits_meitun','pv_meitun'],\n axis=1, inplace=True)\n datas['platform']='美团+点评'\n datas.to_sql('tuangou_pet_source_data', conn, if_exists='append')", "sub_path": "pet_hostipal/source_from_meituan_and_dianping.py", "file_name": "source_from_meituan_and_dianping.py", "file_ext": "py", "file_size_in_byte": 3587, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "arg_del.get_attr", "line_number": 13, "usage_type": "call"}, {"api_name": "arg_del.set_attr_def_value", "line_number": 14, "usage_type": "call"}, {"api_name": "arg_del.week_alter", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 19, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 21, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sql_name.sql_source_meituan_and_dianping", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 52, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "47548508", "text": "# coding: UTF-8\n# convert markdown to html\n# in order to get module attributes(argv)\nimport sys\n# markdown\n#import markdown\nimport gfm\n\n# get command line args\nargs = sys.argv\n\nfile_name = args[1]\nprint(\"file_name : 
\"+file_name)\n\n# markdown file の読み込み\nf = open(file_name, 'r', encoding='utf-8')\ntext_md = f.read()\nf.close()\n\nprint(\"************ markdown **************\")\nprint(text_md)\n\n# markdown file -> html file\n#md = markdown.Markdown()\n#html = md.convert(text_md)\nhtml = gfm.markdown(text_md)\n\nprint(\"************ html **************\")\nprint(html)\n", "sub_path": "src/convert-markdown/convert_markdown_html.py", "file_name": "convert_markdown_html.py", "file_ext": "py", "file_size_in_byte": 564, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "gfm.markdown", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "225553992", "text": "\"\"\"\nImplementation of double transformer\n\"\"\"\nimport torch\n\nfrom onmt.decoders import TransformerDecoder\nfrom onmt.encoders import TransformerEncoder\nfrom onmt.encoders.encoder import EncoderBase\n\nfrom torch import Tensor\nimport torch.nn.functional as f\n\nfrom datetime import datetime\nimport torch.nn as nn\n\nfrom onmt.encoders.encoder import EncoderBase\n\n\n\nclass DoubleTransformerEncoder(EncoderBase):\n \"\"\"The Transformer encoder from \"Attention is All You Need\"\n :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`\n\n .. mermaid::\n\n graph BT\n A[input]\n B[multi-head self-attn]\n C[feed forward]\n O[output]\n A --> B\n B --> C\n C --> O\n\n Args:\n num_layers (int): number of encoder layers\n d_model (int): size of the model\n heads (int): number of heads\n d_ff (int): size of the inner FF layer\n dropout (float): dropout parameters\n embeddings (onmt.modules.Embeddings):\n embeddings to use, should have positional encodings\n\n Returns:\n (torch.FloatTensor, torch.FloatTensor):\n\n * embeddings ``(src_len, batch_size, model_dim)``\n * memory_bank ``(src_len, batch_size, model_dim)``\n \"\"\"\n\n def __init__(self, opt, embeddings, tg_embeddings=None):\n super(DoubleTransformerEncoder, self).__init__()\n self.first_encoder = TransformerEncoder.from_opt(opt, embeddings, 0)\n self.decoder = TransformerDecoder.from_opt(opt, tg_embeddings)\n self.second_encoder = TransformerEncoder.from_opt(opt, tg_embeddings)\n self.bptt = False\n self.counter = 0\n\n @classmethod\n def from_opt(cls, opt, embeddings, tg_embeddings=None):\n \"\"\"Alternate constructor.\"\"\"\n return cls(opt, embeddings, tg_embeddings)\n\n\n def forward(self, src, lengths=None, dec_in=None, bptt=False):\n \"\"\"See :func:`EncoderBase.forward()`\"\"\"\n enc_state, memory_bank, lengths = self.first_encoder(src, lengths)\n if self.bptt is False:\n self.decoder.init_state(src, memory_bank, enc_state)\n\n dec_out1, attns = self.decoder(dec_in, memory_bank,\n memory_lengths=lengths,\n with_align=False)\n\n weights = self.decoder.embeddings.word_lut.weight # we need to multiply by the embeddings to C\n\n # multiply by weights(t) - to vocab dimensions\n dec_out = torch.tensordot(dec_out1, weights.t(), ([2], [0]))\n # gumbel softmax - choose the words we want from the vocab\n dec_out = nn.functional.gumbel_softmax(dec_out, tau=0.01, hard=True, dim=2)\n # multiply by weights back to embeddings dimensions\n dec_out = torch.tensordot(dec_out, weights, ([2], [0]))\n\n # lengths2 = (torch.ones(dec_out.shape[1]) * dec_out.shape[0]).long().to('cuda')\n # only 1 size batch\n lengths2 = torch.tensor([dec_out.shape[0]]).to('cuda')\n\n enc_state2, memory_bank2, lengths2 = self.second_encoder(dec_out, lengths2)\n\n return enc_state2, 
memory_bank2, lengths2, dec_out, dec_out1, attns\n\n def update_dropout(self, dropout, attention_dropout):\n self.embeddings.update_dropout(dropout)\n for layer in self.transformer:\n layer.update_dropout(dropout, attention_dropout)\n", "sub_path": "onmt/encoders/double_transformer.py", "file_name": "double_transformer.py", "file_ext": "py", "file_size_in_byte": 3311, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "onmt.encoders.encoder.EncoderBase", "line_number": 20, "usage_type": "name"}, {"api_name": "onmt.encoders.TransformerEncoder.from_opt", "line_number": 53, "usage_type": "call"}, {"api_name": "onmt.encoders.TransformerEncoder", "line_number": 53, "usage_type": "name"}, {"api_name": "onmt.decoders.TransformerDecoder.from_opt", "line_number": 54, "usage_type": "call"}, {"api_name": "onmt.decoders.TransformerDecoder", "line_number": 54, "usage_type": "name"}, {"api_name": "onmt.encoders.TransformerEncoder.from_opt", "line_number": 55, "usage_type": "call"}, {"api_name": "onmt.encoders.TransformerEncoder", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.tensordot", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn.functional.gumbel_softmax", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 80, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.tensordot", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "178125311", "text": "#!/usr/bin/env python3\n\nimport socket\nimport argparse\nimport sys\nimport select\n\ndef main():\n parser = argparse.ArgumentParser(description='ncd-server client')\n\n parser.add_argument('ip', nargs='?', default='localhost', help='ncd-server ip address')\n parser.add_argument('port', nargs='?', default=5123, type=int, help='ncd-server port')\n\n args = parser.parse_args()\n\n (family, type, proto, canonname, sockaddr) = socket.getaddrinfo(args.ip, args.port)[0];\n\n sock = socket.socket(family, type, proto)\n\n try:\n sock.connect(sockaddr)\n\n for line in sys.stdin:\n if not line:\n break\n\n if line == 'exit\\n' or line == 'quit\\n' or line == 'q\\n':\n break\n\n sock.send(bytes(line, 'utf-8'))\n response = sock.recv(4096)\n if not response:\n print(\"Connection closed.\")\n break\n print(response.decode('utf-8'), end='', flush=True)\n except KeyboardInterrupt:\n sys.exit(130)\n except ConnectionRefusedError:\n print(\"Connection refused.\")\n except ConnectionResetError:\n print(\"Connction closed.\")\n\n \n\n sock.close()\n\n\nif __name__ == '__main__':\n main()\n\n", "sub_path": "client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 1235, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "socket.getaddrinfo", "line_number": 16, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "148525561", "text": "import numpy as np\nimport logging\nimport json\nimport pyperclip\nfrom time import time, perf_counter\n\nfrom qcodes import VisaInstrument\nfrom qcodes.instrument.parameter import Parameter\nfrom 
qcodes.utils import validators as vals\nfrom time import sleep\n\n\nlogger = logging.getLogger(__name__)\ncmdbase = \"TERM LF\\nFLSH\\nFLOQ\\n\"\n\n\nclass SIM928(Parameter):\n \"\"\"\n This is the parameter class for the SIM928 rechargeable isolated voltage source module\n\n Args:\n channel (int): SIM900 channel for the SIM928 module\n\n name (Optional[str]): Module name (default 'channel_{channel}')\n\n max_voltage (Optional[float]): Maximum voltage (default 20)\n \"\"\"\n\n def __init__(\n self,\n channel,\n name=None,\n max_voltage=20,\n step=0.001,\n inter_delay=0.035,\n t_recheck_cycles=3600,\n **kwargs\n ):\n if not name:\n name = \"channel_{}\".format(channel)\n\n self.t_last_cycle_check = None\n self.t_recheck_cycles = t_recheck_cycles\n self._latest_charge_cycles = None\n\n self.send_cmd = cmdbase + \"SNDT {:d} ,\".format(channel)\n\n super().__init__(\n name=name,\n unit=\"V\",\n get_cmd=self.get_voltage,\n set_cmd=self.send_cmd + '\"VOLT {:.4f}\"',\n step=step,\n inter_delay=inter_delay,\n vals=vals.Numbers(-max_voltage, max_voltage),\n **kwargs\n )\n self.channel = channel\n\n self._meta_attrs.extend([\"reset\", \"charge_cycles\"])\n\n @property\n def charge_cycles(self):\n if (\n self.t_last_cycle_check is None\n or time() - self.t_last_cycle_check < self.t_recheck_cycles\n ):\n\n self._instrument.write(self.send_cmd + '\"BIDN? CYCLES\"')\n sleep(0.08)\n return_str = self._instrument.ask(\"GETN?{:d},100\".format(self.channel))\n\n try:\n self._latest_charge_cycles = int(return_str.rstrip()[5:])\n except Exception:\n logger.warning(\"Return string not understood: \" + return_str)\n self._latest_charge_cycles = -1\n\n self.t_last_cycle_check = time()\n\n return self._latest_charge_cycles\n\n def get_voltage(self):\n \"\"\"\n Retrieves the DAC voltage.\n Note that there is a small delay, since two commands must be sent.\n\n Returns:\n Channel voltage\n \"\"\"\n # Two commands must be sent to the instrument to retrieve the channel voltage\n self._instrument.write(self.send_cmd + '\"VOLT?\"')\n # A small wait is needed before the actual voltage can be retrieved\n sleep(0.035)\n return_str = self._instrument.ask(\"GETN?{:d},100\".format(self.channel))\n for k in range(5):\n if return_str == \"#3000\\n\":\n logger.warning(\n \"Received return string {}, \"\n \"resetting SIM {}\".format(return_str, self.name)\n )\n self._instrument.reset_slot(self.channel)\n sleep(1)\n self._instrument.write(self.send_cmd + '\"VOLT?\"')\n sleep(1)\n return_str = self._instrument.ask(\"GETN?{:d},100\".format(self.channel))\n else:\n break\n return float(return_str[5:-3])\n\n\nclass SIM900(VisaInstrument):\n \"\"\"\n This is the qcodes driver for the Stanford Research SIM900.\n It is currently only programmed for DAC voltage sources.\n\n Args:\n name (str): name of the instrument.\n address (str): The GPIB address of the instrument.\n min_delay (float): Minimum delay between successive visa commands.\n The SIM900 is known to cause issues if there is no delay between\n successive commands.\n \"\"\"\n\n # Dictionary containing current module classes\n modules = {\"SIM928\": SIM928}\n\n def __init__(self, name, address, min_delay=0.03, **kwargs):\n super().__init__(name, address, **kwargs)\n\n # The SIM900 has eight channels\n self.number_of_channels = 8\n\n # Dictionary with (channel, module) elements\n self._modules = {}\n\n self._last_visa_command = None\n self.min_delay = min_delay\n\n # Start with empty list of channels. 
These are\n self.add_parameter(\n \"channels\",\n initial_value={},\n set_cmd=None,\n vals=vals.Anything(),\n snapshot_value=False,\n )\n\n def define_slot(self, channel, name=None, module=\"SIM928\", **kwargs):\n \"\"\"\n Define a module for a SIM900 slot.\n Args:\n channel (int): The SIM900 slot channel for the module\n name (Optional[str]): Module name (default 'channel_{channel}')\n module (Optional[str]): Module type (default 'SIM928)\n **kwargs: Module-specific kwargs, and StandardParameter kwargs\n\n Returns:\n None\n \"\"\"\n assert isinstance(channel, int), \"Channel {} must be an integer\".format(channel)\n assert (\n channel not in self.channels().keys()\n ), \"Channel {} already exists\".format(channel)\n assert module in self.modules.keys(), \"Module {} is not programmed\".format(\n module\n )\n\n parameter = self.add_parameter(\n name=name, channel=channel, parameter_class=self.modules[module], **kwargs\n )\n\n # Add\n channels = self.channels()\n channels[channel] = name\n self.channels(channels)\n\n return parameter\n\n def reset_slot(self, channel):\n self.write(cmdbase + \"SRST {}\".format(channel))\n\n def write(self, cmd: str) -> None:\n # Add a delay to ensure commands aren't sent too rapidly\n if self._last_visa_command is not None:\n dt = perf_counter() - self._last_visa_command\n if dt < self.min_delay:\n sleep(self.min_delay - dt)\n\n super().write(cmd)\n self._last_visa_command = perf_counter()\n\n def ask(self, cmd: str) -> str:\n # Add a delay to ensure commands aren't sent too rapidly\n if self._last_visa_command is not None:\n dt = perf_counter() - self._last_visa_command\n if dt < self.min_delay:\n sleep(self.min_delay - dt)\n\n result = super().ask(cmd)\n\n self._last_visa_command = perf_counter()\n\n return result\n\nvoltage_parameters = []\n\n\ndef get_voltages(copy=True):\n \"\"\" Get scaled parameter voltages as dict \"\"\"\n voltage_dict = {param.name: param() for param in voltage_parameters}\n if copy:\n voltage_json = json.dumps(voltage_dict)\n pyperclip.copy(voltage_json)\n return voltage_dict\n\n\ndef ramp_voltages(target_voltage=None, gate_names=None, delay=0.03, **kwargs):\n \"\"\"\n Ramp multiple gates in multiple steps.\n\n Note that voltage_parameters must contain the parameters to be varied\n\n Usage:\n ramp_voltages(target_voltage)\n Ramp voltages of all gates to target_voltage\n ramp_voltages(target_voltage, channels)\n Ramp voltages of gates with names in channels to target_voltage\n ramp_voltages(gate1=val1, gate2=val2, ...)\n Ramp voltage of gate1 to val1, gate2 to val2, etc.\n delay: Optional sleep after changing voltage in each gate\n\n Args:\n target_voltage (int): target voltage (can be omitted)\n gate_names (str list): Names of gates to be ramped (can be omitted)\n use_scaled: Use scaled SIM parameter (SIM900_scaled_parameters)\n **kwargs:\n\n Returns:\n None\n \"\"\"\n parameters = {param.name: param for param in voltage_parameters}\n\n if target_voltage is not None:\n if isinstance(target_voltage, dict):\n # Accidentally passed kwargs dict without splat\n kwargs = target_voltage\n else:\n if gate_names is None:\n gate_names = parameters.keys()\n target_voltages = {gate_name: target_voltage for gate_name in gate_names}\n elif kwargs:\n gate_names = kwargs.keys()\n target_voltages = {gate_name: val for gate_name, val in kwargs.items()}\n\n initial_voltages = {}\n for gate_name in gate_names:\n initial_voltages[gate_name] = parameters[gate_name]()\n\n if delay is not None:\n sleep(delay)\n\n for ratio in np.linspace(0, 1, 11):\n for 
gate_name in gate_names:\n voltage = (1 - ratio) * initial_voltages[\n gate_name\n ] + ratio * target_voltages[gate_name]\n parameters[gate_name](voltage)\n\n if delay is not None:\n sleep(delay)\n", "sub_path": "qcodes/instrument_drivers/stanford_research/SIM900.py", "file_name": "SIM900.py", "file_ext": "py", "file_size_in_byte": 8666, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "qcodes.instrument.parameter.Parameter", "line_number": 17, "usage_type": "name"}, {"api_name": "qcodes.utils.validators.Numbers", "line_number": 55, "usage_type": "call"}, {"api_name": "qcodes.utils.validators", "line_number": 55, "usage_type": "name"}, {"api_name": "time.time", "line_number": 66, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 70, "usage_type": "call"}, {"api_name": "time.time", "line_number": 79, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 94, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 103, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 105, "usage_type": "call"}, {"api_name": "qcodes.VisaInstrument", "line_number": 112, "usage_type": "name"}, {"api_name": "qcodes.utils.validators.Anything", "line_number": 145, "usage_type": "call"}, {"api_name": "qcodes.utils.validators", "line_number": 145, "usage_type": "name"}, {"api_name": "time.perf_counter", "line_number": 186, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 188, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 191, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 196, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 198, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 202, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 213, "usage_type": "call"}, {"api_name": "pyperclip.copy", "line_number": 214, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 263, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 271, "usage_type": "call"}]} +{"seq_id": "652086101", "text": "import arcade\n\nUPDATES_PER_FRAME = 7\n\n# Constants used to track if the player is facing left or right\nRIGHT_FACING = 0\nLEFT_FACING = 1\n\n# Game specific animation details\nANIMATION_TUPLES = {\n \"kingkrool\": [(\"idle\", 4), (\"walk\", 8), (\"attack\", 4)],\n}\nANIMATION_DEFAULT = [(\"idle\", 1)]\n\n\ndef load_texture_pair(filename):\n \"\"\" Load a texture pair, with the second being a mirror image. \"\"\"\n return [\n arcade.load_texture(filename),\n arcade.load_texture(filename, flipped_horizontally=True),\n ]\n\n\nclass AnimatedSprite(arcade.Sprite):\n def __init__(self, sprite_name, scale=1):\n \"\"\" sprite_name folder containing sprite, e.g. 
kingkrool \"\"\"\n super().__init__(scale=scale)\n animation_name_frames_tuples = ANIMATION_TUPLES.get(\n sprite_name, ANIMATION_DEFAULT\n )\n self.sprite_name = sprite_name\n\n # Default to face-right\n self.character_face_direction = RIGHT_FACING\n\n # Used for flipping between image sequences\n self.current_animation_name = \"idle\"\n self.current_frame = 0\n self.loop = True\n\n # Load textures\n self.animation_name_texture_pairs = {}\n for animation_name, frames in animation_name_frames_tuples:\n self.animation_name_texture_pairs[animation_name] = []\n animation_textures = self.animation_name_texture_pairs[animation_name]\n for i in range(frames):\n animation_textures.append(\n load_texture_pair(\n f\"images/npcs/{sprite_name}/{animation_name}{i}.png\"\n )\n )\n self.texture = self.get_current_animation()[0][self.character_face_direction]\n\n def update(self):\n super().update()\n # Figure out if we need to flip face left or right\n if self.change_x < 0 and self.character_face_direction == RIGHT_FACING:\n self.character_face_direction = LEFT_FACING\n elif self.change_x > 0 and self.character_face_direction == LEFT_FACING:\n self.character_face_direction = RIGHT_FACING\n\n # Animation\n self.current_frame += 1\n current_animation = self.get_current_animation()\n if self.current_frame // UPDATES_PER_FRAME >= len(current_animation):\n self.current_frame = 0\n if not self.loop:\n self.set_animation(\"idle\")\n self.texture = current_animation[self.current_frame // UPDATES_PER_FRAME][\n self.character_face_direction\n ]\n\n def get_current_animation(self):\n return self.animation_name_texture_pairs[self.current_animation_name]\n\n def get_current_animation_total_frames(self):\n return len(self.get_current_animation()) * UPDATES_PER_FRAME\n\n def set_animation(self, animation_name, loop=True):\n if not self.has_animation(animation_name):\n print(f\"Animation {animation_name} not in sprite {self.sprite_name}\")\n return\n if self.current_animation_name == animation_name:\n print(f\"Animation {animation_name} already active\")\n self.current_animation_name = animation_name\n self.current_frame = 0\n self.loop = loop\n\n def has_animation(self, animation_name):\n return animation_name in self.animation_name_texture_pairs\n", "sub_path": "abbot/ui/animated_sprite.py", "file_name": "animated_sprite.py", "file_ext": "py", "file_size_in_byte": 3314, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "arcade.load_texture", "line_number": 19, "usage_type": "call"}, {"api_name": "arcade.load_texture", "line_number": 20, "usage_type": "call"}, {"api_name": "arcade.Sprite", "line_number": 24, "usage_type": "attribute"}]} +{"seq_id": "632074113", "text": "import kivy\nimport socket\n\nfrom kivy.app import App\nfrom kivy.uix.button import Button\nfrom kivy.uix.gridlayout import GridLayout\n\nred = [1,0,0,1]\ngreen = [0,1,0,1]\nblue = [0,0,1,1]\npurple = [1,0,1,1]\n\n\nHOST = '192.168.0.109' # The server's hostname or IP address\nPORT = 23 # The port used by the server\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((HOST, PORT))\n s.sendall(b'Hello, world')\n data = s.recv(1024)\n\nprint('Received', repr(data))\n\n\nclass MainApp(App):\n\n def build(self):\n\n # this code writing sucks but as mentioned i am way too lazy to write clean code.\n\n button_lightON = Button(text=\"Light ON\",background_color=red)\n \n button_lightON.bind(on_press=self.sendLightON)\n \n\n layout.add_widget(button_lightON)\n \n\n 
return layout\n\n def sendLightON(self,instance):\n print(\"light on\")\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((HOST, PORT))\n s.sendall(b'lightON')\n\n \n class Devices(App):\n def __init__(self, name ):\n self.name = name\n\n def build(self):\n layout = GridLayout(cols=1)\n\n\n\n\nif __name__ == '__main__':\n app = MainApp()\n app.run()", "sub_path": "ti_IOT/testing/testingmulticlass.py", "file_name": "testingmulticlass.py", "file_ext": "py", "file_size_in_byte": 1267, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "socket.socket", "line_number": 17, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 17, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 17, "usage_type": "attribute"}, {"api_name": "kivy.app.App", "line_number": 25, "usage_type": "name"}, {"api_name": "kivy.uix.button.Button", "line_number": 31, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 43, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 43, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 43, "usage_type": "attribute"}, {"api_name": "kivy.app.App", "line_number": 48, "usage_type": "name"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "458277837", "text": "# Kenny Yip and Rafael Trinidad worked on this script\n\nfrom fractions import Fraction\ndef sublistRegulation(input, output_frame_count) -> list:\n assert isinstance(input, list)\n assert isinstance(output_frame_count, int)\n assert all(isinstance(x, float) or isinstance(x, int) for x in input)\n\n slopes = []\n offsets = []\n i = 0\n for _ in input:\n offsets.append(input[i])\n\n if i == len(input) - 1:\n slopes.append(0)\n else:\n slopes.append(input[i+1] - input[i])\n\n i += 1\n\n iterpolated_list = []\n step_count = Fraction(len(input)-1, output_frame_count-1)\n pointer = 0\n while pointer <= len(input)-1:\n current_index = int(pointer // 1)\n current_value = slopes[current_index] * (pointer-current_index) + offsets[current_index]\n iterpolated_list.append(current_value)\n pointer += step_count\n\n return iterpolated_list\n\ndef regulation(input_frames, output_frame_count) -> list:\n assert isinstance(input_frames, list)\n assert isinstance(output_frame_count, int)\n coordinates_per_frame = len(input_frames[0])\n assert all(len(x) == coordinates_per_frame for x in input_frames)\n master_list = []\n slices = []\n\n for s in range(coordinates_per_frame):\n sliced = []\n for i in range(0, len(input_frames)):\n sliced.append(input_frames[i][s])\n slices.append(sublistRegulation(sliced, output_frame_count))\n\n for j in range(output_frame_count):\n newFrames = []\n for t in range(coordinates_per_frame):\n newFrames.append(slices[t][j])\n master_list.append(newFrames)\n\n return master_list\n\nprint(\"OUTPUT: \", regulation([(1,2),(2,3),(3,4),(4,5),(5,6),(6,7),(7,8)],5))\n", "sub_path": "scripts/regulation_python.py", "file_name": "regulation_python.py", "file_ext": "py", "file_size_in_byte": 1712, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "fractions.Fraction", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "370131350", "text": "#!/usr/bin/env python3\n# coding=utf-8\nimport logging\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom create_db import Base, 
File\nimport datetime\n\n\n# 操作数据库类\nclass Sql(object):\n # 连接根目录下的db_demo.db数据库,echo为显示日志\n engine = create_engine('sqlite:///db_demo.db', echo=False)\n # engine = create_engine('mysql+pymysql://root:root@localhost/pricemonitor?charset=utf8&autocommit=true')\n\n Base.metadata.bind = engine\n # 建立对话\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n\n # 调用这个方法时需要参数:user_name,email_address\n def write_file(self, file_name, file_path):\n # 实例化User类\n new_file = File(file_name=file_name, file_path=file_path)\n self.session.add(new_file)\n self.session.commit()\n\n def get_file_path(self):\n #返回数据库文件路径\n file_paths = self.session.query(File).all()\n file_path = {}\n i = 0\n for itm in file_paths:\n file_path[i] = itm.file_path\n i += 1\n return file_path\n\n\n''' :return\n # 调用这个方法时需要参数:item_info(list)\n def write_item(self, item_info): # item_id, user_price, user_id\n # 获取当前时间\n time_now = datetime.datetime.now()\n # 实例化Moinitor类\n new_item = Monitor(item_id=item_info[0], user_price=item_info[1], user_id=item_info[2], status=1,\n add_time=time_now, update_time=time_now)\n # 将new_item添加到数据库。\n self.session.add(new_item)\n self.session.commit()\n\n def read_all_not_updated_item(self):\n # 获取当前时间\n time_now = datetime.datetime.now()\n # 创建一个空的列表,名字为:items_need\n items_need = []\n # 获取Monitor表的所有内容,items_all(list)\n items_all = self.session.query(Monitor).all()\n\n # 循环取出list(items_all)的内容。\n for item_all in items_all:\n # 创建一个空的集合\n item_need = {}\n\n # 如果 item_all.status ==1\n if item_all.status == 1:\n # 时间差,当前时间天数减去上传到数据库时间的天数乘以86400加,当前时间秒数减去,上传到数据库秒数。\n time_delta = (time_now - item_all.update_time).days * 86400 + (time_now - item_all.update_time).seconds\n\n logging.info('%s\\'s time delta: %s', item_all.item_id, time_delta)\n # 如果 time_delta小于等于 600\n if time_delta >= UPDATE_TIME:\n # 将 item_all.column_id,item_all.item_id存入 item_need列表中。\n item_need['column_id'] = item_all.column_id\n item_need['item_id'] = item_all.item_id\n # 将 item_need,存入,items_need集合中。\n items_need.append(item_need)\n return items_need\n\n # update_item_name,需要传入参数,column_id,item_name\n def update_item_name(self, column_id, item_name):\n # 获取monitor表中 column_id.\n update_item = self.session.query(Monitor).get(column_id)\n update_item.item_name = item_name\n self.session.commit()\n\n def update_item_price(self, column_id, item_price):\n # 获取当前时间\n time_now = datetime.datetime.now()\n update_item = self.session.query(Monitor).get(column_id)\n\n if update_item.item_price and update_item.item_price != item_price: # if new price\n update_item.last_price = update_item.item_price\n update_item.discount = round(float(item_price) / float(update_item.last_price), 2) # round(,2) set to 0.01\n update_item.item_price = item_price\n update_item.update_time = time_now\n self.session.commit()\n\n def update_item_subtitle(self, column_id, subtitle):\n update_item = self.session.query(Monitor).get(column_id)\n update_item.subtitle = subtitle\n self.session.commit()\n\n def update_item_plus_price(self, column_id, plus_price):\n update_item = self.session.query(Monitor).get(column_id)\n update_item.plus_price = plus_price\n self.session.commit()\n\n def update_item_max_price(self, column_id, highest_price):\n update_item = self.session.query(Monitor).get(column_id)\n update_item.highest_price = highest_price\n self.session.commit()\n\n def update_item_min_price(self, column_id, lowest_price):\n update_item = self.session.query(Monitor).get(column_id)\n update_item.lowest_price = lowest_price\n self.session.commit()\n\n 
def update_status(self, column_id):\n update_item = self.session.query(Monitor).get(column_id)\n update_item.status = 0\n self.session.commit()\n\n def check_item_need_to_remind(self):\n # items_alert = {column_id, item_id, user_price, item_price, name, email}\n items_alert = []\n items = self.session.query(Monitor).all()\n for item in items:\n item_alert = {}\n if item.status == 1 and item.user_price:\n if float(item.user_price) > float(item.item_price):\n user = self.session.query(User).filter_by(column_id=item.user_id)\n item_alert['email'] = user[0].email\n item_alert['name'] = item.item_name\n item_alert['item_price'] = item.item_price\n item_alert['user_price'] = item.user_price\n item_alert['item_id'] = item.item_id\n item_alert['column_id'] = item.column_id\n items_alert.append(item_alert)\n return items_alert\n'''\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n sql = Sql()\n\n # add user named 'test'\n # sql.write_file('test', '1712340552')\n print(type(sql.get_file_path()))\n\n\n # add test item\n # item_id, user_price, user_id\n # sql.write_item(['100005857580', '2000', '1'])\n # sql.write_item(['7437690', '200', '2'])\n\n # read all items needed update\n # print(sql.read_all_not_updated_item())\n", "sub_path": "con_sql.py", "file_name": "con_sql.py", "file_ext": "py", "file_size_in_byte": 6211, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 13, "usage_type": "call"}, {"api_name": "create_db.Base.metadata", "line_number": 16, "usage_type": "attribute"}, {"api_name": "create_db.Base", "line_number": 16, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 18, "usage_type": "call"}, {"api_name": "create_db.File", "line_number": 24, "usage_type": "call"}, {"api_name": "create_db.File", "line_number": 30, "usage_type": "argument"}, {"api_name": "logging.basicConfig", "line_number": 143, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 143, "usage_type": "attribute"}]} +{"seq_id": "445821072", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport json\nimport inspect\nimport logging\nimport itertools\n\nfrom django.utils.datastructures import MultiValueDictKeyError\nfrom django.views.generic import View\nfrom django.http.response import JsonResponse, HttpResponseBadRequest, HttpResponse\nfrom pyparsing import ParseException\nimport docutils\nimport docutils.core\n\nfrom django.views.decorators.cache import cache_page\n\nfrom hub.odhql import parser\nfrom hub.odhql.parser import OdhQLParser\nfrom hub.odhql.functions.core import OdhQLFunction\n\nlogger = logging.getLogger(__name__)\n\n\nclass ParseView(View):\n def post(self, request):\n try:\n body = json.loads(request.body, encoding=request.encoding)\n params = body['params']\n\n statement = params['query']\n logger.debug('Validating ODHQL query \"%s\"', statement)\n\n query = OdhQLParser().parse(statement)\n query = itertools.chain(*[q.data_sources for q in query.queries]) if \\\n isinstance(query, parser.Union) else query.data_sources\n\n data_sources = {'tables': [{'name': table.name, 'alias': table.alias} for table in query]}\n except ParseException as e:\n return JsonResponse({'error': e.message,\n 'type': 'parse',\n 'line': e.line,\n 'lineno': e.lineno,\n 'col': e.col},\n status=HttpResponseBadRequest.status_code)\n except MultiValueDictKeyError:\n return JsonResponse({'error': 'Es wurde keine ODHQL Abfrage 
angegeben.',\n 'type': 'execution'},\n status=HttpResponseBadRequest.status_code)\n\n return JsonResponse(data_sources)\n\n\nclass DocumentationView(View):\n def get(self, request):\n doc = \"\"\"\n OpenDataHub Query Language (ODHQL)\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n .. contents:: Inhalt\n :backlinks: top\n\n {}\n {}\n \"\"\"\n doc = inspect.cleandoc(doc).format(OdhQLParser.gen_doc(), OdhQLFunction.gen_all_docs())\n html = docutils.core.publish_parts(doc, writer_name='html', settings_overrides={'syntax_highlight': 'short'})[\n 'html_body']\n html = html.replace('href=\"#', ' 2:\n queryset = queryset.filter(place_name__istartswith = query)\n return queryset\n else:\n return None\n\n\nclass ServiceAreaSpatialData(APIView):\n # serializer_class = ServiceAreaSpatialSerializer\n\n def get(self, request, *args, **kwargs):\n input_serializer = ServiceAreaSpatialSearchSerializer(data=request.query_params)\n input_serializer.is_valid(raise_exception=True)\n input_data = input_serializer.validated_data\n service_id = input_data.get('service_id')\n service_area_objs = Service.objects.get(pk=service_id).service_areas.all()\n service_area_features = []\n for service_area_obj in service_area_objs:\n type = service_area_obj.type\n code = service_area_obj.code\n returned = []\n if type == 0:\n if code == \"XS\":\n returned.append(return_feature(type, \"S92000003\"))\n else:\n uk_codes = [\"S92000003\", \"E92000001\", \"W92000004\", \"N92000002\"]\n for uk_code in uk_codes:\n returned.append(return_feature(type, uk_code))\n else:\n returned.append(return_feature(type, code))\n\n service_area_features += returned\n queryset = list(service_area_features)\n return Response(queryset)\n\nclass ServiceAreaFullSpatialDataSet(APIView):\n def get(self, request, *args, **kwargs):\n\n dataset_name_keys = {\n 0: \"ctry17nm\",\n 2: \"lad18nm\",\n 3: \"HBName\",\n 4: \"HIAName\",\n }\n\n input_serializer = ServiceAreaFullSpatialDataSetSearchSerializer(data=request.query_params)\n input_serializer.is_valid(raise_exception=True)\n input_data = input_serializer.validated_data\n area_type = input_data.get('type')\n service_area_features = return_feature(area_type, 0, True)\n queryset = list(service_area_features)\n result = {\"name_key\": dataset_name_keys.get(area_type), \"data\": queryset}\n return Response(result)\n", "sub_path": "aliss/api/v4_views.py", "file_name": "v4_views.py", "file_ext": "py", "file_size_in_byte": 7980, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "serializers.v4SearchSerializer", "line_number": 43, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 66, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 71, "usage_type": "call"}, {"api_name": "serializers.v4SearchSerializer", "line_number": 74, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 81, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 86, "usage_type": "call"}, {"api_name": "serializers.v4ServiceAreaSerializer", "line_number": 92, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 93, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 94, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 98, "usage_type": "call"}, {"api_name": "aliss.models.Category.objects.filter", "line_number": 104, "usage_type": "call"}, {"api_name": 
"aliss.models.Category.objects", "line_number": 104, "usage_type": "attribute"}, {"api_name": "aliss.models.Category", "line_number": 104, "usage_type": "name"}, {"api_name": "serializers.v4CategorySerializer", "line_number": 108, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 109, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 110, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 113, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 116, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 120, "usage_type": "call"}, {"api_name": "aliss.models.Organisation", "line_number": 120, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 122, "usage_type": "call"}, {"api_name": "aliss.models.Organisation", "line_number": 122, "usage_type": "argument"}, {"api_name": "collections.OrderedDict", "line_number": 124, "usage_type": "call"}, {"api_name": "serializers.v4OrganisationDetailSerializer", "line_number": 125, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 126, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 129, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 133, "usage_type": "call"}, {"api_name": "aliss.models.Service", "line_number": 133, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 135, "usage_type": "call"}, {"api_name": "aliss.models.Service", "line_number": 135, "usage_type": "argument"}, {"api_name": "collections.OrderedDict", "line_number": 137, "usage_type": "call"}, {"api_name": "serializers.v4ServiceSerializer", "line_number": 138, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 139, "usage_type": "call"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 142, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 142, "usage_type": "name"}, {"api_name": "serializers.PostcodeLocationSerializer", "line_number": 143, "usage_type": "name"}, {"api_name": "aliss.models.Postcode.objects.exclude", "line_number": 146, "usage_type": "call"}, {"api_name": "aliss.models.Postcode.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "aliss.models.Postcode", "line_number": 146, "usage_type": "name"}, {"api_name": "serializers.PostcodeLocationSearchSerializer", "line_number": 150, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 164, "usage_type": "name"}, {"api_name": "serializers.ServiceAreaSpatialSearchSerializer", "line_number": 168, "usage_type": "call"}, {"api_name": "aliss.models.Service.objects.get", "line_number": 172, "usage_type": "call"}, {"api_name": "aliss.models.Service.objects", "line_number": 172, "usage_type": "attribute"}, {"api_name": "aliss.models.Service", "line_number": 172, "usage_type": "name"}, {"api_name": "aliss.search.return_feature", "line_number": 180, "usage_type": "call"}, {"api_name": "aliss.search.return_feature", "line_number": 184, "usage_type": "call"}, {"api_name": "aliss.search.return_feature", "line_number": 186, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 190, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 192, "usage_type": "name"}, {"api_name": 
"serializers.ServiceAreaFullSpatialDataSetSearchSerializer", "line_number": 202, "usage_type": "call"}, {"api_name": "aliss.search.return_feature", "line_number": 206, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 209, "usage_type": "call"}]} +{"seq_id": "124380285", "text": "import json\nimport requests\nfrom flask import Flask, render_template, request\nfrom flask_ask import Ask, statement, question\nfrom proto_time_db import start, stop\nfrom proto_eremote import temp_on, temp_off,hum_on, hum_off\nfrom tinydb import TinyDB,Query\n\ndb = TinyDB(\"db.json\")\nsensor = Query()\n\napp = Flask(__name__)\nask = Ask(app, '/')\n\n@app.route(\"/temp\", methods=['POST'])\ndef temp():\n temp = request.data.decode()\n db.update({\"value\":temp},sensor.key == \"temp\")\n return temp\n\n@app.route(\"/hum\", methods=['POST'])\ndef hum():\n hum = request.data.decode()\n db.update({\"value\":hum},sensor.key == \"hum\")\n return hum\n\n@ask.launch\ndef launched():\n text = \"はいプロトです。本を読む時は、読書をはじめるよって言ってね。\"\n return question(text)\n\n@ask.intent('readingStart')\ndef readingStart():\n start()\n t = float(db.search(sensor.key == \"temp\")[0][\"value\"])\n h = int(db.search(sensor.key == \"hum\")[0][\"value\"])\n print(\"temp:\" + str(t),\"hum:\" + str(h))\n if t < 20 and h < 60 :\n text = \"ちょっと寒いですね、暖房を点けますか���\"\n return question(\"読書をはじめます。\" + text)\n else:\n text = \"お部屋の温度はちょうどいいですね。\"\n return statement(\"読書をはじめます。\" + text)\n\n@ask.intent('readingEnd')\ndef readingEnd():\n r = stop()\n p1 = db.search(sensor.key == \"temp_power\")[0][\"value\"]\n p2 = db.search(sensor.key == \"hum_power\")[0][\"value\"]\n if p1 == 1:\n text = \"暖房を消しますか?\"\n return question('読書の時間は' + str(r) + 'でした。' + text)\n else:\n return statement('読書の時間は' + str(r) + 'でした。お疲れさまでした。')\n\n@ask.intent('onIntent')\ndef on():\n temp_on()\n db.update({\"value\":1},sensor.key == \"temp_power\")\n\n h = int(db.search(sensor.key == \"hum\")[0][\"value\"])\n if h < 60:\n hum_on()\n db.update({\"value\":1},sensor.key == \"hum_power\")\n else:\n pass\n return statement('暖房を入れました。快適に読書を楽しんでね。')\n\n@ask.intent('offIntent')\ndef off():\n temp_off()\n db.update({\"value\":0},sensor.key == \"temp_power\")\n\n p2 = db.search(sensor.key == \"hum_power\")[0][\"value\"]\n if p2 == 1:\n hum_off()\n db.update({\"value\":0},sensor.key == \"hum_power\")\n else:\n pass\n return statement('暖房を消しました。お疲れさまでした。')\n\n@ask.session_ended\ndef session_ended():\n return \"{}\", 200\n\nif __name__ == \"__main__\":\n app.run(host=\"127.0.0.1\", port=5000)\n", "sub_path": "Chapter5/AmazonEcho_room/proto_a_room.py", "file_name": "proto_a_room.py", "file_ext": "py", "file_size_in_byte": 2596, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "tinydb.TinyDB", "line_number": 9, "usage_type": "call"}, {"api_name": "tinydb.Query", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "flask_ask.Ask", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.data.decode", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.request.data.decode", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, 
"usage_type": "name"}, {"api_name": "flask_ask.question", "line_number": 30, "usage_type": "call"}, {"api_name": "proto_time_db.start", "line_number": 34, "usage_type": "call"}, {"api_name": "flask_ask.question", "line_number": 40, "usage_type": "call"}, {"api_name": "flask_ask.statement", "line_number": 43, "usage_type": "call"}, {"api_name": "proto_time_db.stop", "line_number": 47, "usage_type": "call"}, {"api_name": "flask_ask.question", "line_number": 52, "usage_type": "call"}, {"api_name": "flask_ask.statement", "line_number": 54, "usage_type": "call"}, {"api_name": "proto_eremote.temp_on", "line_number": 58, "usage_type": "call"}, {"api_name": "proto_eremote.hum_on", "line_number": 63, "usage_type": "call"}, {"api_name": "flask_ask.statement", "line_number": 67, "usage_type": "call"}, {"api_name": "proto_eremote.temp_off", "line_number": 71, "usage_type": "call"}, {"api_name": "proto_eremote.hum_off", "line_number": 76, "usage_type": "call"}, {"api_name": "flask_ask.statement", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "4834627", "text": "# -*- coding: utf-8 -*-\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport proto # type: ignore\n\nfrom google.cloud.servicedirectory_v1beta1.types import endpoint\n\n\n__protobuf__ = proto.module(\n package='google.cloud.servicedirectory.v1beta1',\n manifest={\n 'Service',\n },\n)\n\n\nclass Service(proto.Message):\n r\"\"\"An individual service. A service contains a name and optional\n metadata. A service must exist before\n [endpoints][google.cloud.servicedirectory.v1beta1.Endpoint] can be\n added to it.\n\n Attributes:\n name (str):\n Immutable. The resource name for the service in the format\n 'projects/*/locations/*/namespaces/*/services/*'.\n metadata (Sequence[google.cloud.servicedirectory_v1beta1.types.Service.MetadataEntry]):\n Optional. Metadata for the service. This data\n can be consumed by service clients. The entire\n metadata dictionary may contain up to 2000\n characters, spread across all key-value pairs.\n Metadata that goes beyond any these limits will\n be rejected.\n endpoints (Sequence[google.cloud.servicedirectory_v1beta1.types.Endpoint]):\n Output only. Endpoints associated with this\n service. 
Returned on LookupService.Resolve.\n Control plane clients should use\n RegistrationService.ListEndpoints.\n \"\"\"\n\n name = proto.Field(\n proto.STRING,\n number=1,\n )\n metadata = proto.MapField(\n proto.STRING,\n proto.STRING,\n number=2,\n )\n endpoints = proto.RepeatedField(\n proto.MESSAGE,\n number=3,\n message=endpoint.Endpoint,\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n", "sub_path": "google/cloud/servicedirectory/v1beta1/servicedirectory-v1beta1-py/google/cloud/servicedirectory_v1beta1/types/service.py", "file_name": "service.py", "file_ext": "py", "file_size_in_byte": 2271, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "proto.module", "line_number": 21, "usage_type": "call"}, {"api_name": "proto.Message", "line_number": 29, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 53, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 54, "usage_type": "attribute"}, {"api_name": "proto.MapField", "line_number": 57, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 58, "usage_type": "attribute"}, {"api_name": "proto.STRING", "line_number": 59, "usage_type": "attribute"}, {"api_name": "proto.RepeatedField", "line_number": 62, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 63, "usage_type": "attribute"}, {"api_name": "google.cloud.servicedirectory_v1beta1.types.endpoint.Endpoint", "line_number": 65, "usage_type": "attribute"}, {"api_name": "google.cloud.servicedirectory_v1beta1.types.endpoint", "line_number": 65, "usage_type": "name"}]} +{"seq_id": "411982832", "text": "import numpy as np\nfrom scipy import fftpack\nimport matplotlib.pyplot as plt\nfrom matplotlib.pylab import mpl\n\nmpl.rcParams['font.sans-serif'] = ['SimHei'] # 显示中文\nmpl.rcParams['axes.unicode_minus'] = False # 显示负号\n\n\"\"\"\n1111111111111111111111111111111\n\"\"\"\n# 采样点选择1400个,因为设置的信号频率分量最高为600赫兹,\n# 根据采样定理知采样频率要大于信号频率2倍,所以这里设置采样频率为1400赫兹(即一秒内有1400个采样点,一样意思的)\nx = np.linspace(0, 1, 1400)\n\n# 设置需要采样的信号,频率分量有200,400和600\ny = 7 * np.sin(2 * np.pi * 200 * x) + 5 * np.sin(2 * np.pi * 400 * x) + 3 * np.sin(2 * np.pi * 600 * x)\n\nplt.figure()\nplt.plot(x, y)\nplt.title('原始波形')\n\nplt.figure()\nn = 50\nplt.plot(x[0:n], y[0:n])\nplt.title('原始部分波形(前50组样本)')\nplt.show()\n\n\"\"\"\n22222222222222222222222222\n\"\"\"\n\nfft_y = fftpack.fft(y)\nprint(len(fft_y))\nprint(fft_y[0:5])\n\n\"\"\"\n33333333333333333333333333333\n\"\"\"\nN = 1400\nx = np.arange(N) # 频率个数\n\nabs_y = np.abs(fft_y) # 取复数的绝对值,即复数的模(双边频谱)\nangle_y = np.angle(fft_y) # 取复数的角度\n\nplt.figure()\nplt.plot(x, abs_y)\nplt.title('双边振幅谱(未归一化)')\n\nplt.figure()\nplt.plot(x, angle_y)\nplt.title('双边相位谱(未归一化)')\nplt.show()\n\n\"\"\"\n444444444444444444444444444\n\"\"\"\nnormalization_y = abs_y / N # 归一化处理(双边频谱)\nplt.figure()\nplt.plot(x, normalization_y, 'g')\nplt.title('双边频谱(归一化)', fontsize=9, color='green')\nplt.show()\n\n\"\"\"\n55555555555555555555555555555\n\"\"\"\n\nhalf_x = x[range(int(N / 2))] # 取一半区间\nnormalization_half_y = normalization_y[range(int(N / 2))] # 由于对称性,只取一半区间(单边频谱)\nplt.figure()\nplt.plot(half_x, normalization_half_y, 'b')\nplt.title('单边频谱(归一化)', fontsize=9, color='blue')\nplt.show()\n", "sub_path": "fourier/demo.py", "file_name": "demo.py", "file_ext": "py", "file_size_in_byte": 1914, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "matplotlib.pylab.mpl.rcParams", "line_number": 6, "usage_type": "attribute"}, 
{"api_name": "matplotlib.pylab.mpl", "line_number": 6, "usage_type": "name"}, {"api_name": "matplotlib.pylab.mpl.rcParams", "line_number": 7, "usage_type": "attribute"}, {"api_name": "matplotlib.pylab.mpl", "line_number": 7, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 17, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "scipy.fftpack.fft", "line_number": 33, "usage_type": "call"}, {"api_name": "scipy.fftpack", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.angle", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", 
"line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}]} +{"seq_id": "301303911", "text": "import matplotlib.pyplot as plt\r\nimport sys\r\nfrom matplotlib import dates\r\n\r\nimport numpy as np\r\n\r\nimport datetime\r\nfilename = sys.argv[1]\r\nwith open(filename, 'r') as f:\r\n\tlines = f.readlines()\r\nlines = lines[5:]\r\nt1 = np.zeros(len(lines))\r\nt2 = np.zeros(len(lines))\r\nt3 = np.zeros(len(lines))\r\ntime = np.zeros(len(lines))\r\nfor i, line in enumerate(lines):\r\n\ttime[i] = int(line.split()[0])\r\n\tt1[i] = float(line.split()[1])\r\n\tt2[i] = float(line.split()[2])\r\n\tt3[i] = float(line.split()[3])\r\n\r\n\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(111)\r\nsecs = dates.epoch2num(time)\r\nplt.plot_date(secs, t1, 'r.', label='Ambient')\r\nplt.plot_date(secs, t2, 'b.', label='Plate')\r\nplt.plot_date(secs, t3, 'g.', label='Sample')\r\n\r\ndate_fmt = '%m/%d\\n%H:%M'\r\n# Use a DateFormatter to set the data to the correct format.\r\ndate_formatter = dates.DateFormatter(date_fmt)\r\nax.xaxis.set_major_formatter(date_formatter)\r\n\r\n# Sets the tick labels diagonal so they fit easier.\r\n#fig.autofmt_xdate()\r\n\r\nplt.xlabel('Time')\r\nplt.ylabel('Temperature (deg C)')\r\nplt.legend(loc='best')\r\n\r\nplt.show()\r\n", "sub_path": "logging/misc/fit_temp.py", "file_name": "fit_temp.py", "file_ext": "py", "file_size_in_byte": 1072, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.dates.epoch2num", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot_date", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot_date", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot_date", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 39, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "21271587", "text": "# Copyright 2015 Metaswitch Networks\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUsage:\n calicoctl checksystem [--fix]\n\nDescription:\n Check for incompatibilities between calico and the host system\n\nOptions:\n --fix Allow calicoctl to attempt to correct any issues detected on the host\n\"\"\"\nimport sys\nimport re\nimport sh\n\nimport docker\nfrom requests import ConnectionError\n\nfrom utils import DOCKER_VERSION\nfrom utils import enforce_root\nfrom utils import sysctl\nfrom connectors import docker_client\n\ndef checksystem(arguments):\n \"\"\"\n Main dispatcher for checksystem commands. Calls the corresponding helper\n function. checksystem only has one main function, so we call that function\n directly.\n\n :param arguments: A dictionary of arguments already processed through\n this file's docstring with docopt\n :return: None\n \"\"\"\n check_system(fix=arguments[\"--fix\"], quit_if_error=True)\n\n\ndef check_system(fix=False, quit_if_error=False):\n \"\"\"\n Checks that the system is setup correctly. fix==True, this command will\n attempt to fix any issues it encounters. If any fixes fail, it will\n exit(1). Fix will automatically be set to True if the user specifies --fix\n at the command line.\n\n :param fix: if True, try to fix any system dependency issues that are\n detected.\n :param quit_if_error: if True, quit with error code 1 if any issues are\n detected, or if any fixes are unsuccesful.\n :return: True if all system dependencies are in the proper state, False if\n they are not. 
This function will sys.exit(1) instead of returning false if\n quit_if_error == True\n \"\"\"\n # modprobe and sysctl require root privileges.\n enforce_root()\n\n system_ok = (_check_kernel_modules(fix) and\n _check_ip_forwarding(fix) and\n _check_docker_version())\n\n if quit_if_error and not system_ok:\n sys.exit(1)\n\n return system_ok\n\n\ndef module_loaded(module):\n \"\"\"\n Checks if the specified kernel-module has been loaded.\n :param module: Name of the module to check\n :return: True if the module is loaded, False if not.\n \"\"\"\n return any(s.startswith(module) for s in open(\"/proc/modules\").readlines())\n\n\ndef normalize_version(version):\n \"\"\"\n This function convers a string representation of a version into\n a list of integer values.\n e.g.: \"1.5.10\" => [1, 5, 10]\n http://stackoverflow.com/questions/1714027/version-number-comparison\n \"\"\"\n return [int(x) for x in re.sub(r'(\\.0+)*$', '', version).split(\".\")]\n\n\ndef _check_kernel_modules(fix):\n \"\"\"\n Check system kernel modules\n :param fix: if True, try to fix any system dependency issues that are\n detected.\n :return: True if kernel modules are ok.\n \"\"\"\n modprobe = sh.Command._create('modprobe')\n ip6tables = sh.Command._create('ip6tables')\n system_ok = True\n try:\n ip6tables(\"-L\")\n except:\n if fix:\n try:\n modprobe('ip6_tables')\n except sh.ErrorReturnCode:\n print >> sys.stderr, \"ERROR: Could not enable ip6_tables.\"\n system_ok = False\n else:\n print >> sys.stderr, \"WARNING: Unable to detect the ip6_tables \" \\\n \"module. Load with `modprobe ip6_tables`\"\n system_ok = False\n\n for module in [\"xt_set\", \"ipip\"]:\n if not module_loaded(module):\n if fix:\n try:\n modprobe(module)\n except sh.ErrorReturnCode:\n print >> sys.stderr, \"ERROR: Could not enable %s.\" % module\n system_ok = False\n else:\n print >> sys.stderr, \"WARNING: Unable to detect the %s \" \\\n \"module. 
Load with `modprobe %s`\" % \\\n (module, module)\n system_ok = False\n return system_ok\n\n\ndef _check_ip_forwarding(fix):\n \"\"\"\n Check IP forwarding is enabled.\n :param fix: if True, try to fix any system dependency issues that are\n detected.\n :return: True if IP forwarding is ok.\n \"\"\"\n system_ok = True\n # Enable IP forwarding since all compute hosts are vRouters.\n # IPv4 forwarding should be enabled already by docker.\n if \"1\" not in sysctl(\"net.ipv4.ip_forward\"):\n if fix:\n if \"1\" not in sysctl(\"-w\", \"net.ipv4.ip_forward=1\"):\n print >> sys.stderr, \"ERROR: Could not enable ipv4 forwarding.\"\n system_ok = False\n else:\n print >> sys.stderr, \"WARNING: ipv4 forwarding is not enabled.\"\n system_ok = False\n\n if \"1\" not in sysctl(\"net.ipv6.conf.all.forwarding\"):\n if fix:\n if \"1\" not in sysctl(\"-w\", \"net.ipv6.conf.all.forwarding=1\"):\n print >> sys.stderr, \"ERROR: Could not enable ipv6 forwarding.\"\n system_ok = False\n else:\n print >> sys.stderr, \"WARNING: ipv6 forwarding is not enabled.\"\n system_ok = False\n return system_ok\n\n\ndef _check_docker_version():\n \"\"\"\n Check the Docker version is supported.\n\n :return: True if Docker version is OK.\n \"\"\"\n system_ok = True\n # Check docker version compatability\n try:\n info = docker_client.version()\n except ConnectionError:\n print >> sys.stderr, \"ERROR: Docker daemon not running.\"\n system_ok = False\n except docker.errors.APIError:\n print >> sys.stderr, \"ERROR: Docker server must support Docker \" \\\n \"Remote API v%s or greater.\" % DOCKER_VERSION\n system_ok = False\n else:\n api_version = normalize_version(info['ApiVersion'])\n # Check that API Version is above the minimum supported version\n if cmp(api_version, normalize_version(DOCKER_VERSION)) < 0:\n print >> sys.stderr, \"ERROR: Docker server must support Docker \" \\\n \"Remote API v%s or greater.\" % DOCKER_VERSION\n system_ok = False\n\n return system_ok\n", "sub_path": "calico_containers/calico_ctl/checksystem.py", "file_name": "checksystem.py", "file_ext": "py", "file_size_in_byte": 6546, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "utils.enforce_root", "line_number": 65, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 72, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 93, "usage_type": "call"}, {"api_name": "sh.Command._create", "line_number": 103, "usage_type": "call"}, {"api_name": "sh.Command", "line_number": 103, "usage_type": "attribute"}, {"api_name": "sh.Command._create", "line_number": 104, "usage_type": "call"}, {"api_name": "sh.Command", "line_number": 104, "usage_type": "attribute"}, {"api_name": "sh.ErrorReturnCode", "line_number": 112, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 113, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 116, "usage_type": "attribute"}, {"api_name": "sh.ErrorReturnCode", "line_number": 125, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 126, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 129, "usage_type": "attribute"}, {"api_name": "utils.sysctl", "line_number": 146, "usage_type": "call"}, {"api_name": "utils.sysctl", "line_number": 148, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 149, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 152, "usage_type": "attribute"}, {"api_name": "utils.sysctl", "line_number": 155, "usage_type": 
"call"}, {"api_name": "utils.sysctl", "line_number": 157, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 158, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 161, "usage_type": "attribute"}, {"api_name": "connectors.docker_client.version", "line_number": 175, "usage_type": "call"}, {"api_name": "connectors.docker_client", "line_number": 175, "usage_type": "name"}, {"api_name": "requests.ConnectionError", "line_number": 176, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 177, "usage_type": "attribute"}, {"api_name": "docker.errors", "line_number": 179, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 180, "usage_type": "attribute"}, {"api_name": "utils.DOCKER_VERSION", "line_number": 181, "usage_type": "name"}, {"api_name": "utils.DOCKER_VERSION", "line_number": 186, "usage_type": "argument"}, {"api_name": "sys.stderr", "line_number": 187, "usage_type": "attribute"}, {"api_name": "utils.DOCKER_VERSION", "line_number": 188, "usage_type": "name"}]} +{"seq_id": "524297126", "text": "# -*- coding: utf-8 -*-\n\"\"\"Cisco Identity Services Engine ACISettings API wrapper.\n\nCopyright (c) 2021 Cisco and/or its affiliates.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom builtins import *\n\nfrom past.builtins import basestring\n\nfrom ...pagination import get_next_page\nfrom ...restsession import RestSession\nfrom ...utils import (\n apply_path_params,\n check_type,\n dict_from_items_with_values,\n dict_of_str,\n)\n\n\nclass AciSettings(object):\n \"\"\"Identity Services Engine ACISettings API (version: 3.1.0).\n\n Wraps the Identity Services Engine ACISettings\n API and exposes the API as native Python\n methods that return native Python objects.\n\n | ACI Settings API allows the client to get and update the ACI Settings. 
In addition, testing the ACI Domain Manager connection is also possible using the TestACIConnection.\n\n **Revision History**\n\n +----------------+----------------------+-----------------------+---------------------------+\n | **Revision #** | **Resource Version** | **Cisco ISE Version** | **Description** |\n +----------------+----------------------+-----------------------+---------------------------+\n | 0 | 1.0 | 3.0 | Initial Cisco ISE Version |\n +----------------+----------------------+-----------------------+---------------------------+\n\n |\n\n **Resource Definition**\n\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | **Attribute** | **Type** | **Required** | **Description** | **Default Values** | **Example Values** |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | name | String | Yes | Resource Name | | AciSettings |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | id | String | No | Resource UUID value | | 29fb45ab-6a8e-4658-8a28-02521c258178 |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | description | String | No | | | Aci Settings |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | enableAci | Boolean | Yes | Enable ACI Integration | false | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | isAci50 | Boolean | Yes | Enable 5.0 ACI Version | false | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | ipAddressHostName | String | No | ACI Cluster IP Address / Host name | | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | adminName | String | No | ACI Cluster Admin name | | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | adminPassword | String | No | ACI Cluster Admin password | | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | tenantName | String | No | ACI Cluster Tenant name | ISE | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | l3RouteNetwork | String | No | ACI Cluster L3 Route network name | L3_ROUTE | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | suffixToEpg | String | No | Name Conversion - EPG suffix | SGT | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | suffixToSgt | String | No | Name Conversion - SGT suffix | EPG | 
|\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | allSxpDomain | Boolean | No | SXP Propagation to all the SXP domains | false | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | specificSxpDomain | Boolean | No | SXP Propagation to specific SXP domains | true | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | specifixSxpDomainList | List | No | Specific SXP domains list | [default] | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | isAci51 | Boolean | Yes | Enable 5.1 ACI Version | false | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | aciipaddress | String | No | ACI Domain manager Ip Address | | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | aciuserName | String | No | ACI Domain manager Username | | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | acipassword | String | No | ACI Domain manager Password | | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | enableDataPlane | Boolean | No | Enable data plane | false | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | untaggedPacketIepgName | String | No | Untagged IEPG packets name | untagged | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | defaultSgtName | String | No | Default SGT name | Unknown | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | enableElementsLimit | Boolean | No | Enable Elements Limit | false | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | maxNumIepgFromAci | Integer | No | Max number of IEPGs | 1000 | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n | maxNumSgtToAci | Integer | No | Max number of SGTs | 500 | |\n +------------------------+----------+--------------+-----------------------------------------+--------------------+--------------------------------------+\n\n \"\"\"\n\n def __init__(self, session, object_factory, request_validator):\n \"\"\"Initialize a new AciSettings\n object with the provided RestSession.\n\n Args:\n session(RestSession): The RESTful session object to be used for\n API calls to the Identity Services Engine service.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n\n \"\"\"\n check_type(session, RestSession)\n\n 
super(AciSettings, self).__init__()\n\n self._session = session\n self._object_factory = object_factory\n self._request_validator = request_validator\n\n def get_aci_settings(self,\n headers=None,\n **query_parameters):\n \"\"\"This API allows the client to get ACI Settings.\n\n Args:\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n \"\"\"\n check_type(headers, dict)\n\n if headers is not None:\n if 'Content-Type' in headers:\n check_type(headers.get('Content-Type'),\n basestring, may_be_none=False)\n if 'Accept' in headers:\n check_type(headers.get('Accept'),\n basestring, may_be_none=False)\n if 'ERS-Media-Type' in headers:\n check_type(headers.get('ERS-Media-Type'),\n basestring)\n if 'X-CSRF-Token' in headers:\n check_type(headers.get('X-CSRF-Token'),\n basestring)\n\n with_custom_headers = False\n _headers = self._session.headers or {}\n if headers:\n _headers.update(dict_of_str(headers))\n with_custom_headers = True\n\n _params = {\n }\n _params.update(query_parameters)\n _params = dict_from_items_with_values(_params)\n\n path_params = {\n }\n\n e_url = ('/ers/config/acisettings')\n endpoint_full_url = apply_path_params(e_url, path_params)\n if with_custom_headers:\n _api_response = self._session.get(endpoint_full_url, params=_params,\n headers=_headers)\n else:\n _api_response = self._session.get(endpoint_full_url, params=_params)\n\n return self._object_factory('bpm_ea5c865993b56f48f7f43475294a20c_v3_1_0', _api_response)\n\n def get_all(self,\n headers=None,\n **query_parameters):\n \"\"\"Alias for `get_aci_settings <#ciscoisesdk.\n api.v3_1_0.aci_settings.\n AciSettings.get_aci_settings>`_\n \"\"\"\n return self.get_aci_settings(\n headers=headers,\n **query_parameters\n )\n\n def test_aci_connectivity(self,\n headers=None,\n **query_parameters):\n \"\"\"This API allows the client to test ACI Domain Manager\n connection.\n\n Args:\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. 
Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n \"\"\"\n check_type(headers, dict)\n\n if headers is not None:\n if 'Content-Type' in headers:\n check_type(headers.get('Content-Type'),\n basestring, may_be_none=False)\n if 'Accept' in headers:\n check_type(headers.get('Accept'),\n basestring, may_be_none=False)\n if 'ERS-Media-Type' in headers:\n check_type(headers.get('ERS-Media-Type'),\n basestring)\n if 'X-CSRF-Token' in headers:\n check_type(headers.get('X-CSRF-Token'),\n basestring)\n\n with_custom_headers = False\n _headers = self._session.headers or {}\n if headers:\n _headers.update(dict_of_str(headers))\n with_custom_headers = True\n\n _params = {\n }\n _params.update(query_parameters)\n _params = dict_from_items_with_values(_params)\n\n path_params = {\n }\n\n e_url = ('/ers/config/acisettings/testACIConnectivity')\n endpoint_full_url = apply_path_params(e_url, path_params)\n\n if with_custom_headers:\n _api_response = self._session.put(endpoint_full_url, params=_params,\n headers=_headers)\n else:\n _api_response = self._session.put(endpoint_full_url, params=_params)\n\n return self._object_factory('bpm_b155c91eec153338302d492db1afb80_v3_1_0', _api_response)\n\n def update_aci_settings_by_id(self,\n id,\n aci50=None,\n aci51=None,\n aciipaddress=None,\n acipassword=None,\n aciuser_name=None,\n admin_name=None,\n admin_password=None,\n all_sxp_domain=None,\n default_sgt_name=None,\n enable_aci=None,\n enable_data_plane=None,\n enable_elements_limit=None,\n ip_address_host_name=None,\n l3_route_network=None,\n max_num_iepg_from_aci=None,\n max_num_sgt_to_aci=None,\n specific_sxp_domain=None,\n specifix_sxp_domain_list=None,\n suffix_to_epg=None,\n suffix_to_sgt=None,\n tenant_name=None,\n untagged_packet_iepg_name=None,\n headers=None,\n payload=None,\n active_validation=True,\n **query_parameters):\n \"\"\"This API allows the client to update ACI settings.\n\n Args:\n aci50(boolean): Enable 5.0 ACI Version, property of the\n request body.\n aci51(boolean): Enable 5.1 ACI Version, property of the\n request body.\n aciipaddress(string): ACI Domain manager Ip Address.,\n property of the request body.\n acipassword(string): ACI Domain manager Password.,\n property of the request body.\n aciuser_name(string): ACI Domain manager Username.,\n property of the request body.\n admin_name(string): ACI Cluster Admin name, property of\n the request body.\n admin_password(string): ACI Cluster Admin password,\n property of the request body.\n all_sxp_domain(boolean): allSxpDomain, property of the\n request body.\n default_sgt_name(string): defaultSgtName, property of\n the request body.\n enable_aci(boolean): Enable ACI Integration, property of\n the request body.\n enable_data_plane(boolean): enableDataPlane, property of\n the request body.\n enable_elements_limit(boolean): enableElementsLimit,\n property of the request body.\n id(string): Resource UUID value, property of the request\n body.\n ip_address_host_name(string): ACI Cluster IP Address /\n Host name, property of the request body.\n l3_route_network(string): l3RouteNetwork, property of\n the request body.\n max_num_iepg_from_aci(integer): maxNumIepgFromAci,\n property of the request body.\n 
max_num_sgt_to_aci(integer): maxNumSgtToAci, property of\n the request body.\n specific_sxp_domain(boolean): specificSxpDomain,\n property of the request body.\n specifix_sxp_domain_list(list): specifixSxpDomainList,\n property of the request body (list of\n strings).\n suffix_to_epg(string): suffixToEpg, property of the\n request body.\n suffix_to_sgt(string): suffixToSgt, property of the\n request body.\n tenant_name(string): tenantName, property of the request\n body.\n untagged_packet_iepg_name(string):\n untaggedPacketIepgName, property of the\n request body.\n id(basestring): id path parameter.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n payload(dict): A JSON serializable Python object to send in the\n body of the Request.\n active_validation(bool): Enable/Disable payload validation.\n Defaults to True.\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n \"\"\"\n check_type(headers, dict)\n\n if headers is not None:\n if 'Content-Type' in headers:\n check_type(headers.get('Content-Type'),\n basestring, may_be_none=False)\n if 'Accept' in headers:\n check_type(headers.get('Accept'),\n basestring, may_be_none=False)\n if 'ERS-Media-Type' in headers:\n check_type(headers.get('ERS-Media-Type'),\n basestring)\n if 'X-CSRF-Token' in headers:\n check_type(headers.get('X-CSRF-Token'),\n basestring)\n\n with_custom_headers = False\n _headers = self._session.headers or {}\n if headers:\n _headers.update(dict_of_str(headers))\n with_custom_headers = True\n is_xml_payload = 'application/xml' in _headers.get('Content-Type', [])\n if active_validation and is_xml_payload:\n check_type(payload, basestring)\n if active_validation and not is_xml_payload:\n check_type(payload, dict)\n check_type(id, basestring,\n may_be_none=False)\n\n _params = {\n }\n _params.update(query_parameters)\n _params = dict_from_items_with_values(_params)\n\n path_params = {\n 'id': id,\n }\n if is_xml_payload:\n _payload = payload\n else:\n _tmp_payload = {\n 'id':\n id,\n 'enableAci':\n enable_aci,\n 'ipAddressHostName':\n ip_address_host_name,\n 'adminName':\n admin_name,\n 'adminPassword':\n admin_password,\n 'aciipaddress':\n aciipaddress,\n 'aciuserName':\n aciuser_name,\n 'acipassword':\n acipassword,\n 'tenantName':\n tenant_name,\n 'l3RouteNetwork':\n l3_route_network,\n 'suffixToEpg':\n suffix_to_epg,\n 'suffixToSgt':\n suffix_to_sgt,\n 'allSxpDomain':\n all_sxp_domain,\n 'specificSxpDomain':\n specific_sxp_domain,\n 'specifixSxpDomainList':\n specifix_sxp_domain_list,\n 'enableDataPlane':\n enable_data_plane,\n 'untaggedPacketIepgName':\n untagged_packet_iepg_name,\n 'defaultSgtName':\n default_sgt_name,\n 'enableElementsLimit':\n enable_elements_limit,\n 'maxNumIepgFromAci':\n max_num_iepg_from_aci,\n 'maxNumSgtToAci':\n max_num_sgt_to_aci,\n 'aci50':\n aci50,\n 'aci51':\n aci51,\n }\n _payload = {\n 'AciSettings': dict_from_items_with_values(_tmp_payload)\n }\n 
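# Illustrative note (not in the original record): the wrapped body built just
# above follows the Resource Definition table in the class docstring, e.g.
#   {'AciSettings': {'id': '29fb45ab-6a8e-4658-8a28-02521c258178',
#                    'enableAci': True, 'tenantName': 'ISE',
#                    'l3RouteNetwork': 'L3_ROUTE',
#                    'maxNumIepgFromAci': 1000, 'maxNumSgtToAci': 500}}
# where the values shown are the table's example/default values.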
_payload.update(payload or {})\n _payload = dict_from_items_with_values(_payload)\n if active_validation and not is_xml_payload:\n self._request_validator('jsd_cea2e785ee57908a9ee3b118e49cfa_v3_1_0')\\\n .validate(_payload)\n\n e_url = ('/ers/config/acisettings/{id}')\n endpoint_full_url = apply_path_params(e_url, path_params)\n\n request_params = {'data': _payload} if is_xml_payload else {'json': _payload}\n if with_custom_headers:\n _api_response = self._session.put(endpoint_full_url, params=_params,\n headers=_headers,\n **request_params)\n\n else:\n _api_response = self._session.put(endpoint_full_url, params=_params,\n **request_params)\n\n return self._object_factory('bpm_cea2e785ee57908a9ee3b118e49cfa_v3_1_0', _api_response)\n\n def update_by_id(self,\n id,\n aci50=None,\n aci51=None,\n aciipaddress=None,\n acipassword=None,\n aciuser_name=None,\n admin_name=None,\n admin_password=None,\n all_sxp_domain=None,\n default_sgt_name=None,\n enable_aci=None,\n enable_data_plane=None,\n enable_elements_limit=None,\n ip_address_host_name=None,\n l3_route_network=None,\n max_num_iepg_from_aci=None,\n max_num_sgt_to_aci=None,\n specific_sxp_domain=None,\n specifix_sxp_domain_list=None,\n suffix_to_epg=None,\n suffix_to_sgt=None,\n tenant_name=None,\n untagged_packet_iepg_name=None,\n headers=None,\n payload=None,\n active_validation=True,\n **query_parameters):\n \"\"\"Alias for `update_aci_settings_by_id <#ciscoisesdk.\n api.v3_1_0.aci_settings.\n AciSettings.update_aci_settings_by_id>`_\n \"\"\"\n return self.update_aci_settings_by_id(\n id=id,\n aci50=aci50,\n aci51=aci51,\n aciipaddress=aciipaddress,\n acipassword=acipassword,\n aciuser_name=aciuser_name,\n admin_name=admin_name,\n admin_password=admin_password,\n all_sxp_domain=all_sxp_domain,\n default_sgt_name=default_sgt_name,\n enable_aci=enable_aci,\n enable_data_plane=enable_data_plane,\n enable_elements_limit=enable_elements_limit,\n ip_address_host_name=ip_address_host_name,\n l3_route_network=l3_route_network,\n max_num_iepg_from_aci=max_num_iepg_from_aci,\n max_num_sgt_to_aci=max_num_sgt_to_aci,\n specific_sxp_domain=specific_sxp_domain,\n specifix_sxp_domain_list=specifix_sxp_domain_list,\n suffix_to_epg=suffix_to_epg,\n suffix_to_sgt=suffix_to_sgt,\n tenant_name=tenant_name,\n untagged_packet_iepg_name=untagged_packet_iepg_name,\n payload=payload,\n active_validation=active_validation,\n headers=headers,\n **query_parameters\n )\n\n def get_version(self,\n headers=None,\n **query_parameters):\n \"\"\"This API helps to retrieve the version information related to\n the Cisco ACI settings.\n\n Args:\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **query_parameters: Additional query parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n\n RestResponse: REST response with following properties:\n\n - headers(MyDict): response headers.\n - response(MyDict): response body as a MyDict object. 
Access the object's properties by using the dot notation\n or the bracket notation.\n - content(bytes): representation of the request's response\n - text(str): representation of the request's response\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the Identity Services Engine cloud returns an error.\n \"\"\"\n check_type(headers, dict)\n\n if headers is not None:\n if 'Content-Type' in headers:\n check_type(headers.get('Content-Type'),\n basestring, may_be_none=False)\n if 'Accept' in headers:\n check_type(headers.get('Accept'),\n basestring, may_be_none=False)\n\n with_custom_headers = False\n _headers = self._session.headers or {}\n if headers:\n _headers.update(dict_of_str(headers))\n with_custom_headers = True\n\n _params = {\n }\n _params.update(query_parameters)\n _params = dict_from_items_with_values(_params)\n\n path_params = {\n }\n\n e_url = ('/ers/config/acisettings/versioninfo')\n endpoint_full_url = apply_path_params(e_url, path_params)\n if with_custom_headers:\n _api_response = self._session.get(endpoint_full_url, params=_params,\n headers=_headers)\n else:\n _api_response = self._session.get(endpoint_full_url, params=_params)\n\n return self._object_factory('bpm_ea47f65521bcf0ab949b5d72b5_v3_1_0', _api_response)\n", "sub_path": "ciscoisesdk/api/v3_1_0/aci_settings.py", "file_name": "aci_settings.py", "file_ext": "py", "file_size_in_byte": 31358, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "utils.check_type", "line_number": 131, "usage_type": "call"}, {"api_name": "restsession.RestSession", "line_number": 131, "usage_type": "argument"}, {"api_name": "utils.check_type", "line_number": 165, "usage_type": "call"}, {"api_name": "utils.check_type", "line_number": 169, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 170, "usage_type": "argument"}, {"api_name": "utils.check_type", "line_number": 172, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 173, "usage_type": "argument"}, {"api_name": "utils.check_type", "line_number": 175, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 176, "usage_type": "argument"}, {"api_name": "utils.check_type", "line_number": 178, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 179, "usage_type": "argument"}, {"api_name": "utils.dict_of_str", "line_number": 184, "usage_type": "call"}, {"api_name": "utils.dict_from_items_with_values", "line_number": 190, "usage_type": "call"}, {"api_name": "utils.apply_path_params", "line_number": 196, "usage_type": "call"}, {"api_name": "utils.check_type", "line_number": 244, "usage_type": "call"}, {"api_name": "utils.check_type", "line_number": 248, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 249, "usage_type": "argument"}, {"api_name": "utils.check_type", "line_number": 251, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 252, "usage_type": "argument"}, {"api_name": "utils.check_type", "line_number": 254, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 255, "usage_type": "argument"}, {"api_name": "utils.check_type", "line_number": 257, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 258, "usage_type": "argument"}, {"api_name": "utils.dict_of_str", "line_number": 263, "usage_type": "call"}, {"api_name": 
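# Illustrative usage sketch (not part of the original record). It assumes the
# ciscoisesdk entry point exposes the class above as `api.aci_settings` and
# accepts these constructor arguments; treat those names, the credentials and
# the URL as placeholders/assumptions rather than verified API.
from ciscoisesdk import IdentityServicesEngineAPI

api = IdentityServicesEngineAPI(username='admin',
                                password='C1sco12345',
                                base_url='https://ise.example.com')

current = api.aci_settings.get_aci_settings()    # GET  /ers/config/acisettings
print(current.response)                          # MyDict body, per the docstrings above
api.aci_settings.test_aci_connectivity()         # PUT  /ers/config/acisettings/testACIConnectivity
version_info = api.aci_settings.get_version()    # GET  /ers/config/acisettings/versioninfo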
"utils.dict_from_items_with_values", "line_number": 269, "usage_type": "call"}, {"api_name": "utils.apply_path_params", "line_number": 275, "usage_type": "call"}, {"api_name": "utils.check_type", "line_number": 389, "usage_type": "call"}, {"api_name": "utils.check_type", "line_number": 393, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 394, "usage_type": "argument"}, {"api_name": "utils.check_type", "line_number": 396, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 397, "usage_type": "argument"}, {"api_name": "utils.check_type", "line_number": 399, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 400, "usage_type": "argument"}, {"api_name": "utils.check_type", "line_number": 402, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 403, "usage_type": "argument"}, {"api_name": "utils.dict_of_str", "line_number": 408, "usage_type": "call"}, {"api_name": "utils.check_type", "line_number": 412, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 412, "usage_type": "argument"}, {"api_name": "utils.check_type", "line_number": 414, "usage_type": "call"}, {"api_name": "utils.check_type", "line_number": 415, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 415, "usage_type": "argument"}, {"api_name": "utils.dict_from_items_with_values", "line_number": 421, "usage_type": "call"}, {"api_name": "utils.dict_from_items_with_values", "line_number": 478, "usage_type": "call"}, {"api_name": "utils.dict_from_items_with_values", "line_number": 481, "usage_type": "call"}, {"api_name": "utils.apply_path_params", "line_number": 487, "usage_type": "call"}, {"api_name": "utils.check_type", "line_number": 590, "usage_type": "call"}, {"api_name": "utils.check_type", "line_number": 594, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 595, "usage_type": "argument"}, {"api_name": "utils.check_type", "line_number": 597, "usage_type": "call"}, {"api_name": "past.builtins.basestring", "line_number": 598, "usage_type": "argument"}, {"api_name": "utils.dict_of_str", "line_number": 603, "usage_type": "call"}, {"api_name": "utils.dict_from_items_with_values", "line_number": 609, "usage_type": "call"}, {"api_name": "utils.apply_path_params", "line_number": 615, "usage_type": "call"}]} +{"seq_id": "253152141", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport asyncio\nimport random\n\nimport aiohttp\nfrom lxml import etree\n\nfrom data import agents\nimport setting\nimport logging\n\nclass BaseSpider():\n\n def __init__(self):\n self.urls = []\n self.next= None\n self.loop = asyncio.get_event_loop()\n self.idle = 5\n\n def run(self):\n logging.debug(\"Running spider [%s] now!\" % (type(self).__name__))\n results = []\n tasks = [self._feth(results, u) for u in self.urls]\n self.loop.run_until_complete(asyncio.gather(*tasks))\n\n while self.next:\n next_url = self.next\n self.next = None\n self.loop.run_until_complete(self._feth(results, next_url))\n\n return results\n\n def _headers(self):\n number = random.randint(0, len(agents)-1)\n return {'user-agent': agents[number]}\n\n async def _feth(self, results, url):\n logging.debug(\"Fetching page [%s] by spider [%s].\" % (url, type(self).__name__))\n try:\n html = None\n async with aiohttp.ClientSession() as session:\n async with session.get(url, headers=self._headers(), proxy=setting.spider_proxy) as response:\n html = await response.text()\n\n 
self._parse(results, html)\n\n except Exception as e:\n logging.error(\"Fetch page error:%s by spider [%s]\" % (e, type(self).__name__))\n\n def _parse(self, results, text):\n pass\n", "sub_path": "src/spiders/baseSpider.py", "file_name": "baseSpider.py", "file_ext": "py", "file_size_in_byte": 1471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "asyncio.get_event_loop", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 23, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 26, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "data.agents", "line_number": 36, "usage_type": "argument"}, {"api_name": "data.agents", "line_number": 37, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 40, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 43, "usage_type": "call"}, {"api_name": "setting.spider_proxy", "line_number": 44, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "652572886", "text": "import bpy\n\n\n__all__ = [\n \"delete_all_data\",\n]\n\n\ndef delete_all_data():\n \"\"\"Delete all collections, mesh and curve objects, meshes, curves, materials.\"\"\"\n for collection in bpy.data.collections:\n bpy.data.collections.remove(collection)\n for obj in bpy.data.objects:\n if obj.type == 'MESH':\n bpy.data.objects.remove(obj)\n elif obj.type == 'CURVE':\n bpy.data.objects.remove(obj)\n for mesh in bpy.data.meshes:\n bpy.data.meshes.remove(mesh)\n for curve in bpy.data.curves:\n bpy.data.curves.remove(curve)\n for material in bpy.data.materials:\n bpy.data.materials.remove(material)\n\n\n# ==============================================================================\n# Main\n# ==============================================================================\n\nif __name__ == '__main__':\n pass\n", "sub_path": "src/compas_blender/utilities/data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 866, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "bpy.data", "line_number": 11, "usage_type": "attribute"}, {"api_name": "bpy.data.collections.remove", "line_number": 12, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 12, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 13, "usage_type": "attribute"}, {"api_name": "bpy.data.objects.remove", "line_number": 15, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 15, "usage_type": "attribute"}, {"api_name": "bpy.data.objects.remove", "line_number": 17, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 17, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 18, "usage_type": "attribute"}, {"api_name": "bpy.data.meshes.remove", "line_number": 19, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 19, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 20, "usage_type": "attribute"}, {"api_name": "bpy.data.curves.remove", "line_number": 21, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 21, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 22, "usage_type": "attribute"}, {"api_name": "bpy.data.materials.remove", "line_number": 23, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 23, "usage_type": "attribute"}]} +{"seq_id": 
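# Illustrative only -- a minimal subclass of the BaseSpider class from the
# spiders/baseSpider.py record above, showing its two extension points
# (`urls` and `_parse`). The URL and the XPath are placeholders, not taken
# from the original project.
from lxml import etree

class TitleSpider(BaseSpider):
    def __init__(self):
        super().__init__()
        self.urls = ['https://example.com/']

    def _parse(self, results, text):
        # Collect the <title> text of every fetched page.
        tree = etree.HTML(text)
        results.extend(tree.xpath('//title/text()'))

# titles = TitleSpider().run()   # returns the list populated by _parse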
"337349760", "text": "import collections\n\n\nPoint = collections.namedtuple(\"Point\", \"y x\")\nPoint._delta_udlr = (\n Point(-1, 0),\n Point(1, 0),\n Point(0, -1),\n Point(0, 1)\n)\n\n\ndef _Point____add__(self, other):\n return Point(self.y + other.y,\n self.x + other.x)\n\n\ndef _Point__udlr(self):\n \"\"\"Get the four next points. UDLR means Up, Down, Left and Right.\"\"\"\n return [self + delta for delta in self._delta_udlr]\n\n\nPoint.__add__ = _Point____add__\nPoint.udlr = _Point__udlr\n\n\ndef def_getter(cls, attr_name: str):\n \"\"\"\n Define a getter property of a given class.\n\n e.g.\n In below code, code 1 is same as code 2.\n\n # Code 1\n def_getter(C, '_foo')\n\n # Code 2\n class C:\n @property\n def foo(self):\n return self._foo\n\n \"\"\"\n prop_name = attr_name.lstrip(\"_\")\n\n def getter(self):\n return getattr(self, attr_name)\n\n setattr(cls, prop_name, property(getter))\n", "sub_path": "mylib/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 925, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "collections.namedtuple", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "244551776", "text": "#!/usr/python3.5\nfrom kivy.app import App\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.button import Button\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.graphics import *\n# codigo ainda com falhas para corrigir\n\ncores_hex = [\n 'FF9E73', 'FF7B40', 'FF4F00', 'BF5D30', 'A63400',\n 'FFD473', 'FFC540', 'FFB100', 'BF9430', 'A67300',\n '67E467', '39E444', '00C90D', '26972D', '008209',\n '64AAD0', '3C9DD0', '086CA2', '235B79', '034569',\n '717DD7', '4B5CD7', '1729B0', '2E3884', '081472',\n '926CD6', '7945D6', '4811AE', '492A82', '2C0571',\n 'FF7673', 'FF4540', 'FF0700', 'BF3330', 'A60400',\n 'FFFFFF', '999999', '000000'\n\n ]\n\ndef hex2rgb (cor):\n cor = cor.lstrip('#')\n return (int(cor[:2], 16)/255., int(cor[2:4], 16)/255., int(cor[:4], 16)/255., 1.)\n\ncores = [hex2rgb(cor) for cor in cores_hex]\n\nclass DesenhoWidget (Widget):\n def on_touch (self, touch):\n with self.canvas:\n Color(*self.cor, mode='rgba')\n touch.ud['line'] = Line(points=(touch.x, touch.y), width=self.brush)\n\n def on_touch_move (self, touch):\n if 'line' in touch.ud:\n touch.ud['line'].points += [touch.x, touch.y]\n\nclass DesenhoApp (App):\n def build(self):\n tela = BoxLayout(orientation='horizontal')\n controles = BoxLayout(orientation='vertical', size_hint=[.1, 1.])\n palheta = GridLayout(cols=2, spacing=0)\n\n def define_cor (instance):\n desenho.cor = list(instance.background_color)\n desenho.cor[-1] = .6\n limpara.color = desenho.cor\n limparb.color = desenho.cor\n\n for cor in cores:\n cw = Button(text=' ', background_color=cor, background_normal='')\n cw.bind(on_press=define_cor)\n palheta.add_widget(cw)\n\n desenho = DesenhoWidget()\n desenho.cor = list(cores[0])\n desenho.cor[-1] = .6\n desenho.brush = 5.\n tela.add_widget(desenho)\n limpara = Button(text='X', background_color=(0., 0., 0., 1.), color=desenho.cor, background_normal='')\n palheta.add_widget(limpara)\n\n limparb = Button(text='X', background_color=(1., 1., 1., 1.), color=desenho.cor, background_normal='')\n palheta.add_widget(limparb)\n controles.add_widget(palheta)\n tela.add_widget(controles)\n\n def limpar1 (obj):\n desenho.canvas.clear()\n limpara.bind(on_release=limpar1)\n\n def limpar2 (obj):\n with desenho.canvas:\n Color(1., 1., 1., 1., mode='rgba')\n 
Rectangle(pos=desenho.pos, size=desenho.size)\n limparb.bind(on_release=limpar2)\n return tela\n\nif __name__ == '__main__':\n DesenhoApp().run()\n\n", "sub_path": "livro/24.01_desenho/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2776, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "kivy.uix.widget.Widget", "line_number": 28, "usage_type": "name"}, {"api_name": "kivy.app.App", "line_number": 38, "usage_type": "name"}, {"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 40, "usage_type": "call"}, {"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 41, "usage_type": "call"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 42, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 51, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 60, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "217930513", "text": "#!/usr/bin/env python2\n\n\ndef parse_command(instr, bot):\n if instr.startswith('action move'):\n time = int(instr.split(' ')[-1])\n x, y = bot.make_move(time)\n return 'place_move %d %d\\n' % (x, y)\n elif instr.startswith('update game field'):\n fstr = instr.split(' ')[-1]\n bot.update_currentboard(fstr)\n elif instr.startswith('update game macroboard'):\n mbstr = instr.split(' ')[-1]\n bot.update_macroboard(mbstr)\n elif instr.startswith('update game move'):\n bot.set_movenumb(int(instr.split(' ')[-1]))\n elif instr.startswith('settings your_botid'):\n myid = int(instr.split(' ')[-1])\n bot.myid(myid)\n bot.myid = myid\n bot.oppid = 1 if myid == 2 else 2\n elif instr.startswith('settings timebank'):\n bot.timebank = int(instr.split(' ')[-1])\n elif instr.startswith('settings time_per_move'):\n bot.time_per_move = int(instr.split(' ')[-1])\n return ''\n\nif __name__ == '__main__':\n import sys\n from scorebot import ScoreBot\n import logging\n import socket\n\n if 'f4hy' in socket.gethostname() or 'fahy' in socket.gethostname():\n logging.basicConfig(format='SCOREBOT %(levelname)s: %(message)s', level=logging.DEBUG)\n root = logging.getLogger()\n errfilename = \"test\"+\".err\"\n errfilehandler = logging.FileHandler(errfilename, delay=True)\n errfilehandler.setLevel(logging.WARNING)\n formatter = logging.Formatter('SCOREBOT %(levelname)s: %(message)s')\n errfilehandler.setFormatter(formatter)\n root.addHandler(errfilehandler)\n logfilename = \"test\"+\".log\"\n logfilehandler = logging.FileHandler(logfilename, delay=True)\n logfilehandler.setLevel(logging.DEBUG)\n formatter = logging.Formatter('SCOREBOT %(levelname)s: %(message)s')\n logfilehandler.setFormatter(formatter)\n root.addHandler(logfilehandler)\n\n logging.info(\"starting logging\")\n\n bot = ScoreBot()\n\n while True:\n try:\n instr = raw_input()\n logging.info(\"instr {}\".format(instr))\n except EOFError as e:\n logging.warn(\"given EOF exiting\")\n sys.stdout.flush()\n exit(-1)\n except Exception as e:\n logging.warn('error reading input {}, {}'.format(e, type(e)))\n sys.stderr.write('error reading input')\n raise e\n outstr = parse_command(instr, bot)\n sys.stdout.write(outstr)\n sys.stdout.flush()\n", "sub_path": "scorebot/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2498, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "socket.gethostname", "line_number": 34, 
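# Illustrative only -- a rough sketch of the engine protocol handled by
# parse_command in the scorebot main.py record above. ScoreBot itself is not
# part of this record, so a tiny stand-in bot is used; the move it returns is
# arbitrary.
class _StubBot(object):
    def make_move(self, time):
        return 4, 4

    def update_currentboard(self, fstr):
        pass

    def update_macroboard(self, mbstr):
        pass

    def set_movenumb(self, n):
        pass

    def myid(self, i):
        pass

bot = _StubBot()
parse_command('settings your_botid 1', bot)
parse_command('update game field ' + ','.join(['0'] * 81), bot)
print(parse_command('action move 10000', bot))   # -> 'place_move 4 4\n'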
"usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 35, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 38, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 39, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 44, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 45, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 46, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 50, "usage_type": "call"}, {"api_name": "scorebot.ScoreBot", "line_number": 52, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 57, "usage_type": "call"}, {"api_name": "logging.warn", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 60, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 60, "usage_type": "attribute"}, {"api_name": "logging.warn", "line_number": 63, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 64, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 67, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 67, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 68, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 68, "usage_type": "attribute"}]} +{"seq_id": "250915138", "text": "import networkx as nx\nfrom matplotlib import pyplot as plt\nimport os \nG = nx.read_pajek('weighted_hero_network.txt')\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\ndef get_color(arg,name):\n '''\n Determines the color code for the given node\n Inputs:\n arg: (str) \n name: (str)\n Outputs g for green, r for red\n '''\n if name == arg:\n return 'g'\n else:\n return 'r'\n\ndef get_network(arg,conn_num=False):\n '''\n Draws the network\n Inputs:\n arg: (str) name of the character to draw the network for\n Outputs tuple with information about the graph\n '''\n limit = False\n plt.clf()\n arg = arg.upper()\n if arg not in G.nodes():\n return (0,0,0,0)\n else:\n N = G.neighbors(arg)\n N.append(arg)\n H = G.subgraph(N)\n d = nx.degree(H)\n weights = H[arg]\n\n most_apps = sorted([(x,weights[x][0]['weight']) for x in weights], key=lambda i: i[1],reverse=True)\n highest = most_apps[0][1]\n i = 0\n for j in most_apps:\n if j[1] == highest:\n i +=1\n most = [x[0].title() for x in most_apps[:i]]\n\n weights[arg] = {0:{'weight':highest}}\n\n if conn_num and conn_num < len(N):\n limit = True\n lim_N = [x[0] for x in most_apps[:conn_num-1]] + [arg]\n lim_H = G.subgraph(lim_N)\n lim_d = nx.degree(lim_H)\n nx.draw_circular(lim_H,with_labels=True,alpha=0.5,edge_color='0.5',\n nodelist=lim_d.keys(), node_size=[weights[v][0]['weight'] * 5 for v in lim_d.keys()],\n node_color=[get_color(arg,v) for v in lim_d.keys()])\n plt.savefig(os.path.join(BASE_DIR,'ui/static/lim_network.jpg'))\n plt.clf()\n\n nx.draw_circular(H,with_labels=True,alpha=0.5,edge_color='0.5',\n nodelist=d.keys(), node_size=[weights[v][0]['weight'] * 5 for v in d.keys()],\n node_color=[get_color(arg,v) for v in d.keys()])\n plt.savefig(os.path.join(BASE_DIR,'ui/static/network.jpg'))\n plt.clf()\n return (1,most,len(N),limit)\n", 
"sub_path": "ui/grapher.py", "file_name": "grapher.py", "file_ext": "py", "file_size_in_byte": 2124, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "networkx.read_pajek", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "networkx.degree", "line_number": 37, "usage_type": "call"}, {"api_name": "networkx.degree", "line_number": 54, "usage_type": "call"}, {"api_name": "networkx.draw_circular", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "networkx.draw_circular", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}]} +{"seq_id": "124990788", "text": "#coding:utf-8\nimport sys,os\nsys.path.append(os.pardir)\nfrom datetime import datetime\nimport unittest\nfrom todoItem import ToDoItem\n\nclass TestToDoItem(unittest.TestCase):\n\t\t\t\tdef setUp(self):\n\t\t\t\t\t\t\t\tself.testItem1=ToDoItem('','',datetime.now())\n\t\t\t\t\t\t\t\tself.testItem2=ToDoItem('test','test description',datetime.strptime('2014/9/20 12:30:00','%Y/%m/%d %H:%M:%S'))\n\t\t\t\t\t\t\t\t\n\t\t\t\tdef tearDown(self):\n\t\t\t\t\t\t\t\tpass\n\t\t\t\tdef testNewInstance(self):\n\t\t\t\t\t\t\t\tself.assertNotEqual(self.testItem1,None)\n\t\t\t\tdef testToDoItemFinish(self):\n\t\t\t\t\t\t\t\tself.testItem2.finish()\n\t\t\t\t\t\t\t\tfinished_date=self.testItem2.finished_date\n\t\t\t\t\t\t\t\tself.assertEqual(self.testItem2.finished,True)\n\t\t\t\t\t\t\t\tself.assertEqual(finished_date.strftime('%Y/%m/%d %H:%M'),datetime.now().strftime('%Y/%m/%d %H:%M'))\nif __name__=='__main__':\n\t\t\t\tsuite=unittest.TestLoader().loadTestsFromTestCase(TestToDoItem)\n\t\t\t\tunittest.TextTestRunner(verbosity=2).run(suite)\n", "sub_path": "test/testToDoItem.py", "file_name": "testToDoItem.py", "file_ext": "py", "file_size_in_byte": 917, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.pardir", "line_number": 3, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 8, "usage_type": "attribute"}, {"api_name": "todoItem.ToDoItem", "line_number": 10, "usage_type": "call"}, 
{"api_name": "datetime.datetime.now", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 10, "usage_type": "name"}, {"api_name": "todoItem.ToDoItem", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "name"}, {"api_name": "unittest.TestLoader", "line_number": 23, "usage_type": "call"}, {"api_name": "unittest.TextTestRunner", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "228179312", "text": "def readfile(path):\n import numpy as np\n f = open(path, 'r')\n return np.loadtxt(f)\n\nif __name__ == \"__main__\":\n import sys \n sys.path.insert(0, \"/home/akke/Physicum/dyna/project/trim/\")\n from trim import removeFirst\n path = sys.argv[1]\n N = int(sys.argv[2])\n resPath = \"results/\" + path + \"/\"\n axes = ['X','Y','Z']\n colors = ['r','b','g','m','k']\n \n data = readfile(resPath + \"int.out\")\n removeFirst(data,N)\n\n from matplotlib import pyplot as pl\n \n pl.figure(0);\n for i in range(int(N)):\n X = data[:,1+3*i]\n Y = data[:,2+3*i]\n\n pl.plot(X,Y,'.')\n \n pl.savefig(resPath + \"orbitplot.pdf\")\n pl.show()\n", "sub_path": "plot/plotxy.py", "file_name": "plotxy.py", "file_ext": "py", "file_size_in_byte": 688, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.loadtxt", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "trim.removeFirst", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "191409904", "text": "from django.shortcuts import render_to_response ,render,get_object_or_404,redirect\nfrom blog.models import Post,Post2,category,Sub,comment1,comment2\nimport webbrowser\nimport smtplib as p\nfrom django.core.files import File\n\n\ndef index(request):\n\tposts = Post.objects.all()\n\treturn render(request ,'indexx.html',{'posts':posts})\ndef blog(request):\n\tposts = Post.objects.filter(published = True)\n\tposts2 = Post2.objects.filter(published = True)\n\tcategorys = category.objects.all()\n\tnumber = str(Sub.objects.count())\n\treturn render(request , 'blog.html' , {'posts':posts,'posts2':posts2,'category':categorys,'num':number})\ndef view_blog_post(request , slug):\n\tpost = get_object_or_404(Post , slug = slug)\n\tif (request.method == 'POST'):\n\t\tname = request.POST.get('name' , None)\n\t\tif (len(name)!= 0):\n\t\t\temail = 
request.POST.get('email' , None)\n\t\t\tcmnt = request.POST.get('comment',None)\n\t\t\tc = post.comment1_set.create(name = name , email = email , desc = cmnt)\n\t\t\tc.save()\n\tcomment = comment1.objects.filter(post = post)\n\treturn render(request,'testpost.html',{'post':post,'comment':comment})\n\ndef view_post_2(request , slug):\n\tpost = get_object_or_404(Post2 , slug = slug)\n\tif (request.method == 'POST'):\n\t\tname = request.POST.get('name' , None)\n\t\tif (len(name)!= 0):\n\t\t\temail = request.POST.get('email' , None)\n\t\t\tcmnt = request.POST.get('comment',None)\n\t\t\tc = post.comment1_set.create(name = name , email = email , desc = cmnt)\n\t\t\tc.save()\n\t#comment = post.comment2_set.all()\n\tcomment = comment2.objects.filter(post = post)\n\treturn render_to_response('post.html',{'post':post,'comment':comment})\ndef post(request,slug):\n\tpost = get_object_or_404(Post ,slug=slug)\n\t#comment = post.comment1_set.all()\n\t\n\treturn render(request,'post.html',{'post':post,'comment':comment})\ndef about(request):\n\treturn render(request,'about.html')\ndef sending(request):\n\tif (request.method == 'POST'):\n\t\tname = request.POST.get('name',None)\n\t\tabout = request.POST.get('email',None)\n\t\tdescription = request.POST.get('description',None)\n\t\tmsg =\"\"\"From: From skyteam.work@gmail.com\nTo: %r\nMIME-Version: 1.0\nContent-type: text/html\nSubject: BUG repport about (%a)\n

Message :

\n

%s

\n\n\"\"\"\t% (name,about,description)\t\n\t\tserver = p.SMTP(\"smtp.gmail.com\",587)\n\t\tserver.starttls()\n\t\tserver.login(\"teamsky.work@gmail.com\",\"teamskywork123\")\n\t\tserver.sendmail(\"teamsky.work@gmail.com\",\"sky.red2212@gmail.com\",msg)\n\t\tposts = Post.objects.all()\n\t\treturn render(request ,'indexx.html',{'posts':posts})\ndef send(request):\n\treturn render(request , 'sending.html')\ndef view_category(request,slug):\n\tcategories = get_object_or_404(category ,slug = slug)\n\tcategorys = category.objects.all()\n\tposts = Post.objects.filter(category = categories)\n\tposts2 = Post2.objects.filter(category = categories)\n\tnumber = str(Sub.objects.count())\n\treturn render(request , 'blog.html',{'category':categorys,'posts':posts,'posts2':posts2,'num':number})\ndef subscribe(request):\n\tif (request.method == 'POST'):\n\t\tname = request.POST.get('name',None)\n\t\temail = request.POST.get('email',None)\n\t\tq = Sub(name = name , email=email)\n\t\tq.save()\n\t\tfile = open(\"maillist.txt\",\"a\")\n\t\tfile.write(\"\\n\")\n\t\tfile.write(name)\n\t\tfile.write(\"\\n\")\n\t\tfile.write(email)\n\t\tfile.close()\n\t\treturn redirect('https://skyteam.herokuapp.com')\n\telse:\n\t\treturn redirect('https://www.google.com')\ndef share(request):\n\tmaillist = Sub.objects.all()\n\tabout = \"New post\"\n\turl = \"https://skyteam.herokuapp.com/news\"\n\tserver = p.SMTP(\"smtp.gmail.com\",587)\n\tserver.starttls()\n\tserver.login(\"teamsky.work@gmail.com\",\"teamskywork123\")\n\tfor mail in maillist:\n\t\tmsg =\"\"\"From: From skyteam.work@gmail.com\nTo: %r\nMIME-Version: 1.0\nContent-type: text/html\nSubject: (%a)\n

Message :

\n

check our new post in \\n %s

\n\n\"\"\"\t% (mail.name,about,url)\n\t\tserver.sendmail(\"teamsky.work@gmail.com\",mail.email,msg)\n\t\treturn redirect(\"https://skyteam.herokuapp.com\")\ndef news(request):\n\tposts = Post.objects.filter(published = True)[0:3]\n\tposts2 = Post2.objects.filter(published = True)[0:3]\n\tcategorys = category.objects.all()\n\tnumber = str(Sub.objects.count())\n\treturn render(request , 'blog.html' , {'posts':posts,'posts2':posts2,'category':categorys,'num':number})\n# Create your views here.\ndef blog_search(request):\n \tif (request.method == 'GET'):\n \t\tkeyword = request.GET.get('search')\n \t\tposts = Post.objects.filter(title__icontains = keyword)\n \t\tposts2 = Post2.objects.filter(title__icontains=keyword)\n \t\tcategorys = category.objects.all()\n \t\tnumber = str (Sub.objects.count())\n \t\t#import webbrowser\n \t\t#webbrowser.open(\"www.google.com\")\n \t\treturn render(request , 'blog.html' , {'posts':posts,'posts2':posts2,'category':categorys,'num':number})\n", "sub_path": "blog/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4659, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "blog.models.Post.objects.all", "line_number": 9, "usage_type": "call"}, {"api_name": "blog.models.Post.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "blog.models.Post", "line_number": 9, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 10, "usage_type": "call"}, {"api_name": "blog.models.Post.objects.filter", "line_number": 12, "usage_type": "call"}, {"api_name": "blog.models.Post.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "blog.models.Post", "line_number": 12, "usage_type": "name"}, {"api_name": "blog.models.Post2.objects.filter", "line_number": 13, "usage_type": "call"}, {"api_name": "blog.models.Post2.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "blog.models.Post2", "line_number": 13, "usage_type": "name"}, {"api_name": "blog.models.category.objects.all", "line_number": 14, "usage_type": "call"}, {"api_name": "blog.models.category.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "blog.models.category", "line_number": 14, "usage_type": "name"}, {"api_name": "blog.models.Sub.objects.count", "line_number": 15, "usage_type": "call"}, {"api_name": "blog.models.Sub.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "blog.models.Sub", "line_number": 15, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 18, "usage_type": "call"}, {"api_name": "blog.models.Post", "line_number": 18, "usage_type": "argument"}, {"api_name": "blog.models.comment1.objects.filter", "line_number": 26, "usage_type": "call"}, {"api_name": "blog.models.comment1.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "blog.models.comment1", "line_number": 26, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 30, "usage_type": "call"}, {"api_name": "blog.models.Post2", "line_number": 30, "usage_type": "argument"}, {"api_name": "blog.models.comment2.objects.filter", "line_number": 39, "usage_type": "call"}, {"api_name": "blog.models.comment2.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "blog.models.comment2", 
"line_number": 39, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 40, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 42, "usage_type": "call"}, {"api_name": "blog.models.Post", "line_number": 42, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 62, "usage_type": "call"}, {"api_name": "blog.models.Post.objects.all", "line_number": 66, "usage_type": "call"}, {"api_name": "blog.models.Post.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "blog.models.Post", "line_number": 66, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 69, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 71, "usage_type": "call"}, {"api_name": "blog.models.category", "line_number": 71, "usage_type": "argument"}, {"api_name": "blog.models.category.objects.all", "line_number": 72, "usage_type": "call"}, {"api_name": "blog.models.category.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "blog.models.category", "line_number": 72, "usage_type": "name"}, {"api_name": "blog.models.Post.objects.filter", "line_number": 73, "usage_type": "call"}, {"api_name": "blog.models.Post.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "blog.models.Post", "line_number": 73, "usage_type": "name"}, {"api_name": "blog.models.Post2.objects.filter", "line_number": 74, "usage_type": "call"}, {"api_name": "blog.models.Post2.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "blog.models.Post2", "line_number": 74, "usage_type": "name"}, {"api_name": "blog.models.Sub.objects.count", "line_number": 75, "usage_type": "call"}, {"api_name": "blog.models.Sub.objects", "line_number": 75, "usage_type": "attribute"}, {"api_name": "blog.models.Sub", "line_number": 75, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 76, "usage_type": "call"}, {"api_name": "blog.models.Sub", "line_number": 81, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 89, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 91, "usage_type": "call"}, {"api_name": "blog.models.Sub.objects.all", "line_number": 93, "usage_type": "call"}, {"api_name": "blog.models.Sub.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "blog.models.Sub", "line_number": 93, "usage_type": "name"}, {"api_name": "smtplib.SMTP", "line_number": 96, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 110, "usage_type": "call"}, {"api_name": "blog.models.Post.objects.filter", "line_number": 112, "usage_type": "call"}, {"api_name": "blog.models.Post.objects", "line_number": 112, "usage_type": "attribute"}, {"api_name": "blog.models.Post", "line_number": 112, "usage_type": "name"}, {"api_name": "blog.models.Post2.objects.filter", "line_number": 113, "usage_type": "call"}, {"api_name": "blog.models.Post2.objects", "line_number": 113, "usage_type": "attribute"}, {"api_name": "blog.models.Post2", "line_number": 113, "usage_type": "name"}, {"api_name": "blog.models.category.objects.all", "line_number": 114, "usage_type": "call"}, {"api_name": "blog.models.category.objects", "line_number": 114, 
"usage_type": "attribute"}, {"api_name": "blog.models.category", "line_number": 114, "usage_type": "name"}, {"api_name": "blog.models.Sub.objects.count", "line_number": 115, "usage_type": "call"}, {"api_name": "blog.models.Sub.objects", "line_number": 115, "usage_type": "attribute"}, {"api_name": "blog.models.Sub", "line_number": 115, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 116, "usage_type": "call"}, {"api_name": "blog.models.Post.objects.filter", "line_number": 121, "usage_type": "call"}, {"api_name": "blog.models.Post.objects", "line_number": 121, "usage_type": "attribute"}, {"api_name": "blog.models.Post", "line_number": 121, "usage_type": "name"}, {"api_name": "blog.models.Post2.objects.filter", "line_number": 122, "usage_type": "call"}, {"api_name": "blog.models.Post2.objects", "line_number": 122, "usage_type": "attribute"}, {"api_name": "blog.models.Post2", "line_number": 122, "usage_type": "name"}, {"api_name": "blog.models.category.objects.all", "line_number": 123, "usage_type": "call"}, {"api_name": "blog.models.category.objects", "line_number": 123, "usage_type": "attribute"}, {"api_name": "blog.models.category", "line_number": 123, "usage_type": "name"}, {"api_name": "blog.models.Sub.objects.count", "line_number": 124, "usage_type": "call"}, {"api_name": "blog.models.Sub.objects", "line_number": 124, "usage_type": "attribute"}, {"api_name": "blog.models.Sub", "line_number": 124, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 127, "usage_type": "call"}]} +{"seq_id": "247621875", "text": "from cv2 import cv2\n\n# Load the eye cascade\neyes_cascade_path = \"../haarcascades/haarcascade_eye.xml\"\n\n# Load the face cascade\neyes_cascade = cv2.CascadeClassifier(eyes_cascade_path)\nface_cascade_path = \"../haarcascades/haarcascade_frontalface_default.xml\"\nface_cascade = cv2.CascadeClassifier(face_cascade_path)\n\nimage_path = r\"../assets/img.png\"\nimage = cv2.imread(image_path)\nimage = cv2.resize(image, (512, 512))\n\n# Convert image to gray\nimage_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nimage_gray = cv2.equalizeHist(image_gray)\n\n# Detect faces in the image\nfaces = face_cascade.detectMultiScale(image_gray)\n\n# Draw ellipse for each face in the image\nfor (x, y, w, h) in faces:\n # Draw ellipse around the face\n center = (x + w // 2, y + h // 2)\n image = cv2.ellipse(image, center, (w // 2, h // 2), 0, 0, 360, (255, 0, 255), 4)\n\n # Crop face from the image and use it for eye detection\n faceROI = image_gray[y:y + h // 2, x:x + w]\n\n # Detect eyes for each face\n eyes = eyes_cascade.detectMultiScale(faceROI)\n for (x2, y2, w2, h2) in eyes:\n eye_center = (x + x2 + w2 // 2, y + y2 + h2 // 2)\n radius = int(round((w2 + h2) * 0.25))\n # Draw circle around the eye\n frame = cv2.circle(image, eye_center, radius, (255, 0, 0), 4)\n\ncv2.imshow('Capture - Face detection', image)\ncv2.waitKey(0)\n", "sub_path": "feature-detection/eyes-detection.py", "file_name": "eyes-detection.py", "file_ext": "py", "file_size_in_byte": 1344, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "cv2.cv2.CascadeClassifier", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 7, "usage_type": "name"}, {"api_name": "cv2.cv2.CascadeClassifier", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 9, "usage_type": "name"}, {"api_name": "cv2.cv2.imread", "line_number": 12, "usage_type": "call"}, 
{"api_name": "cv2.cv2", "line_number": 12, "usage_type": "name"}, {"api_name": "cv2.cv2.resize", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 13, "usage_type": "name"}, {"api_name": "cv2.cv2.cvtColor", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 16, "usage_type": "name"}, {"api_name": "cv2.cv2.COLOR_BGR2GRAY", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.cv2.equalizeHist", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 17, "usage_type": "name"}, {"api_name": "cv2.cv2.ellipse", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 26, "usage_type": "name"}, {"api_name": "cv2.cv2.circle", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 37, "usage_type": "name"}, {"api_name": "cv2.cv2.imshow", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 39, "usage_type": "name"}, {"api_name": "cv2.cv2.waitKey", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "595219402", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2016-05-18 14:43:49\n# @Author : moling (365024424@qq.com)\n# @Link : #\nimport os\nfrom datetime import datetime\nfrom fabric.api import *\n\n_REMOTE_BASE_DIR = '/home/ubuntu/mblog'\n_TAR_FILE = 'dist-mblog.tar.gz'\n_REMOTE_TMP_TAR = '/tmp/%s' % _TAR_FILE\n\nenv.hosts = ['119.29.191.109']\nenv.user = 'ubuntu'\nenv.password = '******'\nenv.key_filename = 'D:\\\\tk'\n\n\ndef _now():\n return datetime.now().strftime('%y-%m-%d_%H:%M:%S')\n\n\ndef build():\n includes = ['app', 'config', '*.py']\n excludes = ['__pycache__', '*.pyc', '*.pyo', 'orm_test.*']\n local('rm -f dist/%s' % _TAR_FILE)\n with lcd(os.path.join(os.path.abspath('.'), 'www')):\n cmd = ['tar', '--dereference', '-czvf', '../dist/%s' % _TAR_FILE]\n cmd.extend(['--exclude=\\'%s\\'' % ex for ex in excludes])\n cmd.extend(includes)\n local(' '.join(cmd))\n\n\ndef deploy():\n newdir = 'www-%s' % _now()\n sudo('rm -f %s' % _REMOTE_TMP_TAR)\n put('dist/%s' % _TAR_FILE, _REMOTE_TMP_TAR)\n with cd(_REMOTE_BASE_DIR):\n sudo('mkdir %s' % newdir)\n with cd('%s/%s' % (_REMOTE_BASE_DIR, newdir)):\n sudo('tar -xzvf %s' % _REMOTE_TMP_TAR)\n with cd(_REMOTE_BASE_DIR):\n sudo('rm -f www')\n sudo('ln -s %s www' % newdir)\n sudo('chown www-data:www-data www')\n sudo('chown -R www-data:www-data %s' % newdir)\n with settings(warn_only=True):\n sudo('supervisorctl restart mblog')\n sudo('service nginx restart')\n\n\ndef go():\n build()\n deploy()\n", "sub_path": "fabfile.py", "file_name": "fabfile.py", "file_ext": "py", "file_size_in_byte": 1542, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "datetime.datetime.now", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "15756790", "text": "from neuron import h\n\nclass PoissonSpiker(object):\n \"\"\"NetStim Object with variable lambda\"\"\"\n def __init__(self):\n self.soma = h.Section(name='soma', cell=self)\n self.syn_ = h.Exp2Syn(self.soma(0.5))\n self.stim = h.NetStim()\n self.stim.start = 0\n\n self.netcon = h.NetCon(self.stim, 
self.syn_)\n\n self.stim.interval = 10\n self.stim.number = 1e9\n self.stim.noise = 1\n\n def update_lambda(self, value):\n self.stim.interval = value", "sub_path": "Version-2-0/src/PoissonSpiker.py", "file_name": "PoissonSpiker.py", "file_ext": "py", "file_size_in_byte": 498, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "neuron.h.Section", "line_number": 6, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 6, "usage_type": "name"}, {"api_name": "neuron.h.Exp2Syn", "line_number": 7, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 7, "usage_type": "name"}, {"api_name": "neuron.h.NetStim", "line_number": 8, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 8, "usage_type": "name"}, {"api_name": "neuron.h.NetCon", "line_number": 11, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "160878384", "text": "import os\nfrom utils import config, DashDebug\nimport dash\nimport dash_bootstrap_components as dbc\n\nflask_options = config.get('flask')\n\nDASH_DEBUG = 'DASH_DEBUG' in os.environ\n\n_app_create = DashDebug if DASH_DEBUG else dash.Dash\n\napp = _app_create(__name__,\n suppress_callback_exceptions=True,\n external_stylesheets=[\n dbc.themes.BOOTSTRAP,\n 'https://stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css'\n ])\n\napp.scripts.config.serve_locally = True\napp.css.config.serve_locally = True\napp.server.config['SECRET_KEY'] = flask_options.secret_key\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 657, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "utils.config.get", "line_number": 6, "usage_type": "call"}, {"api_name": "utils.config", "line_number": 6, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 8, "usage_type": "attribute"}, {"api_name": "utils.DashDebug", "line_number": 10, "usage_type": "name"}, {"api_name": "dash.Dash", "line_number": 10, "usage_type": "attribute"}, {"api_name": "dash_bootstrap_components.themes", "line_number": 15, "usage_type": "attribute"}]} +{"seq_id": "591566122", "text": "# Author: PythonSalad\n# Date: 2013-06-09\n# Python Version: 2.7.3\n\n\"\"\" File contains the main game object, the core of the tdb game. 
\"\"\"\n\nimport pygame\nfrom config import GlobalConfig as GC\n\nclass Game:\n \"\"\" Contains the main game loop, StateGroups and all global game configs.\n Has functionality for loading and unloading StateGroups.\n \"\"\"\n def __init__(self, title = 'Game', frequency = 150):\n pygame.init()\n \n self._title = title\n\n # Default frequency of 150Hz (for game loop)\n self._frequency = frequency\n self._delta = 1000 // self._frequency\n\n self._ticks = pygame.time.get_ticks()\n self._is_active = True\n\n # Initialise display\n self._init_display()\n\n def _init_display(self):\n # Save current screen resolution\n self._initial_resolution = (\n pygame.display.Info().current_w,\n pygame.display.Info().current_h,\n )\n\n # Get resolution from config\n resolution = (\n GC().get_config('display', 'width'),\n GC().get_config('display', 'height'),\n )\n\n # Get flags from config\n flags = 0\n if GC().get_config('display', 'fullscreen'):\n flags = pygame.FULLSCREEN | pygame.HWSURFACE\n\n # Create the screen\n self.screen = pygame.display.set_mode(resolution, flags, 0)\n pygame.display.set_caption(self._title)\n\n def run(self):\n \"\"\" The main game loop. At intervals given by self._delta, flush\n the event queue and dispatch all messages to the active StateGroup.\n This function utilises wait, sleeping the main thread when\n all events are handled to avoid resource hogging.\n \"\"\"\n while self._is_active:\n ticks = pygame.time.get_ticks() - self._ticks\n if ticks < self._delta:\n pygame.time.wait(self._delta - ticks)\n self._ticks = pygame.time.get_ticks()\n\n def set_frequency(self, frequency):\n \"\"\" Set the game loop frequency and update self._delta \"\"\"\n self._frequency = frequency\n self._delta = 1000 // frequency\n\n def get_frequency(self):\n \"\"\" Get the game loop frequency. \"\"\"\n return self._frequency\n\n def __del__(self):\n \"\"\" Restore system to as it was before the game was launched. 
\"\"\"\n # Restore system resolution\n pygame.display.set_mode(self._initial_resolution)\n", "sub_path": "tdb/game.py", "file_name": "game.py", "file_ext": "py", "file_size_in_byte": 2439, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pygame.init", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.time.get_ticks", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.display.Info", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.display.Info", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 33, "usage_type": "attribute"}, {"api_name": "config.GlobalConfig", "line_number": 38, "usage_type": "call"}, {"api_name": "config.GlobalConfig", "line_number": 39, "usage_type": "call"}, {"api_name": "config.GlobalConfig", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.FULLSCREEN", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.HWSURFACE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.time.wait", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 75, "usage_type": "attribute"}]} +{"seq_id": "635143195", "text": "# coding: UTF-8\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom util import loss_fn\n\n\nclass Config(object):\n def __init__(self, data_dir):\n self.model_name = 'mmoe'\n self.train_path = data_dir + 'census-income.data.gz'\n self.test_path = data_dir + 'census-income.test.gz'\n self.save_path = './saved_dict/' + self.model_name + '.ckpt'\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.require_improvement = 1000\n self.dropout = 0.5\n self.learning_rate = 3e-5\n self.label_columns = ['income_50k', 'marital_stat']\n\n self.label_dict = [2, 2]\n self.num_feature = 0\n self.num_experts = 3\n self.num_tasks = 2\n self.units = 16\n self.hidden_units = 8\n self.embed_size = 300\n self.batch_size = 256\n self.field_size = 0\n self.towers_hidden = 16\n self.SB_hidden = 1024\n self.SB_output = 512\n self.num_epochs = 100\n self.loss_fn = loss_fn('binary')\n\n\nclass Transform_layer(nn.Module):\n def __init__(self, input_size, output_size, config):\n super(Transform_layer, self).__init__()\n self.alpha = torch.nn.Parameter(torch.rand((1,), device=config.device), requires_grad=True)\n self.beta = 0.9\n self.gamma = -0.1\n self.eplison = 2\n\n w = torch.empty(input_size, config.num_experts,output_size, device=config.device)\n self.u = 
torch.nn.Parameter(torch.nn.init.uniform_(w, 0, 1),\n requires_grad=True)\n\n w = torch.empty(input_size,config.num_experts, output_size, device=config.device)\n self.w_params = torch.nn.Parameter(torch.nn.init.xavier_normal_(w),\n requires_grad=True)\n\n def forward(self, x):\n self.s = torch.sigmoid(torch.log(self.u) - torch.log(1 - self.u) + torch.log(self.alpha) / self.beta)\n self.s_ = self.s * (self.eplison - self.gamma) + self.gamma\n\n self.z_params = (self.s_ > 0).float() * self.s_\n self.z_params = (self.z_params > 1).float() + (self.z_params <= 1).float() * self.z_params\n\n output = self.z_params * self.w_params\n output = torch.einsum('ab,bnc -> anc', x, output)\n return output\n\nclass high_layers(nn.Module):\n\n def __init__(self,input_size,output_size,config):\n super(high_layers,self).__init__()\n self.alpha = torch.nn.Parameter(torch.rand((1,), device=config.device), requires_grad=True)\n self.beta = 0.9\n self.gamma = -0.1\n self.eplison = 2\n\n w = torch.empty(input_size, output_size, device=config.device)\n self.u = torch.nn.Parameter(torch.nn.init.uniform_(w, 0, 1),\n requires_grad=True)\n\n w = torch.empty(input_size, output_size, device=config.device)\n self.w_params = torch.nn.Parameter(torch.nn.init.xavier_normal_(w),\n requires_grad=True)\n def forward(self,x):\n self.s = torch.sigmoid(torch.log(self.u) - torch.log(1 - self.u) + torch.log(self.alpha) / self.beta)\n self.s_ = self.s * (self.eplison - self.gamma) + self.gamma\n\n self.z_params = (self.s_ > 0).float() * self.s_\n self.z_params = (self.z_params > 1).float() + (self.z_params <= 1).float() * self.z_params\n\n output = self.z_params * self.w_params\n output = torch.einsum('anc,cd -> and', x, output)\n return output\n\n\nclass Tower(nn.Module):\n def __init__(self, input_size, output_size, hidden_size=16):\n super(Tower, self).__init__()\n self.fc1 = nn.Linear(input_size, hidden_size)\n self.fc2 = nn.Linear(hidden_size, output_size)\n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(0.4)\n\n def forward(self, x):\n out = self.fc1(x)\n out = self.relu(out)\n out = self.dropout(out)\n out = self.fc2(out)\n\n out = torch.sigmoid(out)\n return out\n\n\nclass Model(nn.Module):\n def __init__(self, config):\n super(Model, self).__init__()\n\n # accept_unit = config.field_size*config.embed_size\n accept_unit = config.num_feature\n self.trans1 = Transform_layer(accept_unit, config.SB_hidden, config)\n self.trans2 = high_layers(config.SB_hidden,config.SB_output,config)\n\n self.fc_experts = nn.Linear(config.num_experts,1)\n self.relu = nn.ReLU()\n\n self.towers = nn.ModuleList([Tower(config.SB_output, 1, config.towers_hidden) for i in range(config.num_tasks)])\n\n self.lamdba = 1e-4\n\n # self.embedding_layer = nn.Embedding(config.num_feature,config.embed_size)\n\n def forward(self, x):\n output = self.trans1(x)\n output = self.trans2(output)\n output = output.transpose(2,1)\n output = self.fc_experts(output)\n output = torch.squeeze(output)\n output = self.relu(output)\n\n final_outputs = [tower(output) for tower in self.towers]\n\n s1 = self.trans1.s_\n s2 = self.trans2.s_\n\n\n s1_prob = 1 - ((s1 < 0).sum(dim=-1) / s1.size(1))\n s2_prob = 1 - ((s2 < 0).sum(dim=-1) / s2.size(1))\n\n regul = self.lamdba * (s1_prob.sum() + s2_prob.sum())\n # regul = 0\n return final_outputs, regul\n\n", "sub_path": "Models/snr_trans.py", "file_name": "snr_trans.py", "file_ext": "py", "file_size_in_byte": 5294, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": 
[{"api_name": "torch.device", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 14, "usage_type": "attribute"}, {"api_name": "util.loss_fn", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.nn.init.uniform_", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 63, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torch.nn.init.uniform_", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 91, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 97, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 109, "usage_type": "attribute"}, {"api_name": "torch.nn", 
"line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 118, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 119, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 121, "usage_type": "name"}, {"api_name": "torch.squeeze", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "199624595", "text": "\"\"\"\n Copyright (c) 2018 Intel Corporation\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport os.path as osp\n\nimport cv2 as cv\nfrom tqdm import tqdm\nfrom torch.utils.data import Dataset\n\nfrom utils.face_align import FivePointsAligner\n\n\nclass MSCeleb1M(Dataset):\n \"\"\"MSCeleb1M Dataset compatible with PyTorch DataLoader.\"\"\"\n def __init__(self, images_root_path, image_list_path, transform=None):\n self.image_list_path = image_list_path\n self.images_root_path = images_root_path\n self.identities = {}\n\n assert osp.isfile(image_list_path)\n self.have_landmarks = True\n\n self.all_samples_info = self._read_samples_info()\n self.samples_info = self.all_samples_info\n self.transform = transform\n\n def _read_samples_info(self):\n \"\"\"Reads annotation of the dataset\"\"\"\n samples = []\n\n with open(self.image_list_path, 'r') as f:\n images_file_lines = f.readlines()\n last_class_id = -1\n\n for i in tqdm(range(len(images_file_lines))):\n line = images_file_lines[i]\n terms = line.split('|')\n if len(terms) < 3:\n continue # FD has failed on this imsage\n path, landmarks, bbox = terms\n image_id, _ = path.split('/')\n\n if image_id in self.identities:\n self.identities[image_id].append(len(samples))\n else:\n last_class_id += 1\n self.identities[image_id] = [len(samples)]\n\n bbox = [max(int(coord), 0) for coord in bbox.strip().split(' ')]\n landmarks = [float(coord) for coord in landmarks.strip().split(' ')]\n assert len(bbox) == 4\n assert len(landmarks) == 10\n samples.append((osp.join(self.images_root_path, path).strip(),\n last_class_id, image_id, bbox, landmarks))\n\n return samples\n\n def get_weights(self):\n \"\"\"Computes weights of the each identity in dataset according to frequency of it's occurance\"\"\"\n weights = [0.]*len(self.all_samples_info)\n for i, sample in enumerate(self.all_samples_info):\n weights[i] = float(len(self.all_samples_info)) / len(self.identities[sample[2]])\n return weights\n\n def get_num_classes(self):\n \"\"\"Returns total number of identities\"\"\"\n return len(self.identities)\n\n def __len__(self):\n \"\"\"Returns total number of samples\"\"\"\n return len(self.samples_info)\n\n def __getitem__(self, idx):\n \"\"\"Returns sample (image, class id, image id) by index\"\"\"\n img = cv.imread(self.samples_info[idx][0], cv.IMREAD_COLOR)\n bbox = self.samples_info[idx][-2]\n landmarks = self.samples_info[idx][-1]\n\n img = img[bbox[1]:bbox[1] + bbox[3], bbox[0]:bbox[0] + 
bbox[2]]\n img = FivePointsAligner.align(img, landmarks, d_size=(200, 200), normalized=True, show=False)\n\n if self.transform:\n img = self.transform(img)\n\n return {'img': img, 'label': self.samples_info[idx][1], 'instance': self.samples_info[idx][2]}\n", "sub_path": "datasets/ms_celeb1m.py", "file_name": "ms_celeb1m.py", "file_ext": "py", "file_size_in_byte": 3652, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 23, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 85, "usage_type": "attribute"}, {"api_name": "utils.face_align.FivePointsAligner.align", "line_number": 90, "usage_type": "call"}, {"api_name": "utils.face_align.FivePointsAligner", "line_number": 90, "usage_type": "name"}]} +{"seq_id": "238055413", "text": "import datetime, os\nimport pymysql.cursors\n\nclass Importer():\n '''\n\n '''\n\n def __init__(self):\n # Connect to the database\n self.connection = pymysql.connect(host='localhost',\n user=os.environ['MYSQL_USER'],\n password=os.environ['MYSQL_USER_PASSWD'],\n #db='xbrl',\n db='xbrl-django',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n\n def table_clear(self):\n with self.connection.cursor() as cusor:\n sql = \"DELETE from reports_company\"\n cusor.execute(sql)\n sql = \"DELETE from reports_report\"\n cusor.execute(sql)\n self.connection.commit()\n\n\n def import_dei_to_mysql(self, jpcrp):\n with self.connection.cursor() as cursor:\n sql = \"REPLACE into reports_company ( edinet_code, \\\n company_name, \\\n english_company_name, \\\n security_code) VALUES(%s,%s,%s,%s)\"\n cursor.execute(sql, (jpcrp.dei.edinet_code,\n jpcrp.dei.company_name,\n jpcrp.dei.english_company_name,\n jpcrp.dei.security_code))\n self.connection.commit()\n\n def count_company(self):\n with self.connection.cursor() as cusor:\n sql = \"select count(*) from reports_company\"\n cusor.execute(sql)\n result = cusor.fetchone()\n count = result['count(*)']\n return count\n\n def import_report_to_mysql(self, jpcrp):\n with self.connection.cursor() as cursor:\n sql = \"REPLACE into reports_report (\\\n edinet_code,\\\n year,\\\n per,\\\n roe,\\\n eps,\\\n equity_to_asset_ratio,\\\n pay_out_ratio,\\\n net_sales,\\\n net_assets,\\\n total_assets,\\\n liabilities,\\\n operating_revenue,\\\n ordinary_revenue,\\\n profit_before_tax,\\\n owners_equity_per_share,\\\n cash_and_cash_equivalents,\\\n cash_flow_from_operating,\\\n cash_flow_from_investing,\\\n cash_flow_from_financing,\\\n type_of_current_period,\\\n accounting_standard,\\\n whether_consolidated_financial_statements,\\\n current_fiscal_year_start_date,\\\n current_fiscal_year_end_date)\\\n VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,\\\n %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,\\\n %s,%s,%s,%s)\"\n\n cursor.execute(sql,(\n jpcrp.dei.edinet_code,\n jpcrp.get_current_fiscal_year(),\n jpcrp.per,\n jpcrp.roe,\n jpcrp.eps,\n jpcrp.equity_to_asset_ratio,\n jpcrp.pay_out_ratio,\n jpcrp.net_sales,\n jpcrp.net_assets,\n jpcrp.total_assets,\n jpcrp.liabilities,\n jpcrp.operating_revenue,\n 
jpcrp.ordinary_revenue,\n jpcrp.profit_before_tax,\n jpcrp.owners_equity_per_share,\n jpcrp.cash_and_cash_equivalents,\n jpcrp.cash_flow_from_operating,\n jpcrp.cash_flow_from_investing,\n jpcrp.cash_flow_from_financing,\n jpcrp.dei.type_of_current_period,\n jpcrp.dei.accounting_standard,\n jpcrp.dei.whether_consolidated_financial_statements,\n jpcrp.get_current_fiscal_year_start_date(),\n jpcrp.get_current_fiscal_year_end_date(),\n\n ))\n self.connection.commit()\n\n def count_report(self):\n with self.connection.cursor() as cusor:\n sql = \"select count(*) from reports_report\"\n cusor.execute(sql)\n result = cusor.fetchone()\n count = result['count(*)']\n return count\n", "sub_path": "edinetxbrl/importer.py", "file_name": "importer.py", "file_ext": "py", "file_size_in_byte": 4152, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pymysql.cursors.connect", "line_number": 11, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 11, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pymysql.cursors.cursors", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pymysql.cursors", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "237939351", "text": "import functools\nimport json\nimport subprocess\n\nfrom qhub.provider.cloud.commons import filter_by_highest_supported_k8s_version\n\n\n@functools.lru_cache()\ndef projects():\n output = subprocess.check_output([\"gcloud\", \"projects\", \"list\", \"--format=json\"])\n data = json.loads(output.decode(\"utf-8\"))\n return {_[\"name\"]: _[\"projectId\"] for _ in data}\n\n\n@functools.lru_cache()\ndef regions(project):\n output = subprocess.check_output(\n [\"gcloud\", \"compute\", \"regions\", \"list\", \"--project\", project, \"--format=json\"]\n )\n data = json.loads(output.decode(\"utf-8\"))\n return {_[\"description\"]: _[\"name\"] for _ in data}\n\n\n@functools.lru_cache()\ndef zones(project, region):\n output = subprocess.check_output(\n [\"gcloud\", \"compute\", \"zones\", \"list\", \"--project\", project, \"--format=json\"]\n )\n data = json.loads(output.decode(\"utf-8\"))\n return {_[\"description\"]: _[\"name\"] for _ in data if _[\"name\"].startswith(region)}\n\n\n@functools.lru_cache()\ndef kubernetes_versions(region):\n \"\"\"Return list of available kubernetes supported by cloud provider. 
Sorted from oldest to latest.\"\"\"\n\n output = subprocess.check_output(\n [\n \"gcloud\",\n \"container\",\n \"get-server-config\",\n \"--region\",\n region,\n \"--format=json\",\n ]\n )\n data = json.loads(output.decode(\"utf-8\"))\n supported_kubernetes_versions = sorted([_ for _ in data[\"validMasterVersions\"]])\n return filter_by_highest_supported_k8s_version(supported_kubernetes_versions)\n\n\n@functools.lru_cache()\ndef instances(project):\n output = subprocess.check_output(\n [\n \"gcloud\",\n \"compute\",\n \"machine-types\",\n \"list\",\n \"--project\",\n project,\n \"--format=json\",\n ]\n )\n data = json.loads(output.decode(\"utf-8\"))\n return {_[\"description\"]: _[\"name\"] for _ in data}\n\n\n# Getting pricing data could come from here\n# https://cloudpricingcalculator.appspot.com/static/data/pricelist.json\n", "sub_path": "qhub/provider/cloud/google_cloud.py", "file_name": "google_cloud.py", "file_ext": "py", "file_size_in_byte": 2045, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "subprocess.check_output", "line_number": 10, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 11, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 8, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 17, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 20, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 15, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 26, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 29, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 24, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 37, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 47, "usage_type": "call"}, {"api_name": "qhub.provider.cloud.commons.filter_by_highest_supported_k8s_version", "line_number": 49, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 33, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 54, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 65, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "477840961", "text": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport threading\n\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport requests\n\n\n# In[ ]:\n\n\n## firebase와의 연동\n\n\n# In[2]:\n\n\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\n\n# Fetch the service account key JSON file contents\ncred = credentials.Certificate(open('realace2018-firebase-adminsdk-r7gk1-121be5edaf.json').read())\n\n# Initialize the app with a service account, granting admin privileges\napp=firebase_admin.initialize_app(cred, {\n 'databaseURL': 'https://realace2018.firebaseio.com/'\n})\n\nref=db.reference()\n\n\n# In[ ]:\n\n\n## \n\n\n# In[3]:\n\n\nserviceKey='F%2FxP1NfaTBhw0giVbsH7HTUMMnbJF6p9LhD9p8mJ4HpucMsVcxUzoTw4RxZDFdnRP3NgWj0IwJke%2FOzfe5VxhA%3D%3D'\nnumOfRows=[25, 16, 8, 9, 5, 5, 5, 31, 6, 7, 14, 12, 9, 1, 9, 9, 3]\nsidoName=['서울', '부산', '대구', '인천', '광주', '대전', '울산', '경기', '강원', '충북', '충남', '전북', '전남', '세종', '경북', '경남', '제주']\n\n\n# In[ ]:\n\n\n## firebase저장 함수\n\n\n# In[9]:\n\n\n\ndef func():\n timer=threading.Timer(3600,func)\n \n for number in range(0,17):\n 
url='http://openapi.airkorea.or.kr/openapi/services/rest/ArpltnInforInqireSvc/getCtprvnMesureSidoLIst?serviceKey='+str(serviceKey)+'&numOfRows='+str(numOfRows[number])+'&pageSize=10&pageNo=1&startPage=1&sidoName='+str(sidoName[number])+'&searchCondition=DAILY'\n html=requests.get(url).text\n soup = BeautifulSoup(html, 'html.parser')\n \n citylist=[]\n pm10list=[]\n timelist=[]\n\n datatime=soup.find_all('datatime')\n cityname=soup.find_all('cityname')\n pm10vale=soup.find_all('pm10value')\n\n for code in datatime:\n timelist.append(code.text)\n for code in cityname:\n citylist.append(code.text)\n for code in pm10vale:\n pm10list.append(code.text)\n \n numb=numOfRows[number]\n \n for num in range(0,numb):\n users_ref= ref.child('pm')\n users_ref.push({'sidoname':sidoName[number],'cityname':citylist[int(num)], 'pm10vale':pm10list[int(num)],'datatime':timelist[int(num)]})\n \n timer.start()\n\nfunc()\n\n", "sub_path": "api/APIfirebase.py", "file_name": "APIfirebase.py", "file_ext": "py", "file_size_in_byte": 2177, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "firebase_admin.credentials.Certificate", "line_number": 28, "usage_type": "call"}, {"api_name": "firebase_admin.credentials", "line_number": 28, "usage_type": "name"}, {"api_name": "firebase_admin.initialize_app", "line_number": 31, "usage_type": "call"}, {"api_name": "firebase_admin.db.reference", "line_number": 35, "usage_type": "call"}, {"api_name": "firebase_admin.db", "line_number": 35, "usage_type": "name"}, {"api_name": "threading.Timer", "line_number": 63, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 67, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "473952378", "text": "import tweepy # for tweeting\nimport secrets # shhhh\nfrom book_manager import BookManager # for getting sentences out of our book file\nimport random\n\ndef get_next_chunk():\n # open text file\n book = BookManager()\n first_sentence = book.first_sentence()\n # tweet the whole sentence if it's short enough\n if len(first_sentence) <= 140:\n chunk = first_sentence\n # otherwise just print the first 140 characters\n else:\n chunk = first_sentence[0:140]\n\n # delete what we just tweeted from the text file\n book.delete_message(chunk)\n #chunk = 'https://unsplash.it/200/300/?random'\n return chunk\n\ndef tweet(message):\n auth = tweepy.OAuthHandler(secrets.consumer_key, secrets.consumer_secret)\n auth.set_access_token(secrets.access_token, secrets.access_token_secret)\n api = tweepy.API(auth)\n auth.secure = True\n\n print(\"Posting message {}\".format(message))\n api.update_status(status=message)\n\ndef retweet():\n auth = tweepy.OAuthHandler(secrets.consumer_key, secrets.consumer_secret)\n auth.set_access_token(secrets.access_token, secrets.access_token_secret)\n api = tweepy.API(auth)\n auth.secure = True\n try:\n searchQuery = 'alice in wonderland' # this is what we're searching for\n tweetsPerQry = 1 # this is the max the API permits\n #new_tweet = api.search(q=searchQuery, count=tweetsPerQry) #its a list\n for tweet in tweepy.Cursor(api.search,q=searchQuery,result_type=\"recent\",include_entities=True).items(tweetsPerQry):\n print(\"Posting RT {}\".format(tweet.text))\n api.retweet(tweet.id)\n except tweepy.TweepError as e:\n print(\"TweepError. 
Posting a message from the book...\")\n tweet(get_next_chunk())\n\nif __name__ == '__main__':\n if random.randint(0,1) == 0:\n tweet(get_next_chunk())\n else:\n retweet()\n", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 1756, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "book_manager.BookManager", "line_number": 8, "usage_type": "call"}, {"api_name": "tweepy.OAuthHandler", "line_number": 23, "usage_type": "call"}, {"api_name": "secrets.consumer_key", "line_number": 23, "usage_type": "attribute"}, {"api_name": "secrets.consumer_secret", "line_number": 23, "usage_type": "attribute"}, {"api_name": "secrets.access_token", "line_number": 24, "usage_type": "attribute"}, {"api_name": "secrets.access_token_secret", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tweepy.API", "line_number": 25, "usage_type": "call"}, {"api_name": "tweepy.OAuthHandler", "line_number": 32, "usage_type": "call"}, {"api_name": "secrets.consumer_key", "line_number": 32, "usage_type": "attribute"}, {"api_name": "secrets.consumer_secret", "line_number": 32, "usage_type": "attribute"}, {"api_name": "secrets.access_token", "line_number": 33, "usage_type": "attribute"}, {"api_name": "secrets.access_token_secret", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tweepy.API", "line_number": 34, "usage_type": "call"}, {"api_name": "tweepy.Cursor", "line_number": 40, "usage_type": "call"}, {"api_name": "tweepy.TweepError", "line_number": 43, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "3503411", "text": "# Copyright (c) 2020 SMHI, Swedish Meteorological and Hydrological Institute.\n# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).\n\"\"\"\nCreated on 2020-04-07 15:41\n\n@author: a002028\n\"\"\"\nimport json\nimport numpy as np\nfrom sirena import utils\n\n\nclass JSONreader(dict):\n \"\"\"Read json files.\n\n - Import json\n - Export to json\n - Find dictionary within json file based on a specific key\n - Add elements to dictionary\n - Fill up json/dictionary structure with relevant/desired information\n \"\"\"\n\n def load_json(self, config_files=None, return_dict=False):\n \"\"\"Load json file.\n\n Array will be either a list of dictionaries or one single dictionary\n depending on what the json file includes.\n \"\"\"\n if not isinstance(config_files, (list, np.ndarray)):\n config_files = [config_files]\n\n for config_file in config_files:\n with open(config_file, 'r') as fd:\n self = utils.recursive_dict_update(self, json.load(fd))\n\n if return_dict:\n return self\n", "sub_path": "sirena/readers/json_reader.py", "file_name": "json_reader.py", "file_ext": "py", "file_size_in_byte": 1080, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.ndarray", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sirena.utils.recursive_dict_update", "line_number": 34, "usage_type": "call"}, {"api_name": "sirena.utils", "line_number": 34, "usage_type": "name"}, {"api_name": "json.load", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "183479477", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Longgeek \n\nimport simplejson as json\n\nfrom fuerte.api.v1.utils import pack_requests\nfrom fuerte.api.v1.config import URL\nfrom fuerte.api.v1.config import HEADERS\n\n\ndef execute(cid, cmds, 
wait=False):\n \"\"\"在容器中执行命令\n\n :param str cid: The container uuid\n :param list cmds: List of commands to execute\n :param bool wait:\n wait is True 命令将在前台执行,会等待命令执行完成\n wait is False 命令将在后台执行,不等待命令执行完成\n \"\"\"\n\n if type(cmds) != list:\n return (-1, \"cmds needs to be a list type parameter\", \"\")\n\n if wait:\n results = []\n params = {\n \"AttachStdin\": False,\n \"AttachStdout\": True,\n \"AttachStderr\": True,\n \"Tty\": False,\n }\n for c in cmds:\n params[\"Cmd\"] = [\"/bin/sh\", \"-c\", c]\n kwargs = {\n \"url\": URL + \"/containers/%s/exec\" % cid,\n \"headers\": HEADERS,\n \"data\": json.dumps(params)\n }\n r = pack_requests(\"POST\", **kwargs)\n s = r.status_code\n if s != 201:\n return (s, r.text, \"\")\n\n kwargs = {\n \"url\": URL + \"/exec/%s/start\" % r.json()[\"Id\"],\n \"headers\": HEADERS,\n \"data\": json.dumps({\"Tty\": False, \"Detach\": False})\n }\n r = pack_requests(\"POST\", **kwargs)\n s = r.status_code\n results.append(r.text)\n if s != 200:\n return (s, r.text, \"\")\n return (0, \"\", results)\n else:\n params = {\n \"AttachStdout\": False,\n \"AttachStderr\": False,\n \"Tty\": True,\n }\n for c in cmds:\n params[\"Cmd\"] = [\"/bin/sh\", \"-c\", c]\n kwargs = {\n \"url\": URL + \"/containers/%s/exec\" % cid,\n \"headers\": HEADERS,\n \"data\": json.dumps(params)\n }\n r = pack_requests(\"POST\", **kwargs)\n s = r.status_code\n if s != 201:\n return (s, r.text, \"\")\n\n kwargs = {\n \"url\": URL + \"/exec/%s/start\" % r.json()[\"Id\"],\n \"headers\": HEADERS,\n \"data\": json.dumps({\"Tty\": True, \"Detach\": True})\n }\n r = pack_requests(\"POST\", **kwargs)\n s = r.status_code\n if s != 200:\n return (s, r.text, \"\")\n return (0, \"\", \"\")\n", "sub_path": "fuerte/api/v1/actions/container/execute.py", "file_name": "execute.py", "file_ext": "py", "file_size_in_byte": 2567, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "fuerte.api.v1.config.URL", "line_number": 36, "usage_type": "name"}, {"api_name": "fuerte.api.v1.config.HEADERS", "line_number": 37, "usage_type": "name"}, {"api_name": "simplejson.dumps", "line_number": 38, "usage_type": "call"}, {"api_name": "fuerte.api.v1.utils.pack_requests", "line_number": 40, "usage_type": "call"}, {"api_name": "fuerte.api.v1.config.URL", "line_number": 46, "usage_type": "name"}, {"api_name": "fuerte.api.v1.config.HEADERS", "line_number": 47, "usage_type": "name"}, {"api_name": "simplejson.dumps", "line_number": 48, "usage_type": "call"}, {"api_name": "fuerte.api.v1.utils.pack_requests", "line_number": 50, "usage_type": "call"}, {"api_name": "fuerte.api.v1.config.URL", "line_number": 65, "usage_type": "name"}, {"api_name": "fuerte.api.v1.config.HEADERS", "line_number": 66, "usage_type": "name"}, {"api_name": "simplejson.dumps", "line_number": 67, "usage_type": "call"}, {"api_name": "fuerte.api.v1.utils.pack_requests", "line_number": 69, "usage_type": "call"}, {"api_name": "fuerte.api.v1.config.URL", "line_number": 75, "usage_type": "name"}, {"api_name": "fuerte.api.v1.config.HEADERS", "line_number": 76, "usage_type": "name"}, {"api_name": "simplejson.dumps", "line_number": 77, "usage_type": "call"}, {"api_name": "fuerte.api.v1.utils.pack_requests", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "350315424", "text": "# Update e Delete de dados \n# Importando modulos necessarios \n\nimport sqlite3 as sql \nimport os \nimport random as rdm\nimport time \nimport datetime as dtm\nimport matplotlib.pyplot as plt\n\n\n# Variaveis Globais 
\nnomeDB = '/home/ludmylla/Documents/PythonFundamentos/MeusCodigos/cap06/dsaUpdat.db'\nnomeTab = 'CasaNova'\nprodutos = ['fogão', 'geladeira', 'mesa', 'sofa', 'cadeira']\n\n# Validando se ja existe o banco de dados \nif os.path.exists(nomeDB):\n os.remove(nomeDB)\nelse:\n None\n\n# Criando conexão e cursor\ncon = sql.connect(nomeDB)\nc = con.cursor()\n\n# Criando funções para manipulação de dados\ndef criando_tabela(nomeTab):\n c.execute('CREATE TABLE IF NOT EXISTS nomeTab (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,'\\\n 'date TEXT, '\\\n 'prod_name TEXT,'\\\n 'valor REAL)')\n\ndef inserir_dados(nomeTab, produtos):\n c.execute(\"INSERT INTO nomeTab (date,prod_name,valor) VALUES (?, ?, ?)\", (dtm.datetime.now(),produtos,rdm.randrange(50,200)))\n con.commit()\n\ndef lendo_todos_dados(nomeTab):\n c.execute(\"SELECT * FROM nomeTab\")\n for linha in c.fetchall():\n print(linha)\n\ndef lendo_coluna(nomeTab):\n c.execute(\"SELECT * FROM nomeTab\")\n for i in c.fetchall():\n print(i[3])\n\ndef atualizando_doc(nomeTab):\n c.execute(\"UPDATE nomeTab SET valor = 70.00 WHERE valor = 80.00\")\n con.commit()\n\ndef delete_doc(nomeTab):\n c.execute(\"DELETE FROM nomeTab WHERE valor = 70.00\")\n con.commit()\n\n# Função para criação de grafico\ndef criando_graficos(nomeTab):\n c.execute(\"SELECT id, valor FROM nomeTab\")\n ids = []\n valores = []\n dados = c.fetchall()\n for i in dados:\n ids.append(i[0])\n valores.append(i[1])\n plt.bar(ids,valores)\n plt.show()\n\n# Executando as funções \ncriando_tabela(nomeTab)\nfor prod in produtos :\n inserir_dados(nomeTab,prod)\n time.sleep(1)\nlendo_todos_dados(nomeTab)\nlendo_coluna(nomeTab)\natualizando_doc(nomeTab)\ndelete_doc(nomeTab)\ncriando_graficos(nomeTab)", "sub_path": "MeusCodigos/cap06/cap06-05.py", "file_name": "cap06-05.py", "file_ext": "py", "file_size_in_byte": 2022, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.exists", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "225995932", "text": "\"\"\"\nTests for db/sqlite_db.py\n\"\"\"\nfrom . 
import *\n\nimport sqlite3\n\nimport db.sqlite_db as _\n\n\nclass TestModuleSqliteDb(TestModule):\n def test_all_statement(self):\n expected = [\"Database\", \"Connection\"]\n self.assertAllSpecifiedProperly(expected, _.__all__)\n\n\nclass TestDatabase(unittest.TestCase):\n def test_init(self):\n db = _.Database(':memory:')\n self.assertIsInstance(db, _.Database)\n\n def test_repr(self):\n path = 'foo'\n db = _.Database(path)\n expected = [\"Database(\" + q + path + q + \")\" for q in (\"'\", '\"')]\n self.assertIn(repr(db), expected)\n\n def test_path(self):\n path = ':memory:'\n db = _.Database(path)\n self.assertEqual(path, db.path)\n\n def test_connect(self):\n db = _.Database(':memory:')\n with db.connect() as c:\n self.assertIsInstance(c, _.Connection)\n self.assertIsInstance(c, sqlite3.Connection)\n\n\ndef iter_cocktails_ingredients(foo, bar):\n index = 0\n for i in foo:\n for j in bar:\n if j[0] <= i[0]:\n index += 1\n yield(index, i[0], j[0], i[0] + j[0])\n\n\nclass TestConnection(unittest.TestCase):\n insert_cocktail = \"INSERT INTO cocktails VALUES (?, ?, ?)\"\n insert_ingredient = \"INSERT INTO ingredients VALUES (?, ?)\"\n insert_cocktail_ingredient = \"\"\"\n INSERT INTO cocktails_ingredients VALUES (?, ?, ?, ?)\n \"\"\"\n\n cocktails = [(i, 'name' + str(i), 'image' + str(i)) for i in range(10)]\n ingredients = [(i, 'ing' + str(i)) for i in range(10)]\n cocktails_ingredients = list(\n iter_cocktails_ingredients(cocktails, ingredients))\n\n def test_inheritance(self):\n with _.Connection(':memory:') as c:\n self.assertIsInstance(c, _.Connection)\n self.assertIsInstance(c, sqlite3.Connection)\n\n def test_fk_support(self):\n with _.Connection(':memory:') as c:\n fk = c.execute(\"PRAGMA foreign_keys\").fetchone()[0]\n self.assertTrue(fk)\n\n def test_create_tables_if_needed(self):\n tables = \"cocktails ingredients cocktails_ingredients\".split()\n query = \"SELECT name FROM sqlite_master WHERE type='table'\"\n with _.Connection(':memory:') as c:\n c.create_tables_if_needed()\n\n existing_tables_count = 0\n for row in c.execute(query):\n # Check that all expected tables exist\n self.assertIn(row[0], tables)\n existing_tables_count += 1\n # Check that extra tables don't exist\n self.assertEqual(existing_tables_count, len(tables))\n\n def test_select_all_cocktails(self):\n with _.Connection(':memory:') as c:\n args = c, 'cocktails', self.insert_cocktail, self.cocktails\n self._test_select_all_foo(*args)\n\n def test_select_cocktail(self):\n with _.Connection(':memory:') as c:\n args = c, 'cocktail', self.insert_cocktail, self.cocktails\n self._test_select_foo(*args)\n\n def test_select_cocktail_type_checking(self):\n with _.Connection(':memory:') as c:\n with self.assertRaises(TypeError):\n c.select_cocktail('165')\n\n def test_select_all_ingredients(self):\n with _.Connection(':memory:') as c:\n args = c, 'ingredients', self.insert_ingredient, self.ingredients\n self._test_select_all_foo(*args)\n\n def test_select_ingredient(self):\n with _.Connection(':memory:') as c:\n args = c, 'ingredient', self.insert_ingredient, self.ingredients\n self._test_select_foo(*args)\n\n def test_select_ingredient_type_checking(self):\n with _.Connection(':memory:') as c:\n with self.assertRaises(TypeError):\n c.select_ingredient('165')\n\n def test_select_all_cocktails_ingredients(self):\n with _.Connection(':memory:') as c:\n self._test_select_all_foo(\n c,\n 'cocktails_ingredients',\n self.insert_cocktail_ingredient,\n self.cocktails_ingredients\n )\n\n def 
test_select_cocktail_ingredient(self):\n with _.Connection(':memory:') as c:\n self._test_select_foo(\n c,\n 'cocktail_ingredient',\n self.insert_cocktail_ingredient,\n self.cocktails_ingredients\n )\n\n def test_select_cocktail_ingredient_type_checking(self):\n with _.Connection(':memory:') as c:\n with self.assertRaises(TypeError):\n c.select_cocktail_ingredient('165')\n\n def test_select_ingredients_of_cocktail(self):\n with _.Connection(':memory:') as c:\n c.create_tables_if_needed()\n self._fill_table(c, self.insert_cocktail, self.cocktails)\n self._fill_table(c, self.insert_ingredient, self.ingredients)\n self._fill_table(\n c, self.insert_cocktail_ingredient, self.cocktails_ingredients)\n\n for cocktail in self.cocktails:\n identifier = cocktail[0]\n result = c.select_ingredients_of_cocktail(identifier)\n expected = []\n for ci in self.cocktails_ingredients:\n if ci[1] == identifier:\n ing_id = ci[2]\n part = ci[3]\n for i in self.ingredients:\n if i[0] == ing_id:\n name = i[1]\n row = ing_id, name, part\n expected.append(row)\n self.assertEqual(expected, list(result))\n\n def test_select_ingredients_of_cocktail_type_checking(self):\n with _.Connection(':memory:') as c:\n with self.assertRaises(TypeError):\n c.select_ingredients_of_cocktail('165')\n\n def test_insert_cocktail(self):\n with _.Connection(':memory:') as connection:\n self._test_insert_foo(\n connection,\n 'cocktails',\n self.insert_cocktail,\n self.cocktails,\n )\n\n def test_insert_ingredient(self):\n with _.Connection(':memory:') as connection:\n self._test_insert_foo(\n connection,\n 'ingredients',\n self.insert_ingredient,\n self.ingredients,\n )\n\n def _iter_select_all(self, connection, select_postfix):\n select = getattr(connection, 'select_all_' + select_postfix)\n yield from select()\n\n def _test_insert_foo(self, connection, select_postfix, insert_sql, items):\n connection.create_tables_if_needed()\n\n for item in items:\n cursor = connection.execute(insert_sql, item)\n self.assertEqual(item[0], cursor.lastrowid)\n\n fiends_count = None\n select = self._iter_select_all(connection, select_postfix)\n for expected_row, got_row in zip(items, select):\n if fiends_count is None: # set up once\n fiends_count = len(expected_row)\n\n for i in range(fiends_count):\n self.assertEqual(expected_row[i], got_row[i])\n\n def _fill_table(self, connection, insert_sql, items):\n for item in items:\n connection.execute(insert_sql, item)\n\n def _test_select_all_foo(self, connection, select_postfix, insert_sql,\n items):\n connection.create_tables_if_needed()\n self._fill_table(connection, insert_sql, items)\n\n fiends_count = None\n select = self._iter_select_all(connection, select_postfix)\n for expected_row, got_row in zip(items, select):\n if fiends_count is None: # set up once\n fiends_count = len(expected_row)\n\n for i in range(fiends_count):\n self.assertEqual(expected_row[i], got_row[i])\n\n def _get_select_one_func(self, connection, select_postfix):\n return getattr(connection, 'select_' + select_postfix)\n\n def _test_select_foo(self, connection, select_postfix, insert_sql, items):\n connection.create_tables_if_needed()\n self._fill_table(connection, insert_sql, items)\n\n select = self._get_select_one_func(connection, select_postfix)\n for item in items:\n identifier = item[0]\n row = select(identifier).fetchone()\n self.assertEqual(item, row)\n", "sub_path": "tests/test_db_sqlite_db.py", "file_name": "test_db_sqlite_db.py", "file_ext": "py", "file_size_in_byte": 8426, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "db.sqlite_db.__all__", "line_number": 14, "usage_type": "attribute"}, {"api_name": "db.sqlite_db", "line_number": 14, "usage_type": "name"}, {"api_name": "db.sqlite_db", "line_number": 19, "usage_type": "name"}, {"api_name": "db.sqlite_db.Database", "line_number": 19, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 20, "usage_type": "argument"}, {"api_name": "db.sqlite_db.Database", "line_number": 20, "usage_type": "attribute"}, {"api_name": "db.sqlite_db", "line_number": 24, "usage_type": "name"}, {"api_name": "db.sqlite_db.Database", "line_number": 24, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 26, "usage_type": "argument"}, {"api_name": "db.sqlite_db", "line_number": 30, "usage_type": "name"}, {"api_name": "db.sqlite_db.Database", "line_number": 30, "usage_type": "call"}, {"api_name": "db.sqlite_db.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "db.sqlite_db", "line_number": 31, "usage_type": "name"}, {"api_name": "db.sqlite_db", "line_number": 34, "usage_type": "name"}, {"api_name": "db.sqlite_db.Database", "line_number": 34, "usage_type": "call"}, {"api_name": "db.sqlite_db.connect", "line_number": 35, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 35, "usage_type": "name"}, {"api_name": "db.sqlite_db.Connection", "line_number": 36, "usage_type": "attribute"}, {"api_name": "db.sqlite_db", "line_number": 36, "usage_type": "name"}, {"api_name": "sqlite3.Connection", "line_number": 37, "usage_type": "attribute"}, {"api_name": "db.sqlite_db.Connection", "line_number": 62, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 62, "usage_type": "name"}, {"api_name": "db.sqlite_db.Connection", "line_number": 63, "usage_type": "attribute"}, {"api_name": "db.sqlite_db", "line_number": 63, "usage_type": "name"}, {"api_name": "sqlite3.Connection", "line_number": 64, "usage_type": "attribute"}, {"api_name": "db.sqlite_db.Connection", "line_number": 67, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 67, "usage_type": "name"}, {"api_name": "db.sqlite_db.Connection", "line_number": 74, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 74, "usage_type": "name"}, {"api_name": "db.sqlite_db.Connection", "line_number": 86, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 86, "usage_type": "name"}, {"api_name": "db.sqlite_db.Connection", "line_number": 91, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 91, "usage_type": "name"}, {"api_name": "db.sqlite_db.Connection", "line_number": 96, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 96, "usage_type": "name"}, {"api_name": "db.sqlite_db.Connection", "line_number": 101, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 101, "usage_type": "name"}, {"api_name": "db.sqlite_db.Connection", "line_number": 106, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 106, "usage_type": "name"}, {"api_name": "db.sqlite_db.Connection", "line_number": 111, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 111, "usage_type": "name"}, {"api_name": "db.sqlite_db.Connection", "line_number": 116, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 116, "usage_type": "name"}, {"api_name": "db.sqlite_db.Connection", "line_number": 125, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 125, "usage_type": "name"}, {"api_name": 
"db.sqlite_db.Connection", "line_number": 134, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 134, "usage_type": "name"}, {"api_name": "db.sqlite_db.Connection", "line_number": 139, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 139, "usage_type": "name"}, {"api_name": "db.sqlite_db.Connection", "line_number": 162, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 162, "usage_type": "name"}, {"api_name": "db.sqlite_db.Connection", "line_number": 167, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 167, "usage_type": "name"}, {"api_name": "db.sqlite_db.Connection", "line_number": 176, "usage_type": "call"}, {"api_name": "db.sqlite_db", "line_number": 176, "usage_type": "name"}]} +{"seq_id": "20017100", "text": "import cv2\nimport csv\n\nif __name__ == \"__main__\":\n\t# Loading the cascade XML file using CascadeClassifier method. \n\tface_cascades = cv2.CascadeClassifier(\"xml-files/haarcascade_frontalface_alt.xml\")\n\n\timg = cv2.imread(\"images/image-group-1.jpg\")\n\timg = cv2.resize(img, None, fx=0.1, fy=0.1, interpolation = cv2.INTER_CUBIC)\n\n\t# For storing x, y pixels of the face in any given image\n\tfaces = face_cascades.detectMultiScale(img, scaleFactor=1.05, minNeighbors=5)\n\n\tfor x, y, w, h in faces:\n\t\t# To draw rectamgle, args: img, (starting point corner), (end point corner), (color), width)\n\t\timg = cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n\tcv2.imshow(\"Output\", img)\n\tcv2.waitKey(0)\n\n", "sub_path": "image-face-detection.py", "file_name": "image-face-detection.py", "file_ext": "py", "file_size_in_byte": 692, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "cv2.CascadeClassifier", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "90044889", "text": " \n\n# Program extracting first column \nimport xlrd \nimport xlwt\nimport xlsxwriter \nloc = (\"C:/Users/ARUNBABU/Desktop/Mashupstack/Python/parvathy.xlsx\") \n \nwb = xlrd.open_workbook(loc) \nsheet = wb.sheet_by_index(0) \nsheet.cell_value(0, 0) \n \nfor i in range(sheet.nrows):\n\tlist1 = sheet.cell_value(i, 0)\n\tprint(list1) \n\nrow = 0\ncolumn = 0\n \n# Workbook() takes one, non-optional, argument \n# which is the filename that we want to create. \nworkbook = xlsxwriter.Workbook('hello2.xlsx') \n \n# The workbook object is then used to add new \n# worksheet via the add_worksheet() method. \nworksheet = workbook.add_worksheet() \n \n# Use the worksheet object to write \n# data via the write() method. \n\n\nfor item in list1 : \n \n # write operation perform \n worksheet.write(row,column,item) \n \n # incrementing the value of row by one \n # with each iteratons. 
\n row += 1\n \nworkbook.close() \n\n \n\n", "sub_path": "Python/pgm1.py", "file_name": "pgm1.py", "file_ext": "py", "file_size_in_byte": 904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "xlrd.open_workbook", "line_number": 9, "usage_type": "call"}, {"api_name": "xlsxwriter.Workbook", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "585688131", "text": "#!/usr/bin/env python\n# coding: utf8\n\"\"\"\nTango device server for setting up pyFAI azimuthal integrator in a LImA ProcessLib.\n\nDestination path:\nLima/tango/plugins/DistortionCorrection \n\"\"\"\n__author__ = \"Jérôme Kieffer\"\n__contact__ = \"Jerome.Kieffer@ESRF.eu\"\n__license__ = \"GPLv3+\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\n__date__ = \"25/02/2013\"\n__status__ = \"beta\"\n__docformat__ = 'restructuredtext'\n\nimport os\nimport sys\nimport threading\nimport logging\nlogger = logging.getLogger(\"lima.tango.pyfai\")\n# set loglevel at least at INFO\nif logger.getEffectiveLevel() > logging.INFO:\n logger.setLevel(logging.INFO)\nimport pyFAI, pyFAI._distortion\ntry:\n from pyFAI.fastcrc import crc32\nexcept ImportError:\n from zlib import crc32\n\nimport fabio\ntry:\n import pyopencl\n import pyFAI.ocl_azim_lut\nexcept ImportError:\n pyopencl = None\n logger.warning(\"Unable to import pyopencl, will use OpenMP (if available)\")\nimport PyTango\nfrom os.path import dirname\ncwd = dirname(dirname(dirname(os.path.abspath(__file__))))\nsys.path.append(os.path.join(cwd, \"build\", \"lib.linux-x86_64-2.6\"))\nimport numpy\nfrom Lima import Core\nfrom Utils import BasePostProcess\n\nclass PyFAISink(Core.Processlib.SinkTaskBase):\n \"\"\"\n This is a processlib Sink: it takes an image as input and writes a file to disk but returns nothing\n \"\"\"\n def __init__(self, splinefile=None, darkfile=None, flatfile=None, extraheader=None):\n \"\"\"\n @param splinefile: File with the description of the distortion as a cubic spline\n @param darkfile: image with the dark current\n @param flatfile: image with the flat field correction \n @param extraheader: dictionary with additional static header for EDF files \n \"\"\"\n Core.Processlib.SinkTaskBase.__init__(self)\n\n self._sem = threading.Semaphore()\n if extraheader:\n self.header = extraheader\n else:\n self.header = {}\n self.splinefile = self.dis = self.det = self.ocl_integrator = None\n self.darkfile = self.darkcurrent = self.darkcurrent_crc = None\n self.flatfile = self.flatfield = self.flatfield_crc = None\n self.setSplineFile(splinefile)\n self.setDarkcurrentFile(darkfile)\n self.setFlatfieldFile(flatfile)\n\n def process(self, data) :\n \"\"\"\n Process a frame\n @param data: a LImA frame with member .buffer (a numpy array) and .frameNumber (an int)\n \"\"\"\n ctControl = _control_ref()\n saving = ctControl.saving()\n sav_parms = saving.getParameters()\n directory = sav_parms.directory\n prefix = sav_parms.prefix\n nextNumber = sav_parms.nextNumber\n indexFormat = sav_parms.indexFormat\n output = os.path.join(directory, prefix + indexFormat % (nextNumber + data.frameNumber) + \".cor.edf\")\n header = self.header.copy()\n header[\"index\"] = nextNumber + data.frameNumber\n if pyopencl and self.ocl_integrator:\n out = self.ocl_integrator.integrate(data.buffer, dark=self.darkcurrent, flat=self.flatfield,\n dark_checksum=self.darkcurrent_crc, flat_checksum=self.flatfield_crc)[1]\n else:\n data = numpy.ascontiguousarray(data.buffer, dtype=numpy.float32)\n if 
self.darkcurrent is not None:\n data -= self.darkcurrent\n if self.flatfield is not None:\n data /= self.flatfield\n if self.dis:\n out = self.dis.correct(data)\n else:\n out = data\n edf = fabio.edfimage.edfimage(data=out, header=header)\n edf.write(output)\n\n\n\n def setDarkcurrentFile(self, imagefile):\n \"\"\"\n @param imagefile: filename with the path to the dark image\n \"\"\"\n with self._sem:\n if imagefile:\n self.darkfile = imagefile\n try:\n self.darkcurrent = numpy.ascontiguousarray(fabio.open(imagefile).data, numpy.float32)\n except Exception as error:\n logger.warning(\"setDarkcurrentFile: Unable to read file %s: %s\" % (imagefile, error))\n else:\n self.darkcurrent_crc = crc32(self.darkcurrent)\n self.header[\"darkcurrent\"] = imagefile\n else:\n self.darkfile = self.darkcurrent = self.darkcurrent_crc = None\n self.header[\"darkcurrent\"] = \"None\"\n\n def setFlatfieldFile(self, imagefile):\n \"\"\"\n @param imagefile: filename with the path to the flatfield image\n \"\"\"\n with self._sem:\n if imagefile:\n self.flatfile = imagefile\n try:\n self.flatfield = numpy.ascontiguousarray(fabio.open(imagefile).data, numpy.float32)\n except Exception as error:\n logger.warning(\"setFlatfieldFile: Unable to read file %s: %s\" % (imagefile, error))\n else:\n self.flatfield_crc = crc32(self.flatfield)\n self.header[\"flatfield\"] = imagefile\n else:\n self.flatfile = self.flatfield = self.flatfield_crc = None\n self.header[\"flatfield\"] = \"None\"\n\n def setSplineFile(self, splineFile):\n \"\"\"\n @param imagefile: filename with the path to the spline distortion file\n \"\"\"\n with self._sem:\n if not splineFile or not os.path.exists(str(splineFile)):\n self.splinefile = None\n self.det = None\n self.dis = None\n self.header[\"splinefile\"] = \"None\"\n else:\n logger.info(\"start config ...\")\n self.det = pyFAI.detectors.FReLoN(splineFile)\n self.dis = pyFAI._distortion.Distortion(self.det)\n self.reset()\n self.header[\"splinefile\"] = splineFile\n\n def calc_LUT(self):\n \"\"\"\n This is the \"slow\" calculation of the Look-up table that can be spown in another thread \n (especially to avoid Tango from timing out) \n \"\"\"\n with self._sem:\n if self.dis:\n self.dis.calc_LUT_size()\n self.dis.calc_LUT()\n if pyopencl:\n self.ocl_integrator = pyFAI.ocl_azim_lut.OCL_LUT_Integrator(self.dis.LUT, self.dis.shape[0] * self.dis.shape[1])\n else:\n self.splinefile = None\n self.det = None\n self.dis = None\n self.header[\"splinefile\"] = \"None\"\n\n def reset(self):\n \"\"\"\n Recalculate the lookup table in another thread\n \"\"\"\n threading.Thread(target=self.calc_LUT, name=\"calc_LUT\").start()\n\n\nclass DistortionCorrectionDeviceServer(BasePostProcess) :\n \"\"\"\n Tango device server exposed to configure the LImA plugin\n \"\"\"\n DISTORTION_TASK_NAME = 'DistortionCorrectionTask'\n Core.DEB_CLASS(Core.DebModApplication, 'DistortionCorrection')\n def __init__(self, cl, name):\n self.__Task = None\n self.get_device_properties(self.get_device_class())\n BasePostProcess.__init__(self, cl, name)\n DistortionCorrectionDeviceServer.init_device(self)\n\n self.__spline_filename = None\n self.__darkcurrent_filename = None\n self.__flatfield_filename = None\n self.__pyFAISink = None\n\n def set_state(self, state) :\n \"\"\"\n Switch on or off the LImA plugin\n \"\"\"\n if(state == PyTango.DevState.OFF) :\n if (self.__Task):\n self.__Task = None\n ctControl = _control_ref()\n extOpt = ctControl.externalOperation()\n extOpt.delOp(self.DISTORTION_TASK_NAME)\n elif(state == 
PyTango.DevState.ON) :\n if not self.__Task:\n try:\n ctControl = _control_ref()\n extOpt = ctControl.externalOperation()\n self.__Task = extOpt.addOp(Core.USER_SINK_TASK,\n self.DISTORTION_TASK_NAME,\n self._runLevel)\n if not self.__pyFAISink:\n self.__pyFAISink = PyFAISink(splinefile=self.__spline_filename,\n darkfile=self.__darkcurrent_filename,\n flatfile=self.__flatfield_filename)\n self.__Task.setSinkTask(self.__pyFAISink)\n except:\n import traceback\n traceback.print_exc()\n return\n PyTango.Device_4Impl.set_state(self, state)\n\n def setDarkcurrentFile(self, filepath):\n \"\"\"\n @param imagefile: filename with the path to the dark image\n \"\"\"\n\n self.__darkcurrent_filename = filepath\n if(self.__pyFAISink) :\n self.__pyFAISink.setBackgroundFile(filepath)\n\n def setFlatfieldImage(self, filepath):\n \"\"\"\n @param filepath: filename with the path to the flatfield image\n \"\"\"\n self.__flatfield_filename = filepath\n if(self.__pyFAISink) :\n self.__pyFAISink.setFlatfieldFile(filepath)\n\n def setSplineFile(self, filepath):\n \"\"\"\n @param filepath: filename with the path to the spline distortion file\n \"\"\"\n\n self.__spline_filename = filepath\n if(self.__pyFAISink) :\n self.__pyFAISink.setSplineFile(filepath)\n\n def Reset(self) :\n \"\"\"\n Force the reinitialization\n \"\"\"\n self.__pyFAISink = PyFAISink(splinefile=self.__spline_filename,\n darkfile=self.__darkcurrent_filename,\n flatfile=self.__flatfield_filename)\n self.__Task.setSinkTask(self.__pyFAISink)\n\n\nclass DistortionCorrectionDeviceServerClass(PyTango.DeviceClass) :\n # Class Properties\n class_property_list = {\n }\n\n\n # Device Properties\n device_property_list = {\n }\n\n\n # Command definitions\n cmd_list = {\n 'setDarkcurrentFile':\n [[PyTango.DevString, \"Full path of darkcurrent image file\"],\n [PyTango.DevVoid, \"\"]],\n\n 'setFlatfieldImage':\n [[PyTango.DevString, \"Full path of flatfield image file\"],\n [PyTango.DevVoid, \"\"]],\n\n 'setSplineFile':\n [[PyTango.DevString, \"Full path of spline distortion file\"],\n [PyTango.DevVoid, \"\"]],\n\n 'Start':\n [[PyTango.DevVoid, \"\"],\n [PyTango.DevVoid, \"\"]],\n 'Stop':\n [[PyTango.DevVoid, \"\"],\n [PyTango.DevVoid, \"\"]],\n 'Reset':\n [[PyTango.DevVoid, \"\"],\n [PyTango.DevVoid, \"\"]],\n }\n\n\n # Attribute definitions\n attr_list = {\n 'RunLevel':\n [[PyTango.DevLong,\n PyTango.SCALAR,\n PyTango.READ_WRITE]],\n# 'delete_dark_after_read':\n# [[PyTango.DevBoolean,\n# PyTango.SCALAR,\n# PyTango.READ_WRITE]],\n }\n#------------------------------------------------------------------\n# AzimuthalIntegratorDeviceServerClass Constructor\n#------------------------------------------------------------------\n def __init__(self, name):\n PyTango.DeviceClass.__init__(self, name)\n self.set_type(name)\n\n_control_ref = None\ndef set_control_ref(control_class_ref):\n global _control_ref\n _control_ref = control_class_ref\n\ndef get_tango_specific_class_n_device() :\n return DistortionCorrectionDeviceServerClass, DistortionCorrectionDeviceServer\n", "sub_path": "plugins/Lima/DistortionCorrection.py", "file_name": "DistortionCorrection.py", "file_ext": "py", "file_size_in_byte": 11688, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 23, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.dirname", 
"line_number": 40, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "Lima.Core.Processlib", "line_number": 46, "usage_type": "attribute"}, {"api_name": "Lima.Core", "line_number": 46, "usage_type": "name"}, {"api_name": "Lima.Core.Processlib.SinkTaskBase.__init__", "line_number": 57, "usage_type": "call"}, {"api_name": "Lima.Core.Processlib", "line_number": 57, "usage_type": "attribute"}, {"api_name": "Lima.Core", "line_number": 57, "usage_type": "name"}, {"api_name": "threading.Semaphore", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.ascontiguousarray", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 90, "usage_type": "attribute"}, {"api_name": "fabio.edfimage.edfimage", "line_number": 99, "usage_type": "call"}, {"api_name": "fabio.edfimage", "line_number": 99, "usage_type": "attribute"}, {"api_name": "numpy.ascontiguousarray", "line_number": 112, "usage_type": "call"}, {"api_name": "fabio.open", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 112, "usage_type": "attribute"}, {"api_name": "zlib.crc32", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.ascontiguousarray", "line_number": 130, "usage_type": "call"}, {"api_name": "fabio.open", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 130, "usage_type": "attribute"}, {"api_name": "zlib.crc32", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pyFAI.detectors.FReLoN", "line_number": 152, "usage_type": "call"}, {"api_name": "pyFAI.detectors", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pyFAI._distortion.Distortion", "line_number": 153, "usage_type": "call"}, {"api_name": "pyFAI._distortion", "line_number": 153, "usage_type": "attribute"}, {"api_name": "pyFAI.ocl_azim_lut.OCL_LUT_Integrator", "line_number": 167, "usage_type": "call"}, {"api_name": "pyFAI.ocl_azim_lut", "line_number": 167, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 178, "usage_type": "call"}, {"api_name": "Utils.BasePostProcess", "line_number": 181, "usage_type": "name"}, {"api_name": "Lima.Core.DEB_CLASS", "line_number": 186, "usage_type": "call"}, {"api_name": "Lima.Core", "line_number": 186, "usage_type": "name"}, {"api_name": "Lima.Core.DebModApplication", "line_number": 186, "usage_type": "attribute"}, {"api_name": "Utils.BasePostProcess.__init__", "line_number": 190, "usage_type": "call"}, {"api_name": "Utils.BasePostProcess", "line_number": 190, "usage_type": "name"}, {"api_name": "PyTango.DevState", "line_number": 202, "usage_type": "attribute"}, {"api_name": "PyTango.DevState", "line_number": 208, "usage_type": "attribute"}, {"api_name": "Lima.Core.USER_SINK_TASK", "line_number": 213, "usage_type": "attribute"}, {"api_name": "Lima.Core", "line_number": 213, 
"usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 223, "usage_type": "call"}, {"api_name": "PyTango.Device_4Impl.set_state", "line_number": 225, "usage_type": "call"}, {"api_name": "PyTango.Device_4Impl", "line_number": 225, "usage_type": "attribute"}, {"api_name": "PyTango.DeviceClass", "line_number": 263, "usage_type": "attribute"}, {"api_name": "PyTango.DevString", "line_number": 277, "usage_type": "attribute"}, {"api_name": "PyTango.DevVoid", "line_number": 278, "usage_type": "attribute"}, {"api_name": "PyTango.DevString", "line_number": 281, "usage_type": "attribute"}, {"api_name": "PyTango.DevVoid", "line_number": 282, "usage_type": "attribute"}, {"api_name": "PyTango.DevString", "line_number": 285, "usage_type": "attribute"}, {"api_name": "PyTango.DevVoid", "line_number": 286, "usage_type": "attribute"}, {"api_name": "PyTango.DevVoid", "line_number": 289, "usage_type": "attribute"}, {"api_name": "PyTango.DevVoid", "line_number": 290, "usage_type": "attribute"}, {"api_name": "PyTango.DevVoid", "line_number": 292, "usage_type": "attribute"}, {"api_name": "PyTango.DevVoid", "line_number": 293, "usage_type": "attribute"}, {"api_name": "PyTango.DevVoid", "line_number": 295, "usage_type": "attribute"}, {"api_name": "PyTango.DevVoid", "line_number": 296, "usage_type": "attribute"}, {"api_name": "PyTango.DevLong", "line_number": 303, "usage_type": "attribute"}, {"api_name": "PyTango.SCALAR", "line_number": 304, "usage_type": "attribute"}, {"api_name": "PyTango.READ_WRITE", "line_number": 305, "usage_type": "attribute"}, {"api_name": "PyTango.DeviceClass.__init__", "line_number": 315, "usage_type": "call"}, {"api_name": "PyTango.DeviceClass", "line_number": 315, "usage_type": "attribute"}]} +{"seq_id": "289715679", "text": "import getopt\nimport secrets\nimport sys\n\nimport ckanapi\n\nckan = ckanapi.RemoteCKAN(secrets.ckan_url, apikey=secrets.ckan_api_key)\n\n\ndef get_opts():\n # Get the arguments from the command-line except the filename\n argv = sys.argv[1:]\n try:\n # Define the getopt parameters\n opts, args = getopt.getopt(argv, 'p:n:d:u:f:', ['package_id', 'name','description', 'url', 'file'])\n # Check if the options' length is 4 (can be enhanced)\n if len(opts) != 5:\n print('usage: ckan_upload.py -p -n -d -u -f ')\n else:\n # Iterate the options and get the corresponding values\n for opt, arg in opts:\n print(opt, arg)\n pid = opts[0][1]\n name = opts[1][1]\n desc = opts[2][1]\n url = opts[3][1]\n file = opts[4][1]\n # print(pid)\n return pid, name, desc, url, file\n except getopt.GetoptError:\n # Print something useful\n print('usage: ckan_upload.py -p -n -d -u -f ')\n sys.exit(2)\n\n\ndef upload_file(package_id, name, description, url, file_to_upload):\n ckan.action.resource_create(package_id=package_id,\n name=name, # added\n description=description,\n url=url,\n upload=open(file_to_upload))\n\n\ndef main():\n pid, name, desc, url, file = get_opts()\n upload_file(pid, name, desc, url, file)\n\nmain()\n", "sub_path": "ckanuploader.py", "file_name": "ckanuploader.py", "file_ext": "py", "file_size_in_byte": 1585, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "ckanapi.RemoteCKAN", "line_number": 7, "usage_type": "call"}, {"api_name": "secrets.ckan_url", "line_number": 7, "usage_type": "attribute"}, {"api_name": "secrets.ckan_api_key", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "getopt.getopt", 
"line_number": 15, "usage_type": "call"}, {"api_name": "getopt.GetoptError", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "594022489", "text": "import argparse\nimport os\nimport numpy as np\n\nimport sys\n\nsys.path.insert(0, \"../../test_utils\")\nimport result_parser_utils\n\n\nWARMUP_ROUNDS = 2\n\n\ndef get_durations(lines):\n durations = []\n for line in lines:\n if 'duration = ' in line:\n tmp = line.split('duration = ')[1]\n durations.append(float(tmp))\n return durations\n\n\ndef parse_all_ranks(folder_path, with_rank0=True):\n files = os.listdir(folder_path)\n all_rank_durations = []\n for filename in files:\n if 'rank' in filename and (with_rank0 or 'rank_0' not in filename):\n try:\n with open(os.path.join(folder_path, filename)) as f:\n durations = get_durations(f.readlines())\n if not durations:\n raise ValueError(\"Bad file\")\n all_rank_durations.append(durations)\n except Exception:\n print(\"Bad file\", folder_path, filename)\n return None\n\n try:\n return np.max(all_rank_durations, axis=0)\n except Exception as e:\n print(\"Error: empty directory\", folder_path, e)\n return None\n\n\ndef parse_file(task_name, log_dir, foldername):\n path = os.path.join(log_dir, foldername)\n\n if task_name in ('allreduce', 'allgather'):\n return parse_all_ranks(path)\n elif task_name == 'multicast':\n return parse_all_ranks(path, with_rank0=False)\n elif task_name in ('reduce', 'gather', 'subset_reduce'):\n return result_parser_utils.default_parse_file(task_name, log_dir, foldername)\n else:\n raise ValueError('Unknown task', task_name)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Hoplite (C++) benchmark results parser.')\n parser.add_argument('log_dir', metavar='PATH', nargs='?', type=str, default='log',\n help='The logging directory of Gloo benchmarks')\n parser.add_argument('--verbose', action='store_true')\n args = parser.parse_args()\n df = result_parser_utils.parse(args.log_dir, parse_file)\n if args.verbose:\n print(df)\n df.to_csv('hoplite_results.csv', index=False)\n", "sub_path": "microbenchmarks/hoplite-cpp/parse_result.py", "file_name": "parse_result.py", "file_ext": "py", "file_size_in_byte": 2138, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.path.insert", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "result_parser_utils.default_parse_file", "line_number": 53, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 59, "usage_type": "call"}, {"api_name": "result_parser_utils.parse", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "318253621", "text": "import pygame\nfrom pygame.sprite import Sprite\n\nclass Alien(Sprite):\n\n\tdef __init__(self, game):\n\t\tsuper().__init__()\n\t\tself.screen = game.screen\n\t\tself.settings = game.game_setting\n\t\tself.image = pygame .image.load(\"img/NYA.PNG\")\n\t\tself.rect = self.image.get_rect()\n\n\t\tself.rect.x = 
self.rect.width\n\t\tself.rect.y = self.rect.height\n\n\t\tself.y = float(self.rect.y)\n\n\n\tdef update(self):\n\n\n\t\tself.y -= self.settings.alien_speed * self.settings.alien_army_direction\n\t\tself.rect.y = self.y\n\n\tdef _check_edges(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif (self.rect.bottom >= screen_rect.bottom) or (self.rect.top <= screen_rect.top):\n\t\t\treturn True", "sub_path": "Game/alien.py", "file_name": "alien.py", "file_ext": "py", "file_size_in_byte": 649, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pygame.sprite.Sprite", "line_number": 4, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 10, "usage_type": "attribute"}]} +{"seq_id": "392648280", "text": "# Instructions to run file\n# 1) Open terminal and go to the directory in which this file exists\n# 2) In the terminal, type: set FLASK_APP=hello.py\n# 3) In the terminal, type: flask run\n\nfrom flask import Flask, Response, request, jsonify\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/', methods=['POST', 'GET'])\ndef hello_world():\n # return 'Hello, World!'\n return jsonify({\"data\": \"Hello World\"})\n\n@app.route('/analyze', methods=['POST', 'GET'])\ndef second():\n param = request.args.get('values')\n param = int(param)\n val = param + 20\n\n return jsonify({\"data\": val})\n\n\n\n\n", "sub_path": "hello.py", "file_name": "hello.py", "file_ext": "py", "file_size_in_byte": 618, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "148400364", "text": "from multiprocessing import Queue\nfrom itertools import izip\n\nfrom traits.api import (\n HasTraits, Bool, Int, String, Set, Tuple, Dict, Instance, Range, Event,\n List, DelegatesTo, on_trait_change\n)\n\nfrom gambolputty.core import line_to_melody, choose_transition\nfrom gambolputty.model import (\n Phrase, Point, PhraseNode, Voice, Track, Transition\n)\nfrom gambolputty.exceptions import (\n GambolputtyException, NoPhraseSelected, NoVoiceSelected\n)\n\n\nclass PlayerBackend(HasTraits):\n \"\"\"\n Abstract base class for player backends.\n \"\"\"\n\n device = Int(-1)\n \"\"\"The number of the MIDI output device.\"\"\"\n\n devices = List(Int)\n \"\"\"Available MIDI output devices.\"\"\"\n\n def time(self):\n \"\"\"\n @return: the current MIDI time\n \"\"\"\n raise NotImplementedError\n\n def refresh_devices(self):\n device = self.device\n self.device = -1\n self.devices = list(self.init_devices())\n if device in self.devices:\n self.device = device\n\n def init_devices(self):\n \"\"\"\n @return: a sequence of output device numbers\n \"\"\"\n raise NotImplementedError\n\n def get_device_title(self, device_id):\n \"\"\"\n @return: the title of the given device number.\n \"\"\"\n raise NotImplementedError\n\n def get_device(self, device_title):\n \"\"\"\n @return: 
the device number for the given title.\n \"\"\"\n raise NotImplementedError\n\n def program_change(self, channel, program, time):\n \"\"\"\n Sends a MIDI ProgramChange event to the MIDI output device.\n \"\"\"\n raise NotImplementedError\n\n def noteon(self, channel, note, volume, time):\n \"\"\"\n Sends a MIDI NoteOn event to the MIDI output device.\n \"\"\"\n raise NotImplementedError\n\n def noteoff(self, channel, note, time):\n \"\"\"\n Sends a MIDI NoteOff event to the MIDI output device.\n \"\"\"\n raise NotImplementedError\n\n def all_notes_off(self, channel):\n \"\"\"\n Sends a MIDI NoteOff event for all notes to the MIDI output device.\n \"\"\"\n raise NotImplementedError\n\n\nclass NoDeviceSelected(GambolputtyException):\n title = \"No output device selected\"\n\n def __init__(self):\n super(NoDeviceSelected, self).__init__(\n \"Please select a MIDI output device\"\n )\n\n\nclass PyGamePlayerBackend(PlayerBackend):\n\n def __init__(self):\n from pygame import midi\n self.midi = midi\n self.midi.init()\n self.output = None\n super(PyGamePlayerBackend, self).__init__()\n\n def time(self):\n return self.midi.time()\n\n def init_devices(self):\n #if self.output is not None:\n # self.output = None\n self.midi.quit()\n self.midi.init()\n for i in range(self.midi.get_count()):\n interf, name, inp, outp, opened = self.midi.get_device_info(i)\n if outp != 1:\n continue\n yield i\n\n def get_device_title(self, device_id):\n if device_id < 0:\n return ''\n interf, name, inp, outp, opened = self.midi.get_device_info(device_id)\n return \"%d: %s\" % (device_id, name)\n\n def get_device(self, device_title):\n if not device_title:\n return -1\n return int(device_title.split(':')[0])\n\n @on_trait_change('device')\n def _device_changed(self):\n # Calling Output.close() here results in 'Bad Pointer' error,\n # for it is called automatically when dereferencing.\n # (see http://comments.gmane.org/gmane.comp.python.pygame/22027)\n if self.device < 0:\n self.output = None\n return\n self.output = self.midi.Output(self.device)\n\n def program_change(self, channel, program, time):\n if self.output is None:\n raise NoDeviceSelected()\n self.output.write([[[0xc0+channel, program, 0], time]])\n\n def noteon(self, channel, note, volume, time):\n if self.output is None:\n raise NoDeviceSelected()\n self.output.write([[[0x90+channel, note, volume], time]])\n\n def noteoff(self, channel, note, time):\n if self.output is None:\n raise NoDeviceSelected()\n self.output.write([[[0x80+channel, note, 0], time]])\n\n def all_notes_off(self, channel):\n if self.output is None:\n raise NoDeviceSelected()\n self.output.write([[[0xb0+channel, 123, 0], self.time()]])\n\n\nclass PortMidiPlayerBackend(PlayerBackend):\n\n def __init__(self):\n import pypm\n self.pypm = pypm\n pypm.Initialize()\n self.output = None\n super(PortMidiPlayerBackend, self).__init__()\n\n def time(self):\n return self.pypm.Time()\n\n def init_devices(self):\n self.device = -1\n if self.output is not None:\n self.output = None\n self.pypm.Terminate()\n self.pypm.Initialize()\n for i in range(self.pypm.CountDevices()):\n interf, name, inp, outp, opened = self.pypm.GetDeviceInfo(i)\n if outp != 1:\n continue\n yield i\n\n def get_device_title(self, device_id):\n if device_id < 0:\n return ''\n interf, name, inp, outp, opened = self.pypm.GetDeviceInfo(device_id)\n return \"%d: %s\" % (device_id, name)\n\n def get_device(self, device_title):\n if not device_title:\n return -1\n return int(device_title.split(':')[0])\n\n @on_trait_change('device')\n def 
_device_changed(self):\n self.output = self.pypm.Output(self.device, 0)\n\n def program_change(self, channel, program, time):\n if self.output is None:\n raise NoDeviceSelected()\n self.output.Write([[[0xc0+channel, program, 0], time]])\n\n def noteon(self, channel, note, volume, time):\n if self.output is None:\n raise NoDeviceSelected()\n self.output.Write([[[0x90+channel, note, volume], time]])\n\n def noteoff(self, channel, note, time):\n if self.output is None:\n raise NoDeviceSelected()\n self.output.Write([[[0x80+channel, note, 0], time]])\n\n def all_notes_off(self, channel):\n if self.output is None:\n raise NoDeviceSelected()\n self.output.Write([[[0xb0+channel, 123, 0], self.pypm.Time()]])\n\n\nclass Player(HasTraits):\n \"\"\"\n Abstract base class for players.\n \"\"\"\n\n playing = Bool(False)\n \"\"\"True if the player is currently playing back.\"\"\"\n\n def __init__(self, backend, **kwargs):\n \"\"\"\n Initializes the player.\n\n @param backend: the L{PlayerBackend}\n \"\"\"\n self.play_iter = None\n self.backend = backend\n super(Player, self).__init__(**kwargs)\n\n def tick(self):\n \"\"\"Ticks the player's iterator.\"\"\"\n if self.play_iter is None:\n return False\n try:\n self.play_iter.next()\n except StopIteration:\n self.play_iter = None\n self.playing = False\n return False\n except:\n self.play_iter = None\n self.playing = False\n raise\n else:\n return True\n\n def play(self):\n \"\"\"\n Starts the player by initializing L{Player.play_iter} with the\n generator function L{Player.iter()}.\n \"\"\"\n self.play_iter = self.iter()\n self.playing = True\n\n def stop(self):\n \"\"\"\n Stops the player.\n \"\"\"\n self.playing = False\n\n def iter(self):\n raise NotImplementedError\n\n\nclass PhrasePlayer(Player):\n \"\"\"\n Player for playing back a L{Phrase}.\n \"\"\"\n\n phrase = Instance(Phrase)\n \"\"\"The phrase to be played back.\"\"\"\n\n voice = Instance(Voice)\n \"\"\"The voice to use.\"\"\"\n\n tempo = Int(120)\n \"\"\"The tempo in beats per minute.\"\"\"\n\n transpose = Int(0)\n \"\"\"The number of half steps to add to every note.\"\"\"\n\n loop = Bool(False)\n \"\"\"Whether to loop playback.\"\"\"\n\n current_point = Instance(Point)\n \"\"\"The currently played back L{Point}.\"\"\"\n\n time = Int(-1)\n \"\"\"The current MIDI time.\"\"\"\n\n def stop(self):\n super(PhrasePlayer, self).stop()\n self.current_point = None\n\n def tick(self):\n result = super(PhrasePlayer, self).tick()\n if not result:\n self.time = -1\n return result\n\n def iter(self):\n if self.phrase is None:\n raise NoPhraseSelected\n if self.voice is None:\n raise NoVoiceSelected\n backend = self.backend\n try:\n while True:\n phrase = self.phrase\n if phrase is None or len(phrase.points) <= 1:\n yield None\n continue\n melody = line_to_melody(\n (p.pos for p in phrase.points),\n phrase.steps, phrase.transpose + self.transpose,\n )\n measure_len = (60.0/(float(self.tempo)/4.0))*1000\n point = None\n if self.time < 0:\n self.time = backend.time()\n voice = self.voice\n for point, (note, length) in izip(phrase.points[1:], melody):\n if not voice.mute:\n if point.type != 'Rest':\n backend.noteon(\n voice.channel,\n note,\n voice.volume,\n self.time,\n )\n self.current_point = point\n else:\n self.current_point = None\n note_len = int(measure_len*length)\n note_time = self.time + note_len\n try:\n while backend.time() < note_time:\n yield None\n self.time = note_time\n finally:\n backend.noteoff(\n voice.channel, note, self.time\n )\n if not self.playing:\n break\n if not self.loop or not 
self.playing:\n break\n finally:\n self.current_point = None\n self.playing = False\n\n @on_trait_change('voice.instrument, voice.channel')\n def _voice_changed(self):\n self.backend.program_change(\n self.voice.channel,\n self.voice.instrument,\n self.backend.time(),\n )\n\n\nclass VoicePlayer(Player):\n \"\"\"\n Player for playing back a L{Voice}.\n \"\"\"\n\n track = Instance(Track)\n \"\"\"The L{track} the voice belongs to.\"\"\"\n\n voice = Instance(Voice)\n \"\"\"The played back L{Voice}.\"\"\"\n\n _current_node = Instance(PhraseNode)\n \"\"\"\n The L{PhraseNode} currently played back. This is only to be used\n internally in the player module and you should not bind any trait\n notification handlers to this, for this will hurt performance in that\n transitions between nodes will sound sloppy.\n Use L{VoicePlayer.current_node} instead.\n \"\"\"\n\n current_node = Instance(PhraseNode)\n \"\"\"\n The L{PhraseNode} to be displayed by viewers etc.\n This is set to L{VoicePlayer._current_node} just after the first note of\n the new node is beginning to play, to give notification handlers some\n time to do their (usually slow) GUI updates.\n \"\"\"\n\n phrase_player = Instance(PhrasePlayer)\n \"\"\"\n The L{PhrasePlayer} used to play back phrases.\n \"\"\"\n\n time = DelegatesTo('phrase_player')\n\n def _phrase_player_default(self):\n return PhrasePlayer(self.backend, voice=self.voice)\n\n def stop(self):\n super(VoicePlayer, self).stop()\n self.phrase_player.stop()\n\n def iter(self):\n phrase_player = self.phrase_player\n # Start with the voice's start node. This will also set the phrase\n # player's phrase to the node's phrase.\n self._current_node = self.voice.start_node\n phrase_player.play()\n while True:\n # Wait until there is a phrase to be played back\n if phrase_player.phrase is None:\n yield None\n # Exit the loop if the voice player has been stopped\n if not self.playing:\n break\n continue\n try:\n yield phrase_player.play_iter.next()\n except StopIteration:\n # The phrase player has finished its phrase or was stopped.\n\n # Exit the loop if the voice player has been stopped.\n if not self.playing:\n break\n\n # Choose the next phrase transition.\n transition = choose_transition(\n *self.track.get_voice_transition_weights(\n self._current_node, 'Phrase', self.voice\n )\n )\n if transition is None:\n # If there is no transition for the current voice,\n # try to get a transition for all voices.\n transition = choose_transition(\n *self.track.get_voice_transition_weights(\n self._current_node, 'Phrase', None\n )\n )\n if transition is None:\n next_node = None\n else:\n # Set the node to be played back next. 
This will also set\n # the phrase player's phrase to the new node's phrase.\n next_node = transition.target_node\n self._current_node = next_node\n phrase_player.play()\n if next_node is None or next_node.phrase is None:\n # If there is no phrase to be played, update the\n # externally visible node attribute immediately for we\n # don't have a chance to do so later on (the else-branch\n # below will not be reached in this case).\n self.current_node = next_node\n else:\n # Check if the node has been changed, so we can now tell\n # observers to do their stuff while the first note of the\n # new node is already being played.\n if self.current_node is not self._current_node:\n self.current_node = self._current_node\n phrase_player.current_point = None\n self._current_node = None\n self.current_node = None\n self.time = -1\n\n @on_trait_change('_current_node.transpose')\n def _transpost_changed(self):\n if self._current_node is not None:\n self.phrase_player.transpose = self._current_node.transpose\n\n @on_trait_change('_current_node.phrase')\n def _phrase_changed(self):\n if self._current_node is None:\n self.phrase_player.phrase = None\n else:\n self.phrase_player.phrase = self._current_node.phrase\n\n @on_trait_change('voice')\n def _voice_changed(self):\n self.phrase_player.voice = self.voice\n\n @on_trait_change('playing')\n def _playing_changed(self):\n self.phrase_player.playing = self.playing\n\n @on_trait_change('track.tempo')\n def _tempo_changed(self):\n self.phrase_player.tempo = self.track.tempo\n\n\nclass TrackPlayer(Player):\n \"\"\"\n Player to play back a L{Track}.\n \"\"\"\n\n track = Instance(Track)\n \"\"\"The played back L{Track}.\"\"\"\n\n voice_players = Dict(Voice, VoicePlayer)\n \"\"\"The players for playing back the L{Voice}s of the L{Track}.\"\"\"\n\n def stop(self):\n super(TrackPlayer, self).stop()\n for voice_player in self.voice_players.itervalues():\n voice_player.stop()\n\n def iter(self):\n self.voice_players = {}\n time = self.backend.time()\n for voice in self.track.voices:\n voice_player = VoicePlayer(\n self.backend, voice=voice, track=self.track, time=time,\n )\n voice_player.play()\n self.voice_players[voice] = voice_player\n stopped_voices = set()\n while self.voice_players:\n for voice, voice_player in self.voice_players.iteritems():\n try:\n yield voice_player.play_iter.next()\n except StopIteration:\n stopped_voices.add(voice)\n if stopped_voices:\n for voice in stopped_voices:\n del self.voice_players[voice]\n stopped_voices.clear()\n\n @on_trait_change('playing')\n def _playing_changed(self):\n for voice_player in self.voice_players.itervalues():\n voice_player.playing = self.playing\n\n @on_trait_change('voice_players:_current_node')\n def _on_node_change(self, voice_player, name, old, new):\n if old is None or new is None:\n # only trigger other voices when moving to a different node\n return\n node = voice_player._current_node\n voice_players = self.voice_players\n for voice, weight_sum, transition_weights in (\n self.track.get_transition_weights(node, 'Trigger')\n ):\n if voice is voice_player.voice:\n # Do not let voices trigger themselves.\n continue\n transition = choose_transition(weight_sum, transition_weights)\n target_voice_player = voice_players[voice]\n if node is transition.target_node:\n target_voice_player._current_node = None\n target_voice_player._current_node = transition.target_node\n target_voice_player.time = voice_player.time\n target_voice_player.phrase_player.play()\n\n", "sub_path": "src/gambolputty/player.py", "file_name": 
"player.py", "file_ext": "py", "file_size_in_byte": 17803, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "traits.api.HasTraits", "line_number": 18, "usage_type": "name"}, {"api_name": "traits.api.Int", "line_number": 23, "usage_type": "call"}, {"api_name": "traits.api.List", "line_number": 26, "usage_type": "call"}, {"api_name": "traits.api.Int", "line_number": 26, "usage_type": "argument"}, {"api_name": "gambolputty.exceptions.GambolputtyException", "line_number": 85, "usage_type": "name"}, {"api_name": "pygame.midi", "line_number": 98, "usage_type": "name"}, {"api_name": "traits.api.on_trait_change", "line_number": 128, "usage_type": "call"}, {"api_name": "pypm.Initialize", "line_number": 164, "usage_type": "call"}, {"api_name": "traits.api.on_trait_change", "line_number": 194, "usage_type": "call"}, {"api_name": "traits.api.HasTraits", "line_number": 219, "usage_type": "name"}, {"api_name": "traits.api.Bool", "line_number": 224, "usage_type": "call"}, {"api_name": "traits.api.Instance", "line_number": 277, "usage_type": "call"}, {"api_name": "gambolputty.model.Phrase", "line_number": 277, "usage_type": "argument"}, {"api_name": "traits.api.Instance", "line_number": 280, "usage_type": "call"}, {"api_name": "gambolputty.model.Voice", "line_number": 280, "usage_type": "argument"}, {"api_name": "traits.api.Int", "line_number": 283, "usage_type": "call"}, {"api_name": "traits.api.Int", "line_number": 286, "usage_type": "call"}, {"api_name": "traits.api.Bool", "line_number": 289, "usage_type": "call"}, {"api_name": "traits.api.Instance", "line_number": 292, "usage_type": "call"}, {"api_name": "gambolputty.model.Point", "line_number": 292, "usage_type": "argument"}, {"api_name": "traits.api.Int", "line_number": 295, "usage_type": "call"}, {"api_name": "gambolputty.exceptions.NoPhraseSelected", "line_number": 310, "usage_type": "name"}, {"api_name": "gambolputty.exceptions.NoVoiceSelected", "line_number": 312, "usage_type": "name"}, {"api_name": "gambolputty.core.line_to_melody", "line_number": 320, "usage_type": "call"}, {"api_name": "itertools.izip", "line_number": 329, "usage_type": "call"}, {"api_name": "traits.api.on_trait_change", "line_number": 359, "usage_type": "call"}, {"api_name": "traits.api.Instance", "line_number": 373, "usage_type": "call"}, {"api_name": "gambolputty.model.Track", "line_number": 373, "usage_type": "argument"}, {"api_name": "traits.api.Instance", "line_number": 376, "usage_type": "call"}, {"api_name": "gambolputty.model.Voice", "line_number": 376, "usage_type": "argument"}, {"api_name": "traits.api.Instance", "line_number": 379, "usage_type": "call"}, {"api_name": "gambolputty.model.PhraseNode", "line_number": 379, "usage_type": "argument"}, {"api_name": "traits.api.Instance", "line_number": 388, "usage_type": "call"}, {"api_name": "gambolputty.model.PhraseNode", "line_number": 388, "usage_type": "argument"}, {"api_name": "traits.api.Instance", "line_number": 396, "usage_type": "call"}, {"api_name": "traits.api.DelegatesTo", "line_number": 401, "usage_type": "call"}, {"api_name": "gambolputty.core.choose_transition", "line_number": 434, "usage_type": "call"}, {"api_name": "gambolputty.core.choose_transition", "line_number": 442, "usage_type": "call"}, {"api_name": "traits.api.on_trait_change", "line_number": 472, "usage_type": "call"}, {"api_name": "traits.api.on_trait_change", "line_number": 477, "usage_type": "call"}, {"api_name": "traits.api.on_trait_change", "line_number": 484, 
"usage_type": "call"}, {"api_name": "traits.api.on_trait_change", "line_number": 488, "usage_type": "call"}, {"api_name": "traits.api.on_trait_change", "line_number": 492, "usage_type": "call"}, {"api_name": "traits.api.Instance", "line_number": 502, "usage_type": "call"}, {"api_name": "gambolputty.model.Track", "line_number": 502, "usage_type": "argument"}, {"api_name": "traits.api.Dict", "line_number": 505, "usage_type": "call"}, {"api_name": "gambolputty.model.Voice", "line_number": 505, "usage_type": "argument"}, {"api_name": "traits.api.on_trait_change", "line_number": 534, "usage_type": "call"}, {"api_name": "gambolputty.core.choose_transition", "line_number": 552, "usage_type": "call"}, {"api_name": "traits.api.on_trait_change", "line_number": 539, "usage_type": "call"}]} +{"seq_id": "379420393", "text": "import boto3\nfrom base64 import b64decode\nfrom urllib.parse import parse_qs\n\n# Replace your email address here\nsend_to = 'your_email_address_here'\n\ndef lambda_handler(event, context):\n # We receive our data through POST requests. API gateway\n # sends the POST data as a Base64 encoded string in\n # event['body'], so we must decode it.\n data = parse_qs(b64decode(event['body']).decode())\n\n subject = 'You got a message from %s' % data['email'][0]\n text = '\\n'.join([\n 'Name: %s' % data['name'][0],\n 'Email: %s' % data['email'][0],\n 'Message %s' % data['message'][0]\n ])\n\n # Send an email through SES with the SendEmail API\n client = boto3.client('ses', region_name='us-east-1')\n client.send_email(\n Source=send_to,\n Destination={'ToAddresses': [send_to]},\n Message={\n 'Subject': {'Data': subject},\n 'Body': {'Text': {'Data': text}}\n },\n ReplyToAddresses=[data['email'][0]]\n )\n\n # This is the response that'll be sent out through the\n # API gateway to the browser.\n return {\n 'statusCode': 200,\n 'headers': {\n 'Access-Control-Allow-Origin': '*'\n },\n 'body': '\"Success\"' # jquery expects a JSON response\n }\n", "sub_path": "lamda-script.py", "file_name": "lamda-script.py", "file_ext": "py", "file_size_in_byte": 1266, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "urllib.parse.parse_qs", "line_number": 12, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 12, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "376455939", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/1/27 下午2:22\n# @Author : wudizhangzhi\n\"\"\"Hupu.\n Proudly presented by Hupu JRs.\n\nUsage:\n hupu [-m MODE] [-a APIVERSION] [-d DATATYPE] [-u USERNAME] [-p PASSWORD]\n hupu -h | --help\n hupu -v | --version\n\nTips:\n Please hit Ctrl-C on the keyborad when you want to interrupt the game live.\n\nOptions:\n -u USERNAME --username=USERNAME Input username.\n -p PASSWORD --password=PASSWORD Input password.\n -a APIVERSION --apiversion=APIVERSION Api version.[default: 7.1.15]\n -m MODE --mode=MODE Run mode.Available: live news teamranks...[default: live]\n -d DATATYPE --datatype=DATATYPE Player data type.Available: regular, injury, daily[default:regular]\n -h --help Show this help message and exit.\n -v --version Show version.\n\"\"\"\nfrom __future__ import print_function\n# python2 curses addstr乱码问题\nimport locale\n\nlocale.setlocale(locale.LC_ALL, '')\nimport sys\nimport curses\nimport colored\nimport docopt\nimport traceback\n\nfrom hupu.api.live import LiveMinxin\nfrom hupu.api.login import LoginMixin\nfrom 
hupu.api.news import NewsMixin\nfrom hupu.api.datas import DatasMixin\nfrom hupu.utils import colored_text, SYSTEM\nfrom hupu.api import logger\nfrom hupu.menus.HupuMenu import HupuMenu\nfrom hupu.version import version\n\n# if SYSTEM.lower() == 'windows':\n# # TODO debug window\n# reload(sys)\n# sys.setdefaultencoding('utf-8')\n# sys.stdout.encoding = 'cp65001'\n\nlog = logger.getLogger(__name__)\n\nMODE_LIST = ['live', 'news', 'teamranks', 'playerdata']\n\n\nclass HupuApp(LiveMinxin, NewsMixin, LoginMixin, DatasMixin):\n def run(self):\n # 判断参数, 执行哪一种场景\n # 默认进入比赛文字直播模式\n mode = self._kwargs.get('mode', 'live')\n mode = mode.lower()\n assert mode in MODE_LIST, AttributeError('Expected mode are {}, got {}.'.format(', '.join(MODE_LIST), mode))\n try:\n hupumenu = HupuMenu(self)\n items = []\n if mode == 'live': # 文字直播模式\n items = self.getGames()\n\n elif mode == 'news': # 新闻模式\n items = self.getNews()\n hupumenu.body_title = '新闻:'\n\n elif mode == 'teamranks': # 球队数据模式\n items = self.getDatas()\n hupumenu.body_title = '球队数据:'\n\n elif mode == 'playerdata': # 球队数据模式\n datatype = self._kwargs.get('datatype', '').lower()\n if not datatype or datatype not in ['regular', 'injury', 'daily']:\n datatype = 'regular'\n items = self.getPlayerDataInGenernal(datatype)\n hupumenu.body_title = '球员数据:'\n\n if not items:\n raise Exception('没有数据!')\n hupumenu.set_items(items)\n hupumenu.mode = mode\n\n hupumenu.draw()\n hupumenu.listen()\n\n except curses.error as e:\n curses.endwin()\n log.error(e)\n print(colored_text('窗口太小, 请调整窗口大小!', colored.fg(\"red\") + colored.attr(\"bold\")))\n except Exception as e:\n log.error(traceback.format_exc())\n if not curses.isendwin():\n curses.endwin()\n print(e)\n\n\ndef start():\n arguments = docopt.docopt(__doc__, version='Hupu {}'.format(version))\n # 处理参数\n arguments = {k.replace('--', ''): v for k, v in arguments.items()}\n hupulive = HupuApp(**arguments)\n hupulive.run()\n", "sub_path": "hupu/hupuapp.py", "file_name": "hupuapp.py", "file_ext": "py", "file_size_in_byte": 3671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "locale.setlocale", "line_number": 29, "usage_type": "call"}, {"api_name": "locale.LC_ALL", "line_number": 29, "usage_type": "attribute"}, {"api_name": "hupu.api.logger.getLogger", "line_number": 51, "usage_type": "call"}, {"api_name": "hupu.api.logger", "line_number": 51, "usage_type": "name"}, {"api_name": "hupu.api.live.LiveMinxin", "line_number": 56, "usage_type": "name"}, {"api_name": "hupu.api.news.NewsMixin", "line_number": 56, "usage_type": "name"}, {"api_name": "hupu.api.login.LoginMixin", "line_number": 56, "usage_type": "name"}, {"api_name": "hupu.api.datas.DatasMixin", "line_number": 56, "usage_type": "name"}, {"api_name": "hupu.menus.HupuMenu.HupuMenu", "line_number": 64, "usage_type": "call"}, {"api_name": "curses.error", "line_number": 92, "usage_type": "attribute"}, {"api_name": "curses.endwin", "line_number": 93, "usage_type": "call"}, {"api_name": "hupu.utils.colored_text", "line_number": 95, "usage_type": "call"}, {"api_name": "colored.fg", "line_number": 95, "usage_type": "call"}, {"api_name": "colored.attr", "line_number": 95, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 97, "usage_type": "call"}, {"api_name": "curses.isendwin", "line_number": 98, "usage_type": "call"}, {"api_name": "curses.endwin", "line_number": 99, "usage_type": "call"}, {"api_name": "docopt.docopt", "line_number": 104, "usage_type": "call"}, 
{"api_name": "hupu.version.version", "line_number": 104, "usage_type": "argument"}]} +{"seq_id": "225116083", "text": "\"\"\" Finviz View \"\"\"\n__docformat__ = \"numpy\"\n\n\nimport io\nimport logging\n\nfrom PIL import Image\n\nfrom openbb_terminal import OpenBBFigure\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.stocks.technical_analysis import finviz_model\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef view(symbol: str, external_axes: bool = False):\n \"\"\"View finviz image for ticker\n\n Parameters\n ----------\n symbol: str\n Stock ticker symbol\n external_axes: bool, optional\n Whether to return the figure object or not, by default False\n \"\"\"\n\n image_data = finviz_model.get_finviz_image(symbol)\n dataBytesIO = io.BytesIO(image_data)\n im = Image.open(dataBytesIO)\n fig = OpenBBFigure()\n fig.add_layout_image(\n dict(\n source=im,\n xref=\"x\",\n yref=\"y\",\n x=0,\n y=1,\n sizex=im.width,\n sizey=im.height,\n sizing=\"stretch\",\n )\n )\n fig.update_xaxes(visible=False, range=[0, im.width])\n fig.update_yaxes(visible=False, range=[im.height, 0], scaleanchor=\"y\")\n fig.update_layout(height=im.height, width=im.width)\n\n return fig.show(external=external_axes)\n", "sub_path": "openbb_terminal/stocks/technical_analysis/finviz_view.py", "file_name": "finviz_view.py", "file_ext": "py", "file_size_in_byte": 1229, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "openbb_terminal.stocks.technical_analysis.finviz_model.get_finviz_image", "line_number": 29, "usage_type": "call"}, {"api_name": "openbb_terminal.stocks.technical_analysis.finviz_model", "line_number": 29, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 30, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 31, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 31, "usage_type": "name"}, {"api_name": "openbb_terminal.OpenBBFigure", "line_number": 32, "usage_type": "call"}, {"api_name": "openbb_terminal.decorators.log_start_end", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "511238699", "text": "import paddle.fluid as fluid\r\nimport numpy as np\r\nfrom paddle.fluid.layer_helper import LayerHelper\r\nfrom paddle.fluid.dygraph import Conv2D, Pool2D, BatchNorm, Linear, InstanceNorm, PRelu, SpectralNorm\r\nfrom paddle.fluid.dygraph import Sequential\r\n\r\n\r\nclass ResnetGenerator(fluid.dygraph.Layer):\r\n def __init__(self, input_nc, output_nc, ngf=64, n_blocks=6, img_size=256, light=False):\r\n assert(n_blocks >= 0)\r\n super(ResnetGenerator, self).__init__()\r\n self.input_nc = input_nc\r\n self.output_nc = output_nc\r\n self.ngf = ngf\r\n self.n_blocks = n_blocks\r\n self.img_size = img_size\r\n self.light = light\r\n\r\n DownBlock = []\r\n # 先通过一个卷积核尺寸为7的卷积层,图片大小不变,通道数变为64\r\n DownBlock += [ReflectionPad2d(3),\r\n Conv2D(input_nc, ngf, filter_size=7, stride=1, padding=0, bias_attr=False),\r\n InstanceNorm(ngf),\r\n PRelu(mode=\"all\")]\r\n\r\n # Down-Sampling --> 下采样模块\r\n n_downsampling = 2\r\n # 两层下采样,img_size缩小4倍(64),通道数扩大4倍(256)\r\n for i in range(n_downsampling):\r\n mult = 2**i\r\n DownBlock += [ReflectionPad2d(1),\r\n Conv2D(ngf * mult, ngf * mult * 2, filter_size=3, stride=2, padding=0, bias_attr=False),\r\n InstanceNorm(ngf * mult * 2),\r\n PRelu(mode=\"all\")]\r\n\r\n # Down-Sampling Bottleneck --> 编码器中的残差模块\r\n mult = 2**n_downsampling\r\n # 
6个残差块,尺寸和通道数都不变\r\n for i in range(n_blocks):\r\n DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]\r\n\r\n # Class Activation Map --> 产生类别激活图\r\n # 接着global average pooling后的全连接层\r\n self.gap_fc = Linear(ngf * mult, 1, bias_attr=False)\r\n # 接着global max pooling后的全连接层\r\n self.gmp_fc = Linear(ngf * mult, 1, bias_attr=False)\r\n #下面1x1卷积和激活函数,是为了得到两个pooling合并后的特征图\r\n self.conv1x1 = Conv2D(ngf * mult * 2, ngf * mult, filter_size=1, stride=1, bias_attr=True, act='relu')\r\n # self.relu = nn.ReLU(True)\r\n\r\n # Gamma, Beta block --> 生成自适应 L-B Normalization(AdaILN)中的Gamma, Beta\r\n # 确定轻量级,FC使用的是两个256 --> 256的全连接层\r\n if self.light:\r\n FC = [Linear(ngf * mult, ngf * mult, bias_attr=False, act='relu'),\r\n # nn.ReLU(True),\r\n Linear(ngf * mult, ngf * mult, bias_attr=False, act='relu'),\r\n # nn.ReLU(True)\r\n ]\r\n else:\r\n # 不是轻量级,则下面的1024x1024 --> 256的全连接层和一个256 --> 256的全连接层\r\n FC = [Linear(img_size // mult * img_size // mult * ngf * mult, ngf * mult, bias_attr=False, act='relu'),\r\n # nn.ReLU(True),\r\n Linear(ngf * mult, ngf * mult, bias_attr=False, act='relu'),\r\n # nn.ReLU(True)\r\n ]\r\n # AdaILN中的Gamma, Beta\r\n self.gamma = Linear(ngf * mult, ngf * mult, bias_attr=False)\r\n self.beta = Linear(ngf * mult, ngf * mult, bias_attr=False)\r\n\r\n # Up-Sampling Bottleneck --> 解码器中的自适应残差模块\r\n for i in range(n_blocks):\r\n setattr(self, 'UpBlock1_' + str(i+1), ResnetAdaILNBlock(ngf * mult, use_bias=False))\r\n\r\n # Up-Sampling --> 解码器中的上采样模块\r\n UpBlock2 = []\r\n # 上采样与编码器的下采样对应\r\n for i in range(n_downsampling):\r\n mult = 2**(n_downsampling - i)\r\n UpBlock2 += [Upsample(),\r\n ReflectionPad2d(1),\r\n Conv2D(ngf * mult, int(ngf * mult / 2), filter_size=3, stride=1, padding=0, bias_attr=False, act='relu'),\r\n ILN(int(ngf * mult / 2)), # 注:只有自适应残差块使用AdaILN\r\n # nn.ReLU(True)\r\n ]\r\n # 最后一层卷积层,与最开始的卷积层对应\r\n UpBlock2 += [ReflectionPad2d(3),\r\n Conv2D(ngf, output_nc, filter_size=7, stride=1, padding=0, bias_attr=False, act='tanh'),\r\n # nn.Tanh()\r\n ]\r\n\r\n self.DownBlock = Sequential(*DownBlock) # 编码器整个模块\r\n self.FC = Sequential(*FC) # 生成gamma,beta的全连接层模块\r\n self.UpBlock2 = Sequential(*UpBlock2) # 只包含上采样后的模块,不包含残差块\r\n\r\n def forward(self, input):\r\n x = self.DownBlock(input)\r\n # 得到编码器的输出,对应途中encoder feature map\r\n # torch.Size([1, 256, 64, 64])\r\n # gap torch.Size([1, 256, 1, 1])\r\n\r\n gap = Pool2D(pool_size=x.shape[-1],pool_stride=x.shape[-1],pool_type='avg')(x) #全局平均池化\r\n gap = fluid.layers.reshape(gap, shape=[x.shape[0], -1]) #torch.Size([1, 1])\r\n gap_logit = self.gap_fc(gap) #gap的预测\r\n gap_weight = list(self.gap_fc.parameters())[0] #self.gap_fc的权重参数 torch.Size([1, 256])\r\n gap_weight = fluid.layers.unsqueeze(input=gap_weight, axes=[0])\r\n gap_weight = fluid.layers.unsqueeze(input=gap_weight, axes=[3])\r\n gap = x * gap_weight #得到全局平均池化加持权重的特征图 torch.Size([1, 256, 64, 64])\r\n\r\n gmp = Pool2D(pool_size=x.shape[-1],pool_stride=x.shape[-1],pool_type='max')(x)\r\n gmp = fluid.layers.reshape(gmp, shape=[x.shape[0], -1])\r\n gmp_logit = self.gmp_fc(gmp)\r\n gmp_weight = list(self.gmp_fc.parameters())[0]\r\n gmp_weight = fluid.layers.unsqueeze(input=gmp_weight, axes=[0])\r\n gmp_weight = fluid.layers.unsqueeze(input=gmp_weight, axes=[3]) \r\n gmp = x * gmp_weight #torch.Size([1, 256, 64, 64])\r\n\r\n cam_logit = fluid.layers.concat([gap_logit, gmp_logit], 1) #结合gap和gmp的cam_logit预测\r\n x = fluid.layers.concat([gap, gmp], 1) #torch.Size([1, 512, 64, 64]) \r\n x = self.conv1x1(x) #接入一个卷积层,通道数512转换为256 torch.Size([1, 256, 64, 64])\r\n #x = 
self.relu(self.conv1x1(x))\r\n #torch.Size([1, 256, 64, 64])\r\n\r\n # heatmap = torch.sum(x, dim=1, keepdim=True)\r\n heatmap = fluid.layers.reduce_sum(x, dim=1, keep_dim=True) #得到注意力热力图\r\n #heatmap torch.Size([1, 1, 64, 64])\r\n\r\n if self.light:\r\n #轻量级则先经过一个gap\r\n x_ = fluid.layers.adaptive_pool2d(x, 1,pool_type='avg')\r\n x_ = fluid.layers.reshape(x_, shape=[x.shape[0], -1])\r\n x_ = self.FC(x_)\r\n else:\r\n x_=fluid.layers.reshape(x, shape=[x.shape[0], -1])\r\n x_ = self.FC(x_)\r\n gamma, beta = self.gamma(x_), self.beta(x_) #得到自适应gamma和beta\r\n # gamma torch.Size([1, 256]) beta torch.Size([1, 256])\r\n\r\n for i in range(self.n_blocks):\r\n # 将自适应gamma和beta送入到AdaILN\r\n x = getattr(self, 'UpBlock1_' + str(i+1))(x, gamma, beta)\r\n out = self.UpBlock2(x) #通过上采样后的模块,得到生成结果\r\n #out torch.Size([1, 3, 256, 256]) cam_logit torch.Size([1, 2]) heatmap torch.Size([1, 1, 64, 64])\r\n\r\n return out, cam_logit, heatmap #模型输出为生成结果,cam预测以及热力图\r\n\r\n\r\nclass ResnetBlock(fluid.dygraph.Layer):\r\n def __init__(self, dim, use_bias):\r\n super(ResnetBlock, self).__init__()\r\n conv_block = []\r\n conv_block += [ReflectionPad2d(1),\r\n Conv2D(dim, dim, filter_size=3, stride=1, padding=0, bias_attr=use_bias),\r\n InstanceNorm(dim),\r\n PRelu(mode=\"all\")]\r\n\r\n conv_block += [ReflectionPad2d(1),\r\n Conv2D(dim, dim, filter_size=3, stride=1, padding=0, bias_attr=use_bias),\r\n InstanceNorm(dim)]\r\n\r\n self.conv_block = Sequential(*conv_block)\r\n\r\n def forward(self, x):\r\n out = x + self.conv_block(x)\r\n return out\r\n\r\n\r\nclass ResnetAdaILNBlock(fluid.dygraph.Layer):\r\n def __init__(self, dim, use_bias):\r\n super(ResnetAdaILNBlock, self).__init__()\r\n self.pad1 = ReflectionPad2d(1)\r\n self.conv1 = Conv2D(dim, dim, filter_size=3, stride=1, padding=0, bias_attr=use_bias)\r\n self.norm1 = adaILN(dim)\r\n # self.relu1 = nn.ReLU(True)\r\n\r\n self.pad2 = ReflectionPad2d(1)\r\n self.conv2 = Conv2D(dim, dim, filter_size=3, stride=1, padding=0, bias_attr=use_bias)\r\n self.norm2 = adaILN(dim)\r\n\r\n def forward(self, x, gamma, beta):\r\n out = self.pad1(x)\r\n out = self.conv1(out)\r\n out = self.norm1(out, gamma, beta)\r\n out = fluid.layers.relu(out)\r\n # out = self.relu1(out)\r\n out = self.pad2(out)\r\n out = self.conv2(out)\r\n out = self.norm2(out, gamma, beta)\r\n\r\n return out + x\r\n\r\n# Adaptive Layer-Instance Normalization代码\r\nclass adaILN(fluid.dygraph.Layer):\r\n def __init__(self, num_features, eps=1e-5):\r\n super(adaILN, self).__init__()\r\n self.eps = eps\r\n # adaILN的参数p,通过这个参数来动态调整LN和IN的占比\r\n self.rho = fluid.layers.fill_constant(shape=[1, num_features, 1, 1], value=0.9, dtype='float32')\r\n\r\n def forward(self, input, gamma, beta):\r\n # torch.Size([1, 256, 64, 64])\r\n ninput = input.numpy()\r\n # 先求两种规范化的值\r\n in_mean, in_var = np.mean(ninput, axis=(2, 3), keepdims=True), np.var(ninput, axis=(2, 3), keepdims=True)\r\n out_in = (ninput - in_mean) / np.sqrt(in_var + self.eps)\r\n ln_mean, ln_var = np.mean(ninput, axis=(1, 2, 3), keepdims=True), np.var(ninput, axis=(1, 2, 3), keepdims=True)\r\n out_ln = (ninput - ln_mean) / np.sqrt(ln_var + self.eps)\r\n out_in = fluid.dygraph.base.to_variable(out_in)\r\n out_ln = fluid.dygraph.base.to_variable(out_ln)\r\n ninput = fluid.dygraph.base.to_variable(ninput)\r\n #out = fluid.dygraph.base.to_variable(out)\r\n # 合并两种规范化(IN, LN)\r\n out = self.rho * out_in + (1-self.rho) * out_ln\r\n # 扩张得到结果\r\n gamma = fluid.layers.unsqueeze(input=gamma, axes=[2])\r\n gamma = fluid.layers.unsqueeze(input=gamma, axes=[3]) \r\n beta = 
fluid.layers.unsqueeze(input=beta, axes=[2])\r\n beta = fluid.layers.unsqueeze(input=beta, axes=[3]) \r\n out = out * gamma + beta\r\n # out torch.Size([1, 256, 64, 64])\r\n\r\n return out\r\n\r\n\r\nclass ILN(fluid.dygraph.Layer):\r\n def __init__(self, num_features, eps=1e-5):\r\n super(ILN, self).__init__()\r\n self.eps = eps\r\n self.rho = fluid.layers.fill_constant(shape=[1, num_features, 1, 1], value=0.0, dtype='float32')\r\n self.gamma = fluid.layers.fill_constant(shape=[1, num_features, 1, 1], value=1.0, dtype='float32')\r\n self.beta = fluid.layers.fill_constant(shape=[1, num_features, 1, 1], value=0.0, dtype='float32')\r\n\r\n def forward(self, input):\r\n #torch.Size([1, 128, 128, 128])\r\n ninput = input.numpy()\r\n\r\n in_mean, in_var=np.mean(ninput, axis=(2, 3), keepdims=True), np.var(ninput, axis=(2, 3), keepdims=True)\r\n out_in = (ninput - in_mean) / np.sqrt(in_var + self.eps)\r\n ln_mean, ln_var = np.mean(ninput, axis=(1, 2, 3), keepdims=True), np.var(ninput, axis=(1, 2, 3), keepdims=True)\r\n out_ln = (ninput - ln_mean) / np.sqrt(ln_var + self.eps)\r\n out_in = fluid.dygraph.base.to_variable(out_in)\r\n out_ln = fluid.dygraph.base.to_variable(out_ln)\r\n ninput = fluid.dygraph.base.to_variable(ninput) \r\n out = self.rho * out_in + (1-self.rho) * out_ln\r\n out = out * self.gamma + self.beta\r\n # out torch.Size([1, 128, 128, 128])\r\n\r\n return out\r\n\r\n\r\nclass Discriminator(fluid.dygraph.Layer):\r\n def __init__(self, input_nc, ndf=64, n_layers=5):\r\n super(Discriminator, self).__init__()\r\n # 第一层下采样, 尺寸减半(128),通道数为64\r\n model = [ReflectionPad2d(1),\r\n Spectralnorm(Conv2D(input_nc, ndf, filter_size=4, stride=2, padding=1, bias_attr=True, act='leaky_relu')),\r\n # nn.LeakyReLU(0.2, True)\r\n ]\r\n # 第二,三层下采样,尺寸再缩4倍(32),通道数为256\r\n for i in range(1, n_layers - 2):\r\n mult = 2 ** (i - 1)\r\n model += [ReflectionPad2d(1),\r\n Spectralnorm(Conv2D(ndf * mult, ndf * mult * 2, filter_size=4, stride=2, padding=0, bias_attr=True, act='leaky_relu')),\r\n # nn.LeakyReLU(0.2, True)\r\n ]\r\n # 尺寸不变(32),通道数为512\r\n mult = 2 ** (n_layers - 2 - 1)\r\n model += [ReflectionPad2d(1),\r\n Spectralnorm(Conv2D(ndf * mult, ndf * mult * 2, filter_size=4, stride=1, padding=0, bias_attr=True, act='leaky_relu')),\r\n # nn.LeakyReLU(0.2, True)\r\n ]\r\n\r\n # Class Activation Map\r\n mult = 2 ** (n_layers - 2)\r\n self.gap_fc = Spectralnorm(Linear(ndf * mult, 1, bias_attr=False))\r\n self.gmp_fc = Spectralnorm(Linear(ndf * mult, 1, bias_attr=False))\r\n self.conv1x1 = Conv2D(ndf * mult * 2, ndf * mult, filter_size=1, stride=1, bias_attr=True)\r\n # self.leaky_relu = nn.LeakyReLU(0.2, True)\r\n\r\n self.pad = ReflectionPad2d(1)\r\n self.conv = Spectralnorm(Conv2D(ndf * mult, 1, filter_size=4, stride=1, padding=0, bias_attr=False))\r\n\r\n self.model = Sequential(*model)\r\n\r\n def forward(self, input):\r\n x = self.model(input) #[1, 2048, 2, 2]\r\n\r\n gap = Pool2D(pool_size=x.shape[-1],pool_stride=x.shape[-1],pool_type='avg')(x) #[1, 2048, 1, 1]\r\n gap = fluid.layers.reshape(gap, shape=[x.shape[0], -1]) \r\n gap_logit = self.gap_fc(gap) #torch.Size([1, 1])\r\n gap_weight = list(self.gap_fc.parameters())[0]\r\n gap_weight = fluid.layers.unsqueeze(input=gap_weight, axes=[0])\r\n gap_weight = fluid.layers.unsqueeze(input=gap_weight, axes=[3]) \r\n gap = x * gap_weight #[1, 2048, 2, 2]\r\n\r\n gmp = Pool2D(pool_size=x.shape[-1],pool_stride=x.shape[-1],pool_type='max')(x)\r\n gmp = fluid.layers.reshape(gmp, shape=[x.shape[0], -1]) \r\n gmp_logit = self.gmp_fc(gmp)\r\n gmp_weight = 
list(self.gmp_fc.parameters())[0]\r\n gmp_weight = fluid.layers.unsqueeze(input=gmp_weight, axes=[0])\r\n gmp_weight = fluid.layers.unsqueeze(input=gmp_weight, axes=[3]) \r\n gmp = x * gmp_weight\r\n\r\n cam_logit = fluid.layers.concat([gap_logit, gmp_logit], 1)\r\n x = fluid.layers.concat([gap, gmp], 1)\r\n x = fluid.layers.leaky_relu(self.conv1x1(x))\r\n\r\n heatmap = fluid.layers.reduce_sum(x, dim=1, keep_dim=True)\r\n\r\n x = self.pad(x)\r\n out = self.conv(x)\r\n\r\n return out, cam_logit, heatmap\r\n\r\n\r\nclass RhoClipper(object):\r\n\r\n def __init__(self, min, max):\r\n self.clip_min = min\r\n self.clip_max = max\r\n assert min < max\r\n\r\n def __call__(self, module):\r\n\r\n if hasattr(module, 'rho'):\r\n w = module.rho.data\r\n w = w.clamp(self.clip_min, self.clip_max)\r\n module.rho.data = w\r\n\r\n\r\nclass ReflectionPad2d(fluid.dygraph.Layer):\r\n def __init__(self, size):\r\n super(ReflectionPad2d, self).__init__()\r\n self.size = size\r\n \r\n def forward(self, x):\r\n return fluid.layers.pad2d(x, [self.size] * 4, mode='reflect')\r\n\r\n\r\n# 定义上采样模块\r\nclass Upsample(fluid.dygraph.Layer):\r\n def __init__(self, scale=2):\r\n super(Upsample, self).__init__()\r\n self.scale = scale\r\n\r\n def forward(self, inputs):\r\n # get dynamic upsample output shape\r\n shape_nchw = fluid.layers.shape(inputs)\r\n shape_hw = fluid.layers.slice(shape_nchw, axes=[0], starts=[2], ends=[4])\r\n shape_hw.stop_gradient = True\r\n in_shape = fluid.layers.cast(shape_hw, dtype='int32')\r\n out_shape = in_shape * self.scale\r\n out_shape.stop_gradient = True\r\n\r\n # reisze by actual_shape\r\n out = fluid.layers.resize_nearest(\r\n input=inputs, scale=self.scale, actual_shape=out_shape)\r\n return out\r\n\r\n\r\nclass Spectralnorm(fluid.dygraph.Layer):\r\n def __init__(self,\r\n layer,\r\n dim=0,\r\n power_iters=1,\r\n eps=1e-12,\r\n dtype='float32'):\r\n super(Spectralnorm, self).__init__()\r\n self.spectral_norm = SpectralNorm(layer.weight.shape, dim, power_iters, eps, dtype)\r\n self.dim = dim\r\n self.power_iters = power_iters\r\n self.eps = eps\r\n self.layer = layer\r\n weight = layer._parameters['weight']\r\n del layer._parameters['weight']\r\n self.weight_orig = self.create_parameter(weight.shape, dtype=weight.dtype)\r\n self.weight_orig.set_value(weight)\r\n\r\n def forward(self, x):\r\n weight = self.spectral_norm(self.weight_orig)\r\n self.layer.weight = weight\r\n out = self.layer(x)\r\n \r\n return out\r\n\r\n \r\nclass BCEWithLogitsLoss():\r\n def __init__(self, weight=None, reduction='mean'):\r\n self.weight = weight\r\n self.reduction = 'mean'\r\n\r\n def __call__(self, x, label):\r\n out = fluid.layers.sigmoid_cross_entropy_with_logits(x, label)\r\n if self.reduction == 'sum':\r\n return fluid.layers.reduce_sum(out)\r\n elif self.reduction == 'mean':\r\n return fluid.layers.reduce_mean(out)\r\n else:\r\n return out", "sub_path": "networks.py", "file_name": "networks.py", "file_ext": "py", "file_size_in_byte": 17728, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "paddle.fluid.dygraph", "line_number": 8, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 8, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph.Conv2D", "line_number": 22, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.InstanceNorm", "line_number": 23, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.PRelu", "line_number": 24, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Conv2D", 
"line_number": 32, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.InstanceNorm", "line_number": 33, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.PRelu", "line_number": 34, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Linear", "line_number": 44, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Linear", "line_number": 46, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Conv2D", "line_number": 48, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Linear", "line_number": 54, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Linear", "line_number": 56, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Linear", "line_number": 61, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Linear", "line_number": 63, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Linear", "line_number": 67, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Linear", "line_number": 68, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Conv2D", "line_number": 81, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Conv2D", "line_number": 87, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Sequential", "line_number": 91, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Sequential", "line_number": 92, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Sequential", "line_number": 93, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Pool2D", "line_number": 101, "usage_type": "call"}, {"api_name": "paddle.fluid.layers.reshape", "line_number": 102, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 102, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 102, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.unsqueeze", "line_number": 105, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 105, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 105, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.unsqueeze", "line_number": 106, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 106, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 106, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph.Pool2D", "line_number": 109, "usage_type": "call"}, {"api_name": "paddle.fluid.layers.reshape", "line_number": 110, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 110, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 110, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.unsqueeze", "line_number": 113, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 113, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 113, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.unsqueeze", "line_number": 114, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 114, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 114, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.concat", "line_number": 117, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 117, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 117, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.concat", "line_number": 118, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 118, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 118, "usage_type": 
"name"}, {"api_name": "paddle.fluid.layers.reduce_sum", "line_number": 124, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 124, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 124, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.adaptive_pool2d", "line_number": 129, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 129, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 129, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.reshape", "line_number": 130, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 130, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 130, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.reshape", "line_number": 133, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 133, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 133, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph", "line_number": 147, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 147, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph.Conv2D", "line_number": 152, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.InstanceNorm", "line_number": 153, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.PRelu", "line_number": 154, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Conv2D", "line_number": 157, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.InstanceNorm", "line_number": 158, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Sequential", "line_number": 160, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph", "line_number": 167, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 167, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph.Conv2D", "line_number": 171, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Conv2D", "line_number": 176, "usage_type": "call"}, {"api_name": "paddle.fluid.layers.relu", "line_number": 183, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 183, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 183, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph", "line_number": 192, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 192, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.fill_constant", "line_number": 197, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 197, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 197, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 206, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.base.to_variable", "line_number": 207, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph", "line_number": 207, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 207, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph.base.to_variable", "line_number": 208, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph", "line_number": 208, "usage_type": "attribute"}, {"api_name": "paddle.fluid", 
"line_number": 208, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph.base.to_variable", "line_number": 209, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph", "line_number": 209, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 209, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.unsqueeze", "line_number": 214, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 214, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 214, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.unsqueeze", "line_number": 215, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 215, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 215, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.unsqueeze", "line_number": 216, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 216, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 216, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.unsqueeze", "line_number": 217, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 217, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 217, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph", "line_number": 224, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 224, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.fill_constant", "line_number": 228, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 228, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 228, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.fill_constant", "line_number": 229, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 229, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 229, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.fill_constant", "line_number": 230, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 230, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 230, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 239, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.base.to_variable", "line_number": 240, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph", "line_number": 240, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 240, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph.base.to_variable", "line_number": 241, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph", "line_number": 241, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 241, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph.base.to_variable", "line_number": 242, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph", "line_number": 242, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 242, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph", "line_number": 250, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 250, "usage_type": "name"}, {"api_name": 
"paddle.fluid.dygraph.Conv2D", "line_number": 255, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Conv2D", "line_number": 262, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Conv2D", "line_number": 268, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Linear", "line_number": 274, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Linear", "line_number": 275, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Conv2D", "line_number": 276, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Conv2D", "line_number": 280, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Sequential", "line_number": 282, "usage_type": "call"}, {"api_name": "paddle.fluid.dygraph.Pool2D", "line_number": 287, "usage_type": "call"}, {"api_name": "paddle.fluid.layers.reshape", "line_number": 288, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 288, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 288, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.unsqueeze", "line_number": 291, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 291, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 291, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.unsqueeze", "line_number": 292, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 292, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 292, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph.Pool2D", "line_number": 295, "usage_type": "call"}, {"api_name": "paddle.fluid.layers.reshape", "line_number": 296, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 296, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 296, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.unsqueeze", "line_number": 299, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 299, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 299, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.unsqueeze", "line_number": 300, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 300, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 300, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.concat", "line_number": 303, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 303, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 303, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.concat", "line_number": 304, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 304, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 304, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.leaky_relu", "line_number": 305, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 305, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 305, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.reduce_sum", "line_number": 307, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 307, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 307, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph", "line_number": 330, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 330, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.pad2d", "line_number": 336, "usage_type": "call"}, 
{"api_name": "paddle.fluid.layers", "line_number": 336, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 336, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph", "line_number": 340, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 340, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.shape", "line_number": 347, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 347, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 347, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.slice", "line_number": 348, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 348, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 348, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.cast", "line_number": 350, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 350, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 350, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.resize_nearest", "line_number": 355, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 355, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 355, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph", "line_number": 360, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 360, "usage_type": "name"}, {"api_name": "paddle.fluid.dygraph.SpectralNorm", "line_number": 368, "usage_type": "call"}, {"api_name": "paddle.fluid.layers.sigmoid_cross_entropy_with_logits", "line_number": 392, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 392, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 392, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.reduce_sum", "line_number": 394, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 394, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 394, "usage_type": "name"}, {"api_name": "paddle.fluid.layers.reduce_mean", "line_number": 396, "usage_type": "call"}, {"api_name": "paddle.fluid.layers", "line_number": 396, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 396, "usage_type": "name"}]} +{"seq_id": "137253291", "text": "import csv\nimport cv2\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Cropping2D, Activation, Convolution2D\n\n\nlines = []\nwith open('driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n\nimages = []\nimagesCenter = []\nimagesLeft = []\nimagesRight = []\nmeasurements = []\ncorrection = 0.002\nfor line in lines:\n source_pathCenter = line[0]\n source_pathLeft = line[1]\n source_pathRight = line[2]\n \n filenameCenter = source_pathCenter.split('/')[-1] \n filenameLeft = source_pathLeft.split('/')[-1] \n filenameRight = source_pathRight.split('/')[-1] \n \n imagesCenter = cv2.imread(filenameCenter)\n imagesLeft = cv2.imread(filenameLeft)\n imagesRight = cv2.imread(filenameRight)\n \n \n images.append(imagesCenter)\n images.append(imagesLeft)\n images.append(imagesRight)\n \n measurement = float(line[3])\n measurements.append(measurement)\n measurements.append(measurement+correction)\n measurements.append(measurement-correction)\n \nX_train = np.array(images)\nY_train = np.array(measurements)\n\n\n#Build the NN\nmodel = Sequential()\nmodel.add(Lambda(lambda x:x /255.0 - 0.5, input_shape = (160, 
320, 3)))\nmodel.add(Cropping2D(cropping = ((70,25), (0,0))))\nmodel.add(Convolution2D(24,5,5, subsample = (2,2), border_mode = \"valid\", init = 'he_normal'))\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(36,5,5, subsample = (2,2), border_mode = \"valid\", init = 'he_normal'))\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(48,5,5, subsample = (2,2), border_mode = \"valid\", init = 'he_normal'))\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(64,3,3, subsample = (1,1), border_mode = \"valid\", init = 'he_normal'))\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(64,3,3, subsample = (1,1), border_mode = \"valid\", init = 'he_normal'))\nmodel.add(Activation('relu'))\nmodel.add(Flatten())\nmodel.add(Dense(1164, init = 'he_normal'))\nmodel.add(Activation('relu'))\nmodel.add(Dense(100, init = 'he_normal'))\nmodel.add(Activation('relu'))\nmodel.add(Dense(50, init = 'he_normal'))\nmodel.add(Activation('relu'))\nmodel.add(Dense(10, init = 'he_normal'))\nmodel.add(Activation('relu'))\nmodel.add(Dense(1, init = 'he_normal'))\nmodel.compile(loss = 'mse', optimizer ='adam')\n\nmodel.fit(X_train, Y_train, validation_split = 0.2, shuffle = True, nb_epoch = 5)\nmodel.save('model.h5')", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 2452, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "csv.reader", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.layers.Lambda", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.layers.Cropping2D", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 55, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 68, "usage_type": "call"}, 
{"api_name": "keras.layers.Activation", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "330230647", "text": "import time\nimport subprocess\nimport shlex\nimport string\nimport random\nfrom uuid import uuid1\nimport os\nimport pyqrcode\nimport config\nimport hashlib\n\nstring_pool = string.ascii_letters + string.digits\ngen_random_text = lambda s: ''.join(map(lambda _: random.choice(string_pool), range(s)))\n\n\ndef timestamp():\n return int(time.time())\n\n\ndef run_command(cmd):\n args = shlex.split(cmd)\n subprocess.call(args)\n\n\ndef generate_uuid():\n return str(uuid1())\n\n\ndef gen_identity():\n if config.get_uuid() == \"\":\n uuid = generate_uuid()\n psw = gen_random_text(6)\n config.set_uuid_psw(uuid, psw)\n qrimg = os.path.join(config.PROJECT_DIR, config.get_identity_path())\n if not os.path.isfile(qrimg):\n gen_identity_img(qrimg)\n\n\ndef regen_identity():\n uuid = config.get_uuid()\n if uuid == \"\":\n uuid = generate_uuid()\n psw = gen_random_text(6)\n config.set_uuid_psw(uuid, psw)\n gen_identity_img()\n\n\ndef gen_identity_img(qrimg):\n\n json_str = str({'UUID': config.get_uuid(), 'PSW': config.get_psw()})\n qr = pyqrcode.create(json_str)\n qr.png(qrimg, scale=5)\n\n\ndef psw_salt(uuid, psw):\n md5 = hashlib.md5()\n pwbytes = (uuid+psw).encode('utf-8')\n md5.update(pwbytes)\n return md5.hexdigest()\n", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1258, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "string.ascii_letters", "line_number": 12, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 12, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 13, "usage_type": "call"}, {"api_name": "time.time", "line_number": 17, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 21, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 22, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 26, "usage_type": "call"}, {"api_name": "config.get_uuid", "line_number": 30, "usage_type": "call"}, {"api_name": "config.set_uuid_psw", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "config.PROJECT_DIR", "line_number": 34, "usage_type": "attribute"}, {"api_name": "config.get_identity_path", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "config.get_uuid", "line_number": 40, "usage_type": "call"}, {"api_name": "config.set_uuid_psw", "line_number": 44, "usage_type": "call"}, {"api_name": "config.get_uuid", "line_number": 50, "usage_type": "call"}, {"api_name": "config.get_psw", "line_number": 50, "usage_type": "call"}, {"api_name": "pyqrcode.create", "line_number": 51, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "246414218", "text": "#!/usr/bin/python3\n\"\"\"\n Prints count of all searched for words in hot post titles.\n\"\"\"\n\n\nimport requests\n\n\ndef count_words(subreddit, word_list, hot_list=[], aft=None):\n \"\"\"\n Counts words in all hot post titles\n \"\"\"\n\n dic = {}\n for x in word_list:\n dic[x] = 0\n\n head = {\n 'User-Agent': 'Totally 
human'\n }\n\n response = requests.get('https://www.reddit.com/r/{}/hot.json?after={}'\n .format(subreddit, aft), headers=head)\n\n if (not response.status_code == 200 or\n not response.json().get('data').get('children')):\n return None\n\n aft = response.json().get('data').get('after')\n hot_list += [x.get('data').get('title') for x in\n [y for y in response.json().get('data').get('children')]]\n if aft:\n count_words(subreddit, word_list, hot_list, aft)\n else:\n for title in hot_list:\n for key in dic:\n dic[key] += title.lower().split(' ').count(key.lower())\n sort = [(v, k) for k, v in dic.items()]\n sort.sort(reverse=True)\n for v, k in sort:\n if v:\n print('{}: {}'.format(k, v))\n", "sub_path": "0x16-api_advanced/100-count.py", "file_name": "100-count.py", "file_ext": "py", "file_size_in_byte": 1171, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "requests.get", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "40823571", "text": "from flask import Flask, render_template, request, send_file\nfrom werkzeug.utils import secure_filename\nimport pandas as pd\nfrom geopy.geocoders import ArcGIS\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route('/', methods=['POST'])\ndef success():\n global ufile\n global df\n if request.method == 'POST':\n ufile = request.files['file']\n if ufile.filename.lower().endswith('.csv'):\n df = pd.read_csv(ufile, encoding = \"ISO-8859-1\")\n if 'Address' in df.columns or 'address' in df.columns:\n if 'address' in df.columns and 'Address' not in df.columns:\n address = 'address'\n else:\n address = 'Address'\n df['Latitude'] = df[address].apply(ArcGIS().geocode).apply(\n lambda x: x.latitude if x != None else None)\n df['Longitude'] = df[address].apply(ArcGIS().geocode).apply(\n lambda x: x.longitude if x != None else None)\n return render_template(\"index.html\",\n tables=[\n df.to_html(classes='data',\n index=False,\n header=\"true\")\n ],\n btn=\"download.html\")\n else:\n return render_template(\"index.html\", wrong_file=\"wrong_file.html\")\n else:\n return render_template(\"index.html\", wrong_file=\"wrong_file.html\")\n\n@app.route('/download')\ndef download():\n df.to_csv(secure_filename(\"uploaded_\" + ufile.filename), index=False)\n return send_file(\"uploaded_\" + ufile.filename, attachment_filename=\"yourfile.csv\", as_attachment=True)\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run(port=5001)\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1930, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "geopy.geocoders.ArcGIS", "line_number": 27, "usage_type": "call"}, {"api_name": "geopy.geocoders.ArcGIS", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 31, "usage_type": "call"}, {"api_name": 
"flask.render_template", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 41, "usage_type": "call"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "334504266", "text": "\"\"\" A command that implements auto-completion of names in the search box. \"\"\"\n\n\nfrom traits.api import HasTraits\n\n\n# We get a new instance of each command every time, so we can't keep instance\n# state. I tried this as class state but it just looks uglier ;^)\n# The index of the next candidate.\ncandidate_index = 0\n\n# The candidate names.\ncandidate_names = []\n\n\nclass AbstractAutoComplete(HasTraits):\n \"\"\" The base class for the auto-complete commands.\n\n Do not use this command directly - use the derived classes.\n\n \"\"\"\n\n #### 'AutoComplete' *class* protocol #######################################\n\n # Gives the 'direction' of the auto-completion. Must be either 'next' or\n # 'previous' (and is set appropriately in the derived classes). We would\n # like this to be Enum('next', 'previous') maybe, but we can't have class\n # traits.\n direction = None\n\n #### 'object' protocol #####################################################\n\n def __call__(self, window):\n red = window.red\n \n global candidate_names, candidate_index\n\n # If we are NOT currently in auto-complete mode then we need to generate\n # a list of candidate names.\n if not self._in_auto_complete_mode(red):\n candidate_names = self._get_candidate_names(red)\n candidate_index = -1\n \n if len(candidate_names) > 0:\n if self.direction == 'next':\n candidate_index += 1\n if candidate_index > len(candidate_names) - 1:\n candidate_index = 0\n\n else:\n candidate_index -= 1\n if candidate_index < 0:\n candidate_index = len(candidate_names) - 1\n\n red.name = candidate_names[candidate_index]\n red.set_insertion_point_end()\n\n return\n\n #### Private protocol ######################################################\n\n def _get_candidate_names(self, red):\n \"\"\" Get the candidate names at the start of auto-complete mode. \"\"\"\n\n parent_name, filter = self._get_parent_name_and_filter(red.name)\n parent = red.lookup(parent_name)\n\n candidate_names = (\n self._get_filtered_child_names(parent, filter)\n\n if parent is not None else []\n )\n\n return candidate_names\n \n def _get_filtered_child_names(self, node, filter):\n \"\"\" Get the filtered child names of the given node. \"\"\"\n\n if filter is not None:\n candidate_names = [\n child.dotted_name for child in node.children\n\n if child.name.startswith(filter)\n ]\n\n else:\n candidate_names = [\n child.dotted_name for child in node.children\n ]\n\n return sorted(candidate_names)\n\n def _get_parent_name_and_filter(self, name):\n \"\"\" Get the parent name and the filter from the given name. 
\"\"\"\n\n # If the name ends with a period then we use *all* of the name before\n # the period as the name of the parent namespace node, and there is\n # no filter.\n if name.endswith('.'):\n parent_name = name[:-1]\n filter = None\n \n # Otherwise, we use all but the last portion of the name as the name\n # of the parent namespace node, and the rest as the filter.\n else:\n atoms = name.split('.')\n if len(atoms) == 1:\n parent_name = ''\n \n else:\n parent_name = '.'.join(atoms[:-1])\n\n filter = atoms[-1]\n\n return parent_name, filter\n\n def _in_auto_complete_mode(self, red):\n \"\"\" Return True if we are in auto-complete mode. \"\"\"\n \n command_history = red.command_history\n\n in_auto_complete_mode = (\n len(command_history) > 0\n and isinstance(command_history[-1], AbstractAutoComplete)\n )\n \n return in_auto_complete_mode\n\n\nclass AutoCompleteNext(AbstractAutoComplete):\n \"\"\" Auto-complete to the next candidate name. \"\"\"\n\n direction = 'next'\n\n\nclass AutoCompletePrevious(AbstractAutoComplete):\n \"\"\" Auto-complete to the previous candidate name. \"\"\"\n\n direction = 'previous'\n \n#### EOF #######################################################################\n", "sub_path": "source/pgv/red/ui/commands/auto_complete.py", "file_name": "auto_complete.py", "file_ext": "py", "file_size_in_byte": 4355, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "traits.api.HasTraits", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "302667074", "text": "import time\nfrom part2.spiders import scrape, check_domains\nfrom scrapy.crawler import CrawlerProcess, CrawlerRunner\n\n'''\n https://github.com/scrapy/scrapy/issues/990\n\n'''\ndef main():\n process = CrawlerProcess({\n 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',\n 'FEED_FORMAT': 'json',\n 'FEED_URI': 'healthy_domain_resps.json'\n })\n\n start_time = time.time()\n process.crawl(scrape.ScrapeDomains)\n #process.crawl(check_domains.CheckDomains)\n process.start()\n print('The script took {0} second !'.format(time.time() - start_time))\n\n # https://doc.scrapy.org/en/latest/topics/practices.html -- explains the differences with CrawlerProcess and runner\n # runner = CrawlerRunner({\n # 'FEED_FORMAT': 'json',\n # 'FEED_URI': 'result.json',\n # })\n # runner.crawl(scrape.ScrapeDomains)\n #\n # d = runner.join()\n # d.addBoth(lambda _: reactor.stop())\n # reactor.run()\n\nif __name__ == '__main__':\n main()\n\n\n\n", "sub_path": "run_spyder.py", "file_name": "run_spyder.py", "file_ext": "py", "file_size_in_byte": 999, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "scrapy.crawler.CrawlerProcess", "line_number": 10, "usage_type": "call"}, {"api_name": "time.time", "line_number": 16, "usage_type": "call"}, {"api_name": "part2.spiders.scrape.ScrapeDomains", "line_number": 17, "usage_type": "attribute"}, {"api_name": "part2.spiders.scrape", "line_number": 17, "usage_type": "name"}, {"api_name": "time.time", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "288959452", "text": "from tv_show_folder_rename_sonarr.logger import Logger\nfrom tv_show_folder_rename_sonarr.config import GuiConfig\nfrom tv_show_folder_rename_sonarr.cli_helpers import read_args\nfrom tv_show_folder_rename_sonarr.rename_shows import run_rename\nfrom pathlib import Path\nimport argparse\n\n\ndef run_cli():\n \"\"\"\n Runs the cli interface. 
Requires that the script was called with proper arguments. Accesses sys.argv for arguments.\n Starts worker script after parsing inputs.\n :return: None\n \"\"\"\n cli_default_config = Path('config.yml')\n cli_config = GuiConfig(cli_default_config)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--config\", help=\"specify configuration file to load\", default=None)\n parser.add_argument(\"-l\", \"--log\", help=\"Specify log file.\", default=None, )\n parser.add_argument(\"-u\", \"--url\", help=\"Specify the Sonarr API url.\", default=None, )\n parser.add_argument(\"-k\", \"--key\", help=\"Specify the Sonarr API key\", default=None, )\n parser.add_argument(\"--char\", help=\"replacement char for space\", default=None, )\n parser.add_argument(\"-n\", \"--new\", help=\"new library root path\", default=None, )\n\n parser.add_argument(\"-p\", \"--preview\", help=\"Switch use if you want to preview the changes\",\n default=False, action='store_true')\n parser.add_argument(\"--replace_space\", help=\"Switch use if you want to replace the space char in the folder name\",\n default=False, action='store_true')\n parser.add_argument(\"--replace_root\", help=\"Switch use if you want to change the root folder of your library\",\n default=False, action='store_true')\n parser.add_argument(\"--use_language_in_path\", help=\"Switch use if you want to use the language of a show in the library path\",\n default=False, action='store_true')\n\n args = parser.parse_args()\n cli_config = read_args(args, cli_config)\n cli_config.logger = Logger(log_file=Path(cli_config.get('log_file')))\n run_rename(cli_config)\n", "sub_path": "tv_show_folder_rename_sonarr/run_cli.py", "file_name": "run_cli.py", "file_ext": "py", "file_size_in_byte": 2042, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pathlib.Path", "line_number": 15, "usage_type": "call"}, {"api_name": "tv_show_folder_rename_sonarr.config.GuiConfig", "line_number": 16, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "call"}, {"api_name": "tv_show_folder_rename_sonarr.cli_helpers.read_args", "line_number": 35, "usage_type": "call"}, {"api_name": "tv_show_folder_rename_sonarr.logger.Logger", "line_number": 36, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 36, "usage_type": "call"}, {"api_name": "tv_show_folder_rename_sonarr.rename_shows.run_rename", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "442271478", "text": "import itertools\n\nfrom flask import jsonify, url_for, request, Response\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.exc import NoResultFound, UnmappedInstanceError\n\nfrom app.application import app, db\nfrom app.models.auth_token import AuthToken\nfrom app.models.location import Location, locations_schema, location_schema\nfrom app.models.stop_point import StopPoint, stop_points_schema, \\\n stop_point_schema\nfrom app.models.user import User, users_schema\nfrom app.utils.utils import has_no_empty_params, Errors, error, \\\n get_timetable_for_stop_point_ref_list_multithreaded\nfrom verbund_soap_client.verbund_soap_client import VDVClient\n\n\n@app.route('/users', methods=['GET'])\ndef get_users():\n all_users = User.query.all()\n result = users_schema.dump(all_users)\n return jsonify(result)\n\n\n@app.route('/proxy/location-information-request', methods=['GET'])\ndef proxy_location_information_request():\n if 'location_name' in request.args:\n locations = 
VDVClient().location_information_request__location_name(\n request.args['location_name'])\n return jsonify(locations)\n\n@app.route('/locations', methods=['GET'])\ndef get_locations():\n return jsonify(locations_schema.dump(Location.query.all()))\n\n@app.route('/location/slug//timetable', methods=['GET'])\ndef get_timetable_for_location_slug(slug):\n try:\n location = Location.query.filter(Location.slug == slug).one()\n except NoResultFound:\n return error(Errors.OBJECT_NOT_FOUND_ERROR, status_code=404)\n stop_point_refs = [stop_point.ref for stop_point in location.stop_points]\n\n result_lists = list(get_timetable_for_stop_point_ref_list_multithreaded(stop_point_refs))\n result = itertools.chain.from_iterable(result_lists) # todo: have to sort the lists while/after jaining them\n return jsonify(list(result))\n\n\n@app.route('/location', methods=['POST'])\ndef create_location():\n name, slug = request.json['name'], request.json['slug']\n try:\n location = Location(name=name, slug=slug)\n db.session.add(location)\n db.session.commit()\n except IntegrityError:\n return error(Errors.SLUG_ALREADY_EXISTS_ERROR)\n return jsonify(location_schema.dump(location))\n\n\n@app.route('/location/id/', methods=['GET', 'DELETE'])\ndef rud_location(location_id):\n # find location\n location = Location.query.get(location_id)\n if not location:\n return error(Errors.OBJECT_NOT_FOUND_ERROR)\n if request.method == 'DELETE':\n db.session.delete(location)\n db.session.commit()\n return jsonify(location_schema.dump(location))\n\n\n@app.route('/location/id//stopPoint', methods=['POST'])\ndef create_stop_point(location_id):\n name, city, ref = request.json['name'], request.json['city'], request.json[\n 'ref']\n stop_point = StopPoint(name=name, city=city, ref=ref, location_id=location_id)\n db.session.add(stop_point)\n db.session.commit()\n return jsonify(stop_point_schema.dump(stop_point))\n\n\n@app.route('/stopPoint/id/', methods=['GET', 'DELETE'])\ndef rud_stop_point(stop_point_id):\n stop_point = StopPoint.query.get(stop_point_id)\n if not stop_point:\n return error(Errors.OBJECT_NOT_FOUND_ERROR)\n if request.method == 'DELETE':\n db.session.delete(stop_point)\n db.session.commit()\n return jsonify(location_schema.dump(stop_point))\n\n\n@app.route('/location/id//stopPoints', methods=['GET'])\ndef get_stop_points(location_id):\n return jsonify(stop_points_schema.dump(Location.query.get(location_id).stop_points))\n\n\n@app.route('/login', methods=['POST'])\ndef login():\n username, password = request.json['username'], request.json['password']\n\n # find user\n try:\n user = User.query.filter(User.username == username).one()\n except NoResultFound:\n return error(Errors.AUTHENTICATION_ERROR)\n # check password\n if not user.check_password(password):\n return error(Errors.AUTHENTICATION_ERROR)\n # generate password and return\n auth_token = AuthToken(user_id=user.id)\n db.session.add(auth_token)\n db.session.commit()\n\n return jsonify({'token': auth_token.token})\n\n\n@app.route('/')\ndef all_links():\n links = []\n for rule in app.url_map.iter_rules():\n # Filter out rules we can't navigate to in a browser\n # and rules that require parameters\n if \"GET\" in rule.methods and has_no_empty_params(rule):\n url = url_for(rule.endpoint, **(rule.defaults or {}))\n links.append((url, rule.endpoint))\n # links is now a list of url, endpoint tuples\n return jsonify(links)\n", "sub_path": "backend/app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4644, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "app.models.user.User.query.all", "line_number": 20, "usage_type": "call"}, {"api_name": "app.models.user.User.query", "line_number": 20, "usage_type": "attribute"}, {"api_name": "app.models.user.User", "line_number": 20, "usage_type": "name"}, {"api_name": "app.models.user.users_schema.dump", "line_number": 21, "usage_type": "call"}, {"api_name": "app.models.user.users_schema", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 22, "usage_type": "call"}, {"api_name": "app.application.app.route", "line_number": 18, "usage_type": "call"}, {"api_name": "app.application.app", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "verbund_soap_client.verbund_soap_client.VDVClient", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 30, "usage_type": "call"}, {"api_name": "app.application.app.route", "line_number": 25, "usage_type": "call"}, {"api_name": "app.application.app", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 34, "usage_type": "call"}, {"api_name": "app.models.location.locations_schema.dump", "line_number": 34, "usage_type": "call"}, {"api_name": "app.models.location.locations_schema", "line_number": 34, "usage_type": "name"}, {"api_name": "app.models.location.Location.query.all", "line_number": 34, "usage_type": "call"}, {"api_name": "app.models.location.Location.query", "line_number": 34, "usage_type": "attribute"}, {"api_name": "app.models.location.Location", "line_number": 34, "usage_type": "name"}, {"api_name": "app.application.app.route", "line_number": 32, "usage_type": "call"}, {"api_name": "app.application.app", "line_number": 32, "usage_type": "name"}, {"api_name": "app.models.location.Location.query.filter", "line_number": 39, "usage_type": "call"}, {"api_name": "app.models.location.Location.query", "line_number": 39, "usage_type": "attribute"}, {"api_name": "app.models.location.Location", "line_number": 39, "usage_type": "name"}, {"api_name": "app.models.location.Location.slug", "line_number": 39, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.exc.NoResultFound", "line_number": 40, "usage_type": "name"}, {"api_name": "app.utils.utils.error", "line_number": 41, "usage_type": "call"}, {"api_name": "app.utils.utils.Errors.OBJECT_NOT_FOUND_ERROR", "line_number": 41, "usage_type": "attribute"}, {"api_name": "app.utils.utils.Errors", "line_number": 41, "usage_type": "name"}, {"api_name": "app.utils.utils.get_timetable_for_stop_point_ref_list_multithreaded", "line_number": 44, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 45, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 46, "usage_type": "call"}, {"api_name": "app.application.app.route", "line_number": 36, "usage_type": "call"}, {"api_name": "app.application.app", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": 
"app.models.location.Location", "line_number": 53, "usage_type": "call"}, {"api_name": "app.application.db.session.add", "line_number": 54, "usage_type": "call"}, {"api_name": "app.application.db.session", "line_number": 54, "usage_type": "attribute"}, {"api_name": "app.application.db", "line_number": 54, "usage_type": "name"}, {"api_name": "app.application.db.session.commit", "line_number": 55, "usage_type": "call"}, {"api_name": "app.application.db.session", "line_number": 55, "usage_type": "attribute"}, {"api_name": "app.application.db", "line_number": 55, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.IntegrityError", "line_number": 56, "usage_type": "name"}, {"api_name": "app.utils.utils.error", "line_number": 57, "usage_type": "call"}, {"api_name": "app.utils.utils.Errors.SLUG_ALREADY_EXISTS_ERROR", "line_number": 57, "usage_type": "attribute"}, {"api_name": "app.utils.utils.Errors", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 58, "usage_type": "call"}, {"api_name": "app.models.location.location_schema.dump", "line_number": 58, "usage_type": "call"}, {"api_name": "app.models.location.location_schema", "line_number": 58, "usage_type": "name"}, {"api_name": "app.application.app.route", "line_number": 49, "usage_type": "call"}, {"api_name": "app.application.app", "line_number": 49, "usage_type": "name"}, {"api_name": "app.models.location.Location.query.get", "line_number": 64, "usage_type": "call"}, {"api_name": "app.models.location.Location.query", "line_number": 64, "usage_type": "attribute"}, {"api_name": "app.models.location.Location", "line_number": 64, "usage_type": "name"}, {"api_name": "app.utils.utils.error", "line_number": 66, "usage_type": "call"}, {"api_name": "app.utils.utils.Errors.OBJECT_NOT_FOUND_ERROR", "line_number": 66, "usage_type": "attribute"}, {"api_name": "app.utils.utils.Errors", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "app.application.db.session.delete", "line_number": 68, "usage_type": "call"}, {"api_name": "app.application.db.session", "line_number": 68, "usage_type": "attribute"}, {"api_name": "app.application.db", "line_number": 68, "usage_type": "name"}, {"api_name": "app.application.db.session.commit", "line_number": 69, "usage_type": "call"}, {"api_name": "app.application.db.session", "line_number": 69, "usage_type": "attribute"}, {"api_name": "app.application.db", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 70, "usage_type": "call"}, {"api_name": "app.models.location.location_schema.dump", "line_number": 70, "usage_type": "call"}, {"api_name": "app.models.location.location_schema", "line_number": 70, "usage_type": "name"}, {"api_name": "app.application.app.route", "line_number": 61, "usage_type": "call"}, {"api_name": "app.application.app", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 75, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 75, "usage_type": "name"}, {"api_name": "app.models.stop_point.StopPoint", "line_number": 77, "usage_type": "call"}, {"api_name": "app.application.db.session.add", "line_number": 78, "usage_type": "call"}, {"api_name": "app.application.db.session", "line_number": 78, "usage_type": "attribute"}, {"api_name": "app.application.db", "line_number": 78, "usage_type": "name"}, {"api_name": 
"app.application.db.session.commit", "line_number": 79, "usage_type": "call"}, {"api_name": "app.application.db.session", "line_number": 79, "usage_type": "attribute"}, {"api_name": "app.application.db", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 80, "usage_type": "call"}, {"api_name": "app.models.stop_point.stop_point_schema.dump", "line_number": 80, "usage_type": "call"}, {"api_name": "app.models.stop_point.stop_point_schema", "line_number": 80, "usage_type": "name"}, {"api_name": "app.application.app.route", "line_number": 73, "usage_type": "call"}, {"api_name": "app.application.app", "line_number": 73, "usage_type": "name"}, {"api_name": "app.models.stop_point.StopPoint.query.get", "line_number": 85, "usage_type": "call"}, {"api_name": "app.models.stop_point.StopPoint.query", "line_number": 85, "usage_type": "attribute"}, {"api_name": "app.models.stop_point.StopPoint", "line_number": 85, "usage_type": "name"}, {"api_name": "app.utils.utils.error", "line_number": 87, "usage_type": "call"}, {"api_name": "app.utils.utils.Errors.OBJECT_NOT_FOUND_ERROR", "line_number": 87, "usage_type": "attribute"}, {"api_name": "app.utils.utils.Errors", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "app.application.db.session.delete", "line_number": 89, "usage_type": "call"}, {"api_name": "app.application.db.session", "line_number": 89, "usage_type": "attribute"}, {"api_name": "app.application.db", "line_number": 89, "usage_type": "name"}, {"api_name": "app.application.db.session.commit", "line_number": 90, "usage_type": "call"}, {"api_name": "app.application.db.session", "line_number": 90, "usage_type": "attribute"}, {"api_name": "app.application.db", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 91, "usage_type": "call"}, {"api_name": "app.models.location.location_schema.dump", "line_number": 91, "usage_type": "call"}, {"api_name": "app.models.location.location_schema", "line_number": 91, "usage_type": "name"}, {"api_name": "app.application.app.route", "line_number": 83, "usage_type": "call"}, {"api_name": "app.application.app", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 96, "usage_type": "call"}, {"api_name": "app.models.stop_point.stop_points_schema.dump", "line_number": 96, "usage_type": "call"}, {"api_name": "app.models.stop_point.stop_points_schema", "line_number": 96, "usage_type": "name"}, {"api_name": "app.models.location.Location.query.get", "line_number": 96, "usage_type": "call"}, {"api_name": "app.models.location.Location.query", "line_number": 96, "usage_type": "attribute"}, {"api_name": "app.models.location.Location", "line_number": 96, "usage_type": "name"}, {"api_name": "app.application.app.route", "line_number": 94, "usage_type": "call"}, {"api_name": "app.application.app", "line_number": 94, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 101, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 101, "usage_type": "name"}, {"api_name": "app.models.user.User.query.filter", "line_number": 105, "usage_type": "call"}, {"api_name": "app.models.user.User.query", "line_number": 105, "usage_type": "attribute"}, {"api_name": "app.models.user.User", "line_number": 105, "usage_type": "name"}, {"api_name": "app.models.user.User.username", "line_number": 105, 
"usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.exc.NoResultFound", "line_number": 106, "usage_type": "name"}, {"api_name": "app.utils.utils.error", "line_number": 107, "usage_type": "call"}, {"api_name": "app.utils.utils.Errors.AUTHENTICATION_ERROR", "line_number": 107, "usage_type": "attribute"}, {"api_name": "app.utils.utils.Errors", "line_number": 107, "usage_type": "name"}, {"api_name": "app.utils.utils.error", "line_number": 110, "usage_type": "call"}, {"api_name": "app.utils.utils.Errors.AUTHENTICATION_ERROR", "line_number": 110, "usage_type": "attribute"}, {"api_name": "app.utils.utils.Errors", "line_number": 110, "usage_type": "name"}, {"api_name": "app.models.auth_token.AuthToken", "line_number": 112, "usage_type": "call"}, {"api_name": "app.application.db.session.add", "line_number": 113, "usage_type": "call"}, {"api_name": "app.application.db.session", "line_number": 113, "usage_type": "attribute"}, {"api_name": "app.application.db", "line_number": 113, "usage_type": "name"}, {"api_name": "app.application.db.session.commit", "line_number": 114, "usage_type": "call"}, {"api_name": "app.application.db.session", "line_number": 114, "usage_type": "attribute"}, {"api_name": "app.application.db", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 116, "usage_type": "call"}, {"api_name": "app.application.app.route", "line_number": 99, "usage_type": "call"}, {"api_name": "app.application.app", "line_number": 99, "usage_type": "name"}, {"api_name": "app.application.app.url_map.iter_rules", "line_number": 122, "usage_type": "call"}, {"api_name": "app.application.app.url_map", "line_number": 122, "usage_type": "attribute"}, {"api_name": "app.application.app", "line_number": 122, "usage_type": "name"}, {"api_name": "app.utils.utils.has_no_empty_params", "line_number": 125, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 126, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 129, "usage_type": "call"}, {"api_name": "app.application.app.route", "line_number": 119, "usage_type": "call"}, {"api_name": "app.application.app", "line_number": 119, "usage_type": "name"}]} +{"seq_id": "308149368", "text": "import unittest\nfrom polyhedral_analysis.coordination_polyhedron import CoordinationPolyhedron\nfrom polyhedral_analysis.atom import Atom\nfrom pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import AbstractGeometry\nfrom unittest.mock import Mock, MagicMock, patch, PropertyMock\nimport copy\nimport numpy as np\nfrom pymatgen.core.sites import Site\n\ndef mock_atom_lt( self, other ):\n return self.index < other.index\n\ndef mock_atom_eq( self, other ):\n return self.index == other.index\n\nclass TestCoordinationPolyhedronInit( unittest.TestCase ):\n\n def test_coordination_polyhedron_is_initialised( self ):\n mock_central_atom = Mock( spec=Atom )\n mock_central_atom.in_polyhedra = []\n mock_central_atom.index = 10\n mock_central_atom.label = 'Li'\n mock_vertices = [ Mock( spec=Atom ) for i in range(6) ]\n for i, v in enumerate( mock_vertices, 1 ):\n v.neighbours = None\n v.__lt__ = mock_atom_lt\n v.index = i\n v.in_polyhedra = []\n with patch( 'polyhedral_analysis.coordination_polyhedron.CoordinationPolyhedron.construct_edge_graph' ) as mock_construct_edge_graph:\n mock_construct_edge_graph.return_value = { 0: [ 1, 2, 3, 4 ],\n 1: [ 0, 2, 3, 5 ],\n 2: [ 0, 1, 3, 5 ],\n 3: [ 0, 2, 4, 5 ],\n 4: [ 0, 1, 3, 5 ],\n 5: [ 1, 2, 3, 4 ] }\n with patch( 
'polyhedral_analysis.coordination_polyhedron.CoordinationPolyhedron.construct_abstract_geometry' ) as mock_construct_abstract_geometry:\n mock_construct_abstract_geometry.return_value = Mock( spec=AbstractGeometry )\n CoordinationPolyhedron( central_atom=mock_central_atom, \n vertices=mock_vertices )\n\nclass TestCoordinationPolyhedron( unittest.TestCase ):\n\n def setUp( self ):\n mock_central_atom = Mock( spec=Atom )\n mock_central_atom.in_polyhedra = []\n mock_central_atom.index = 0\n mock_central_atom.label = 'A'\n mock_central_atom.__eq__ = mock_atom_eq\n mock_vertices = [ Mock( spec=Atom ) for i in range(6) ]\n for i, v in enumerate( mock_vertices, 1 ):\n v.neighbours = None\n v.index = i\n v.__lt__ = mock_atom_lt\n v.__eq__ = mock_atom_eq\n v.in_polyhedra =[] \n with patch( 'polyhedral_analysis.coordination_polyhedron.CoordinationPolyhedron.construct_edge_graph' ) as mock_construct_edge_graph:\n mock_construct_edge_graph.return_value = { 0: [ 1, 2, 3, 4 ],\n 1: [ 0, 2, 3, 5 ],\n 2: [ 0, 1, 3, 5 ],\n 3: [ 0, 2, 4, 5 ],\n 4: [ 0, 1, 3, 5 ],\n 5: [ 1, 2, 3, 4 ] }\n with patch( 'polyhedral_analysis.coordination_polyhedron.CoordinationPolyhedron.construct_abstract_geometry' ) as mock_construct_abstract_geometry:\n mock_construct_abstract_geometry.return_value = Mock( spec=AbstractGeometry )\n self.coordination_polyhedron = CoordinationPolyhedron( \n central_atom=mock_central_atom,\n vertices=mock_vertices )\n\n def test_equal_members( self ):\n other_coordination_polyhedron = copy.deepcopy( self.coordination_polyhedron )\n other_coordination_polyhedron.vertices[0].neighbours = { 0: [ 1, 2, 3 ] }\n other_coordination_polyhedron.vertices[4].neighbours = { 4: [ 1, 3, 5 ] }\n self.assertTrue( self.coordination_polyhedron.equal_members(\n other_coordination_polyhedron ) )\n\n def test_vertex_vectors( self ):\n vectors = [ np.array( [ 1.0, 0.0, 0.0 ] ),\n np.array( [ 0.0, 1.0, 2.0 ] ) ]\n self.coordination_polyhedron.abstract_geometry.points_wocs_ctwocc.return_value = vectors\n returned_vectors = self.coordination_polyhedron.vertex_vectors\n for v1, v2 in zip( vectors, returned_vectors ):\n np.testing.assert_equal( v1, v2 )\n\n def test_angles( self ):\n vertex_vectors = np.array( [ [ 1.0, 0.0, 0.0 ],\n [ 0.0, 1.0, 0.0 ],\n [ 0.0, -1.0, 0.0 ] ] )\n with patch( 'polyhedral_analysis.coordination_polyhedron.CoordinationPolyhedron.vertex_vectors', new_callable=PropertyMock ) as mock_vertex_vectors:\n mock_vertex_vectors.return_value = vertex_vectors\n angles = self.coordination_polyhedron.angles()\n np.testing.assert_equal( angles, [ 90.0, 90.0, 180.0 ] )\n\n def test_vertex_distances( self ):\n mock_vertex_distances = [ 2.0, 1.0, 1.0, 1.0, 1.0, 1.5 ]\n self.coordination_polyhedron.central_atom.site = Mock( spec=Site )\n self.coordination_polyhedron.central_atom.site.distance = \\\n Mock( side_effect=mock_vertex_distances )\n for v in self.coordination_polyhedron.vertices:\n v.site = Mock( spec=Site )\n vertex_distances = self.coordination_polyhedron.vertex_distances()\n np.testing.assert_equal( vertex_distances, mock_vertex_distances )\n \n def test_vertex_distances_with_vertex_labels( self ):\n mock_vertex_distances = [ 2.0, 1.0, 1.0, 1.0, 1.0, 1.5 ]\n mock_labels = [ 'O', 'O', 'F', 'F', 'F', 'F' ]\n self.coordination_polyhedron.central_atom.site = Mock( spec=Site )\n self.coordination_polyhedron.central_atom.site.distance = \\\n Mock( side_effect=mock_vertex_distances )\n for v, mock_label in zip( self.coordination_polyhedron.vertices, mock_labels ):\n v.site = Mock( spec=Site )\n v.label = 
mock_label\n vertex_distances = self.coordination_polyhedron.vertex_distances( vertex_labels=True )\n np.testing.assert_equal( vertex_distances, \n list( zip( mock_vertex_distances, mock_labels ) ) )\n \nif __name__ == '__main__':\n unittest.main()\n\n", "sub_path": "tests/test_coordination_polyhedron.py", "file_name": "test_coordination_polyhedron.py", "file_ext": "py", "file_size_in_byte": 6409, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "unittest.TestCase", "line_number": 16, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 19, "usage_type": "call"}, {"api_name": "polyhedral_analysis.atom.Atom", "line_number": 19, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 23, "usage_type": "call"}, {"api_name": "polyhedral_analysis.atom.Atom", "line_number": 23, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 29, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 36, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 37, "usage_type": "call"}, {"api_name": "pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder.AbstractGeometry", "line_number": 37, "usage_type": "name"}, {"api_name": "polyhedral_analysis.coordination_polyhedron.CoordinationPolyhedron", "line_number": 38, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 41, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 44, "usage_type": "call"}, {"api_name": "polyhedral_analysis.atom.Atom", "line_number": 44, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 49, "usage_type": "call"}, {"api_name": "polyhedral_analysis.atom.Atom", "line_number": 49, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 56, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 63, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 64, "usage_type": "call"}, {"api_name": "pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder.AbstractGeometry", "line_number": 64, "usage_type": "name"}, {"api_name": "polyhedral_analysis.coordination_polyhedron.CoordinationPolyhedron", "line_number": 65, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.testing.assert_equal", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 88, "usage_type": "call"}, {"api_name": "unittest.mock.PropertyMock", "line_number": 88, "usage_type": "name"}, {"api_name": "numpy.testing.assert_equal", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 91, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 95, "usage_type": "call"}, {"api_name": "pymatgen.core.sites.Site", "line_number": 95, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 97, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 99, "usage_type": "call"}, {"api_name": "pymatgen.core.sites.Site", "line_number": 99, "usage_type": "name"}, {"api_name": 
"numpy.testing.assert_equal", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 101, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 106, "usage_type": "call"}, {"api_name": "pymatgen.core.sites.Site", "line_number": 106, "usage_type": "name"}, {"api_name": "unittest.mock.Mock", "line_number": 108, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 110, "usage_type": "call"}, {"api_name": "pymatgen.core.sites.Site", "line_number": 110, "usage_type": "name"}, {"api_name": "numpy.testing.assert_equal", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 113, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "218321162", "text": "# -------------- Timers ---------------------------\nfrom datetime import datetime, timedelta\nfrom json import dumps\nfrom unittest import TestCase\nimport dateutil\nfrom bson import ObjectId\nfrom goalboost.model.timer_models import TimerEntity, TimerDAO\nfrom goalboost.model.legacy_timer_models import LegacyTimer\nfrom test.common.test_helper import TestHelper, TestObjects\n\nclass TestTimerEntity(TestCase):\n\n def test_can_save_and_load_timer(self):\n user = TestObjects().get_test_user()\n t = TimerEntity(id=TestObjects().get_any_id(), notes=\"Saved from unit test\", user=user )\n t.save()\n t2 = TimerEntity.objects(id = t.id).first()\n assert(t.__repr__() == t2.__repr__())\n t.delete()\n\n def test_eval_ok(self):\n user = TestObjects().get_test_user()\n t1 = TimerEntity(id=ObjectId(b\"Timer1Timer2\"), notes=\"I want a shrubbery\", user=user)\n # print(t1.__repr__())\n t2 = eval(t1.__repr__())\n # Note this part works partly because compare is brain-dead, compares id only and only works for non-null id\n # But that may be what we need for MongoEngine purposes, so don't override\n assert(t1 == t2)\n # A better check\n assert(t1.__repr__() == t2.__repr__())\n # print(t1.to_json())\n\n def test_user_not_updated_on_save(self):\n user = TestObjects().get_test_user()\n t1 = TimerEntity(id=ObjectId(b\"Timer1Timer3\"), notes=\"I want a shrubbery\", user=user)\n t1.save()\n t1.user.password = \"foo\"\n t1.save()\n # TODO ETC...\n t1.delete()\n\n\n# This tests the new refactored TimerDAO that takes a TimerEntity\nclass TestTimerDAO(TestCase):\n def test_save_timer(self):\n dao = TimerDAO()\n t1 = TimerEntity(notes=\"My Test LegacyTimer Running!\", user=TestObjects().get_test_user(), running=True)\n dao.put(t1)\n assert(t1.id is not None)\n\n# Derive from object temporarily to disable\nclass TestTimerLegacy(TestCase):\n def test_can_create_with_utc_now(self):\n #userId = \"561dcd3c8c57cf2c17b7f4f9\"\n my_notes = \"I want to know how long this took, but my code is brain dead so far. 
Woe is me.\"\n timer = LegacyTimer(notes=my_notes)\n assert(my_notes == timer.notes)\n timer.save()\n\n def test_can_start_and_stop(self):\n notes = \"Testing start and stop\"\n t = LegacyTimer(notes=notes)\n t.start()\n t.stop()\n\n\n def test_can_create_with_explicit_start(self):\n my_notes = \"I am another timer\"\n timer = LegacyTimer(notes=my_notes, startTime=datetime(2007, 12, 5, 0, 0))\n assert(my_notes == timer.notes)\n assert(timer.startTime == timer.lastRestart)\n\n # timers created this way display an ugly ui bug -- so there's a bug either in the GUI or here or both.\n # Entered as\n # https://github.com/CodeSolid/Goalboost/issues/12\n def test_can_create_without_datastore(self):\n pass\n # Uncomment the following to see (and delete \"pass\" of course).\n # my_notes = \"We don't need no steenkin datastore.\"\n # timer = LegacyTimer(id=\"56259a278c57cf02f9692ccc\", userId = \"561dcd3c8c57cf2c17b7f4f9\", notes=my_notes)\n #\n # # This will be a bug that will appear in the UI -- no entry in the entries array is created for today!\n # # timer = LegacyTimer(id=\"56259a278c57cf02f9692ccc\", userId = \"561dcd3c8c57cf2c17b7f4f9\", notes=my_notes, startTime=datetime(2007, 12, 5, 0, 0))\n # timer.save()\n # timer2 = LegacyTimer.objects(id=\"56259a278c57cf02f9692ccc\").first()\n # assert(timer2.notes == timer.notes)\n # assert(my_notes == timer.notes)\n # assert(timer.startTime == timer.lastRestart)\n\n # Don't run in debugger, a breakpoint in right place will throw off elapsed calculation.\n # Otherwise elapsed converts to int, which shaves off any \"running time\" error\n def test_elapsed_time_correct(self):\n now = datetime.utcnow()\n tenSecondsAgo = now - timedelta(seconds=10)\n # LegacyTimer must be running or elapsed time will be zero\n timer = LegacyTimer(startTime = tenSecondsAgo, running=True)\n timer.set_seconds_today(20)\n elapsed = timer.current_elapsed()\n total = timer.total_elapsed()\n assert(elapsed == 10)\n assert(total == 30)\n\n def test_to_api_dict_correct(self):\n start_time = dateutil.parser.parse('2008-09-03T20:00:00.000000Z')\n # LegacyTimer must be running or elapsed time will be zero\n timer = LegacyTimer(startTime = start_time, running=True, id=ObjectId(\"56259a278c57cf02f9692b31\"))\n d = timer.to_api_dict()\n json = dumps(d)\n assert('\"notes\": null' in json)\n assert('\"id\": \"56259a278c57cf02f9692b31\"' in json)\n assert('\"entries\": []' in json)\n #assert('\"seconds\": 20' in json)\n timer.notes = \"Testing the JSON!\"\n timer.set_seconds_today(99)\n d = timer.to_api_dict()\n json = dumps(d)\n assert('\"notes\": \"Testing the JSON!\"' in json)\n assert('\"seconds\": 99' in json)\n\n def test_can_load_from_api_dict(self):\n start_time = dateutil.parser.parse('2008-09-03T20:00:00.000000Z')\n # LegacyTimer must be running or elapsed time will be zero\n timer = LegacyTimer(startTime = start_time, running=True, id=ObjectId(\"56259a278c57cf02f9692b31\"))\n timer.set_seconds_today(99)\n d = timer.to_api_dict()\n t2 = LegacyTimer.load_from_dict(d)\n assert(timer.notes == t2.notes)\n assert(timer.id == t2.id)\n assert(timer.entries[0].dateRecorded == t2.entries[0].dateRecorded)\n assert(len(timer.entries) == len(t2.entries))\n assert(timer.entries[0].seconds == t2.entries[0].seconds)\n d[\"notes\"] = \"Testing\"\n t2 = LegacyTimer.load_from_dict(d)\n assert(t2.notes == \"Testing\")\n", "sub_path": "test/unit/model/test_timer_model.py", "file_name": "test_timer_model.py", "file_ext": "py", "file_size_in_byte": 5860, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "name"}, {"api_name": "test.common.test_helper.TestObjects", "line_number": 14, "usage_type": "call"}, {"api_name": "goalboost.model.timer_models.TimerEntity", "line_number": 15, "usage_type": "call"}, {"api_name": "test.common.test_helper.TestObjects", "line_number": 15, "usage_type": "call"}, {"api_name": "goalboost.model.timer_models.TimerEntity.objects", "line_number": 17, "usage_type": "call"}, {"api_name": "goalboost.model.timer_models.TimerEntity", "line_number": 17, "usage_type": "name"}, {"api_name": "test.common.test_helper.TestObjects", "line_number": 22, "usage_type": "call"}, {"api_name": "goalboost.model.timer_models.TimerEntity", "line_number": 23, "usage_type": "call"}, {"api_name": "bson.ObjectId", "line_number": 23, "usage_type": "call"}, {"api_name": "test.common.test_helper.TestObjects", "line_number": 34, "usage_type": "call"}, {"api_name": "goalboost.model.timer_models.TimerEntity", "line_number": 35, "usage_type": "call"}, {"api_name": "bson.ObjectId", "line_number": 35, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 44, "usage_type": "name"}, {"api_name": "goalboost.model.timer_models.TimerDAO", "line_number": 46, "usage_type": "call"}, {"api_name": "goalboost.model.timer_models.TimerEntity", "line_number": 47, "usage_type": "call"}, {"api_name": "test.common.test_helper.TestObjects", "line_number": 47, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 52, "usage_type": "name"}, {"api_name": "goalboost.model.legacy_timer_models.LegacyTimer", "line_number": 56, "usage_type": "call"}, {"api_name": "goalboost.model.legacy_timer_models.LegacyTimer", "line_number": 62, "usage_type": "call"}, {"api_name": "goalboost.model.legacy_timer_models.LegacyTimer", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 93, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 94, "usage_type": "call"}, {"api_name": "goalboost.model.legacy_timer_models.LegacyTimer", "line_number": 96, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 104, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 104, "usage_type": "attribute"}, {"api_name": "goalboost.model.legacy_timer_models.LegacyTimer", "line_number": 106, "usage_type": "call"}, {"api_name": "bson.ObjectId", "line_number": 106, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 108, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 116, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 121, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 121, "usage_type": "attribute"}, {"api_name": "goalboost.model.legacy_timer_models.LegacyTimer", "line_number": 123, "usage_type": "call"}, {"api_name": "bson.ObjectId", "line_number": 123, "usage_type": "call"}, {"api_name": "goalboost.model.legacy_timer_models.LegacyTimer.load_from_dict", "line_number": 126, "usage_type": "call"}, {"api_name": "goalboost.model.legacy_timer_models.LegacyTimer", "line_number": 126, "usage_type": "name"}, {"api_name": "goalboost.model.legacy_timer_models.LegacyTimer.load_from_dict", "line_number": 133, "usage_type": "call"}, {"api_name": 
"goalboost.model.legacy_timer_models.LegacyTimer", "line_number": 133, "usage_type": "name"}]} +{"seq_id": "640551460", "text": "#_*_ encoding: utf-8 _*_\nfrom django.shortcuts import render\nfrom django.template.loader import get_template\nfrom django.http import HttpResponse\nfrom datetime import datetime\n\n# Create your views here.\ndef index(request, tvno='0'):\n\ttv_list = [{'name':'CCTV News', 'tvcode':'yPhFG2I0dE0'},\n\t\t\t{'name':'CCTV中文国际', 'tvcode':'E1DTZBy4xr4'},]\n\n\ttemplate = get_template('index.html')\n\tnow = datetime.now()\n\thour = now.timetuple().tm_hour\n\ttvno = tvno\n\ttv = tv_list[int(tvno)]\n\thtml = template.render(locals())\n\n\treturn HttpResponse(html)\n\ndef engtv(request, tvno='0'):\n\ttv_list = [{'name':'SkyNews', 'tvcode':'y60wDzZt8yg'},\n\t\t\t{'name':'Euro News', 'tvcode':'mWdKb7255Bs'},\n\t\t\t{'name':'India News', 'tvcode':'oMncjfIE-ZU'},\n\t\t\t{'name':'CCTV', 'tvcode':'wuzZYzSoEEU'},]\n\n\ttemplate = get_template('engtv.html')\n\tnow = datetime.now()\n\ttvno = tvno\n\ttv = tv_list[int(tvno)]\n\thtml = template.render(locals())\n\n\treturn HttpResponse(html)\n\n", "sub_path": "django/djangoStudy/mtv/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 936, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.template.loader.get_template", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 19, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "370534503", "text": "import uuid\nimport route as r\nimport validate as vld\nimport voicer as v\nimport file_loger as fl\nimport database_loger as db\n\nFILE_LOG = int(1)\nDB_LOG = int()\n\nPHONE = str()\nPATH = str()\nSTAGE = str()\n\nif __name__ == \"__main__\":\n arguments = r.runRouting()\n\n if vld.validate(arguments):\n PHONE = arguments.phone\n DB_LOG = arguments.database\n PATH = arguments.path\n STAGE = arguments.stage\n\n message = v.readingSound(PATH)\n status, response, duration = v.analyseMessage(message, STAGE)\n logMessage = {\n 'uid': str(uuid.uuid4()),\n 'stage': 'stage №' + str(STAGE),\n 'phone': PHONE,\n 'duration': duration,\n 'status': status\n }\n fl.logInfo(\"{uid} - {stage} - {phone} - {duration} - {status}\".format(**logMessage))\n if(DB_LOG == 1):\n db.logInfo(logMessage)\n r.removeFileByPath(PATH)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 951, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "route.runRouting", "line_number": 16, "usage_type": "call"}, {"api_name": "validate.validate", "line_number": 18, "usage_type": "call"}, {"api_name": "voicer.readingSound", "line_number": 24, "usage_type": "call"}, {"api_name": "voicer.analyseMessage", "line_number": 25, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 27, "usage_type": "call"}, {"api_name": "file_loger.logInfo", "line_number": 33, "usage_type": "call"}, {"api_name": 
"database_loger.logInfo", "line_number": 35, "usage_type": "call"}, {"api_name": "route.removeFileByPath", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "71864876", "text": "import logging\nimport sqlite3\n\nimport discord\nfrom discord.ext import commands\n\nfrom .extensions import INIT_EXTENSIONS\nfrom .helpers.database import init_db\nfrom .settings import DEBUG, COMMAND_PREFIX, TOKEN\n\nlog = logging.getLogger(__name__)\n\nbot = commands.Bot(command_prefix=COMMAND_PREFIX)\n\n# Load extensions\nlog.debug(\"Loading default extensions...\")\nif DEBUG is True:\n log.info(\"=== DEBUG MODE ENABLED ===\")\n # Add debug-specific code/extensions here\n\nfor ext in INIT_EXTENSIONS:\n log.debug(f\"Loading {ext}...\")\n bot.load_extension(ext)\n\nlog.debug(\"Default extensions loaded.\")\n\n# Initialize Database\ninit_db()\n\n\n@bot.event\nasync def on_ready():\n \"\"\"\n Execute on bot initialization with the Discord API. This may happen more than once.\n \"\"\"\n log.info(f\"Started as {bot.user}\")\n\n\n@bot.command()\n@commands.is_owner()\nasync def reload(ctx: commands.context):\n \"\"\"\n Reload default extensions (cogs)\n\n :param ctx: Discord Context\n \"\"\"\n async with ctx.channel.typing():\n log.info(\"Reloading Extensions...\")\n\n msg = await ctx.send(\n embed=discord.Embed(\n title=\"Reloading extensions...\", color=discord.Color.orange()\n )\n )\n\n for extension in INIT_EXTENSIONS:\n from discord.ext.commands import (\n ExtensionNotLoaded,\n ExtensionNotFound,\n ExtensionFailed,\n )\n\n try:\n bot.reload_extension(extension)\n except (\n ExtensionNotLoaded,\n ExtensionNotFound,\n ExtensionFailed,\n ) as e:\n log.exception(e)\n await ctx.send(\n embed=discord.Embed(\n title=f\"Module {extension} failed to reload\",\n color=discord.Color.red(),\n )\n )\n log.debug(f\"{extension} reloaded\")\n\n try:\n log.info(\"Re-initializing database\")\n init_db()\n except sqlite3.OperationalError:\n await ctx.send(\n embed=discord.Embed(\n title=f\"Database failed to re-initialize (i.e. 
upgrade)\",\n color=discord.Color.red(),\n )\n )\n\n await msg.delete()\n await ctx.send(\n embed=discord.Embed(title=\"Reload Successful\", color=discord.Color.green())\n )\n log.info(\"Reloading complete.\")\n\n\nbot.run(TOKEN)\n", "sub_path": "bot/bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 2520, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "discord.ext.commands.Bot", "line_number": 13, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 13, "usage_type": "name"}, {"api_name": "settings.COMMAND_PREFIX", "line_number": 13, "usage_type": "name"}, {"api_name": "settings.DEBUG", "line_number": 17, "usage_type": "name"}, {"api_name": "extensions.INIT_EXTENSIONS", "line_number": 21, "usage_type": "name"}, {"api_name": "helpers.database.init_db", "line_number": 28, "usage_type": "call"}, {"api_name": "discord.ext.commands.context", "line_number": 41, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 41, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 51, "usage_type": "call"}, {"api_name": "discord.Color.orange", "line_number": 52, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 52, "usage_type": "attribute"}, {"api_name": "extensions.INIT_EXTENSIONS", "line_number": 56, "usage_type": "name"}, {"api_name": "discord.ext.commands.ExtensionNotLoaded", "line_number": 66, "usage_type": "name"}, {"api_name": "discord.ext.commands.ExtensionNotFound", "line_number": 67, "usage_type": "name"}, {"api_name": "discord.ext.commands.ExtensionFailed", "line_number": 68, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 72, "usage_type": "call"}, {"api_name": "discord.Color.red", "line_number": 74, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 74, "usage_type": "attribute"}, {"api_name": "helpers.database.init_db", "line_number": 81, "usage_type": "call"}, {"api_name": "sqlite3.OperationalError", "line_number": 82, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 84, "usage_type": "call"}, {"api_name": "discord.Color.red", "line_number": 86, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 86, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 92, "usage_type": "call"}, {"api_name": "discord.Color.green", "line_number": 92, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 92, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.is_owner", "line_number": 40, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 40, "usage_type": "name"}, {"api_name": "settings.TOKEN", "line_number": 97, "usage_type": "argument"}]} +{"seq_id": "29760291", "text": "from django.urls import path, include\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.home),\n path(\"invite/\", views.invite),\n path(\"discord/\", views.discord),\n path(\"info/maker/\", views.maker),\n path(\"info/bot/\", views.bot),\n path(\"user//\", views.search_user),\n path(\"search/\", views.search),\n]\n", "sub_path": "app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 334, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "288631901", "text": "from entity import Entity\nfrom util import *\nfrom components import *\nfrom pyglet.window import key\n\nclass Limb(Entity):\n \"\"\"\n A limb of an Actor\n Contains a mesh and a collision component\n \"\"\"\n def __init__(self, eman, mesh):\n super().__init__(eman)\n\n self.addComponent(mesh)\n self.addComponent(CollisionComponent(\"limb\", [\"weapon\",\"ground\"], useAABB=False, AABB=None, collidable=True))\n self.addComponent(SVAComponent())\n\n\nclass Actor(Entity):\n \"\"\"\n An Actor is an player controlled entity.\n Actors the following Limbs:\n Head\n L Arm\n R Arm\n L Leg\n R Leg\n Body\n\n \"\"\"\n def __init__(self, eman):\n super().__init__(eman)\n # Head\n self.head = Limb(eman,\n MeshComponent(shape=Rectangle(10, 30, Vector(50,130, 0), CENTER, [Color(255,255,255)*4], None)))\n # Body\n self.body = Limb(eman,\n MeshComponent(shape=Rectangle(30, 50, Vector(40,100, 0), TOPLEFT, [Color(255,255,0)*4], None)))\n # Arms\n self.larm = Limb(eman,\n MeshComponent(shape=Rectangle(20, 30, Vector(40,100, 0), TOPRIGHT, [Color(255,0,0)*4], None)))\n self.rarm = Limb(eman,\n MeshComponent(shape=Rectangle(20, 30, Vector(70, 100, 0), TOPLEFT, [Color(255,0,255)*4], None)))\n # Legs\n self.lleg = Limb(eman,\n MeshComponent(shape=Rectangle(20, 50, Vector(30, 50, 0), TOPLEFT, [Color(128,0,255)*4], None)))\n self.rleg = Limb(eman,\n MeshComponent(shape=Rectangle(20, 50, Vector(60, 50, 0), TOPLEFT, [Color(255,0,128)*4], None)))\n\n self.addComponent(KeyHoldComponent({Key(key.Q, 0): [\n\"\"\"\nowner.larm.getSingleComponentByType(SVAComponent).OMEGA = Vector(0,0,3)\n\"\"\",\n\"\"\"\nowner.larm.getSingleComponentByType(SVAComponent).OMEGA = Vector(0,0,0)\n\"\"\"\n]}))\n self.addComponent(KeyHoldComponent({Key(key.W, 0): [\n\"\"\"\nowner.rarm.getSingleComponentByType(SVAComponent).OMEGA = Vector(0,0,3)\n\"\"\",\n\"\"\"\nowner.rarm.getSingleComponentByType(SVAComponent).OMEGA = Vector(0,0,0)\n\"\"\"\n]}))\n self.addComponent(KeyHoldComponent({Key(key.A, 0): [\n\"\"\"\nowner.lleg.getSingleComponentByType(SVAComponent).OMEGA = Vector(0,0,3)\n\"\"\",\n\"\"\"\nowner.lleg.getSingleComponentByType(SVAComponent).OMEGA = Vector(0,0,0)\n\"\"\"\n]}))\n self.addComponent(KeyHoldComponent({Key(key.S, 0): [\n\"\"\"\nowner.rleg.getSingleComponentByType(SVAComponent).OMEGA = Vector(0,0,3)\n\"\"\",\n\"\"\"\nowner.rleg.getSingleComponentByType(SVAComponent).OMEGA = Vector(0,0,0)\n\"\"\"\n]}))\n\n def addSVAComponent(self,\n position=Vector(0,0,0), velocity=Vector(0,0,0), 
acceleration=Vector(0,0,0),\n a_position=Vector(0,0,0), a_velocity=Vector(0,0,0), a_acceleration=Vector(0,0,0),\n bounded=True):\n self.addComponent(SVAComponent(position, velocity, acceleration, a_position, a_velocity, a_acceleration, bounded))\n\n def interact(self, other):\n pass\n", "sub_path": "source/entities/actor.py", "file_name": "actor.py", "file_ext": "py", "file_size_in_byte": 3092, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "entity.Entity", "line_number": 6, "usage_type": "name"}, {"api_name": "entity.Entity", "line_number": 19, "usage_type": "name"}, {"api_name": "pyglet.window.key.Q", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pyglet.window.key", "line_number": 50, "usage_type": "name"}, {"api_name": "pyglet.window.key.W", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pyglet.window.key", "line_number": 58, "usage_type": "name"}, {"api_name": "pyglet.window.key.A", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pyglet.window.key", "line_number": 66, "usage_type": "name"}, {"api_name": "pyglet.window.key.S", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pyglet.window.key", "line_number": 74, "usage_type": "name"}]} +{"seq_id": "277964083", "text": "from django.forms import ModelForm, Textarea\nfrom csm.models import Issue, UserOnProject, Label, Comment, Commit\nfrom django.db.models import Q\nfrom django.forms.extras.widgets import SelectDateWidget\n\n\n\n\nclass IssueCreateForm(ModelForm):\n\n def __init__(self,issue,*args,**kwargs):\n super (IssueCreateForm,self ).__init__(*args,**kwargs) # populates the post\n self.fields['assigned_users'].queryset = UserOnProject.objects.filter(project_id = issue.project_id)\n self.fields['labels'].queryset = Label.objects.filter(Q(project_id = issue.project_id) | Q(project_id = None))\n\n class Meta:\n model = Issue\n fields = ['title', 'description', 'assigned_users', 'labels', 'date_end']\n widgets = {\n 'description': Textarea(attrs={'cols': 50, 'rows': 10}),\n 'date_end' : SelectDateWidget()\n }\n\n\nclass IssueEditForm(ModelForm):\n\n def __init__(self,issue,*args,**kwargs):\n super (IssueEditForm,self ).__init__(*args,**kwargs) # populates the post\n self.fields['assigned_users'].queryset = UserOnProject.objects.filter(project_id = issue.project_id)\n self.fields['labels'].queryset = Label.objects.filter(Q(project_id = issue.project_id) | Q(project_id = None))\n\n class Meta:\n model = Issue\n fields = ['title', 'description', 'assigned_users', 'opened', 'completed', 'labels', 'date_end']\n widgets = {\n 'description': Textarea(attrs={'cols': 50, 'rows': 10}),\n 'date_end' : SelectDateWidget()\n }\n\n\nclass CommentForm(ModelForm):\n\n class Meta:\n model = Comment\n fields = ['body']\n widgets = {\n 'body': Textarea(attrs={'cols': 50, 'rows': 10}),\n }\n\n\nclass CommitForm(ModelForm):\n\n class Meta:\n model = Commit\n fields = ['link']\n ", "sub_path": "CSMProject/csm/forms/issue_forms.py", "file_name": "issue_forms.py", "file_ext": "py", "file_size_in_byte": 1830, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.forms.ModelForm", "line_number": 9, "usage_type": "name"}, {"api_name": "csm.models.UserOnProject.objects.filter", "line_number": 13, "usage_type": "call"}, {"api_name": "csm.models.UserOnProject.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "csm.models.UserOnProject", 
"line_number": 13, "usage_type": "name"}, {"api_name": "csm.models.Label.objects.filter", "line_number": 14, "usage_type": "call"}, {"api_name": "csm.models.Label.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "csm.models.Label", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 14, "usage_type": "call"}, {"api_name": "csm.models.Issue", "line_number": 17, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 20, "usage_type": "call"}, {"api_name": "django.forms.extras.widgets.SelectDateWidget", "line_number": 21, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 25, "usage_type": "name"}, {"api_name": "csm.models.UserOnProject.objects.filter", "line_number": 29, "usage_type": "call"}, {"api_name": "csm.models.UserOnProject.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "csm.models.UserOnProject", "line_number": 29, "usage_type": "name"}, {"api_name": "csm.models.Label.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "csm.models.Label.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "csm.models.Label", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 30, "usage_type": "call"}, {"api_name": "csm.models.Issue", "line_number": 33, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 36, "usage_type": "call"}, {"api_name": "django.forms.extras.widgets.SelectDateWidget", "line_number": 37, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 41, "usage_type": "name"}, {"api_name": "csm.models.Comment", "line_number": 44, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 47, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 51, "usage_type": "name"}, {"api_name": "csm.models.Commit", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "420573472", "text": "'''\n Copyright (c) 2016-2017 Wind River Systems, Inc.\n \n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at:\n http://www.apache.org/licenses/LICENSE-2.0\n \n Unless required by applicable law or agreed to in writing, software distributed\n under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES\n OR CONDITIONS OF ANY KIND, either express or implied.\n'''\n\n\"\"\"\nThis module contains the Relay class which is a secure way to pipe data to a\nlocal socket connection. This is useful for Telnet which is not secure by\ndefault.\n\"\"\"\n\nimport logging\nimport random\nimport select\nimport socket\nimport ssl\nimport threading\nimport time\nimport sys\n# -------------------------------------------------------------------\n# Note: when using a proxy server, the socket class is overlayed with\n# pysocks class. Keep around a local copy so that local socket\n# connections don't use the proxy\n# -------------------------------------------------------------------\nnon_proxy_socket = None\n\n# yocto supports websockets, not websocket, so check for that\ntry:\n import websocket\nexcept ImportError:\n import websockets as websocket\n\nCONNECT_MSG = \"CONNECTED-129812\"\n\nclass Relay(object):\n \"\"\"\n Class for establishing a secure pipe between a cloud based websocket and a\n local socket. 
This is useful for things like Telnet which are not secure to\n use remotely.\n \"\"\"\n\n def __init__(self, wsock_host, sock_host, sock_port, secure=True,\n log=None, local_socket=None, reconnect=False):\n \"\"\"\n Initialize a relay object for piping data between a websocket and a\n local socket\n \"\"\"\n\n self.wsock_host = wsock_host\n self.sock_host = sock_host\n self.sock_port = sock_port\n self.secure = secure\n self.log = log\n self.proxy = None\n self.log_name = \"Relay:{}:{}({:0>5})\".format(self.sock_host,\n self.sock_port,\n random.randint(0,99999))\n self.reconnect = reconnect\n if self.log is None:\n self.logger = logging.getLogger(self.log_name)\n log_handler = logging.StreamHandler()\n #log_formatter = logging.Formatter(constants.LOG_FORMAT, datefmt=constants.LOG_TIME_FORMAT)\n #log_handler.setFormatter(log_formatter)\n self.logger.addHandler(log_handler)\n self.logger.setLevel(logging.DEBUG)\n self.log = self.logger.log\n\n self.running = False\n self.thread = None\n self.ws_thread = None\n self.lsock = None\n self.wsock = None\n self.lconnect = 0\n\n def _connect_local(self):\n ret = False\n try:\n # check for proxy. If not proxy, this\n # is None.\n if non_proxy_socket:\n self.lsock = non_proxy_socket(socket.AF_INET,\n socket.SOCK_STREAM)\n else:\n self.lsock = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM)\n\n self.lsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)\n self.lsock.connect((self.sock_host,\n self.sock_port))\n self.lsock.setblocking(0)\n except socket.error as err:\n self.running = False\n ret = True\n self.log(logging.ERROR, \"{} Failed to open local socket.\".format(self.log_name))\n self.log(logging.ERROR, \"Reason: {} \".format(str(err)))\n return ret\n\n def _on_local_message(self):\n \"\"\"\n Main loop that pipes all data from one socket to the next. The\n websocket connection is established first and has its own\n callback, so this is where the local socket will be handled.\n \"\"\"\n # ws data must be in binary format. 
The websocket lib uses\n # this op code\n op_binary = 0x2\n while self.running is True:\n if self.lsock:\n socket_list = [self.lsock]\n read_sockets, write_sockets, _es = select.select(socket_list, [], [], 1)\n if len(read_sockets):\n try:\n data = self.lsock.recv(4096)\n except:\n # during a close a read might return a EBADF,\n # that is ok, pass it don't dump an exception\n pass\n if data:\n self.log(logging.DEBUG, \"_on_local_message: send {} -> ws\".format(len(data)))\n try:\n self.wsock.send(data, opcode=op_binary)\n except websocket.WebSocketConnectionClosedException:\n self.log(logging.ERROR, \"Websocket closed\")\n break\n else:\n self.log(logging.INFO, \"{}: Received NULL from local socket\".format(self.log_name))\n if self.reconnect and self.running:\n self.log(logging.INFO, \"Reconnecting local socket\")\n time.sleep(2)\n self._connect_local()\n else:\n self.running = False\n break\n else:\n time.sleep(1)\n if self.lsock:\n self.lsock.close()\n self.lsock = None\n if self.wsock:\n self.wsock.close()\n self.wsock = None\n self.log(logging.INFO, \"{} - Sockets Closed\".format(self.log_name))\n\n def _on_open(self, ws):\n self.log(logging.INFO, \"_on_open: starting thread loop\")\n self.thread = threading.Thread(target=self._on_local_message)\n self.thread.start()\n\n def _on_message(self, ws, data):\n\n if data:\n if data == CONNECT_MSG:\n # If the local socket has not been established yet,\n # and we have received the connection string, start\n # local socket.\n self._connect_local()\n self.lconnect = 1;\n self.log(logging.DEBUG, \"{} Local socket opened\".format(self.log_name))\n else:\n # send to local socket\n self.log(logging.DEBUG, \"_on_message: send {} -> local socket\".format(len(data)))\n\n # py3 data of type string needs to be byte encoded\n if isinstance(data, str) and sys.version_info[0] > 2:\n data = bytes(data, 'utf-8')\n self.lsock.send(data)\n\n def _on_error(self, ws, exception):\n self.log(logging.ERROR, \"_on_error: exception {}\".format(str(exception)))\n if self.lsock:\n self.lsock.close()\n if self.wsock:\n self.wsock.close()\n self.stop()\n\n def _on_close(self, ws):\n self.log(logging.INFO,\"_on_close: websocket closed\")\n if self.lsock:\n self.lsock.close()\n self.running = False\n\n def start(self):\n \"\"\"\n Establish the websocket connection and start the main loop\n \"\"\"\n\n if not self.running:\n self.running = True\n sslopt = {}\n if not self.secure:\n sslopt[\"cert_reqs\"] = ssl.CERT_NONE\n self.wsock = websocket.WebSocketApp(\n self.wsock_host,\n on_message=self._on_message,\n on_error=self._on_error,\n on_close=self._on_close,\n on_open=self._on_open)\n kwargs = {'sslopt': sslopt}\n if self.proxy:\n self.log(logging.DEBUG, \"start:self.proxy={} \".format(self.proxy)),\n kwargs['http_proxy_host'] = self.proxy.host\n kwargs['http_proxy_port'] = self.proxy.port\n self.ws_thread = threading.Thread(target=self.wsock.run_forever, kwargs=kwargs)\n self.ws_thread.start()\n else:\n raise RuntimeError(\"{} - Already running!\".format(self.log_name))\n\n def stop(self):\n \"\"\"\n Stop piping data between the two connections and stop the loop thread\n \"\"\"\n\n self.log(logging.INFO, \"{} Stopping\".format(self.log_name))\n self.running = False\n self.reconnect = False\n if self.thread:\n self.thread.join()\n self.thread = None\n if self.ws_thread:\n self.ws_thread.join()\n self.ws_thread = None\n\n\nrelays = []\n\ndef create_relay(url, host, port, secure=True, log_func=None, local_socket=None,\n reconnect=False, proxy=None):\n global relays, 
non_proxy_socket\n\n non_proxy_socket = local_socket\n newrelay = Relay(url, host, port, secure=secure, log=log_func, reconnect=reconnect)\n if proxy:\n newrelay.proxy = proxy\n newrelay.start()\n relays.append(newrelay)\n\ndef stop_relays():\n global relays\n\n threads = []\n while relays:\n relay = relays.pop()\n thread = threading.Thread(target=relay.stop)\n thread.start()\n threads.append(thread)\n\n for thread in threads:\n thread.join()\n\n", "sub_path": "device_cloud/relay.py", "file_name": "relay.py", "file_ext": "py", "file_size_in_byte": 9344, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "random.randint", "line_number": 65, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 68, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 69, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 73, "usage_type": "attribute"}, {"api_name": "socket.AF_INET", "line_number": 89, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 90, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 92, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 92, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 93, "usage_type": "attribute"}, {"api_name": "socket.IPPROTO_TCP", "line_number": 95, "usage_type": "attribute"}, {"api_name": "socket.TCP_NODELAY", "line_number": 95, "usage_type": "attribute"}, {"api_name": "socket.error", "line_number": 99, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 102, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 103, "usage_type": "attribute"}, {"api_name": "select.select", "line_number": 118, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 127, "usage_type": "attribute"}, {"api_name": "websockets.WebSocketConnectionClosedException", "line_number": 130, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 131, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 134, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 136, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 137, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 143, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 150, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 153, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 154, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 166, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 169, "usage_type": "attribute"}, {"api_name": "sys.version_info", "line_number": 172, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 177, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 185, "usage_type": "attribute"}, {"api_name": "ssl.CERT_NONE", "line_number": 199, "usage_type": "attribute"}, {"api_name": "websockets.WebSocketApp", "line_number": 200, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 208, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 211, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 221, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 251, "usage_type": "call"}]} +{"seq_id": "510908711", "text": "import asyncio\nimport 
json\nimport re\nimport sys\nfrom os.path import dirname, join\nfrom typing import Any, Coroutine, Dict, List, cast\nfrom unittest import TestCase\nfrom unittest.mock import patch\n\nfrom hubitatmaker.hub import Hub, InvalidConfig\n\nwith open(join(dirname(__file__), \"hub_edit.html\")) as f:\n hub_edit_page = f.read()\n\nwith open(join(dirname(__file__), \"devices.json\")) as f:\n devices = f.read()\n\nwith open(join(dirname(__file__), \"device_details.json\")) as f:\n device_details = json.loads(f.read())\n\nwith open(join(dirname(__file__), \"events.json\")) as f:\n events = json.loads(f.read())\n\n\ndef run(cr: Coroutine) -> Any:\n return asyncio.get_event_loop().run_until_complete(cr)\n\n\nclass FakeResponse:\n def __init__(self, status=200, text: str = \"\"):\n self.status = status\n self._text = text\n\n async def json(self):\n return json.loads(self._text)\n\n async def text(self):\n return self._text\n\n\nclass FakeServer:\n url = \"http://localhost:9999\"\n\n\nrequests: List[Dict[str, Any]] = []\n\n\nclass fake_request:\n def __init__(self, method: str, url: str, **kwargs: Any):\n data = kwargs\n\n if url.endswith(\"/hub/edit\"):\n self.response = FakeResponse(text=hub_edit_page)\n elif url.endswith(\"/devices\"):\n self.response = FakeResponse(text=devices)\n else:\n dev_match = re.match(\".*/devices/(\\\\d+)$\", url)\n if dev_match:\n dev_id = dev_match.group(1)\n self.response = FakeResponse(\n text=json.dumps(device_details.get(dev_id, {}))\n )\n else:\n self.response = FakeResponse(text=\"{}\")\n\n requests.append({\"method\": method, \"url\": url, \"data\": kwargs})\n\n async def __aenter__(self):\n return self.response\n\n async def __aexit__(self, exc_type, exc, tb):\n pass\n\n\ndef fake_get_mac_address(**kwargs: str):\n return \"aa:bb:cc:dd:ee:ff\"\n\n\nclass TestHub(TestCase):\n def setUp(self):\n requests = []\n\n def test_hub_checks_arguments(self) -> None:\n \"\"\"The hub should check for its required inputs.\"\"\"\n self.assertRaises(InvalidConfig, Hub, \"\", \"1234\", \"token\")\n self.assertRaises(InvalidConfig, Hub, \"1.2.3.4\", \"\", \"token\")\n self.assertRaises(InvalidConfig, Hub, \"1.2.3.4\", \"1234\", \"\")\n Hub(\"1.2.3.4\", \"1234\", \"token\")\n\n @patch(\"getmac.get_mac_address\", new=fake_get_mac_address)\n def test_initial_values(self) -> None:\n \"\"\"Hub properties should have expected initial values.\"\"\"\n hub = Hub(\"1.2.3.4\", \"1234\", \"token\")\n self.assertEqual(list(hub.devices), [])\n self.assertEqual(hub.mac, \"aa:bb:cc:dd:ee:ff\")\n\n @patch(\"aiohttp.request\", new=fake_request)\n @patch(\"getmac.get_mac_address\", new=fake_get_mac_address)\n @patch(\"hubitatmaker.server.Server\")\n def test_start_server(self, MockServer) -> None:\n \"\"\"Hub should start a server when asked to.\"\"\"\n hub = Hub(\"1.2.3.4\", \"1234\", \"token\", True)\n run(hub.start())\n self.assertTrue(MockServer.called)\n\n @patch(\"aiohttp.request\", new=fake_request)\n @patch(\"getmac.get_mac_address\", new=fake_get_mac_address)\n @patch(\"hubitatmaker.server.Server\")\n def test_start(self, MockServer) -> None:\n \"\"\"start() should request data from the Hubitat hub.\"\"\"\n hub = Hub(\"1.2.3.4\", \"1234\", \"token\")\n run(hub.start())\n # 33 requests - 1 to get device list, 32 to update devices\n self.assertEqual(len(requests), 33)\n self.assertRegex(requests[1][\"url\"], \"devices$\")\n self.assertRegex(requests[2][\"url\"], \"devices/\\d+$\")\n self.assertRegex(requests[-1][\"url\"], \"devices/\\d+$\")\n\n @patch(\"aiohttp.request\", new=fake_request)\n 
@patch(\"getmac.get_mac_address\", new=fake_get_mac_address)\n @patch(\"hubitatmaker.server.Server\")\n def test_stop_server(self, MockServer) -> None:\n \"\"\"Hub should stop a server when stopped.\"\"\"\n hub = Hub(\"1.2.3.4\", \"1234\", \"token\", True)\n run(hub.start())\n self.assertTrue(MockServer.return_value.start.called)\n hub.stop()\n self.assertTrue(MockServer.return_value.stop.called)\n\n @patch(\"aiohttp.request\", new=fake_request)\n @patch(\"getmac.get_mac_address\", new=fake_get_mac_address)\n @patch(\"hubitatmaker.server.Server\")\n def test_devices_loaded(self, MockServer) -> None:\n \"\"\"Started hub should have parsed device info.\"\"\"\n hub = Hub(\"1.2.3.4\", \"1234\", \"token\")\n run(hub.start())\n self.assertEqual(len(hub.devices), 9)\n\n @patch(\"aiohttp.request\", new=fake_request)\n @patch(\"getmac.get_mac_address\", new=fake_get_mac_address)\n @patch(\"hubitatmaker.server.Server\")\n def test_process_event(self, MockServer) -> None:\n \"\"\"Started hub should process a device event.\"\"\"\n hub = Hub(\"1.2.3.4\", \"1234\", \"token\")\n run(hub.start())\n device = hub.devices[\"176\"]\n attr = device.attributes[\"switch\"]\n self.assertEqual(attr.value, \"off\")\n\n hub.process_event(events[0])\n\n attr = device.attributes[\"switch\"]\n self.assertEqual(attr.value, \"on\")\n", "sub_path": "hubitatmaker/tests/test_hub.py", "file_name": "test_hub.py", "file_ext": "py", "file_size_in_byte": 5163, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 18, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "typing.Coroutine", "line_number": 25, "usage_type": "name"}, {"api_name": "asyncio.get_event_loop", "line_number": 26, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 25, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 35, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 49, "usage_type": "name"}, {"api_name": "re.match", "line_number": 57, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 61, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 79, "usage_type": "name"}, {"api_name": "hubitatmaker.hub.InvalidConfig", "line_number": 85, "usage_type": "argument"}, {"api_name": "hubitatmaker.hub.Hub", "line_number": 85, "usage_type": "argument"}, {"api_name": "hubitatmaker.hub.InvalidConfig", "line_number": 86, "usage_type": "argument"}, {"api_name": "hubitatmaker.hub.Hub", "line_number": 86, "usage_type": "argument"}, {"api_name": "hubitatmaker.hub.InvalidConfig", "line_number": 87, "usage_type": "argument"}, {"api_name": "hubitatmaker.hub.Hub", "line_number": 
87, "usage_type": "argument"}, {"api_name": "hubitatmaker.hub.Hub", "line_number": 88, "usage_type": "call"}, {"api_name": "hubitatmaker.hub.Hub", "line_number": 93, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 90, "usage_type": "call"}, {"api_name": "hubitatmaker.hub.Hub", "line_number": 102, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 97, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 98, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 99, "usage_type": "call"}, {"api_name": "hubitatmaker.hub.Hub", "line_number": 111, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 106, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 107, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 108, "usage_type": "call"}, {"api_name": "hubitatmaker.hub.Hub", "line_number": 124, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 119, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 120, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 121, "usage_type": "call"}, {"api_name": "hubitatmaker.hub.Hub", "line_number": 135, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 130, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 131, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 132, "usage_type": "call"}, {"api_name": "hubitatmaker.hub.Hub", "line_number": 144, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 139, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 140, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "364920898", "text": "import numpy as np\nimport os\nfrom PIL import Image\nimport tqdm\nfrom src.utils import data_utils, misc\nfrom src import dataset\nfrom multiprocessing import Pool\nimport pickle\nimport argparse\n\nimport torch\n\nfrom src import data_fitter\nfrom src.utils import train_utils, config, data_utils\nfrom src import dataset, data_fitter, bern_gp_rloss_explorer, beta_gp_rloss_explorer, betaab_gp_rloss_explorer, simple_explorer\n\nROOT_DIR = os.path.expanduser(\"~/datasets/cocostuff\")\nCACHE_DIR = \"data/cocos3_10k\"\npreds_dir = os.path.expanduser(\"~/repos/deeplab-pytorch/data/features/cocostuff164k_on_cocostuff10k/deeplabv2_resnet101_msc\")\nprimary_label_strs = [\"bird\", \"cat\", \"dog\", \"horse\", \"sheep\", \"cow\", \"elephant\", \"bear\", \"zebra\", \"giraffe\"]\nscene_label_hierarchy = {\n \"water-other\": [\"sea\", \"river\"],\n \"ground-other\": [\"ground-other\", \"playingfield\", \"platform\", \"railroad\", \"pavement\", \"road\", \"gravel\", \"mud\", \"dirt\", \"snow\", \"sand\", \"solid-other\", \"hill\", \"mountain\", \"stone\", \"rock\", \"wood\", \"plant-other\", \"straw\", \"moss\", \"branch\", \"flower\", \"leaves\", \"bush\", \"tree\", \"grass\"],\n \"sky-other\": [\"sky-other\", \"clouds\"], \n \"structural-other\": [\"structural-other\", \"cage\", \"fence\", \"railing\", \"net\", \"building-other\", \"house\", \"roof\", \"tent\", \"skyscraper\", \"bridge\"], \n \"furniture-other\": [\"furniture-other\", \"stairs\", \"light\", \"counter\", \"mirror-stuff\", \"cupboard\", \"shelf\", \"cabinet\", \"table\", \"desk-stuff\", \"door-stuff\", \"window-other\", \"window-blind\", \"metal\", \"plastic\", \"cardboard\", \"paper\", 
\"floor-other\", \"floor-stone\", \"floor-marble\", \"floor-wood\", \"floor-tile\", \"carpet\", \"ceiling-other\", \"ceiling-tile\", \"wall-other\", \"wall-concrete\", \"wall-stone\", \"wall-brick\", \"wall-concrete\", \"wall-tile\", \"wall-panel\"],\n}\n# scene_label_strs = [\"sea\", \"fog\", \"ground-other\", \"railroad\", \"road\", \"snow\", \"sand\", \"hill\", \"wood\", \"tree\", \"bush\", \"grass\", \"railing\", \"fence\", \"bridge\", \"house\", \"sky-other\", \"textile-other\", \"furniture-other\", \"window-other\", \"floor-other\", \"wall-other\", \"plastic\"]\ndev = config.device\n\n\"\"\"\nLabeled ai: 0 acc: 0.7360\nLabeled ai: 1 acc: 0.9420\nLabeled ai: 2 acc: 0.8520\nLabeled ai: 3 acc: 0.9140\nLabeled ai: 4 acc: 0.7720\nLabeled ai: 5 acc: 0.7800\n\"\"\"\ngen_params = {\n 'animal': np.arange(10), \n 'water': np.arange(2), \n 'ground': np.arange(2), \n 'sky': np.arange(2),\n 'building': np.arange(2),\n 'furniture': np.arange(2),\n}\n\nD = np.meshgrid(*[np.arange(len(gen_params[key])) for key in gen_params])\nattr_dim = len(gen_params)\nfor _ in range(attr_dim):\n D[_] = np.reshape(D[_], -1)\n# -1 x attr_dim\nD = np.stack(D, axis=1)\nprint (\"Arms shape: \", np.shape(D), D[0], D[-1])\n\n\ndef get_labels():\n labels_strs = []\n # The checkpoint from: https://github.com/kazuto1011/deeplab-pytorch does not use unlabeled label\n # so are the annotations images of cocos.\n with open(os.path.join(ROOT_DIR, \"labels.txt\")) as f:\n for line in f:\n line = line.strip()\n flds = line.split()\n if flds[1] == \"unlabeled\":\n continue\n labels_strs.append(flds[1])\n \n primary_label_map = dict([(labels_strs.index(_pl), pi) for pi, _pl in enumerate(primary_label_strs)])\n scene_label_map = dict([(labels_strs.index(_sl), si) for si, _sl in enumerate(scene_label_hierarchy)])\n fine_to_coarse_scene_index = {}\n for k in scene_label_hierarchy:\n for fine_label in scene_label_hierarchy[k]:\n fine_index = labels_strs.index(fine_label)\n coarse_index = labels_strs.index(k)\n fine_to_coarse_scene_index[fine_index] = coarse_index\n# scene_label_map = dict([(sl, si) for si, sl in enumerate(range(92, 183))])\n \n print (fine_to_coarse_scene_index)\n return primary_label_map, scene_label_map, fine_to_coarse_scene_index\n\ndef _get_ids():\n train_ids = [fname[:-4] for fname in os.listdir(preds_dir + \"/train2017/logit/\") if fname.endswith(\".npy\")]\n val_ids = [fname[:-4] for fname in os.listdir(preds_dir + \"/val2017/logit/\") if fname.endswith(\".npy\")]\n return train_ids, val_ids\n\n\ndef _filter_fn(obj):\n fname, _id = obj\n img = Image.open(fname)\n img = np.array(img)\n labels = np.unique(img)\n _pls = [label for label in labels if label in primary_label_map]\n if len(_pls) == 1:\n return (_id, True)\n return (_id, False)\n\n\ndef filter_ids_with_single_object(ids, train=True):\n good_ids = []\n \n fnames = []\n for _id in ids:\n fname = _id + \".png\"\n fname = ROOT_DIR + \"/dataset/annotations/\" + (\"train2017\" if train else \"val2017\") + \"/\" + fname\n fnames.append((fname, _id))\n\n with Pool(20) as p:\n all_ids = list(tqdm.tqdm(p.imap(_filter_fn, fnames), total=len(ids)))\n good_ids = [_id for _id, _good in all_ids if _good]\n return good_ids\n \n\ndef fetch_preds(id_train_pair):\n _id, train = id_train_pair\n primary_labels = np.array([_pl for _pl in primary_label_map])\n scene_labels = np.array([_sl for _sl in scene_label_map])\n \n fldr = \"train2017\" if train else \"val2017\"\n pred_fname = preds_dir + \"/\" + fldr + \"/logit/\" + _id + \".npy\"\n assert 
os.path.exists(pred_fname)\n\n logits = np.load(pred_fname)\n preds = np.argmax(logits, axis=0)\n pls = [primary_label_map[_pl] for _pl in np.unique(preds) if _pl in primary_labels]\n random_pick = 0\n # this could happen\n if len(pls) > 1:\n scores = [len(np.where(preds==_pl)[0]) for _pl in pls]\n # retain the label with the highest prevalence\n good_pl_idx = np.argmax(scores)\n pl = pls[good_pl_idx]\n elif len(pls) == 0:\n primary_preds = np.argmax(logits[primary_labels, :, :], axis=0)\n pls = np.unique(primary_preds)\n scores = [len(np.where(primary_preds==_pl)[0]) for _pl in pls]\n good_pl_idx = np.argmax(scores)\n pl = pls[good_pl_idx]\n random_pick = 1\n elif len(pls) == 1:\n pl = pls[0]\n probs = torch.softmax(torch.tensor(logits), dim=0)\n primary_prob_cumsum = torch.mean(probs[primary_labels, :, :], dim=[1, 2]).numpy()\n\n z = np.zeros(len(scene_label_map))\n scene_probs = np.zeros([len(scene_label_map), 2])\n scene_coarse_preds = [fine_to_coarse_scene_index[_pl] for _pl in preds.flatten() if _pl in fine_to_coarse_scene_index]\n for fine_index, coarse_index in fine_to_coarse_scene_index.items():\n _cl = scene_label_map[coarse_index]\n scene_probs[_cl, 1] += torch.mean(probs[fine_index, :, :]).numpy()\n scene_probs[_cl, 0] += torch.mean(1 - probs[fine_index, :, :]).numpy()\n scene_coarse_labels = np.unique(scene_coarse_preds)\n scores = [len(np.where(scene_coarse_preds==_pl)[0]) for _pl in scene_coarse_labels]\n for si in np.argsort(-np.array(scores)):\n support = scores[si]\n _pl = scene_coarse_labels[si]\n z[scene_label_map[_pl]] = 1\n \n return {\"id\": _id, \"pl\": pl, \"sl\": z, \n \"random_pick\": random_pick, \n \"primary_logits\": primary_prob_cumsum,\n \"scene_logits\": scene_probs}\n\n\ndef predictions(ids, train=True):\n all_pl, all_sl = [], []\n logits_pl, logits_sl = [], []\n with Pool(5) as p:\n objs = list(tqdm.tqdm(p.imap(fetch_preds, zip(ids, [train]*len(ids))), total=len(ids)))\n id_to_obj_map = dict([(obj[\"id\"], obj) for obj in objs])\n num_random = 0\n for _id in ids:\n obj = id_to_obj_map[_id]\n pl, z = obj[\"pl\"], obj[\"sl\"]\n all_pl.append(pl)\n all_sl.append(z) \n logits_pl.append(obj[\"primary_logits\"])\n logits_sl.append(obj[\"scene_logits\"])\n num_random += obj[\"random_pick\"]\n \n print (\"Randomly picked examples: %d\" % num_random)\n return all_pl, all_sl, logits_pl, logits_sl\n\n\ndef fetch_gt(id_train_pair):\n _id, train = id_train_pair\n fldr = \"train2017\" if train else \"val2017\"\n fname = _id + \".png\"\n fname = ROOT_DIR + \"/dataset/annotations/\" + fldr + \"/\" + fname\n img = Image.open(fname)\n img = np.array(img)\n gt_labels = np.unique(img)\n pls = [primary_label_map[_pl] for _pl in gt_labels if _pl in primary_label_map]\n assert len(pls) == 1, \"Found two primary labels for id: %s, labels: %s\" % (_id, pls)\n pl = pls[0]\n gt_labels = [fine_to_coarse_scene_index[_label] for _label in gt_labels if _label in fine_to_coarse_scene_index]\n sls = [scene_label_map[_label] for _label in gt_labels if _label in scene_label_map]\n scores = [len(np.where(img==_label)[0]) for _label in gt_labels if _label in scene_label_map]\n z = np.zeros(len(scene_label_map))\n for si in np.argsort(-np.array(scores)):\n sl = sls[si]\n z[sl] = 1\n return {\"id\": _id, \"pl\": pl, \"sl\": z}\n\n\ndef ground_truth(ids, train=True):\n all_pl, all_sl = [], []\n with Pool(20) as p:\n gts = list(tqdm.tqdm(p.imap(fetch_gt, zip(ids, [train]*len(ids))), total=len(ids)))\n id_to_gt_map = dict([(_gt[\"id\"], _gt) for _gt in gts])\n for _id in ids:\n gt_obj = 
id_to_gt_map[_id]\n pl, z = gt_obj[\"pl\"], gt_obj[\"sl\"]\n all_pl.append(pl)\n all_sl.append(z)\n return all_pl, all_sl\n\n\nclass COCOSDataset(dataset.Dataset):\n def __init__(self, pred_pls, pred_sls, gt_pls, gt_sls, seed): \n assert len(pred_pls) == len(pred_sls)\n assert len(gt_pls) == len(gt_sls)\n assert len(pred_pls) == len(gt_pls)\n \n self.seed = seed\n LABELED_DATA_SIZE = 500\n self._arms = D\n #a small subset of labeled\n np.random.seed(0)\n idxs = np.random.permutation(np.arange(len(pred_pls)))\n all_labeled_idxs, unlabeled_idxs = idxs[:3*LABELED_DATA_SIZE], idxs[3*LABELED_DATA_SIZE:]\n np.random.seed(self.seed)\n labeled_idxs = np.random.choice(all_labeled_idxs, LABELED_DATA_SIZE)\n \n self.arm_hash_to_index = {}\n self.arm_to_idxs = {}\n seen_hashes = set()\n # Make arms from what is seen\n for arm in self._arms:\n arm_hash = self.hash_arm(arm)\n if arm_hash not in seen_hashes:\n self.arm_hash_to_index[arm_hash] = len(seen_hashes)\n seen_hashes.add(arm_hash)\n \n assert len(np.shape(self._arms)) == 2, \"Unexpected shape of arms: %s\" % np.shape(self._arms)\n \n arm_indices = []\n for idx in range(len(pred_pls)):\n arm = np.concatenate([[gt_pls[idx]], gt_sls[idx]]).astype(np.int32)\n arm_hash = self.hash_arm(arm)\n arm_index = self.arm_hash_to_index[arm_hash]\n arm_indices.append(arm_index)\n arm_indices = np.array(arm_indices)\n \n # this should only keep track of unlabeled indices\n for ui, idx in enumerate(unlabeled_idxs):\n arm = np.concatenate([[gt_pls[idx]], gt_sls[idx]]).astype(np.int32)\n arm_hash = self.hash_arm(arm)\n arm_index = self.arm_hash_to_index[arm_hash]\n x_indices = self.arm_to_idxs.get(arm_index, [])\n x_indices.append(ui) \n self.arm_to_idxs[arm_index] = x_indices\n \n num_empty = 0\n for ai in range(len(self.arms)):\n if ai not in self.arm_to_idxs:\n num_empty += 1\n print (\"Found %d/%d empty arms\" % (num_empty, len(self.arms)))\n \n print (\"Found %d unique arms and %0.2f average number of examples per arm\" % (len(self._arms), np.mean([len(self.arm_to_idxs.get(ai, [])) for ai in range(len(self._arms))])))\n self.labeled_data = (labeled_idxs, gt_pls[labeled_idxs], arm_indices[labeled_idxs])\n self.U = (unlabeled_idxs, gt_pls[unlabeled_idxs], arm_indices[unlabeled_idxs])\n \n assert len(self.arm_hash_to_index) == len(self._arms)\n\n @property\n def arms(self):\n return self._arms\n \n @property\n def num_arms(self):\n return len(self.arms)\n \n def sample(self, num_sample):\n idxs = np.random.choice(len(self), num_sample)\n x, y, arm_ids = self.U[0][idxs], self.U[1][idxs], self.U[2][idxs]\n return x, y, arm_ids\n \n def sample_arm(self, arm_index, num_sample):\n \"\"\"\n Sample randomly from arm (integer index) \n :return: np.array of x, y \n \"\"\"\n # allowing repeats\n idxs = np.random.choice(self.arm_to_idxs[arm_index], num_sample)\n x, y, arm_ids = self.U[0][idxs], self.U[1][idxs], self.U[2][idxs]\n return x, y\n \n def full_labeled_data(self):\n return self.labeled_data\n \n def full_data_arm(self, arm_index):\n idxs = self.arm_to_idxs[arm_index]\n x, y = self.U[0][idxs], self.U[1][idxs]\n return x, y\n \n def full_data(self):\n return self.U\n \n def num_attrs(self):\n return np.shape(self.arms)[-1]\n\n def __len__(self):\n return len(self.U[0])\n \n @staticmethod\n def hash_arm(arm):\n return \"::\".join(map(str, arm))\n \n def hash_to_arm_index(self, hashed_arm: str):\n # this can happen here since self.arms do not span the universe \n if hashed_arm not in self.arm_hash_to_index:\n return None\n return self.arm_hash_to_index[hashed_arm]\n\n 
\nclass JointModelFromCache():\n def __init__(self, logits_per_attr):\n \"\"\"\n Logits should be of shape: [len(full_data) x num_labels_for_this_attr]_{num_attr}\n \"\"\"\n self.logits = logits_per_attr\n \n def logit_per_attr(self, np_x, debug=False):\n return [self.logits[ai][np_x] for ai in range(len(self.logits))]\n\n\ndef check(gt_pls, gt_sls):\n arm_hash_to_index = {}\n arms = []\n for si in range(len(gt_pls)):\n arm = np.concatenate([[gt_pls[si]], gt_sls[si]]).astype(np.int32)\n arm_hash = COCOSDataset.hash_arm(arm)\n\n if arm_hash not in arm_hash_to_index:\n arms.append(arm)\n x_indices = arm_hash_to_index.get(arm_hash, [])\n x_indices.append(si)\n arm_hash_to_index[arm_hash] = x_indices\n \n print (\"Found %d unique arms and %0.2f average number of examples per arm\" % (len(arm_hash_to_index), np.mean([len(arm_hash_to_index[_h]) for _h in arm_hash_to_index])))\n \n\ndef _one_hot(preds, depth, on=1, off=0):\n z = np.ones([len(preds), depth])*off\n z[np.arange(len(preds)), preds.astype(np.int64)] = on\n return z\n\n\ndef evaluate(preds, gt):\n corr, num = {}, {}\n for i in range(len(preds)):\n _corr = (preds[i] == gt[i])\n _l = gt[i]\n corr[_l] = corr.get(_l, 0) +_corr\n num[_l] = num.get(_l, 0) + 1\n\n for k in corr:\n print (\"Key: %s Acc: %0.4f num: %d\" % (str(k), corr[k]/num[k], num[k]))\n\n\ndef evaluate_sls(pred_sls, gt_sls):\n num_sls = len(pred_sls[0])\n corr = np.zeros(num_sls)\n for i in range(len(pred_sls)):\n corr += (pred_sls[i] == gt_sls[i]).astype(np.float32)\n\n print (\"Per scene label acc:\")\n print (corr/len(pred_sls))\n\n\ndef prepare(seed):\n gt_pls, gt_sls, (pred_pls, plogits), (pred_sls, slogits) = parse_preds_gts()\n check(gt_pls, gt_sls)\n # num_examples x num_labels\n plogits = np.array(plogits)\n # num_examples x num_attrs x 2\n slogits = np.array(slogits)\n print (\"Shape of p: %s, scene: %s\" % (plogits.shape, slogits.shape))\n \n cocos_dataset = COCOSDataset(pred_pls, pred_sls, gt_pls, gt_sls, seed)\n num_sl_attrs = pred_sls.shape[-1]\n \n models = [dataset.ModelFromCache(_one_hot(pred_pls, len(primary_label_map), on=1, off=-1))] + [dataset.ModelFromCache(_one_hot(pred_sls[:, si], 2, on=1, off=-1)) for si in range(num_sl_attrs)]\n joint_model = JointModelFromCache([_one_hot(pred_pls, len(primary_label_map), on=1, off=-1)] + [_one_hot(pred_sls[:, si], 2, on=1, off=-1) for si in range(num_sl_attrs)])\n\n# models = [dataset.ModelFromCache(plogits)] + [dataset.ModelFromCache(slogits[:, si, :]) for si in range(num_sl_attrs)]\n# joint_model = JointModelFromCache([plogits] + [slogits[:, si, :] for si in range(num_sl_attrs)])\n \n config = data_fitter.Config()\n config.CALIBRATION_TOL = 1e-3\n config.CALIBRATION_TOPK = 5\n \n cocos_fitter = data_fitter.Fitter(cocos_dataset, models=models, device=dev, cache_dir=CACHE_DIR, joint_model=joint_model, config=config)\n cocos_fitter.set_primary_task_index(0)\n \n in_features = 1 + gt_sls.shape[-1]\n kernel_embedding_model = torch.nn.Sequential(\n torch.nn.Linear(in_features, 20),\n torch.nn.ReLU(),\n torch.nn.Linear(20, 20)\n )\n cocos_fitter.set_deep_kernel(kernel_embedding_model, 20)\n \n if not os.path.exists(cocos_fitter.model_name):\n cocos_fitter.fit(use_edge_potentials=True)\n if not os.path.exists(cocos_fitter.model_name_no_edge_potential):\n cocos_fitter.fit(use_edge_potentials=False)\n \n return cocos_dataset, cocos_fitter\n \ndef parse_preds_gts():\n train_ids = data_utils.cache(\n lambda: filter_ids_with_single_object(train_ids, train=True),\n CACHE_DIR + \"/train_ids.pkl\"\n )\n print (\"Found %d good 
ones in train\" % len(train_ids))\n \n val_ids = data_utils.cache(\n lambda: filter_ids_with_single_object(val_ids, train=False),\n CACHE_DIR + \"/val_ids.pkl\"\n )\n print (\"Found %d good ones in val\" % len(val_ids))\n \n train_pls, train_sls, train_plogits, train_slogits = data_utils.cache(\n lambda: predictions(train_ids, train=True),\n CACHE_DIR + \"/train_preds.pkl\"\n )\n val_pls, val_sls, val_plogits, val_slogits = data_utils.cache(\n lambda: predictions(val_ids, train=False),\n CACHE_DIR + \"/val_preds.pkl\"\n )\n pred_pls, pred_sls = train_pls + val_pls, train_sls + val_sls\n plogits, slogits = train_plogits + val_plogits, train_slogits + val_slogits\n \n train_pls2, train_sls2 = data_utils.cache(\n lambda: ground_truth(train_ids, train=True),\n CACHE_DIR + \"/train_gt.pkl\"\n )\n val_pls2, val_sls2 = data_utils.cache(\n lambda: ground_truth(val_ids, train=False),\n CACHE_DIR + \"/val_gt.pkl\"\n )\n gt_pls, gt_sls = train_pls2 + val_pls2, train_sls2 + val_sls2\n \n# evaluate(pred_pls, gt_pls)\n evaluate_sls(pred_sls, gt_sls)\n return np.array(gt_pls), np.array(gt_sls), (np.array(pred_pls), plogits), (np.array(pred_sls), slogits)\n\n\nprimary_label_map, scene_label_map, fine_to_coarse_scene_index = get_labels()\nif __name__ == '__main__':\n parser = misc.get_arg_parser()\n \n args = parser.parse_args()\n \n cocos_dataset, cocos_data_fitter = prepare(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n \n _args = [cocos_dataset, cocos_data_fitter, cocos_data_fitter.cache_dir, dev]\n _kwargs = {'explore_strategy': args.et, 'seed': args.seed, \"sample_type\": args.sample_type}\n if args.explorer == \"bern_gp_rloss\":\n _kwargs['width'] = args.width\n if args.ablation:\n bern_gp_rloss_explorer.estimation_ablation(_args, _kwargs)\n else:\n explorer = bern_gp_rloss_explorer.BernGPExplorer(*_args, **_kwargs)\n elif args.explorer == 'beta_gp_rloss':\n misc.populate_params(_kwargs, args)\n # _kwargs[\"sample_type\"] = \"correctednoep\"\n if args.ablation:\n if args.alpha_beta:\n betaab_gp_rloss_explorer.estimation_ablation(_args, _kwargs)\n else:\n beta_gp_rloss_explorer.estimation_ablation(_args, _kwargs)\n else:\n explorer = beta_gp_rloss_explorer.BetaGPExplorer(*_args, **_kwargs)\n elif args.explorer == 'simple':\n if args.ablation:\n simple_explorer.estimation_ablation(_args, _kwargs)\n else:\n explorer = simple_explorer.SimpleExplorer(*_args, **_kwargs)\n \n if not args.ablation:\n explorer.explore_and_fit(budget=2000)", "sub_path": "cocos3_10k.py", "file_name": "cocos3_10k.py", "file_ext": "py", "file_size_in_byte": 19666, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.expanduser", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "src.utils.config.device", "line_number": 29, "usage_type": "attribute"}, {"api_name": "src.utils.config", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.arange", 
"line_number": 45, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 83, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 84, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 90, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 90, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 92, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 108, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 154, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 168, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 169, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 190, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 190, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 200, "usage_type": "call"}, 
{"api_name": "multiprocessing.Pool", "line_number": 208, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 209, "usage_type": "call"}, {"api_name": "src.dataset.Dataset", "line_number": 219, "usage_type": "attribute"}, {"api_name": "src.dataset", "line_number": 219, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 229, "usage_type": "attribute"}, {"api_name": "numpy.random.permutation", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 230, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 232, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 233, "usage_type": "attribute"}, {"api_name": "numpy.shape", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 249, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 257, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 285, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 295, "usage_type": "attribute"}, {"api_name": "numpy.shape", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 342, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 356, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 374, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 376, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 386, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 388, "usage_type": "call"}, {"api_name": "src.dataset.ModelFromCache", "line_number": 394, "usage_type": "call"}, {"api_name": "src.dataset", "line_number": 394, "usage_type": "name"}, {"api_name": "src.utils.config", "line_number": 400, "usage_type": "name"}, {"api_name": "src.data_fitter.Config", "line_number": 400, "usage_type": "call"}, {"api_name": "src.data_fitter", "line_number": 400, "usage_type": "name"}, {"api_name": "src.utils.config.CALIBRATION_TOL", "line_number": 401, "usage_type": "attribute"}, {"api_name": "src.utils.config", "line_number": 401, "usage_type": "name"}, {"api_name": "src.utils.config.CALIBRATION_TOPK", "line_number": 402, "usage_type": "attribute"}, {"api_name": "src.utils.config", "line_number": 402, "usage_type": "name"}, {"api_name": "src.data_fitter.Fitter", "line_number": 404, "usage_type": "call"}, {"api_name": "src.data_fitter", "line_number": 404, "usage_type": "name"}, {"api_name": 
"src.utils.config", "line_number": 404, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 408, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 408, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 409, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 409, "usage_type": "attribute"}, {"api_name": "torch.nn.ReLU", "line_number": 410, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 410, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 411, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 411, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 415, "usage_type": "call"}, {"api_name": "os.path", "line_number": 415, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 417, "usage_type": "call"}, {"api_name": "os.path", "line_number": 417, "usage_type": "attribute"}, {"api_name": "src.utils.data_utils.cache", "line_number": 423, "usage_type": "call"}, {"api_name": "src.utils.data_utils", "line_number": 423, "usage_type": "name"}, {"api_name": "src.utils.data_utils.cache", "line_number": 429, "usage_type": "call"}, {"api_name": "src.utils.data_utils", "line_number": 429, "usage_type": "name"}, {"api_name": "src.utils.data_utils.cache", "line_number": 435, "usage_type": "call"}, {"api_name": "src.utils.data_utils", "line_number": 435, "usage_type": "name"}, {"api_name": "src.utils.data_utils.cache", "line_number": 439, "usage_type": "call"}, {"api_name": "src.utils.data_utils", "line_number": 439, "usage_type": "name"}, {"api_name": "src.utils.data_utils.cache", "line_number": 446, "usage_type": "call"}, {"api_name": "src.utils.data_utils", "line_number": 446, "usage_type": "name"}, {"api_name": "src.utils.data_utils.cache", "line_number": 450, "usage_type": "call"}, {"api_name": "src.utils.data_utils", "line_number": 450, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 458, "usage_type": "call"}, {"api_name": "src.utils.misc.get_arg_parser", "line_number": 463, "usage_type": "call"}, {"api_name": "src.utils.misc", "line_number": 463, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 468, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 468, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 469, "usage_type": "call"}, {"api_name": "src.bern_gp_rloss_explorer.estimation_ablation", "line_number": 476, "usage_type": "call"}, {"api_name": "src.bern_gp_rloss_explorer", "line_number": 476, "usage_type": "name"}, {"api_name": "src.bern_gp_rloss_explorer.BernGPExplorer", "line_number": 478, "usage_type": "call"}, {"api_name": "src.bern_gp_rloss_explorer", "line_number": 478, "usage_type": "name"}, {"api_name": "src.utils.misc.populate_params", "line_number": 480, "usage_type": "call"}, {"api_name": "src.utils.misc", "line_number": 480, "usage_type": "name"}, {"api_name": "src.betaab_gp_rloss_explorer.estimation_ablation", "line_number": 484, "usage_type": "call"}, {"api_name": "src.betaab_gp_rloss_explorer", "line_number": 484, "usage_type": "name"}, {"api_name": "src.beta_gp_rloss_explorer.estimation_ablation", "line_number": 486, "usage_type": "call"}, {"api_name": "src.beta_gp_rloss_explorer", "line_number": 486, "usage_type": "name"}, {"api_name": "src.beta_gp_rloss_explorer.BetaGPExplorer", "line_number": 488, "usage_type": "call"}, {"api_name": "src.beta_gp_rloss_explorer", "line_number": 488, "usage_type": "name"}, {"api_name": 
"src.simple_explorer.estimation_ablation", "line_number": 491, "usage_type": "call"}, {"api_name": "src.simple_explorer", "line_number": 491, "usage_type": "name"}, {"api_name": "src.simple_explorer.SimpleExplorer", "line_number": 493, "usage_type": "call"}, {"api_name": "src.simple_explorer", "line_number": 493, "usage_type": "name"}]} +{"seq_id": "522510901", "text": "import discord\r\nfrom discord.ext import commands\r\nfrom discord.ext import tasks\r\nfrom itertools import cycle\r\n\r\nbot = commands.Bot(command_prefix = \".\")\r\nstatus = cycle(['Coding Server with \\'.\\'','by Anonyme#5137'])\r\n\r\n\r\n@bot.event\r\nasync def on_ready():\r\n change_status.start()\r\n print('Mod is ready to be used')\r\n\r\n\r\n\r\n\r\n#Commands to use\r\n@bot.command(aliases=['h','assist','aide'])\r\nasync def command(ctx): \r\n await ctx.send('`Prefix to use` : \\\"**.**\\\"')\r\n await ctx.send('**.clear [amount of message to delete]** \\n*delete specific number of messages*\\n_e.g : **.clear 5**_\\n --------------------')\r\n await ctx.send('**.cls** \\n*clears chosen channel\\'s chat*\\n_e.g : **.cls**_\\n --------------------')\r\n await ctx.send('**.kick [Player Tag]** \\n*Kicks the player from the server*\\n_e.g : **.kick Discord#0000** or **.kick @Discord**_\\n --------------------')\r\n await ctx.send('**.ban [Player Tag]** \\n*Bans the player from the server*\\n_e.g : **.ban Discord#0000** or **.kick @Discord**_\\n --------------------')\r\n await ctx.send('**.unban [Player Tag]** \\n*Unbans the player from the server*\\n_e.g : **.unban** or **Discord#0000**_\\n --------------------')\r\n await ctx.send('**.e** *or* **.event [\"Event\"** + **\"Date/Timing\"**] \\n*Create an event\\'s announcement that contains an Event and its date/timing*\\n_e.g : **.event** or **.e \"Summer Travel to...\" \"17:00 am - Place\"**_\\n___USE QUOTES FOR **\"EVENT\"** and **\"DATE\"**, SEPARATED WITH SPACE___\\n --------------------')\r\n await ctx.send('MORE FEATURES COMMING SOON\\n --------------------')\r\n\r\n\r\n\r\n@tasks.loop(seconds=3)\r\nasync def change_status():\r\n await bot.change_presence(activity=discord.Game(next(status)))\r\n\r\n\r\n#CLEAR + ROLE NEEDED\r\n@bot.command()\r\n@commands.has_any_role('bot','Dad','Admin')\r\nasync def clear(ctx, amount = 5):\r\n await ctx.channel.purge(limit=amount)\r\n\r\n\r\n@clear.error\r\nasync def clear_error(ctx, error):\r\n if isinstance(error, commands.MissingAnyRole):\r\n await ctx.send('You need \\\"**`Dad`**\\\" or \\\"**`bot`**\\\" role or \\\"**`Admin`**\\\" role.')\r\n\r\n \r\n\r\n#CLS + ROLE NEEDED\r\n@bot.command()\r\n@commands.has_any_role('Dad','bot','Admin')\r\nasync def cls(ctx, amount = 999999999999999999999999999999999999999999999999999999999999):\r\n await ctx.channel.purge(limit=amount)\r\n\r\n@cls.error\r\nasync def cls_error(ctx, error):\r\n if isinstance(error, commands.MissingAnyRole):\r\n await ctx.send('You need \\\"**`Dad`**\\\" or \\\"**`bot`**\\\" role or \\\"**`Admin`**\\\" role.') \r\n\r\n\r\n\r\n\r\n\r\n\r\n#REMOVE\r\n@bot.command()\r\n@commands.has_any_role('bot','Dad','Admin')\r\nasync def kick(ctx, member : discord.Member, *, reason = None ):\r\n await member.kick(reason=reason)\r\n await ctx.send(f'{member.mention} was kicked from the server')\r\n\r\n@kick.error\r\nasync def kick_error(ctx, error):\r\n if isinstance(error, commands.MissingAnyRole):\r\n await ctx.send('You need \\\"**`Dad`**\\\" or \\\"**`bot`**\\\" role or \\\"**`Admin`**\\\" 
role.')\r\n\r\n\r\n#ban\r\n@bot.command()\r\n@commands.has_any_role('bot','Dad','Admin')\r\nasync def ban(ctx, member : discord.Member, *, reason = None ):\r\n await member.ban(reason=reason)\r\n await ctx.send(f'{member.name}#{member.discriminator} was banned from the server')\r\n\r\n@ban.error\r\nasync def ban_error(ctx, error):\r\n if isinstance(error, commands.MissingAnyRole):\r\n await ctx.send('You need \\\"**`Dad`**\\\" or \\\"**`bot`**\\\" role or \\\"**`Admin`**\\\" role.')\r\n\r\n\r\n\r\n#unban\r\n@bot.command(aliases=['uba'])\r\n@commands.has_any_role('bot','Dad','Admin')\r\nasync def unban(ctx, *, member):\r\n banned_users_list = await ctx.guild.bans()\r\n member_name, member_discriminator = member.split(\"#\")\r\n \r\n for ban_entry in banned_users_list:\r\n user = ban_entry.user\r\n\r\n if (user.name, user.discriminator) == (member_name, member_discriminator):\r\n await ctx.guild.unban(user)\r\n await ctx.send(f'{user.mention} was unbanned')\r\n return\r\n\r\n@unban.error\r\nasync def unban_error(ctx, error):\r\n if isinstance(error, commands.MissingAnyRole):\r\n await ctx.send('You need \\\"**`Dad`**\\\" or \\\"**`bot`**\\\" role or \\\"**`Admin`**\\\" role.')\r\n\r\n\r\n#Annonce\r\n@bot.command(aliases=['e', 'event'])\r\n@commands.has_any_role('bot','Dad','Admin')\r\nasync def evenement(ctx, a, b):\r\n await ctx.send('```diff\\n-Announcement:\\n```\\n```ini\\n[Event: {}]\\n```\\n```json\\n\\\"Date: {}\\\"\\n```'.format(a.capitalize(),b.capitalize()))\r\n\r\n@evenement.error\r\nasync def evenement_error(ctx, error):\r\n if isinstance(error, commands.MissingAnyRole):\r\n await ctx.send('You need \\\"**`Dad`**\\\" or \\\"**`bot`**\\\" role or \\\"**`Admin`**\\\" role.')\r\n \r\n \r\n \r\ntoken = 't2NzE4MjczNDU4MTI2OTc5MDgy.Xt5xsw.B4M7uKFMu3j49p3ynqAFw08G3Rg'\r\ntoken_print = ''.join(token[2:])\r\n\r\n\r\nbot.run(token_print)\r\n\r\n", "sub_path": "modBot.py", "file_name": "modBot.py", "file_ext": "py", "file_size_in_byte": 4800, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "discord.ext.commands.Bot", "line_number": 6, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 6, "usage_type": "name"}, {"api_name": "itertools.cycle", "line_number": 7, "usage_type": "call"}, {"api_name": "discord.Game", "line_number": 34, "usage_type": "call"}, {"api_name": "discord.ext.tasks.loop", "line_number": 32, "usage_type": "call"}, {"api_name": "discord.ext.tasks", "line_number": 32, "usage_type": "name"}, {"api_name": "discord.ext.commands.has_any_role", "line_number": 39, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 39, "usage_type": "name"}, {"api_name": "discord.ext.commands.MissingAnyRole", "line_number": 46, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 46, "usage_type": "name"}, {"api_name": "discord.ext.commands.has_any_role", "line_number": 53, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 53, "usage_type": "name"}, {"api_name": "discord.ext.commands.MissingAnyRole", "line_number": 59, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 59, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 70, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.has_any_role", "line_number": 69, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 69, "usage_type": "name"}, {"api_name": "discord.ext.commands.MissingAnyRole", 
"line_number": 76, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 76, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 83, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.has_any_role", "line_number": 82, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 82, "usage_type": "name"}, {"api_name": "discord.ext.commands.MissingAnyRole", "line_number": 89, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 89, "usage_type": "name"}, {"api_name": "discord.ext.commands.has_any_role", "line_number": 96, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 96, "usage_type": "name"}, {"api_name": "discord.ext.commands.MissingAnyRole", "line_number": 111, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 111, "usage_type": "name"}, {"api_name": "discord.ext.commands.has_any_role", "line_number": 117, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 117, "usage_type": "name"}, {"api_name": "discord.ext.commands.MissingAnyRole", "line_number": 123, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 123, "usage_type": "name"}]} +{"seq_id": "225362437", "text": "from copy import copy\nimport asyncio\nimport inspect\nimport collections\nimport discord\nimport datetime\nimport re\n\nfrom redbot.core import Config, checks, commands\nfrom redbot.core.i18n import Translator\nfrom redbot.core.utils.predicates import MessagePredicate\n\n_ = Translator(\"Warnings\", __file__)\n\n\nasync def warning_points_add_check(\n config: Config, ctx: commands.Context, user: discord.Member, points: int\n):\n \"\"\"Handles any action that needs to be taken or not based on the points\"\"\"\n guild = ctx.guild\n guild_settings = config.guild(guild)\n act = {}\n async with guild_settings.actions() as registered_actions:\n for a in registered_actions:\n # Actions are sorted in decreasing order of points.\n # The first action we find where the user is above the threshold will be the\n # highest action we can take.\n if points >= a[\"points\"]:\n act = a\n break\n if act and act[\"exceed_command\"] is not None: # some action needs to be taken\n await create_and_invoke_context(ctx, act[\"exceed_command\"], user)\n\n\nasync def warning_points_remove_check(\n config: Config, ctx: commands.Context, user: discord.Member, points: int\n):\n guild = ctx.guild\n guild_settings = config.guild(guild)\n act = {}\n async with guild_settings.actions() as registered_actions:\n for a in registered_actions:\n if points >= a[\"points\"]:\n act = a\n else:\n break\n if act and act[\"drop_command\"] is not None: # some action needs to be taken\n await create_and_invoke_context(ctx, act[\"drop_command\"], user)\n\n\nasync def create_and_invoke_context(\n realctx: commands.Context, command_str: str, user: discord.Member\n):\n m = copy(realctx.message)\n m.content = command_str.format(user=user.mention, prefix=realctx.prefix)\n fctx = await realctx.bot.get_context(m, cls=commands.Context)\n try:\n await realctx.bot.invoke(fctx)\n except (commands.CheckFailure, commands.CommandOnCooldown):\n await fctx.reinvoke()\n\n\ndef get_command_from_input(bot, userinput: str):\n com = None\n orig = userinput\n while com is None:\n com = bot.get_command(userinput)\n if com is None:\n userinput = \" \".join(userinput.split(\" \")[:-1])\n if len(userinput) == 0:\n break\n if com is None:\n return None, _(\"I could not find a command from that 
input!\")\n\n check_str = inspect.getsource(checks.is_owner)\n if any(inspect.getsource(x) in check_str for x in com.checks):\n # command the user specified has the is_owner check\n return (\n None,\n _(\"That command requires bot owner. I can't allow you to use that for an action\"),\n )\n return \"{prefix}\" + orig, None\n\n\nasync def get_command_for_exceeded_points(ctx: commands.Context):\n \"\"\"Gets the command to be executed when the user is at or exceeding\n the points threshold for the action\"\"\"\n await ctx.send(\n _(\n \"Enter the command to be run when the user **exceeds the points for \"\n \"this action to occur.**\\n**If you do not wish to have a command run, enter** \"\n \"`none`.\\n\\nEnter it exactly as you would if you were \"\n \"actually trying to run the command, except don't put a prefix and \"\n \"use `{user}` in place of any user/member arguments\\n\\n\"\n \"WARNING: The command entered will be run without regard to checks or cooldowns. \"\n \"Commands requiring bot owner are not allowed for security reasons.\\n\\n\"\n \"Please wait 15 seconds before entering your response.\"\n )\n )\n await asyncio.sleep(15)\n\n await ctx.send(_(\"You may enter your response now.\"))\n\n try:\n msg = await ctx.bot.wait_for(\n \"message\", check=MessagePredicate.same_context(ctx), timeout=30\n )\n except asyncio.TimeoutError:\n return None\n else:\n if msg.content == \"none\":\n return None\n\n command, m = get_command_from_input(ctx.bot, msg.content)\n if command is None:\n await ctx.send(m)\n return None\n\n return command\n\n\nasync def get_command_for_dropping_points(ctx: commands.Context):\n \"\"\"\n Gets the command to be executed when the user drops below the points\n threshold\n\n This is intended to be used for reversal of the action that was executed\n when the user exceeded the threshold\n \"\"\"\n await ctx.send(\n _(\n \"Enter the command to be run when the user **returns to a value below \"\n \"the points for this action to occur.** Please note that this is \"\n \"intended to be used for reversal of the action taken when the user \"\n \"exceeded the action's point value.\\n**If you do not wish to have a command run \"\n \"on dropping points, enter** `none`.\\n\\nEnter it exactly as you would \"\n \"if you were actually trying to run the command, except don't put a prefix \"\n \"and use `{user}` in place of any user/member arguments\\n\\n\"\n \"WARNING: The command entered will be run without regard to checks or cooldowns. 
\"\n \"Commands requiring bot owner are not allowed for security reasons.\\n\\n\"\n \"Please wait 15 seconds before entering your response.\"\n )\n )\n await asyncio.sleep(15)\n\n await ctx.send(_(\"You may enter your response now.\"))\n\n try:\n msg = await ctx.bot.wait_for(\n \"message\", check=MessagePredicate.same_context(ctx), timeout=30\n )\n except asyncio.TimeoutError:\n return None\n else:\n if msg.content == \"none\":\n return None\n command, m = get_command_from_input(ctx.bot, msg.content)\n if command is None:\n await ctx.send(m)\n return None\n\n return command\n\n\nasync def EmbedPaginateWarnsList(\n self,\n ctx,\n items: list,\n items_per_page: int = 15,\n title=discord.Embed.Empty,\n desc=discord.Embed.Empty,\n author=discord.Embed.Empty,\n author_url=discord.Embed.Empty,\n author_icon_url=discord.Embed.Empty,\n thumbnail=discord.Embed.Empty,\n):\n maxPage = len(items) // items_per_page + (len(items) % items_per_page > 0)\n pages = [items[i * items_per_page : (i + 1) * items_per_page] for i in range(maxPage)]\n count = 0\n for page in pages:\n count += 1\n # print(f\"Page {count} : {page}\")\n\n async def showPage(page):\n em = discord.Embed(title=title, description=desc, color=0x3DF270)\n em.set_author(name=author, url=author_url, icon_url=author_icon_url)\n em.set_thumbnail(url=thumbnail)\n count = (page - 1) * items_per_page\n total = len(items)\n for warning in pages[page - 1]:\n id = warning.get(\"id\", \"None\")\n count += 1\n num_points = warning[\"points\"]\n time = datetime.datetime.fromtimestamp(warning[\"time\"]).strftime(\n \"%m/%d/%y @ %I:%M %p UTC\"\n )\n unwarn = (\n datetime.datetime.fromtimestamp(warning[\"unwarn\"]).strftime(\n \"%m/%d/%y @ %I:%M %p UTC\"\n )\n if warning.get(\"unwarn\")\n else None\n )\n mod = ctx.guild.get_member(warning[\"mod\"])\n if mod is None:\n mod = discord.utils.get(self.bot.get_all_members(), id=warning[\"mod\"])\n if mod is None:\n mod = await self.bot.get_user_info(warning[\"mod\"])\n em.add_field(\n name=f\"{count} of {total} | {num_points} point warning | Warning ID (*{id}*)\",\n value=f\"Issued by {mod.mention}\",\n inline=False,\n )\n em.add_field(\n name=f\"Issued on {time}\",\n value=f'Reason : {warning[\"description\"]}'\n + (f\"\\nUnwarning: {unwarn}\" if unwarn else \"\")\n + \"\\n------------------------------------------------------------------------------\",\n inline=False,\n )\n em.set_footer(\n text=f\"Page {currentPage} out of {maxPage}\",\n icon_url=\"https://www.clipartmax.com/png/middle/171-1715896_paper-book-icon-textbook-icon.png\",\n )\n return em\n\n firstRun = True\n while True:\n if firstRun:\n firstRun = False\n currentPage = 1\n em = await showPage(currentPage)\n msg = await ctx.send(embed=em)\n\n if maxPage == 1 and currentPage == 1:\n toReact = [\"✅\"]\n elif currentPage == 1:\n toReact = [\"⏩\", \"✅\"]\n elif currentPage == maxPage:\n toReact = [\"⏪\", \"✅\"]\n elif currentPage > 1 and currentPage < maxPage:\n toReact = [\"⏪\", \"⏩\", \"✅\"]\n\n for reaction in toReact:\n await msg.add_reaction(reaction)\n\n def checkReaction(reaction, user):\n return user == ctx.message.author and str(reaction.emoji).startswith(\n (\"⏪\", \"⏩\", \"✅\")\n ) # and reaction.message == msg\n\n try:\n result, user = await self.bot.wait_for(\n \"reaction_add\", timeout=120, check=checkReaction\n )\n except asyncio.TimeoutError:\n em.set_footer(\n text=f\"Page {currentPage} out of {maxPage}. Timeout. 
Please reinvoke the command to change pages.\",\n icon_url=\"https://www.clipartmax.com/png/middle/171-1715896_paper-book-icon-textbook-icon.png\",\n )\n try:\n await msg.edit(embed=em)\n await msg.clear_reactions()\n except (discord.NotFound, discord.Forbidden):\n pass\n break\n else:\n try:\n if \"⏪\" in str(result.emoji):\n # print('Previous Page')\n currentPage -= 1\n em = await showPage(currentPage)\n await msg.edit(embed=em)\n await msg.clear_reactions()\n elif \"⏩\" in str(result.emoji):\n # print('Next Page')\n currentPage += 1\n em = await showPage(currentPage)\n await msg.edit(embed=em)\n await msg.clear_reactions()\n elif \"✅\" in str(result.emoji):\n # print('Close List')\n await msg.delete()\n await ctx.message.delete()\n break\n except (discord.NotFound, discord.Forbidden):\n pass\n\n\nclass Time(commands.Converter):\n TIME_AMNT_REGEX = re.compile(\"([1-9][0-9]*)([a-z]+)\", re.IGNORECASE)\n TIME_QUANTITIES = collections.OrderedDict(\n [\n (\"seconds\", 1),\n (\"minutes\", 60),\n (\"hours\", 3600),\n (\"days\", 86400),\n (\"weeks\", 604800),\n (\"months\", 2.628e6),\n (\"years\", 3.154e7),\n ]\n ) # (amount in seconds, max amount)\n\n def get_seconds(self, time):\n \"\"\"Returns the amount of converted time or None if invalid\"\"\"\n seconds = 0\n for time_match in self.TIME_AMNT_REGEX.finditer(time):\n time_amnt = int(time_match.group(1))\n time_abbrev = time_match.group(2)\n time_quantity = discord.utils.find(\n lambda t: t[0].startswith(time_abbrev), self.TIME_QUANTITIES.items()\n )\n if time_quantity is not None:\n seconds += time_amnt * time_quantity[1]\n return None if seconds == 0 else seconds\n\n async def convert(self, ctx, arg):\n result = None\n seconds = self.get_seconds(arg)\n result = seconds\n if result is None:\n raise commands.BadArgument('Unable to parse Time \"{}\" '.format(arg))\n return result\n\n @classmethod\n async def fromString(cls, arg):\n result = None\n seconds = cls.get_seconds(cls, arg)\n result = seconds\n if result is None:\n raise commands.BadArgument('Unable to parse Time \"{}\" '.format(arg))\n return result\n", "sub_path": "warnings/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 12138, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "redbot.core.i18n.Translator", "line_number": 13, "usage_type": "call"}, {"api_name": "redbot.core.Config", "line_number": 17, "usage_type": "name"}, {"api_name": "redbot.core.commands.Context", "line_number": 17, "usage_type": "attribute"}, {"api_name": "redbot.core.commands", "line_number": 17, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 17, "usage_type": "attribute"}, {"api_name": "redbot.core.Config", "line_number": 36, "usage_type": "name"}, {"api_name": "redbot.core.commands.Context", "line_number": 36, "usage_type": "attribute"}, {"api_name": "redbot.core.commands", "line_number": 36, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 36, "usage_type": "attribute"}, {"api_name": "redbot.core.commands.Context", "line_number": 52, "usage_type": "attribute"}, {"api_name": "redbot.core.commands", "line_number": 52, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 52, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 54, "usage_type": "call"}, {"api_name": "redbot.core.commands.Context", "line_number": 56, "usage_type": "attribute"}, {"api_name": "redbot.core.commands", "line_number": 56, "usage_type": "name"}, {"api_name": 
"redbot.core.commands.CheckFailure", "line_number": 59, "usage_type": "attribute"}, {"api_name": "redbot.core.commands", "line_number": 59, "usage_type": "name"}, {"api_name": "redbot.core.commands.CommandOnCooldown", "line_number": 59, "usage_type": "attribute"}, {"api_name": "inspect.getsource", "line_number": 75, "usage_type": "call"}, {"api_name": "redbot.core.checks.is_owner", "line_number": 75, "usage_type": "attribute"}, {"api_name": "redbot.core.checks", "line_number": 75, "usage_type": "name"}, {"api_name": "inspect.getsource", "line_number": 76, "usage_type": "call"}, {"api_name": "redbot.core.commands.Context", "line_number": 85, "usage_type": "attribute"}, {"api_name": "redbot.core.commands", "line_number": 85, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 100, "usage_type": "call"}, {"api_name": "redbot.core.utils.predicates.MessagePredicate.same_context", "line_number": 106, "usage_type": "call"}, {"api_name": "redbot.core.utils.predicates.MessagePredicate", "line_number": 106, "usage_type": "name"}, {"api_name": "asyncio.TimeoutError", "line_number": 108, "usage_type": "attribute"}, {"api_name": "redbot.core.commands.Context", "line_number": 122, "usage_type": "attribute"}, {"api_name": "redbot.core.commands", "line_number": 122, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 144, "usage_type": "call"}, {"api_name": "redbot.core.utils.predicates.MessagePredicate.same_context", "line_number": 150, "usage_type": "call"}, {"api_name": "redbot.core.utils.predicates.MessagePredicate", "line_number": 150, "usage_type": "name"}, {"api_name": "asyncio.TimeoutError", "line_number": 152, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 170, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 171, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 172, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 173, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 174, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 175, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 185, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 194, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 194, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 198, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 198, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 206, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 206, "usage_type": "attribute"}, {"api_name": "asyncio.TimeoutError", "line_number": 256, "usage_type": "attribute"}, {"api_name": "discord.NotFound", "line_number": 264, "usage_type": "attribute"}, {"api_name": "discord.Forbidden", "line_number": 264, "usage_type": "attribute"}, {"api_name": "discord.NotFound", "line_number": 286, "usage_type": "attribute"}, {"api_name": "discord.Forbidden", "line_number": 286, "usage_type": "attribute"}, {"api_name": "redbot.core.commands.Converter", "line_number": 290, "usage_type": "attribute"}, {"api_name": "redbot.core.commands", "line_number": 290, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 291, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 291, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 292, "usage_type": "call"}, 
{"api_name": "discord.utils.find", "line_number": 310, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 310, "usage_type": "attribute"}, {"api_name": "redbot.core.commands.BadArgument", "line_number": 322, "usage_type": "call"}, {"api_name": "redbot.core.commands", "line_number": 322, "usage_type": "name"}, {"api_name": "redbot.core.commands.BadArgument", "line_number": 331, "usage_type": "call"}, {"api_name": "redbot.core.commands", "line_number": 331, "usage_type": "name"}]} +{"seq_id": "370891678", "text": "import numpy as np\nfrom utils import deg2rad, rad2deg, normalize_angle_deg\nfrom BackUp import BackUp\nimport random\n\n\n\ndef dist_to_rock(Rover):\n idx_in_front = np.where(np.abs(Rover.obs_angles) < deg2rad(2.5))[0]\n\n if not len(idx_in_front):\n print(\"Dist to rock: N/A\")\n return None\n\n min_dist = np.min(Rover.rock_dists[idx_in_front])\n print(\"Dist to rock: %.2f\" % min_dist)\n return min_dist\n\n\n\n\n\n\ndef update_steering(Rover):\n Rover.steer = calc_steering_angle(Rover)\n\n\n\ndef update_throttle(Rover):\n if Rover.objective == 'mapping':\n target_velocity = Rover.max_vel\n throttle = Rover.throttle_set\n else:\n target_velocity = 0.5\n throttle = 0.1\n\n if Rover.vel < target_velocity:\n Rover.throttle = throttle\n else:\n Rover.throttle = 0\n\n\ndef stop_rover(Rover):\n Rover.throttle = 0\n Rover.brake = Rover.brake_set\n Rover.steer = 0\n Rover.mode = 'stop'\n\n\ndef turn_rover(Rover, rate=-15):\n Rover.throttle = 0\n Rover.brake = 0\n Rover.steer = rate # Could be more clever here about which way to turn\n\n\ndef decision_step(Rover):\n if Rover.home_pos is None:\n assert(Rover.pos is not None)\n Rover.home_pos = Rover.pos\n\n\n if Rover.stuck_counter > 100 and not type(Rover.state_machine.current_state()) == type(BackUp) :\n Rover.state_machine.push_front(BackUp(Rover))\n Rover.stuck_counter = 0\n\n Rover.state_machine.run()\n\n if abs(Rover.throttle) > 0:\n if abs(Rover.vel) < 0.2:\n Rover.stuck_counter += 1\n else:\n Rover.stuck_counter = 0\n\n return Rover\n\n", "sub_path": "code/decision.py", "file_name": "decision.py", "file_ext": "py", "file_size_in_byte": 1618, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.where", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 9, "usage_type": "call"}, {"api_name": "utils.deg2rad", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 15, "usage_type": "call"}, {"api_name": "BackUp.BackUp", "line_number": 62, "usage_type": "argument"}, {"api_name": "BackUp.BackUp", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "593742952", "text": "import requests\nfrom cx_Freeze import Executable, setup\n\nbuildOptions = {\n \"packages\": [\"os\", 'requests', 'queue', 'idna', 'gc', 'pysocks'],\n 'includes': ['requests', 'dermod', 'queue', 'idna', 'gc', 'pysocks'],\n \"excludes\": [\"tkinter\"],\n 'include_files': [\"extra/\", (requests.certs.where(), 'cacert.pem')]\n}\n\nsetup(\n name=\"DBooru\",\n version=\"1.0.0\",\n requires=['requests', 'idna', 'gc', 'pysocks'],\n options={\"build_exe\": buildOptions},\n executables=[Executable(\"main.py\"), Executable('webv3.py')]\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 528, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "requests.certs.where", "line_number": 8, "usage_type": "call"}, {"api_name": 
"requests.certs", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cx_Freeze.setup", "line_number": 11, "usage_type": "call"}, {"api_name": "cx_Freeze.Executable", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "240933798", "text": "from levels.levels import dummy\nimport serial\nfrom time import sleep\nfrom config import COM, BAUDRATE, TIMEOUT, SINGLE_PRESS_DURATION,\\\n CENTER, HAT_CENTER\n\ncommands = dummy\n\n\ndef SplitHex(num):\n return [(num >> 8), (num & 0xFF)]\n\n\ndef Timeout(time=SINGLE_PRESS_DURATION):\n print(\"timeout for:\", time,\n \"ms\")\n sleep(time/1000)\n\n\ndef Command(lx=CENTER, ly=CENTER, rx=CENTER, ry=CENTER, hat=HAT_CENTER, button=0):\n print(\"sending:\", lx, ly, rx, ry, hat, button)\n button = SplitHex(button)\n return bytearray([lx, ly, rx, ry, hat, button[0], button[1]])\n\n\nprint(\"Connecting...\")\n\nser = serial.Serial(COM, BAUDRATE, timeout=TIMEOUT)\nser.setDTR(False)\nsleep(0.022)\nser.setDTR(True)\n\nindex = 0\ntry:\n while index < len(commands):\n com = commands[index]\n case = com[0]\n if (case == None):\n ser.write(Command())\n Timeout(com[1])\n elif (case == \"GOTO\"):\n index = com[1] - 1\n elif (case == \"COPY\"):\n _tmp = commands[com[1]]\n ser.write(Command(_tmp[0], _tmp[1], _tmp[2],\n _tmp[3], _tmp[4], _tmp[5]))\n Timeout(_tmp[6] if (len(_tmp) == 7) else SINGLE_PRESS_DURATION)\n else:\n ser.write(Command(com[0], com[1], com[2], com[3], com[4], com[5]))\n Timeout(com[6] if (len(com) == 7) else SINGLE_PRESS_DURATION)\n index += 1\n ser.write(Command())\n ser.close()\nexcept KeyboardInterrupt:\n ser.write(Command())\n print(\"Exiting\")\n ser.close()\nexcept:\n raise", "sub_path": "python/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 1547, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "levels.levels.dummy", "line_number": 7, "usage_type": "name"}, {"api_name": "config.SINGLE_PRESS_DURATION", "line_number": 14, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 17, "usage_type": "call"}, {"api_name": "config.CENTER", "line_number": 20, "usage_type": "name"}, {"api_name": "config.HAT_CENTER", "line_number": 20, "usage_type": "name"}, {"api_name": "serial.Serial", "line_number": 28, "usage_type": "call"}, {"api_name": "config.COM", "line_number": 28, "usage_type": "argument"}, {"api_name": "config.BAUDRATE", "line_number": 28, "usage_type": "argument"}, {"api_name": "config.TIMEOUT", "line_number": 28, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "config.SINGLE_PRESS_DURATION", "line_number": 47, "usage_type": "name"}, {"api_name": "config.SINGLE_PRESS_DURATION", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "82240921", "text": "import gdal\nimport numpy as np\n\n# filename= ('/Users/lauro/Documents/PROJECTS/BOLIVIA/Bolivia2/simulazioni/Prova6/Results6b_verbose2_WATERDEPTH_36000.tif');\n\n# readFile(filename):\n\"\"\"def readGeotiff(filename):\n filehandle = gdal.Open(filename)\n band1 = filehandle.GetRasterBand(1)\n scale = band1.GetScale()\n print (scale)\n geotransform = filehandle.GetGeoTransform()\n geoproj = filehandle.GetProjection()\n band1data = band1.ReadAsArray()\n xsize = filehandle.RasterXSize\n ysize = filehandle.RasterYSize\n return band1data, xsize, ysize, geotransform, geoproj\n\"\"\"\n\ndef readGeotiff (filename):\n filehandle = gdal.Open(filename)\n geotransform = filehandle.GetGeoTransform()\n geoproj = 
filehandle.GetProjection()\n xsize = filehandle.RasterXSize\n ysize = filehandle.RasterYSize\n band_tot = filehandle.RasterCount\n\n if band_tot > 1:\n data3d= np.zeros((ysize,xsize,band_tot))\n\n for i in range (0,band_tot):\n band = filehandle.GetRasterBand(i+1)\n scale = band.GetScale()\n nodata = band.GetNoDataValue()\n dataset = band.ReadAsArray()\n dataset [dataset==nodata]= np.nan\n if band_tot > 1:\n data3d[:,:,i] = dataset\n else:\n data3d = dataset\n\n return data3d, xsize, ysize, geotransform, geoproj\n\n\n\ndef writeGeotiffSingleBand(filename, geotransform, geoprojection, data, nodata=np.nan, BandName= \"\"):\n (x, y) = data.shape\n format = \"GTiff\"\n driver = gdal.GetDriverByName(format)\n # dst_datatype = gdal.GDT_Byte #byte\n dst_datatype = gdal.GDT_Float32\n dst_ds = driver.Create(\n filename, y, x, 1, dst_datatype, options=[\n 'COMPRESS=DEFLATE'])\n # sDATETIME= \"2013:04:30 12:00:00\"#The format is: \"YYYY:MM:DD HH:MM:SS\",\n # with hours like those on a 24-hour clock, and one space character\n # between the date and the time. The length of the string, including the\n # terminating NUL, is 20 bytes.\n #dst_ds.SetMetadata({'TIFFTAG_SOFTWARE': 'Hydra2D'})\n dst_ds.GetRasterBand(1).SetNoDataValue(nodata)\n dst_ds.GetRasterBand(1).WriteArray(data)\n dst_ds.GetRasterBand(1).SetDescription (BandName)\n dst_ds.SetGeoTransform(geotransform)\n dst_ds.SetProjection(geoprojection)\n return 1\n\n\ndef writeGeotiff(filename, geotransform, geoprojection, data, nodata=np.nan, BandNames=None, globalDescr=\"\"):\n\n dim = len(data.shape)\n\n if dim == 2:\n (x, y) = data.shape\n iNbands = 1\n else:\n (x, y, z) = data.shape\n iNbands = z\n\n\n format = \"GTiff\"\n driver = gdal.GetDriverByName(format)\n # dst_datatype = gdal.GDT_Byte #byte\n dst_datatype = gdal.GDT_Float32\n # dst_ds = driver.Create(filename,y,x,1,dst_datatype,options = [\n # 'COMPRESS=DEFLATE', 'PREDICTOR=3' ]) #incompatibility of PREDICTOR con\n # Geoserver 2.2.4 (Bolivia)\n dst_ds = driver.Create(filename, y,x,iNbands, dst_datatype, options=['COMPRESS=DEFLATE'])\n dst_ds.SetDescription(globalDescr)\n dst_ds.SetGeoTransform(geotransform)\n dst_ds.SetProjection(geoprojection)\n # write data\n if iNbands == 1:\n dst_ds.GetRasterBand(1).WriteArray(data[:, :])\n dst_ds.GetRasterBand(1).SetNoDataValue(nodata)\n if BandNames != None:\n dst_ds.GetRasterBand(1).SetDescription (BandNames)\n else:\n for i in range(0, iNbands):\n dst_ds.GetRasterBand(1).SetNoDataValue(nodata)\n dst_ds.GetRasterBand(i + 1).WriteArray(data[:, :, i])\n if BandNames != None:\n dst_ds.GetRasterBand(i + 1).SetDescription (BandNames[i])\n return 1\n\n\ndef writeGeotiffSingleBandByte(filename, geotransform, geoprojection, data, descr = \"\"):\n (x, y) = data.shape\n format = \"GTiff\"\n driver = gdal.GetDriverByName(format)\n # dst_datatype = gdal.GDT_Byte #byte\n dst_datatype = gdal.GDT_Byte\n dst_ds = driver.Create(\n filename, y, x, 1, dst_datatype, options=['COMPRESS=DEFLATE'])\n dst_ds.SetMetadata({'TIFFTAG_IMAGEDESCRIPTION': descr})\n dst_ds.GetRasterBand(1).WriteArray(data)\n dst_ds.SetGeoTransform(geotransform)\n dst_ds.SetProjection(geoprojection)\n return 1", "sub_path": "indexes/CDI/geotiff.py", "file_name": "geotiff.py", "file_ext": "py", "file_size_in_byte": 4140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "gdal.Open", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.nan", 
"line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 46, "usage_type": "attribute"}, {"api_name": "gdal.GetDriverByName", "line_number": 49, "usage_type": "call"}, {"api_name": "gdal.GDT_Float32", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 68, "usage_type": "attribute"}, {"api_name": "gdal.GetDriverByName", "line_number": 81, "usage_type": "call"}, {"api_name": "gdal.GDT_Float32", "line_number": 83, "usage_type": "attribute"}, {"api_name": "gdal.GetDriverByName", "line_number": 109, "usage_type": "call"}, {"api_name": "gdal.GDT_Byte", "line_number": 111, "usage_type": "attribute"}]} +{"seq_id": "610677958", "text": "\nfrom flask import Flask, render_template, request\nimport pandas as pd\nimport json\nimport plotly\nimport plotly.express as px\n\nimport csv, re, operator\n# from textblob import TextBlob\n\napp = Flask(__name__)\n\nperson = {\n 'first_name': 'Nohossat',\n 'last_name' : 'TRAORE',\n 'address' : '9 rue Léon Giraud · PARIS · FRANCE',\n 'job': 'Web developer',\n 'tel': '0678282923',\n 'email': 'nohossat.tra@yahoo.com',\n 'description' : 'Suite à une expérience internationale en développement web et dans le domaine des arts, l’impact de l’intelligence artificielle dans nos vies me surprend de jour en jour. \\n Aujourd’hui, je souhaite changer de cap et comprendre les secrets que recèlent nos données. J’aimerais mettre à profit ces découvertes au service des entreprises/associations à dimension sociale.',\n 'social_media' : [\n {\n 'link': 'https://www.facebook.com/nono',\n 'icon' : 'fa-facebook-f'\n },\n {\n 'link': 'https://github.com/nono',\n 'icon' : 'fa-github'\n },\n {\n 'link': 'linkedin.com/in/nono',\n 'icon' : 'fa-linkedin-in'\n },\n {\n 'link': 'https://twitter.com/nono',\n 'icon' : 'fa-twitter'\n }\n ],\n 'img': 'img/img_nono.jpg',\n 'experiences' : [\n {\n 'title' : 'Web Developer',\n 'company': 'AZULIK',\n 'description' : 'Project manager and lead developer for several AZULIK websites.',\n 'timeframe' : 'July 2018 - November 2019'\n },\n {\n 'title' : 'Freelance Web Developer',\n 'company': 'Independant',\n 'description' : 'Create Wordpress websites for small and medium companies. 
',\n 'timeframe' : 'February 2017 - Present'\n },\n {\n 'title' : 'Sharepoint Intern',\n 'company': 'ALTEN',\n 'description' : 'Help to manage a 600 Sharepoint sites platform (audit, migration to Sharepoint newer versions)',\n 'timeframe' : 'October 2015 - October 2016'\n }\n ],\n 'education' : [\n {\n 'university': 'Paris Diderot',\n 'degree': 'Projets informatiques et Startégies d\\'entreprise (PISE)',\n 'description' : 'Gestion de projets IT, Audit, Programmation',\n 'mention' : 'Bien',\n 'timeframe' : '2015 - 2016'\n },\n {\n 'university': 'Paris Dauphine',\n 'degree': 'Master en Management global',\n 'description' : 'Fonctions supports (Marketing, Finance, Ressources Humaines, Comptabilité)',\n 'mention' : 'Bien',\n 'timeframe' : '2015'\n },\n {\n 'university': 'Lycée Turgot - Paris Sorbonne',\n 'degree': 'CPGE Economie & Gestion',\n 'description' : 'Préparation au concours de l\\'ENS Cachan, section Economie',\n 'mention' : 'N/A',\n 'timeframe' : '2010 - 2012'\n }\n ],\n 'programming_languages' : {\n 'HMTL' : ['fa-html5', '100'],\n 'CSS' : ['fa-css3-alt', '100'],\n 'SASS' : ['fa-sass', '90'],\n 'JS' : ['fa-js-square', '90'],\n 'Wordpress' : ['fa-wordpress', '80'],\n 'Python': ['fa-python', '70'],\n 'Mongo DB' : ['fa-database', '60'],\n 'MySQL' : ['fa-database', '60'],\n 'NodeJS' : ['fa-node-js', '50']\n },\n 'languages' : {'French' : 'Native', 'English' : 'Professional', 'Spanish' : 'Professional', 'Italian' : 'Limited Working Proficiency'},\n 'interests' : ['Dance', 'Travel', 'Languages']\n}\n\n@app.route('/')\ndef cv(person=person):\n return render_template('index.html', person=person)\n\n\n\n\n@app.route('/callback', methods=['POST', 'GET'])\ndef cb():\n\treturn gm(request.args.get('data'))\n\n@app.route('/chart')\ndef index():\n\treturn render_template('chartsajax.html', graphJSON=gm())\n\ndef gm(country='United Kingdom'):\n\tdf = pd.DataFrame(px.data.gapminder())\n\n\tfig = px.line(df[df['country']==country], x=\"year\", y=\"gdpPercap\")\n\n\tgraphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\treturn graphJSON\n\n\n@app.route('/senti')\ndef main():\n\ttext = \"\"\n\tvalues = {\"positive\": 0, \"negative\": 0, \"neutral\": 0}\n\n\twith open('ask_politics.csv', 'rt') as csvfile:\n\t\treader = csv.DictReader(csvfile, delimiter=',', quotechar='\"')\n\t\tfor idx, row in enumerate(reader):\n\t\t\tif idx > 0 and idx % 2000 == 0:\n\t\t\t\tbreak\n\t\t\tif 'text' in row:\n\t\t\t\tnolinkstext = re.sub(r'''(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’]))''', '', row['text'], flags=re.MULTILINE)\n\t\t\t\ttext = nolinkstext\n\n\t\t\tblob = TextBlob(text)\n\t\t\tfor sentence in blob.sentences:\n\t\t\t\tsentiment_value = sentence.sentiment.polarity\n\t\t\t\tif sentiment_value >= -0.1 and sentiment_value <= 0.1:\n\t\t\t\t\tvalues['neutral'] += 1\n\t\t\t\telif sentiment_value < 0:\n\t\t\t\t\tvalues['negative'] += 1\n\t\t\t\telif sentiment_value > 0:\n\t\t\t\t\tvalues['positive'] += 1\n\n\tvalues = sorted(values.items(), key=operator.itemgetter(1))\n\ttop_ten = list(reversed(values))\n\tif len(top_ten) >= 11:\n\t\ttop_ten = top_ten[1:11]\n\telse :\n\t\ttop_ten = top_ten[0:len(top_ten)]\n\n\ttop_ten_list_vals = []\n\ttop_ten_list_labels = []\n\tfor language in top_ten:\n\t\ttop_ten_list_vals.append(language[1])\n\t\ttop_ten_list_labels.append(language[0])\n\n\tgraph_values = [{\n\t\t\t\t\t'labels': 
top_ten_list_labels,\n\t\t\t\t\t'values': top_ten_list_vals,\n\t\t\t\t\t'type': 'pie',\n\t\t\t\t\t'insidetextfont': {'color': '#FFFFFF',\n\t\t\t\t\t\t\t\t\t\t'size': '14',\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t'textfont': {'color': '#FFFFFF',\n\t\t\t\t\t\t\t\t\t\t'size': '14',\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t}]\n\n\tlayout = {'title': '意见挖掘'}\n\n\treturn render_template('sentiment.html', graph_values=graph_values, layout=layout)\n\n\nif __name__ == '__main__':\n app.run(debug= True,port=5000,threaded=True)\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5873, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 107, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 111, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 114, "usage_type": "call"}, {"api_name": "plotly.express.data.gapminder", "line_number": 114, "usage_type": "call"}, {"api_name": "plotly.express.data", "line_number": 114, "usage_type": "attribute"}, {"api_name": "plotly.express", "line_number": 114, "usage_type": "name"}, {"api_name": "plotly.express.line", "line_number": 116, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 116, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 118, "usage_type": "call"}, {"api_name": "plotly.utils", "line_number": 118, "usage_type": "attribute"}, {"api_name": "csv.DictReader", "line_number": 128, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 133, "usage_type": "call"}, {"api_name": "re.MULTILINE", "line_number": 133, "usage_type": "attribute"}, {"api_name": "operator.itemgetter", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 173, "usage_type": "call"}]} +{"seq_id": "415247438", "text": "from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nimport uvicorn\n\nfrom app import db, ml, viz\n\ndescription = \"\"\"\nMISSION: Be a one-stop resource for users to receive the most accurate city information.\n\n\nCITYSPIRE APP:\nAn app that analyzes data from cities such as populations, cost of living,\\n\nrental rates, crime rates, park (walk score), and many other social \\n\nand economic factors that are important in deciding where someone would like to live.\\n\nThis app will present such important data in an intuitive and easy to understand interface.\\n\n\nUse data to find a place right for you to live.\n\"\"\"\n\napp = FastAPI(\n title=\"CITYSPIRE API\",\n description=description,\n docs_url=\"/\",\n)\n\napp.include_router(db.router, tags=[\"Database\"])\napp.include_router(ml.router, tags=[\"Machine Learning\"])\napp.include_router(viz.router, tags=[\"Visualization\"])\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\n@app.on_event(\"startup\")\nasync def startup():\n await db.database.connect()\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n await db.database.disconnect()\n\n\nif __name__ == \"__main__\":\n uvicorn.run(app)\n", "sub_path": "app/main.py", "file_name": 
"main.py", "file_ext": "py", "file_size_in_byte": 1246, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "fastapi.FastAPI", "line_number": 20, "usage_type": "call"}, {"api_name": "app.include_router", "line_number": 26, "usage_type": "call"}, {"api_name": "app.db.router", "line_number": 26, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 26, "usage_type": "name"}, {"api_name": "app.include_router", "line_number": 27, "usage_type": "call"}, {"api_name": "app.ml.router", "line_number": 27, "usage_type": "attribute"}, {"api_name": "app.ml", "line_number": 27, "usage_type": "name"}, {"api_name": "app.include_router", "line_number": 28, "usage_type": "call"}, {"api_name": "app.viz.router", "line_number": 28, "usage_type": "attribute"}, {"api_name": "app.viz", "line_number": 28, "usage_type": "name"}, {"api_name": "app.add_middleware", "line_number": 30, "usage_type": "call"}, {"api_name": "fastapi.middleware.cors.CORSMiddleware", "line_number": 31, "usage_type": "argument"}, {"api_name": "app.db.database.connect", "line_number": 41, "usage_type": "call"}, {"api_name": "app.db.database", "line_number": 41, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 41, "usage_type": "name"}, {"api_name": "app.on_event", "line_number": 39, "usage_type": "call"}, {"api_name": "app.db.database.disconnect", "line_number": 46, "usage_type": "call"}, {"api_name": "app.db.database", "line_number": 46, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 46, "usage_type": "name"}, {"api_name": "app.on_event", "line_number": 44, "usage_type": "call"}, {"api_name": "uvicorn.run", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "5359790", "text": "import os\r\nfrom email.parser import Parser\r\nclass TestingCorpus:\r\n '''Class of methods for test handling.'''\r\n def __init__(self, corpus_dir):\r\n self.corpus_dir = corpus_dir\r\n self.file_names_list = self.list_file_names()\r\n self.pos_tag = 'SPAM'\r\n self.neg_tag = 'OK'\r\n \r\n def list_file_names(self):\r\n '''Returns a list of files in the directory (ignores special files beginning with \"!\").'''\r\n file_list = []\r\n for fname in os.listdir(self.corpus_dir):\r\n if not fname.startswith('!'):\r\n file_list.append(fname)\r\n return file_list\r\n \r\n def file_as_lower_string(self,fname):\r\n '''Returns the required file as a string converted to lower chars.'''\r\n return self.file_as_string(fname).lower()\r\n \r\n def file_as_string(self, fname):\r\n '''Returns the required file as a string. Necessary for file_as_lower_string().'''\r\n file_path = os.path.join(self.corpus_dir, fname)\r\n with open(file_path, 'r', encoding='utf-8') as file:\r\n file_string = file.read()\r\n return file_string\r\n \r\n def parse_email(self, fname):\r\n '''Used for get important parts of the mail.'''\r\n parser = Parser()\r\n email = parser.parsestr(self.file_as_lower_string(fname))\r\n sender = self.get_sender(email)\r\n subject = self.get_subject(email)\r\n return (sender, subject)\r\n \r\n def get_sender(self, email):\r\n '''Returns the sender's address. Necessary for parse_email().'''\r\n sender = email['from']\r\n if sender == None:\r\n return 'none'\r\n if \"<\" in sender:\r\n strip_chars = ''\r\n for char in sender:\r\n if char == '<':\r\n break\r\n else:\r\n strip_chars += char\r\n sender = sender.lstrip(strip_chars)\r\n return sender.strip().strip('<>')\r\n \r\n def get_subject(self, email):\r\n '''Returns the subject. 
Necessary for parse_email().'''\r\n subject = email['subject']\r\n if subject == None:\r\n return 'none'\r\n return subject\r\n \r\n", "sub_path": "RPH/SPAM/testingcorpus.py", "file_name": "testingcorpus.py", "file_ext": "py", "file_size_in_byte": 2185, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.listdir", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "email.parser.Parser", "line_number": 32, "usage_type": "call"}, {"api_name": "email.parser", "line_number": 33, "usage_type": "name"}, {"api_name": "email.parser", "line_number": 34, "usage_type": "argument"}, {"api_name": "email.parser", "line_number": 35, "usage_type": "argument"}, {"api_name": "email.parser", "line_number": 40, "usage_type": "name"}, {"api_name": "email.parser", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "108492439", "text": "import pytest\nfrom Pages.login_page import Loginpage\nfrom selenium import webdriver\nfrom Utils import utils as utils\nimport allure\n\n\n#@pytest.mark.usefixtures(\"setup\")\nclass TestLogin():\n\n @pytest.fixture(scope=\"class\")\n def setup(self):\n global driver\n driver = webdriver.Chrome(\n executable_path=\"C:\\\\Users\\\\C5261196\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe\")\n driver.maximize_window()\n yield\n driver.close()\n\n def test_login_valid(self,setup):\n driver.get(utils.URL)\n login = Loginpage(driver)\n login.enter_email_textbox(utils.USERNAME)\n login.enter_password_textbox(utils.PASSWORD)\n login.click_login_button()\n", "sub_path": "PythonWebAutomation-master/Tests/login_test.py", "file_name": "login_test.py", "file_ext": "py", "file_size_in_byte": 712, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 14, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 14, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 11, "usage_type": "call"}, {"api_name": "Utils.utils.URL", "line_number": 21, "usage_type": "attribute"}, {"api_name": "Utils.utils", "line_number": 21, "usage_type": "name"}, {"api_name": "Pages.login_page.Loginpage", "line_number": 22, "usage_type": "call"}, {"api_name": "Utils.utils.USERNAME", "line_number": 23, "usage_type": "attribute"}, {"api_name": "Utils.utils", "line_number": 23, "usage_type": "name"}, {"api_name": "Utils.utils.PASSWORD", "line_number": 24, "usage_type": "attribute"}, {"api_name": "Utils.utils", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "584440933", "text": "\"\"\"\nCopyright 2021 BlazeMeter Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport json\nimport os\nfrom subprocess import PIPE\nfrom bzt import TaurusConfigError, ToolError\nfrom bzt.modules import ScenarioExecutor\nfrom bzt.modules.console import ExecutorWidget\nfrom 
bzt.modules.aggregator import ResultsReader, ConsolidatingAggregator\nfrom bzt.utils import RequiredTool, CALL_PROBLEMS, FileReader, shutdown_process, get_full_path, is_windows, is_mac, \\\n untar\n\n\nclass VegetaExecutor(ScenarioExecutor):\n def __init__(self):\n super(VegetaExecutor, self).__init__()\n self.output_file = None\n self.log_file = None\n self.script = None\n self.process = None\n self.vegeta = None\n self.kpi_file = None\n self.scenario = None\n\n def prepare(self):\n super(VegetaExecutor, self).prepare()\n self.scenario = self.get_scenario()\n self.install_required_tools()\n\n self.script = self.get_script_path()\n if not self.script:\n requests = self.scenario.get_requests()\n if not requests:\n raise TaurusConfigError(\"Either 'script' or 'scenario' should be present for Vegeta executor\")\n self.script = os.path.join(self.engine.artifacts_dir, \"vegeta.txt\")\n with open(self.script, \"w\") as f:\n i = 0\n for request in requests:\n f.write(\"{} {}\\n\".format(request.method, request.url))\n headers = \"\\n\".join([\"{}: {}\".format(key, value) for key, value in request.headers.items()])\n if headers:\n f.write(\"{}\\n\".format(headers))\n if request.body:\n json_body_file = os.path.join(self.engine.artifacts_dir, \"body-{}.json\".format(i))\n with open(json_body_file, \"w\") as g:\n g.write(json.dumps(request.body))\n f.write(\"@{}\\n\".format(json_body_file))\n f.write(\"\\n\")\n i += 1\n\n self.stdout = open(self.engine.create_artifact(\"Vegeta\", \".out\"), \"w\")\n self.stderr = open(self.engine.create_artifact(\"Vegeta\", \".err\"), \"w\")\n\n self.kpi_file = self.engine.create_artifact(\"kpi\", \".csv\")\n self.reader = VegetaLogReader(self.kpi_file, self.log)\n if isinstance(self.engine.aggregator, ConsolidatingAggregator):\n self.engine.aggregator.add_underling(self.reader)\n\n def startup(self):\n cmdline = [self.vegeta.tool_path, \"attack\", \"-targets\", self.script]\n load = self.get_load()\n\n if load.throughput:\n cmdline += ['-rate', str(load.throughput)]\n\n if load.hold:\n cmdline += ['-duration', str(int(load.hold)) + \"s\"]\n\n if load.concurrency:\n cmdline += ['-max-workers', str(int(load.concurrency))]\n\n if self.scenario and 'timeout' in self.scenario:\n cmdline += ['-timeout', str(int(self.scenario.get('timeout'))) + \"s\"]\n\n user_cmd = self.settings.get(\"cmdline\")\n if user_cmd:\n cmdline += user_cmd.split(\" \")\n\n self.process = self._execute(cmdline, stdout=PIPE, shell=False)\n with open(self.kpi_file, 'wb') as f:\n self._execute([self.vegeta.tool_path, \"encode\", \"-to=csv\"], stdin=self.process.stdout, stdout=f, shell=False)\n\n def get_widget(self):\n if not self.widget:\n label = \"%s\" % self\n self.widget = ExecutorWidget(self, \"Vegeta: \" + label.split('/')[1])\n return self.widget\n\n def check(self):\n retcode = self.process.poll()\n if retcode is not None:\n ToolError(f\"Vegeta tool exited with non-zero code: {retcode}\")\n return True\n return False\n\n def shutdown(self):\n shutdown_process(self.process, self.log)\n\n def post_process(self):\n if self.kpi_file:\n self.engine.existing_artifact(self.kpi_file)\n super(VegetaExecutor, self).post_process()\n\n def install_required_tools(self):\n self.vegeta = self._get_tool(Vegeta, config=self.settings)\n self.vegeta.tool_name = self.vegeta.tool_name.lower()\n if not self.vegeta.check_if_installed():\n self.vegeta.install()\n\n def resource_files(self):\n return [self.get_script_path(required=True)]\n\n\nclass VegetaLogReader(ResultsReader):\n def __init__(self, filename, 
parent_logger):\n super(VegetaLogReader, self).__init__()\n self.log = parent_logger.getChild(self.__class__.__name__)\n self.file = FileReader(filename=filename, parent_logger=self.log)\n\n def _read(self, last_pass=False):\n lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)\n\n for line in lines:\n log_vals = [val.strip() for val in line.split(',')]\n\n _tstamp = int(log_vals[0][:10])\n _url = log_vals[10]\n _concur = 1\n _etime = float(log_vals[2]) / 1000000000.0\n _con_time = 0\n _latency = 0\n _rstatus = log_vals[1]\n _error = log_vals[5] or None\n _bytes = int(log_vals[4])\n\n yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _bytes\n\n\nclass Vegeta(RequiredTool):\n DOWNLOAD_LINK = \\\n \"https://github.com/tsenart/vegeta/releases/download/v{version}/vegeta_{version}_{platform}_amd64.tar.gz \"\n VERSION = \"12.8.4\"\n LOCAL_PATH = \"~/.bzt/vegeta-taurus/{version}/\"\n\n def __init__(self, config=None, **kwargs):\n settings = config or {}\n version = settings.get(\"version\", self.VERSION)\n self.tool_path = get_full_path(settings.get(\"path\", self.LOCAL_PATH.format(version=version) + 'vegeta'))\n if not is_windows():\n platform = 'darwin' if is_mac() else 'linux'\n download_link = settings.get(\"download-link\", self.DOWNLOAD_LINK).format(version=version, platform=platform)\n else:\n download_link = ''\n super(Vegeta, self).__init__(tool_path=self.tool_path, download_link=download_link, version=version, **kwargs)\n\n def check_if_installed(self):\n self.log.debug('Checking Vegeta Framework: %s' % self.tool_path)\n try:\n out, err = self.call([self.tool_path, '-version'])\n except CALL_PROBLEMS as exc:\n self.log.warning(\"%s check failed: %s\", self.tool_name, exc)\n return False\n\n if err:\n out += err\n self.log.debug(\"Vegeta output: %s\", out)\n return True\n\n def install(self):\n if is_windows():\n raise ToolError(\"Unable to install Vegeta on Windows! 
Manual installation required.\")\n\n dest = get_full_path(self.tool_path, step_up=1)\n if not os.path.exists(dest):\n os.makedirs(dest)\n\n self.log.info(\"Will install %s into %s\", self.tool_name, dest)\n vegeta_dist = self._download(use_link=True)\n\n self.log.info(\"Untaring %s\", vegeta_dist)\n untar(vegeta_dist, dest, rel_path='vegeta')\n os.remove(vegeta_dist)\n os.chmod(get_full_path(self.tool_path), 0o755)\n self.log.info(\"Installed Vegeta successfully\")\n\n if not self.check_if_installed():\n raise ToolError(\"Unable to run %s after installation!\" % self.tool_name)\n", "sub_path": "bzt/modules/vegeta.py", "file_name": "vegeta.py", "file_ext": "py", "file_size_in_byte": 7796, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "bzt.modules.ScenarioExecutor", "line_number": 27, "usage_type": "name"}, {"api_name": "bzt.TaurusConfigError", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 59, "usage_type": "call"}, {"api_name": "bzt.modules.aggregator.ConsolidatingAggregator", "line_number": 69, "usage_type": "argument"}, {"api_name": "subprocess.PIPE", "line_number": 92, "usage_type": "name"}, {"api_name": "bzt.modules.console.ExecutorWidget", "line_number": 99, "usage_type": "call"}, {"api_name": "bzt.ToolError", "line_number": 105, "usage_type": "call"}, {"api_name": "bzt.utils.shutdown_process", "line_number": 110, "usage_type": "call"}, {"api_name": "bzt.modules.aggregator.ResultsReader", "line_number": 127, "usage_type": "name"}, {"api_name": "bzt.utils.FileReader", "line_number": 131, "usage_type": "call"}, {"api_name": "bzt.utils.RequiredTool", "line_number": 152, "usage_type": "name"}, {"api_name": "bzt.utils.get_full_path", "line_number": 161, "usage_type": "call"}, {"api_name": "bzt.utils.is_windows", "line_number": 162, "usage_type": "call"}, {"api_name": "bzt.utils.is_mac", "line_number": 163, "usage_type": "call"}, {"api_name": "bzt.utils.CALL_PROBLEMS", "line_number": 173, "usage_type": "name"}, {"api_name": "bzt.utils.is_windows", "line_number": 183, "usage_type": "call"}, {"api_name": "bzt.ToolError", "line_number": 184, "usage_type": "call"}, {"api_name": "bzt.utils.get_full_path", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path", "line_number": 187, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 188, "usage_type": "call"}, {"api_name": "bzt.utils.untar", "line_number": 194, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 195, "usage_type": "call"}, {"api_name": "os.chmod", "line_number": 196, "usage_type": "call"}, {"api_name": "bzt.utils.get_full_path", "line_number": 196, "usage_type": "call"}, {"api_name": "bzt.ToolError", "line_number": 200, "usage_type": "call"}]} +{"seq_id": "189932280", "text": "# -*- mode: python; coding: utf-8 -*-\n# Copyright 2018 the HERA Collaboration\n# Licensed under the 2-clause BSD license.\n\n\"\"\"Some low-level configuration management utility functions.\n\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os.path\nimport subprocess\nimport six\nfrom astropy.time import Time\nfrom 
astropy.time import TimeDelta\nimport datetime\n\nfrom . import mc\n\nPAST_DATE = '2000-01-01'\nall_hera_zone_prefixes = ['HH', 'HA', 'HB'] # This is for hookup_cache to get all\ndefault_station_prefixes = ['HH', 'HA', 'HB'] # This is for defaults for sys etc.\n\n\ndef get_cm_repo_git_hash(mc_config_path=None, cm_csv_path=None):\n \"\"\"\n Get the current cm_version for recording with antenna locations.\n \"\"\"\n if cm_csv_path is None:\n cm_csv_path = mc.get_cm_csv_path(mc_config_file=mc_config_path)\n if cm_csv_path is None:\n raise ValueError('No cm_csv_path defined in mc_config file.')\n\n git_hash = subprocess.check_output(['git', '-C', cm_csv_path, 'rev-parse', 'HEAD'],\n stderr=subprocess.STDOUT).strip()\n return git_hash\n\n\ndef log(msg, **kwargs):\n fp = open(mc.cm_log_file, 'a')\n dt = Time.now()\n fp.write('-------------------' + str(dt.datetime) + ' ' + msg\n + '-------------------\\n\\n')\n for key, value in kwargs.items():\n if key == 'args':\n fp.write('--args\\n\\t')\n vargs = vars(value)\n for k, v in vargs.items():\n fp.write(str(k) + ': ' + str(v) + '; ')\n fp.write('\\n\\n')\n elif key == 'data_dict':\n fp.write('--data\\n\\t')\n for k, v in value.items():\n fp.write(' ' + k + ' ')\n for d in v:\n fp.write(str(d) + '; ')\n fp.write('\\n\\n')\n else:\n fp.write('--other\\n\\t')\n fp.write(str(key) + ': ' + str(value) + '\\n\\n')\n fp.close()\n\n\n# #######################################Key stuff\ndef make_part_key(hpn, rev):\n return \":\".join([hpn, rev]).strip()\n\n\ndef split_part_key(key):\n return key.split(':')[0], key.split(':')[1]\n\n\ndef make_connection_key(hpn, rev, port, start_gps):\n return \":\".join([hpn, rev, port, str(start_gps)]).strip()\n\n\ndef split_connection_key(key):\n ks = key.split(':')\n return ks[0], ks[1], ks[2], ks[3]\n\n\ndef stringify(X):\n if X is None:\n return None\n if isinstance(X, six.string_types):\n return X\n if isinstance(X, list):\n return ','.join(X)\n return str(X)\n\n\ndef listify(X):\n if X is None:\n return None\n if isinstance(X, six.string_types) and ',' in X:\n return X.split(',')\n if isinstance(X, list):\n return X\n return [X]\n\n\ndef add_verbosity_args(parser):\n \"\"\"Add a standardized \"--verbosity\" argument to an ArgParser object. Supported\n values are \"l\", \"m\", and \"h\", which presumably stand for \"low\", \"medium\",\n and \"high\".\n\n The function name is plural because it's conceivable that in the future we might\n want to provide multiple arguments related to this general topic.\n\n \"\"\"\n parser.add_argument('-v', '--verbosity', help=\"Verbosity level: 'l', 'm', or 'h'. 
[l].\",\n choices=['l', 'm', 'h'], default=\"m\")\n\n\n# ##############################################DATE STUFF\ndef add_date_time_args(parser):\n \"\"\"Add standardized \"--date\" and \"--time\" arguments to an ArgParser object.\n Their values should then be converted into a Python DateTime object using\n the function `get_astropytime`.\n\n \"\"\"\n parser.add_argument(\n '--date', help=\"UTC YYYY/MM/DD or '<' or '>' or 'n/a' or 'now' [now]\",\n default='now')\n parser.add_argument(\n '--time', help=\"UTC hh:mm or float (hours), must include --date if use --time\", default=0.0)\n\n\ndef is_active(at_date, start_date, stop_date):\n at_date = get_astropytime(at_date)\n start_date = get_astropytime(start_date)\n stop_date = get_stopdate(stop_date)\n return at_date >= start_date and at_date <= stop_date\n\n\ndef future_date():\n \"\"\"\n Future is defined here, since defining a far FUTURE_DATE typically gives a\n warning about UTC vs UT1 etc\n \"\"\"\n return Time.now() + TimeDelta(300, format='jd')\n\n\ndef get_stopdate(stop_date):\n if stop_date is None:\n return future_date()\n return get_astropytime(stop_date)\n\n\ndef get_time_for_display(display):\n \"\"\"\n Provide a reader-friendly time string for any time parse-able by get_astropytime -\n if that results in None, then the string None is displayed.\n \"\"\"\n d = get_astropytime(display)\n\n if d is None:\n d = 'None'\n elif isinstance(d, Time):\n d = \"{:%Y-%m-%d %H:%M:%S}\".format(d.datetime)\n return d\n\n\ndef get_astropytime(_date, _time=0):\n \"\"\"\n Take in various incarnations of _date/_time and return an astropy.Time object or None.\n No time zone is allowed.\n\n Returns: either astropy Time or None\n\n Parameters:\n -----------\n _date: date in various formats:\n return astropy Time\n astropy Time: just gets returned\n datetime: just gets converted\n int, long, float: interpreted as gps_second\n string: '<' - PAST_DATE\n '>' - future_date()\n 'now' or 'current'\n 'YYYY/M/D' or 'YYYY-M-D'\n return None:\n string: 'none' return None\n None/False: return None\n _time: only used if _date is 'YYYY/M/D'/'YYYY-M-D' string\n float, int: hours in decimal time\n string: HH[:MM[:SS]]\n \"\"\"\n\n if isinstance(_date, Time):\n return _date\n if isinstance(_date, datetime.datetime):\n return Time(_date, format='datetime')\n if _date is None or _date is False:\n return None\n if isinstance(_date, (six.integer_types, float)):\n if int(_date) > 1000000000:\n return Time(_date, format='gps')\n raise ValueError('Invalid format: date as a number should be gps time, not {}.'.format(_date))\n if isinstance(_date, str):\n if _date == '<':\n return Time(PAST_DATE, scale='utc')\n if _date == '>':\n return future_date()\n if _date.lower() == 'now' or _date.lower() == 'current':\n return Time.now()\n if _date.lower() == 'none':\n return None\n _date = _date.replace('/', '-')\n try:\n return_date = Time(_date, scale='utc')\n except ValueError:\n raise ValueError('Invalid format: date should be YYYY/M/D or YYYY-M-D, not {}'.format(_date))\n if isinstance(_time, (float, int)):\n return return_date + TimeDelta(_time * 3600.0, format='sec')\n if isinstance(_time, str):\n add_time = 0.0\n for i, d in enumerate(_time.split(':')):\n if i > 2:\n raise ValueError('Time can only be hours[:minutes[:seconds]], not {}.'.format(_time))\n add_time += (float(d)) * 3600.0 / (60.0**i)\n return return_date + TimeDelta(add_time, format='sec')\n raise ValueError('Invalid format: time should be H[:M[:S]] (ints or floats)')\n\n raise TypeError(\"Not supported: type 
{}\".format(type(_date)))\n\n\ndef put_keys_in_numerical_order(keys):\n \"\"\"\n Takes a list of hookup keys in the format of prefix+number:revision and puts them in number order.\n Returns the ordered list of keys\n \"\"\"\n keylib = {}\n n = None\n for k in keys:\n colon = k.find(':')\n for i in range(len(k)):\n try:\n n = int(k[i:colon])\n break\n except ValueError:\n continue\n if n is None or n in keylib.keys():\n return keys\n keylib[n] = [k[:i], k[colon:]]\n if not len(keylib.keys()):\n return keys\n keyordered = []\n for n in sorted(keylib.keys()):\n kre = keylib[n][0] + str(n) + keylib[n][1]\n keyordered.append(kre)\n return keyordered\n\n\ndef get_date_from_pair(d1, d2, ret='earliest'):\n \"\"\"\n Returns either the earliest or latest of two dates. This handles either ordering\n and when either or both are None.\n \"\"\"\n if d1 is None and d2 is None:\n return None\n if ret == 'earliest':\n if d1 is None:\n return d2\n elif d2 is None:\n return d1\n else:\n return d1 if d1 < d2 else d2\n elif ret == 'latest':\n if d1 is None:\n return d1\n elif d2 is None:\n return d2\n else:\n return d1 if d1 > d2 else d2\n else:\n raise ValueError(\"Must supply earliest/latest.\")\n\n\ndef query_default(a, args):\n vargs = vars(args)\n default = vargs[a]\n s = '%s [%s]: ' % (a, str(default))\n v = raw_input(s)\n if len(v) == 0:\n v = default\n elif v.lower() == 'none':\n v = None\n return v\n\n\ndef query_yn(s, default='y'):\n if default:\n s += ' [' + default + ']'\n s += ': '\n ans = raw_input(s)\n if len(ans) == 0 and default:\n ans = default.lower()\n elif len(ans) > 0:\n ans = ans.lower()\n else:\n print('No answer provided.')\n ans = query_yn(s)\n return ans[0] == 'y'\n", "sub_path": "hera_mc/cm_utils.py", "file_name": "cm_utils.py", "file_ext": "py", "file_size_in_byte": 9350, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "subprocess.check_output", "line_number": 34, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 35, "usage_type": "attribute"}, {"api_name": "astropy.time.Time.now", "line_number": 41, "usage_type": "call"}, {"api_name": "astropy.time.Time", "line_number": 41, "usage_type": "name"}, {"api_name": "six.string_types", "line_number": 85, "usage_type": "attribute"}, {"api_name": "six.string_types", "line_number": 95, "usage_type": "attribute"}, {"api_name": "astropy.time.Time.now", "line_number": 141, "usage_type": "call"}, {"api_name": "astropy.time.Time", "line_number": 141, "usage_type": "name"}, {"api_name": "astropy.time.TimeDelta", "line_number": 141, "usage_type": "call"}, {"api_name": "astropy.time.Time", "line_number": 159, "usage_type": "argument"}, {"api_name": "astropy.time.Time", "line_number": 190, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 192, "usage_type": "attribute"}, {"api_name": "astropy.time.Time", "line_number": 193, "usage_type": "call"}, {"api_name": "six.integer_types", "line_number": 196, "usage_type": "attribute"}, {"api_name": "astropy.time.Time", "line_number": 198, "usage_type": "call"}, {"api_name": "astropy.time.Time", "line_number": 202, "usage_type": "call"}, {"api_name": "astropy.time.Time.now", "line_number": 206, "usage_type": "call"}, {"api_name": "astropy.time.Time", "line_number": 206, "usage_type": "name"}, {"api_name": "astropy.time.Time", "line_number": 211, "usage_type": "call"}, {"api_name": "astropy.time.TimeDelta", "line_number": 215, "usage_type": "call"}, {"api_name": "astropy.time.TimeDelta", 
"line_number": 222, "usage_type": "call"}]} +{"seq_id": "465121378", "text": "from typing import List\n\n\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n t = 0\n i = 1\n while i < len(nums):\n if nums[i] == nums[i-1]:\n t += 1\n else:\n t = 0\n if t >= 2:\n nums.pop(i)\n t -= 1\n else:\n i += 1\n return len(nums)\n\n\nif __name__ == \"__main__\":\n solu = Solution()\n nums = [1, 1, 1, 2, 2, 3]\n print(solu.removeDuplicates(nums))", "sub_path": "remove_duplicates2.py", "file_name": "remove_duplicates2.py", "file_ext": "py", "file_size_in_byte": 514, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "typing.List", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "322541390", "text": "''' Flexible upsampling and merging module that allows for the use\nof transposed convolutions or bilinear upsampling. Allows for the\noptional inclusion of skip layers as in the original FCN.\n'''\nimport torch.nn as nn\n\n# pylint: disable=too-many-arguments, too-many-locals, unidiomatic-typecheck,\n# pylint: disable=invalid-name,arguments-differ\n\n\nclass Upsampling(nn.Module):\n ''' Defines a decoder.\n '''\n def __init__(\n self,\n stride,\n inchannels,\n channels,\n mode,\n feature_ind,\n merge_which_skips):\n '''\n args:\n :parameter ``stride``: the outstride of the network or total\n downsampling rate\n :parameter ``inchannels``: list() of ints or None's which\n state the width of the coarse features to be added to the pred\n :parameter ``channels``: list() or int of\n '''\n super(Upsampling, self).__init__()\n import math\n num_up = math.log(stride, 2)\n assert num_up % 1 == 0, 'stride must be a power of 2'\n assert len(inchannels) == len(merge_which_skips)\n assert type(channels) in [int, list]\n\n num_up = int(num_up)\n if isinstance(channels, int):\n channels = [channels for _ in range(num_up)]\n if mode == 'bilinear':\n # We don't currently support changes in width for the output\n # features using bilinear interpolation\n for i, elem in enumerate(channels[1:]):\n assert channels[i - 1] == elem\n\n # self.mode = mode\n # self.stride = stride\n merging = len(merge_which_skips) > 0\n layers = []\n merge_conv = []\n _inchannels = [el for el in inchannels]\n _inchannels.reverse()\n channels_in = channels[0]\n for i in range(num_up):\n # Each layer doubles spatial resolution\n if mode == 'transposed':\n layers.append(nn.ConvTranspose2d(\n channels_in, channels[i + 1],\n 3, stride=2, padding=1,\n output_padding=1))\n\n elif merging and mode == 'bilinear':\n layers.append(\n nn.Upsample(\n scale_factor=2,\n mode='bilinear'))\n\n if i in merge_which_skips:\n merge_conv.append(\n nn.Conv2d(\n _inchannels.pop(),\n channels[i],\n 1,\n padding=0))\n else:\n merge_conv.append(None)\n channels_in = channels[i + 1]\n\n if num_up in merge_which_skips:\n merge_conv.append(\n nn.Conv2d(\n _inchannels.pop(),\n channels[num_up],\n 1,\n padding=0))\n else:\n merge_conv.append(None)\n\n if not merging and mode == 'bilinear':\n layers = [\n nn.Upsample(\n scale_factor=stride,\n mode='bilinear')]\n\n self.layers = nn.ModuleList(layers)\n self.merge_conv = nn.ModuleList(merge_conv)\n self.feature_ind = [ind for ind in feature_ind]\n self.merge_features = [None for _ in range(num_up+1)]\n self.merge_which_skips = list(merge_which_skips)\n self.merge_which_skips.sort()\n self.feature_ind.sort(reverse=True)\n\n def forward(self, x):\n # import pdb\n # pdb.set_trace()\n assert len(x) == 3\n x, low, outsize = x\n for layer, 
feat in zip(self.merge_which_skips, self.feature_ind):\n self.merge_features[layer] = low[feat]\n\n ul_len = len(self.layers)\n # Merge after upsampling\n for i, layer in enumerate(self.layers):\n if self.merge_conv[i] is not None:\n # low_x = self.merge_conv[i](low[i])\n low_x = self.merge_conv[i](self.merge_features[i])\n x = x + low_x\n\n if i + 1 < ul_len and self.merge_conv[i + 1] is not None:\n sz = self.merge_features[i + 1].size()[-2:]\n x = layer(x, output_size=sz)\n elif i + 1 == ul_len:\n x = layer(x, output_size=outsize)\n else:\n x = layer(x)\n\n # Merge at original spatial res\n i = len(self.layers)\n if self.merge_features[i] is not None:\n low_x = self.merge_conv[i](self.merge_features[i])\n x = low_x + x\n\n return x\n", "sub_path": "pytorch_segmentation/models/upsampling.py", "file_name": "upsampling.py", "file_ext": "py", "file_size_in_byte": 4616, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "torch.nn.Module", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "math.log", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.Upsample", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.nn.Upsample", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "name"}]} +{"seq_id": "317593339", "text": "from __future__ import division\nfrom builtins import str\nfrom builtins import range\nfrom builtins import object\nfrom past.utils import old_div\nimport copy\nimport datetime\nimport random\nfrom contextlib import contextmanager\n\nimport pytest\nimport pytest_twisted\nimport pytz\nfrom mock import MagicMock, Mock, PropertyMock, call, patch\nfrom twisted.internet import defer\nfrom twisted.internet.task import Clock\n\nfrom cardinal import util\nfrom cardinal.bot import CardinalBot, user_info\nfrom cardinal.unittest_util import get_mock_db\nfrom plugins.ticker import plugin\nfrom plugins.ticker.plugin import (\n TickerPlugin,\n colorize,\n get_delta,\n)\n\n\ndef get_fake_now(market_is_open=True):\n tz = pytz.timezone('America/New_York')\n fake_now = datetime.datetime.now(tz)\n if market_is_open:\n # Ensure it is open\n fake_now = fake_now.replace(hour=10)\n while fake_now.weekday() >= 5:\n fake_now = fake_now - datetime.timedelta(days=1)\n else:\n # Ensure it is closed\n fake_now = fake_now.replace(hour=18)\n\n return fake_now\n\n\n@contextmanager\ndef mock_api(response,\n fake_now=None,\n raise_times=0,\n throttle_times=0):\n fake_now = fake_now or get_fake_now()\n responses = copy.deepcopy(response) \\\n if isinstance(response, list) else \\\n [copy.deepcopy(response)]\n\n 
response_mock = MagicMock()\n type(response_mock).status_code = PropertyMock(return_value=200)\n\n # hack since nonlocal doesn't exist in py2\n context = {'raise_times': raise_times, 'throttle_times': throttle_times}\n\n def mock_deferToThread(*args, **kwargs):\n if context['raise_times'] > 0:\n context['raise_times'] -= 1\n raise Exception('mock exception')\n\n elif context['throttle_times'] > 0:\n context['throttle_times'] -= 1\n response_mock.json.return_value = make_throttle_response()\n\n else:\n response_mock.json.return_value = responses.pop(0)\n\n return response_mock\n\n with patch.object(plugin, 'deferToThread') as mock_defer, \\\n patch.object(plugin, 'est_now', return_value=fake_now):\n mock_defer.side_effect = mock_deferToThread\n\n yield mock_defer\n\n\ndef make_throttle_response():\n return {\n \"Note\": \"Thank you for using Alpha Vantage! Our standard API call \"\n \"frequency is 5 calls per minute and 500 calls per day. \"\n \"Please visit https://www.alphavantage.co/premium/ if you \"\n \"would like to target a higher API call frequency.\"\n }\n\n\ndef make_global_quote_response(symbol,\n open_=None,\n close=None,\n previous_close=None,\n latest_trading_day=None,\n ):\n if latest_trading_day is None:\n latest_trading_day = datetime.datetime.today()\n\n open_ = open_ or \\\n random.randrange(95, 105) + random.random()\n high = random.randrange(106, 110) + random.random()\n low = random.randrange(90, 94) + random.random()\n close = close or \\\n random.randrange(95, 105) + random.random()\n previous_close = previous_close or \\\n random.randrange(95, 105) + random.random()\n volume = random.randrange(100000, 999999)\n\n change = close - previous_close\n change_percent = old_div(1.0 * close, previous_close) * 100 - 100\n\n return {\n \"Global Quote\": {\n \"01. symbol\": symbol.upper(),\n \"02. open\": \"{:.4f}\".format(open_),\n \"03. high\": \"{:.4f}\".format(high),\n \"04. low\": \"{:.4f}\".format(low),\n \"05. price\": \"{:.4f}\".format(close),\n \"06. volume\": \"{}\".format(volume),\n \"07. latest trading day\": latest_trading_day.strftime('%Y-%m-%d'),\n \"08. previous close\": \"{:.4f}\".format(previous_close),\n \"09. change\": \"{:.4f}\".format(change),\n \"10. change percent\": \"{:.4f}%\".format(change_percent),\n },\n }\n\n\ndef make_time_series_daily_response(symbol,\n last_open=None,\n last_close=None,\n previous_close=None,\n start=None):\n \"\"\"Mock response for TIME_SERIES_DAILY API\"\"\"\n\n last_market_day = start or datetime.datetime.today()\n while last_market_day.weekday() >= 5:\n last_market_day = last_market_day - datetime.timedelta(days=1)\n\n previous_market_day = last_market_day - datetime.timedelta(days=1)\n while previous_market_day.weekday() >= 5:\n previous_market_day = previous_market_day - datetime.timedelta(days=1)\n\n def make_random_response():\n open_ = random.randrange(95, 105) + random.random()\n high = random.randrange(106, 110) + random.random()\n low = random.randrange(90, 94) + random.random()\n close = random.randrange(95, 105) + random.random()\n volume = random.randrange(100000, 999999)\n return {\n \"1. open\": \"{:.4f}\".format(open_),\n \"2. high\": \"{:.4f}\".format(high),\n \"3. low\": \"{:.4f}\".format(low),\n \"4. close\": \"{:.4f}\".format(close),\n \"5. 
volume\": \"{}\".format(volume)\n }\n\n time_series_daily = {}\n market_day = last_market_day # this changes each iteration\n for days in range(0, 60):\n # don't add data for weekends\n if market_day.weekday() < 5:\n response = make_random_response()\n\n # Override last open / last close for testing\n if market_day == last_market_day:\n if last_open:\n response[\"1. open\"] = \"{:.4f}\".format(last_open)\n if last_close:\n response[\"4. close\"] = \"{:.4f}\".format(last_close)\n if market_day == previous_market_day and previous_close:\n response[\"4. close\"] = \"{:.4f}\".format(previous_close)\n\n time_series_daily[market_day.strftime(\"%Y-%m-%d\")] = response\n market_day = market_day - datetime.timedelta(days=1)\n\n compact = True\n return {\n \"Meta Data\": {\n \"1. Information\": \"Daily Prices (open, high, low, close) and \"\n \"Volumes\",\n \"2. Symbol\": symbol,\n \"3. Last Refreshed\": datetime.datetime.now().strftime(\"%Y-%m-%d\"),\n \"4. Output Size\": \"Compact\" if compact else \"Full size\",\n \"5. Time Zone\": \"US/Eastern\"\n },\n \"Time Series (Daily)\": time_series_daily,\n }\n\n\ndef test_get_delta():\n assert get_delta(105, 100) == 5.0\n assert get_delta(95, 100) == -5.0\n assert get_delta(100, 100) == 0\n\n\ndef test_colorize():\n assert colorize(-0.151) == '\\x0304-0.15%\\x03'\n assert colorize(-0.1) == '\\x0304-0.10%\\x03'\n assert colorize(0) == '\\x03040.00%\\x03'\n assert colorize(0.1) == '\\x03090.10%\\x03'\n assert colorize(0.159) == '\\x03090.16%\\x03'\n\n\nclass TestTickerPlugin(object):\n @pytest.fixture(autouse=True)\n def setup_method_fixture(self, request, tmpdir):\n self.api_key = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n self.channel = '#test'\n self.channels = [self.channel]\n self.stocks = {\n 'INX': 'S&P 500',\n 'DJI': 'Dow',\n 'VEU': 'Foreign',\n 'AGG': 'US Bond',\n }\n self.relay_bots = [\n {\"nick\": \"relay.bot\", \"user\": \"relay\", \"vhost\": \"relay\"},\n ]\n\n d = tmpdir.mkdir('storage')\n\n get_db, self.db = get_mock_db()\n self.mock_cardinal = Mock(spec=CardinalBot)\n self.mock_cardinal.network = self.network = 'irc.darkscience.net'\n self.mock_cardinal.storage_path = str(d.dirpath())\n self.mock_cardinal.get_db.side_effect = get_db\n\n self.plugin = TickerPlugin(self.mock_cardinal, {\n 'api_key': self.api_key,\n 'channels': self.channels,\n 'stocks': self.stocks,\n 'relay_bots': self.relay_bots,\n })\n\n def test_config_defaults(self):\n plugin = TickerPlugin(self.mock_cardinal, {\n 'api_key': self.api_key,\n })\n assert plugin.config['api_key'] == self.api_key\n assert plugin.config['channels'] == []\n assert plugin.config['stocks'] == {}\n assert plugin.config['relay_bots'] == []\n\n def test_missing_api_key(self):\n with pytest.raises(KeyError):\n TickerPlugin(self.mock_cardinal, {})\n\n def test_missing_stocks(self):\n with pytest.raises(ValueError):\n TickerPlugin(self.mock_cardinal, {\n 'api_key': self.api_key,\n 'stocks': {\n 'a': 'a',\n 'b': 'b',\n 'c': 'c',\n 'd': 'd',\n 'e': 'e',\n 'f': 'f',\n },\n })\n\n @defer.inlineCallbacks\n def test_send_ticker(self):\n responses = [\n make_global_quote_response('DJI',\n previous_close=100,\n close=200),\n make_global_quote_response('AGG',\n previous_close=100,\n close=150.50),\n make_global_quote_response('VEU',\n previous_close=100,\n close=105),\n make_global_quote_response('INX',\n previous_close=100,\n close=50),\n ]\n\n with mock_api(responses, fake_now=get_fake_now(market_is_open=True)):\n yield self.plugin.send_ticker()\n\n self.mock_cardinal.sendMsg.assert_called_once_with(\n self.channel,\n 
'Dow (\\x02DJI\\x02): \\x0309100.00%\\x03 | '\n 'Foreign (\\x02VEU\\x02): \\x03095.00%\\x03 | '\n 'S&P 500 (\\x02INX\\x02): \\x0304-50.00%\\x03 | '\n 'US Bond (\\x02AGG\\x02): \\x030950.50%\\x03'\n )\n\n @pytest.mark.parametrize(\"dt,should_send_ticker,should_do_predictions\", [\n (datetime.datetime(2020, 3, 21, 16, 0, 0), # Saturday 4pm\n False,\n False,),\n (datetime.datetime(2020, 3, 22, 16, 0, 0), # Sunday 4pm\n False,\n False,),\n (datetime.datetime(2020, 3, 23, 15, 45, 45), # Monday 3:45pm\n True,\n False,),\n (datetime.datetime(2020, 3, 23, 16, 0, 30), # Monday 4pm\n True,\n True,),\n (datetime.datetime(2020, 3, 23, 16, 15, 0), # Monday 4:15pm\n False,\n False,),\n (datetime.datetime(2020, 3, 27, 9, 15, 0), # Friday 9:15am\n False,\n False,),\n (datetime.datetime(2020, 3, 27, 9, 30, 15), # Friday 9:30am\n True,\n True,),\n (datetime.datetime(2020, 3, 27, 9, 45, 15), # Friday 9:45am\n True,\n False,),\n ])\n @patch.object(plugin.TickerPlugin, 'do_predictions')\n @patch.object(plugin.TickerPlugin, 'send_ticker')\n @patch.object(util, 'sleep')\n @patch.object(plugin, 'est_now')\n @pytest_twisted.inlineCallbacks\n def test_tick(self,\n est_now,\n sleep,\n send_ticker,\n do_predictions,\n dt,\n should_send_ticker,\n should_do_predictions):\n est_now.return_value = dt\n\n yield self.plugin.tick()\n\n if should_send_ticker:\n send_ticker.assert_called_once_with()\n else:\n assert send_ticker.mock_calls == []\n\n if should_do_predictions:\n sleep.assert_called_once_with(60)\n do_predictions.assert_called_once_with()\n else:\n assert sleep.mock_calls == []\n assert do_predictions.mock_calls == []\n\n @pytest.mark.parametrize(\"market_is_open\", [True, False])\n @patch.object(util, 'reactor', new_callable=Clock)\n @pytest_twisted.inlineCallbacks\n def test_do_predictions(self, mock_reactor, market_is_open):\n symbol = 'INX'\n base = 100.0\n\n user1 = 'user1'\n user2 = 'user2'\n prediction1 = 105.0\n prediction2 = 96.0\n\n actual = 95.0\n\n yield self.plugin.save_prediction(\n symbol,\n user1,\n base,\n prediction1,\n )\n yield self.plugin.save_prediction(\n symbol,\n user2,\n base,\n prediction2,\n )\n\n assert len(self.db['predictions']) == 1\n assert len(self.db['predictions'][symbol]) == 2\n\n response = make_global_quote_response(symbol, close=actual)\n\n with mock_api(response, fake_now=get_fake_now(market_is_open)):\n d = self.plugin.do_predictions()\n mock_reactor.advance(15)\n\n yield d\n\n assert len(self.mock_cardinal.sendMsg.mock_calls) == 3\n self.mock_cardinal.sendMsg.assert_called_with(\n self.channel,\n '{} had the closest guess for \\x02{}\\x02 out of {} predictions '\n 'with a prediction of {} (\\x0304{:.2f}%\\x03) '\n 'compared to the actual {} of {} (\\x0304{:.2f}%\\x03).'.format(\n user2,\n symbol,\n 2,\n prediction2,\n -4,\n 'open' if market_is_open else 'close',\n actual,\n -5))\n\n @patch.object(plugin, 'est_now')\n def test_send_prediction(self, mock_now):\n prediction = 105\n actual = 110\n base = 100\n nick = \"nick\"\n symbol = \"INX\"\n\n # Set the datetime to a known value so the message can be tested\n tz = pytz.timezone('America/New_York')\n mock_now.return_value = tz.localize(\n datetime.datetime(2020, 3, 20, 10, 50, 0, 0))\n\n prediction_ = {'when': '2020-03-20 10:50:00 EDT',\n 'prediction': prediction,\n 'base': base,\n }\n self.plugin.send_prediction(nick, symbol, prediction_, actual)\n\n message = (\"Prediction by nick for \\x02INX\\02: 105 (\\x03095.00%\\x03). \"\n \"Actual value at open: 110 (\\x030910.00%\\x03). 
\"\n \"Prediction set at 2020-03-20 10:50:00 EDT.\")\n self.mock_cardinal.sendMsg.assert_called_once_with('#test', message)\n\n @pytest.mark.skip(reason=\"Not written yet\")\n def test_check(self):\n pass\n\n @pytest.mark.parametrize(\"symbol,input_msg,output_msg,market_is_open\", [\n (\"INX\",\n \"!predict INX +5%\",\n \"Prediction by nick for \\x02INX\\x02 at market close: 105.00 (\\x03095.00%\\x03) \",\n True,\n ),\n (\"INX\",\n \"!predict INX -5%\",\n \"Prediction by nick for \\x02INX\\x02 at market close: 95.00 (\\x0304-5.00%\\x03) \",\n True,\n ),\n (\"INX\",\n \"!predict INX -5%\",\n \"Prediction by nick for \\x02INX\\x02 at market open: 95.00 (\\x0304-5.00%\\x03) \",\n False,\n ),\n # testing a few more formats of stock symbols\n (\"^RUT\",\n \"!predict ^RUT -5%\",\n \"Prediction by nick for \\x02^RUT\\x02 at market open: 95.00 (\\x0304-5.00%\\x03) \",\n False,\n ),\n (\"REE.MC\",\n \"!predict REE.MC -5%\",\n \"Prediction by nick for \\x02REE.MC\\x02 at market open: 95.00 (\\x0304-5.00%\\x03) \",\n False,\n ),\n (\"LON:HDLV\",\n \"!predict LON:HDLV -5%\",\n \"Prediction by nick for \\x02LON:HDLV\\x02 at market open: 95.00 (\\x0304-5.00%\\x03) \",\n False,\n ),\n ])\n @pytest_twisted.inlineCallbacks\n def test_predict(self,\n symbol,\n input_msg,\n output_msg,\n market_is_open):\n channel = \"#finance\"\n\n fake_now = get_fake_now(market_is_open=market_is_open)\n\n kwargs = {'previous_close': 100} if market_is_open else {'close': 100}\n response = make_global_quote_response(symbol, **kwargs)\n\n with mock_api(response, fake_now=fake_now):\n yield self.plugin.predict(self.mock_cardinal,\n user_info(\"nick\", \"user\", \"vhost\"),\n channel,\n input_msg)\n\n assert symbol in self.db['predictions']\n assert len(self.db['predictions'][symbol]) == 1\n\n self.mock_cardinal.sendMsg.assert_called_once_with(\n channel,\n output_msg)\n\n @pytest.mark.parametrize(\"message_pairs\", [\n ((\"!predict INX +5%\",\n \"Prediction by nick for \\x02INX\\x02 at market close: 105.00 (\\x03095.00%\\x03) \",\n ),\n (\"!predict INX -5%\",\n \"Prediction by nick for \\x02INX\\x02 at market close: 95.00 (\\x0304-5.00%\\x03) \"\n \"(replaces old prediction of 105.00 (\\x03095.00%\\x03) set at {})\"\n ),\n )\n ])\n @pytest_twisted.inlineCallbacks\n def test_predict_replace(self, message_pairs):\n channel = \"#finance\"\n symbol = 'INX'\n\n response = make_global_quote_response(symbol, previous_close=100)\n\n fake_now = get_fake_now()\n for input_msg, output_msg in message_pairs:\n with mock_api(response, fake_now):\n yield self.plugin.predict(self.mock_cardinal,\n user_info(\"nick\", \"user\", \"vhost\"),\n channel,\n input_msg)\n\n assert symbol in self.db['predictions']\n assert len(self.db['predictions'][symbol]) == 1\n\n self.mock_cardinal.sendMsg.assert_called_with(\n channel,\n output_msg.format(fake_now.strftime('%Y-%m-%d %H:%M:%S %Z'))\n if '{}' in output_msg else\n output_msg)\n\n @pytest.mark.parametrize(\"input_msg,output_msg\", [\n (\" !predict INX +5%\",\n \"Prediction by nick for \\x02INX\\x02 at market close: 105.00 (\\x03095.00%\\x03) \",\n ),\n (\" !predict INX -5%\",\n \"Prediction by nick for \\x02INX\\x02 at market close: 95.00 (\\x0304-5.00%\\x03) \",\n ),\n ])\n @pytest_twisted.inlineCallbacks\n def test_predict_relay_bot(self, input_msg, output_msg):\n symbol = 'INX'\n channel = \"#finance\"\n\n response = make_global_quote_response(symbol, previous_close=100)\n with mock_api(response):\n yield self.plugin.predict(self.mock_cardinal,\n user_info(\"relay.bot\", \"relay\", \"relay\"),\n 
channel,\n input_msg)\n\n assert symbol in self.db['predictions']\n assert len(self.db['predictions'][symbol]) == 1\n\n self.mock_cardinal.sendMsg.assert_called_once_with(\n channel,\n output_msg)\n\n @pytest.mark.parametrize(\"input_msg\", [\n \" !predict INX +5%\",\n \" !predict INX -5%\",\n ])\n @pytest_twisted.inlineCallbacks\n def test_predict_not_relay_bot(self, input_msg):\n channel = \"#finance\"\n\n yield self.plugin.predict(self.mock_cardinal,\n user_info(\"nick\", \"user\", \"vhost\"),\n channel,\n input_msg)\n\n assert len(self.db['predictions']) == 0\n assert self.mock_cardinal.sendMsg.mock_calls == []\n\n @pytest.mark.parametrize(\"user,message,value,expected\", [\n (\n user_info(\"whoami\", None, None),\n \"!predict INX 5%\",\n 100,\n (\"whoami\", \"INX\", 105, 100),\n ),\n (\n user_info(\"whoami\", None, None),\n \"!predict INX +5%\",\n 100,\n (\"whoami\", \"INX\", 105, 100),\n ),\n (\n user_info(\"whoami\", None, None),\n \"!predict INX -5%\",\n 100,\n (\"whoami\", \"INX\", 95, 100),\n ),\n (\n user_info(\"not.a.relay.bot\", None, None),\n \" !predict INX -5%\",\n 100,\n None,\n ),\n (\n user_info(\"relay.bot\", \"relay\", \"relay\"),\n \" !predict INX -5%\",\n 100,\n (\"whoami\", \"INX\", 95, 100),\n ),\n ])\n @pytest_twisted.inlineCallbacks\n def test_parse_prediction_open(\n self,\n user,\n message,\n value,\n expected,\n ):\n symbol = 'INX'\n\n response = make_global_quote_response(symbol, previous_close=value)\n with mock_api(response):\n result = yield self.plugin.parse_prediction(user, message)\n\n assert result == expected\n\n @pytest.mark.parametrize(\"user,message,value,expected\", [\n (\n user_info(\"whoami\", None, None),\n \"!predict INX 5%\",\n 100,\n (\"whoami\", \"INX\", 105, 100),\n ),\n (\n user_info(\"whoami\", None, None),\n \"!predict INX +5%\",\n 100,\n (\"whoami\", \"INX\", 105, 100),\n ),\n (\n user_info(\"whoami\", None, None),\n \"!predict INX -5%\",\n 100,\n (\"whoami\", \"INX\", 95, 100),\n ),\n (\n user_info(\"not.a.relay.bot\", None, None),\n \" !predict INX -5%\",\n 100,\n None,\n ),\n (\n user_info(\"relay.bot\", \"relay\", \"relay\"),\n \" !predict INX -5%\",\n 100,\n (\"whoami\", \"INX\", 95, 100),\n ),\n ])\n @pytest_twisted.inlineCallbacks\n def test_parse_prediction_close(\n self,\n user,\n message,\n value,\n expected,\n ):\n symbol = 'INX'\n\n response = make_global_quote_response(symbol, close=value)\n with mock_api(response, fake_now=get_fake_now(market_is_open=False)):\n result = yield self.plugin.parse_prediction(user, message)\n\n assert result == expected\n\n @patch.object(plugin, 'est_now')\n def test_save_prediction(self, mock_now):\n symbol = 'INX'\n nick = 'whoami'\n base = 100\n prediction = 105\n\n tz = pytz.timezone('America/New_York')\n mock_now.return_value = tz.localize(datetime.datetime(\n 2020,\n 3,\n 23,\n 12,\n 0,\n 0,\n ))\n self.plugin.save_prediction(\n symbol,\n nick,\n base,\n prediction,\n )\n\n assert symbol in self.db['predictions']\n assert nick in self.db['predictions'][symbol]\n actual = self.db['predictions'][symbol][nick]\n assert actual == {\n 'when': '2020-03-23 12:00:00 EDT',\n 'base': base,\n 'prediction': prediction,\n }\n\n @defer.inlineCallbacks\n def test_get_quote(self):\n symbol = 'INX'\n response = make_global_quote_response(symbol)\n r = response[\"Global Quote\"]\n\n expected = {\n 'symbol': symbol,\n 'open': float(r['02. open']),\n 'high': float(r['03. high']),\n 'low': float(r['04. low']),\n 'price': float(r['05. price']),\n 'volume': int(r['06. 
volume']),\n 'latest trading day': datetime.datetime.today().replace(\n hour=0, minute=0, second=0, microsecond=0),\n 'previous close': float(r['08. previous close']),\n 'change': float(r['09. change']),\n 'change percent': float(r['10. change percent'][:-1]),\n }\n\n with mock_api(response):\n result = yield self.plugin.get_quote(symbol)\n\n assert result == expected\n\n @defer.inlineCallbacks\n def test_get_daily(self):\n symbol = 'INX'\n last_open = 100.0\n last_close = 101.0\n previous_close = 102.0\n\n response = make_global_quote_response(symbol,\n open_=last_open,\n close=last_close,\n previous_close=previous_close,\n )\n\n expected = {\n 'symbol': symbol,\n 'close': last_close,\n 'open': last_open,\n 'previous close': previous_close,\n # this one is calculated by our make response function so it\n # doesn't really test anything anymore\n 'change': float(\n '{:.4f}'.format(get_delta(last_close, previous_close))),\n }\n\n with mock_api(response):\n result = yield self.plugin.get_daily(symbol)\n assert result == expected\n\n @defer.inlineCallbacks\n def test_get_time_series_daily(self):\n symbol = 'INX'\n\n response = make_time_series_daily_response(symbol)\n with mock_api(response):\n result = yield self.plugin.get_time_series_daily(symbol)\n\n for date in response['Time Series (Daily)']:\n assert date in result\n # verify prefix is stripped and values are floats\n for key in ('open', 'high', 'low', 'close', 'volume'):\n assert key in result[date]\n assert isinstance(result[date][key], float)\n\n @defer.inlineCallbacks\n def test_get_time_series_daily_bad_format(self):\n symbol = 'INX'\n\n response = {}\n with mock_api(response):\n with pytest.raises(KeyError):\n yield self.plugin.get_time_series_daily(symbol)\n\n @defer.inlineCallbacks\n def test_make_av_request(self):\n # Verify that this returns the response unmodified, and that it\n # properly calculates params\n function = 'TIME_SERIES_DAILY'\n symbol = 'INX'\n outputsize = 'compact'\n\n response = make_time_series_daily_response(symbol)\n with mock_api(response) as defer_mock:\n result = yield self.plugin.make_av_request(\n function,\n params={\n 'symbol': symbol,\n 'outputsize': outputsize,\n })\n\n assert result == response\n\n defer_mock.assert_called_once_with(\n plugin.requests.get,\n plugin.AV_API_URL,\n params={\n 'apikey': self.api_key,\n 'function': function,\n 'symbol': symbol,\n 'outputsize': outputsize,\n 'datatype': 'json',\n })\n\n @defer.inlineCallbacks\n def test_make_av_request_no_params(self):\n # This one is mostly just for coverage\n function = 'TIME_SERIES_DAILY'\n symbol = 'INX'\n\n response = make_time_series_daily_response(symbol)\n with mock_api(response) as defer_mock:\n result = yield self.plugin.make_av_request(function)\n\n assert result == response\n\n defer_mock.assert_called_once_with(\n plugin.requests.get,\n plugin.AV_API_URL,\n params={\n 'apikey': self.api_key,\n 'function': function,\n 'datatype': 'json',\n })\n\n @patch.object(util, 'reactor', new_callable=Clock)\n @defer.inlineCallbacks\n def test_make_av_request_retry_when_throttled(self, mock_reactor):\n # Verify that this returns the response unmodified, and that it\n # properly calculates params\n function = 'TIME_SERIES_DAILY'\n symbol = 'INX'\n outputsize = 'compact'\n\n response = make_time_series_daily_response(symbol)\n throttle_times = plugin.MAX_RETRIES - 1\n with mock_api(response, throttle_times=throttle_times) as defer_mock:\n d = self.plugin.make_av_request(\n function,\n params={\n 'symbol': symbol,\n 'outputsize': 
outputsize,\n })\n\n # loop through retries\n for _ in range(throttle_times):\n mock_reactor.advance(plugin.RETRY_WAIT)\n\n result = yield d\n\n assert result == response\n\n defer_mock.assert_has_calls([call(\n plugin.requests.get,\n plugin.AV_API_URL,\n params={\n 'apikey': self.api_key,\n 'function': function,\n 'symbol': symbol,\n 'outputsize': outputsize,\n 'datatype': 'json',\n })] * (throttle_times + 1))\n\n @patch.object(util, 'reactor', new_callable=Clock)\n @defer.inlineCallbacks\n def test_make_av_request_retry_on_exception(self, mock_reactor):\n # Verify that this returns the response unmodified, and that it\n # properly calculates params\n function = 'TIME_SERIES_DAILY'\n symbol = 'INX'\n outputsize = 'compact'\n\n response = make_time_series_daily_response(symbol)\n raise_times = plugin.MAX_RETRIES - 1\n with mock_api(response, raise_times=raise_times) as defer_mock:\n d = self.plugin.make_av_request(\n function,\n params={\n 'symbol': symbol,\n 'outputsize': outputsize,\n })\n\n # loop through retries\n for _ in range(raise_times):\n mock_reactor.advance(plugin.RETRY_WAIT)\n\n result = yield d\n\n assert result == response\n\n defer_mock.assert_has_calls([call(\n plugin.requests.get,\n plugin.AV_API_URL,\n params={\n 'apikey': self.api_key,\n 'function': function,\n 'symbol': symbol,\n 'outputsize': outputsize,\n 'datatype': 'json',\n })] * (raise_times + 1))\n\n @patch.object(util, 'reactor', new_callable=Clock)\n @defer.inlineCallbacks\n def test_make_av_request_give_up_after_max_retries(self, mock_reactor):\n # Verify that this returns the response unmodified, and that it\n # properly calculates params\n function = 'TIME_SERIES_DAILY'\n symbol = 'INX'\n outputsize = 'compact'\n\n response = make_time_series_daily_response(symbol)\n raise_times = plugin.MAX_RETRIES\n with mock_api(response, raise_times=raise_times) as defer_mock:\n d = self.plugin.make_av_request(\n function,\n params={\n 'symbol': symbol,\n 'outputsize': outputsize,\n })\n\n # loop through retries\n for _ in range(raise_times):\n mock_reactor.advance(plugin.RETRY_WAIT)\n\n with pytest.raises(Exception):\n yield d\n\n defer_mock.assert_has_calls([call(\n plugin.requests.get,\n plugin.AV_API_URL,\n params={\n 'apikey': self.api_key,\n 'function': function,\n 'symbol': symbol,\n 'outputsize': outputsize,\n 'datatype': 'json',\n })] * (raise_times))\n\n @patch.object(plugin, 'est_now')\n def test_market_is_open(self, mock_now):\n tz = pytz.timezone('America/New_York')\n\n # Nothing special about this time - it's a Thursday 7:49pm\n mock_now.return_value = tz.localize(datetime.datetime(\n 2020,\n 3,\n 19,\n 19,\n 49,\n 55,\n 0,\n ))\n assert plugin.market_is_open() is False\n\n # The market was open earlier though\n mock_now.return_value = tz.localize(datetime.datetime(\n 2020,\n 3,\n 19,\n 13,\n 49,\n 55,\n 0,\n ))\n assert plugin.market_is_open() is True\n\n # But not before 9:30am\n mock_now.return_value = tz.localize(datetime.datetime(\n 2020,\n 3,\n 19,\n 9,\n 29,\n 59,\n 0,\n ))\n assert plugin.market_is_open() is False\n\n # Or this weekend\n mock_now.return_value = tz.localize(datetime.datetime(\n 2020,\n 3,\n 14,\n 13,\n 49,\n 55,\n 0,\n ))\n assert plugin.market_is_open() is False\n", "sub_path": "plugins/ticker/test_plugin.py", "file_name": "test_plugin.py", "file_ext": "py", "file_size_in_byte": 32360, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pytz.timezone", "line_number": 30, "usage_type": "call"}, {"api_name": 
"datetime.datetime.now", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 36, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 50, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 52, "usage_type": "call"}, {"api_name": "mock.MagicMock", "line_number": 54, "usage_type": "call"}, {"api_name": "mock.PropertyMock", "line_number": 55, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 74, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin", "line_number": 74, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 74, "usage_type": "name"}, {"api_name": "mock.patch.object", "line_number": 75, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin", "line_number": 75, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 75, "usage_type": "name"}, {"api_name": "contextlib.contextmanager", "line_number": 44, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 97, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 97, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 100, "usage_type": "call"}, {"api_name": "random.random", "line_number": 100, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 101, "usage_type": "call"}, {"api_name": "random.random", "line_number": 101, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 102, "usage_type": "call"}, {"api_name": "random.random", "line_number": 102, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 104, "usage_type": "call"}, {"api_name": "random.random", "line_number": 104, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 106, "usage_type": "call"}, {"api_name": "random.random", "line_number": 106, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 107, "usage_type": "call"}, {"api_name": "past.utils.old_div", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 135, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 137, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 141, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 144, "usage_type": "call"}, {"api_name": "random.random", "line_number": 144, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 145, "usage_type": "call"}, {"api_name": "random.random", "line_number": 145, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 146, "usage_type": "call"}, {"api_name": "random.random", "line_number": 146, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 147, "usage_type": "call"}, {"api_name": "random.random", "line_number": 147, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 148, "usage_type": "call"}, {"api_name": "builtins.range", "line_number": 159, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 174, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 182, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 182, "usage_type": "attribute"}, {"api_name": 
"plugins.ticker.plugin.get_delta", "line_number": 191, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.get_delta", "line_number": 192, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.get_delta", "line_number": 193, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.colorize", "line_number": 197, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.colorize", "line_number": 198, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.colorize", "line_number": 199, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.colorize", "line_number": 200, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.colorize", "line_number": 201, "usage_type": "call"}, {"api_name": "builtins.object", "line_number": 204, "usage_type": "name"}, {"api_name": "cardinal.unittest_util.get_mock_db", "line_number": 222, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 223, "usage_type": "call"}, {"api_name": "cardinal.bot.CardinalBot", "line_number": 223, "usage_type": "name"}, {"api_name": "builtins.str", "line_number": 225, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.TickerPlugin", "line_number": 228, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 205, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin", "line_number": 236, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.TickerPlugin", "line_number": 236, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.config", "line_number": 239, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 239, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.config", "line_number": 240, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 240, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.config", "line_number": 241, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 241, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.config", "line_number": 242, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 242, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 245, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.TickerPlugin", "line_number": 246, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 249, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.TickerPlugin", "line_number": 250, "usage_type": "call"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 262, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 262, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 290, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 290, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 291, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 294, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 297, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 300, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 303, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 306, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 309, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 312, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 316, "usage_type": "call"}, {"api_name": "mock.patch", 
"line_number": 316, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.TickerPlugin", "line_number": 316, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 316, "usage_type": "name"}, {"api_name": "mock.patch.object", "line_number": 317, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 317, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.TickerPlugin", "line_number": 317, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 317, "usage_type": "name"}, {"api_name": "mock.patch.object", "line_number": 318, "usage_type": "call"}, {"api_name": "cardinal.util", "line_number": 318, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 318, "usage_type": "name"}, {"api_name": "mock.patch.object", "line_number": 319, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin", "line_number": 319, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 319, "usage_type": "name"}, {"api_name": "pytest_twisted.inlineCallbacks", "line_number": 320, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 345, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 345, "usage_type": "attribute"}, {"api_name": "mock.patch.object", "line_number": 346, "usage_type": "call"}, {"api_name": "cardinal.util", "line_number": 346, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 346, "usage_type": "name"}, {"api_name": "twisted.internet.task.Clock", "line_number": 346, "usage_type": "name"}, {"api_name": "pytest_twisted.inlineCallbacks", "line_number": 347, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 407, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 409, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 398, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin", "line_number": 398, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 398, "usage_type": "name"}, {"api_name": "pytest.mark.skip", "line_number": 422, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 422, "usage_type": "attribute"}, {"api_name": "cardinal.bot.user_info", "line_number": 474, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 426, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 426, "usage_type": "attribute"}, {"api_name": "pytest_twisted.inlineCallbacks", "line_number": 459, "usage_type": "attribute"}, {"api_name": "cardinal.bot.user_info", "line_number": 506, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 485, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 485, "usage_type": "attribute"}, {"api_name": "pytest_twisted.inlineCallbacks", "line_number": 495, "usage_type": "attribute"}, {"api_name": "cardinal.bot.user_info", "line_number": 535, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 519, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 519, "usage_type": "attribute"}, {"api_name": "pytest_twisted.inlineCallbacks", "line_number": 527, "usage_type": "attribute"}, {"api_name": "cardinal.bot.user_info", "line_number": 555, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 546, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 546, "usage_type": "attribute"}, {"api_name": "pytest_twisted.inlineCallbacks", "line_number": 550, "usage_type": "attribute"}, 
{"api_name": "pytest.mark.parametrize", "line_number": 562, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 562, "usage_type": "attribute"}, {"api_name": "cardinal.bot.user_info", "line_number": 564, "usage_type": "call"}, {"api_name": "cardinal.bot.user_info", "line_number": 570, "usage_type": "call"}, {"api_name": "cardinal.bot.user_info", "line_number": 576, "usage_type": "call"}, {"api_name": "cardinal.bot.user_info", "line_number": 582, "usage_type": "call"}, {"api_name": "cardinal.bot.user_info", "line_number": 588, "usage_type": "call"}, {"api_name": "pytest_twisted.inlineCallbacks", "line_number": 594, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 610, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 610, "usage_type": "attribute"}, {"api_name": "cardinal.bot.user_info", "line_number": 612, "usage_type": "call"}, {"api_name": "cardinal.bot.user_info", "line_number": 618, "usage_type": "call"}, {"api_name": "cardinal.bot.user_info", "line_number": 624, "usage_type": "call"}, {"api_name": "cardinal.bot.user_info", "line_number": 630, "usage_type": "call"}, {"api_name": "cardinal.bot.user_info", "line_number": 636, "usage_type": "call"}, {"api_name": "pytest_twisted.inlineCallbacks", "line_number": 642, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 665, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 666, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 658, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin", "line_number": 658, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 658, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 703, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 703, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 690, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 690, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.get_delta", "line_number": 736, "usage_type": "call"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 715, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 715, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 743, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 743, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 764, "usage_type": "call"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 758, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 758, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.requests", "line_number": 787, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 787, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.AV_API_URL", "line_number": 788, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 788, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 767, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 767, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.requests", "line_number": 810, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 810, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.AV_API_URL", 
"line_number": 811, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 811, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 797, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 797, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.MAX_RETRIES", "line_number": 828, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 828, "usage_type": "name"}, {"api_name": "builtins.range", "line_number": 838, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.RETRY_WAIT", "line_number": 839, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 839, "usage_type": "name"}, {"api_name": "mock.call", "line_number": 845, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.requests", "line_number": 846, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 846, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.AV_API_URL", "line_number": 847, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 847, "usage_type": "name"}, {"api_name": "mock.patch.object", "line_number": 818, "usage_type": "call"}, {"api_name": "cardinal.util", "line_number": 818, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 818, "usage_type": "name"}, {"api_name": "twisted.internet.task.Clock", "line_number": 818, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 819, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 819, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.MAX_RETRIES", "line_number": 866, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 866, "usage_type": "name"}, {"api_name": "builtins.range", "line_number": 876, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.RETRY_WAIT", "line_number": 877, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 877, "usage_type": "name"}, {"api_name": "mock.call", "line_number": 883, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.requests", "line_number": 884, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 884, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.AV_API_URL", "line_number": 885, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 885, "usage_type": "name"}, {"api_name": "mock.patch.object", "line_number": 856, "usage_type": "call"}, {"api_name": "cardinal.util", "line_number": 856, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 856, "usage_type": "name"}, {"api_name": "twisted.internet.task.Clock", "line_number": 856, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 857, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 857, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.MAX_RETRIES", "line_number": 904, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 904, "usage_type": "name"}, {"api_name": "builtins.range", "line_number": 914, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.RETRY_WAIT", "line_number": 915, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 915, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 917, "usage_type": "call"}, {"api_name": "mock.call", 
"line_number": 920, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.requests", "line_number": 921, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 921, "usage_type": "name"}, {"api_name": "plugins.ticker.plugin.AV_API_URL", "line_number": 922, "usage_type": "attribute"}, {"api_name": "plugins.ticker.plugin", "line_number": 922, "usage_type": "name"}, {"api_name": "mock.patch.object", "line_number": 894, "usage_type": "call"}, {"api_name": "cardinal.util", "line_number": 894, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 894, "usage_type": "name"}, {"api_name": "twisted.internet.task.Clock", "line_number": 894, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 895, "usage_type": "attribute"}, {"api_name": "twisted.internet.defer", "line_number": 895, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 933, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 936, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.market_is_open", "line_number": 945, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin", "line_number": 945, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 948, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.market_is_open", "line_number": 957, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin", "line_number": 957, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 960, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.market_is_open", "line_number": 969, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin", "line_number": 969, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 972, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin.market_is_open", "line_number": 981, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin", "line_number": 981, "usage_type": "name"}, {"api_name": "mock.patch.object", "line_number": 931, "usage_type": "call"}, {"api_name": "plugins.ticker.plugin", "line_number": 931, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 931, "usage_type": "name"}]} +{"seq_id": "446705540", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nt_start = 0.0\nt_final = 5.0\n\n#starting points of numerical calculation:\nt = [0]\nx = [1]\nt_n = 0\n\nstep = 0.1\n\nwhile t_n < t_final:\n x_n = x[-1] + step* x[-1] #calculate new value, with the previous value\n x.append(x_n) #store it in your data list\n t_n += step # increase the time with the step\n t.append(t_n)\n\n#(semi-)anaytic:\nt2 = np.arange(t_start, t_final, 0.001)\nx2 = np.exp(t2)\n\n#plot both in one plot:\nplt.plot(t,x, 'r', label = \"numeric\")\nplt.plot(t2, x2, 'b', label = \"analytic\")\n\nplt.legend()\nplt.xlabel(\"x\")\nplt.ylabel(\"t\")\nplt.title(r\"Euler integration of $e^t$ with a step size of %.2f\" %step)\n\n#saving data to file and save figure:\n\n#file = open(\"Data_EulerInt.txt\", 'w')\n#file.write(\"Euler Integration of exp(t):\\n t \\t f(t)\\n\")\n#for i in range(len(x)):\n\t#file.write(\"%.4f \\t %.4f \\n\" %(t[i], x[i]))\n#file.close() # make sure you close your file!\n\n#plt.savefig(\"EulerIntegration.png\")\nplt.show()\n", "sub_path": "EulerIntegration.py", "file_name": "EulerIntegration.py", "file_ext": "py", "file_size_in_byte": 973, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.arange", "line_number": 21, 
"usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "342208856", "text": "import json\n\nwith open(\"itemprices.json\") as json_file:\n menu_card = json.load(json_file)\n\ndef calculate_order_total(order):\n global menu_card\n order_list = list(order)\n total = sum([menu_card[item] for item in order_list])\n print(f\"Your order total is {total}\")\n\nmenu = '''\n====== MENU for today: =======\n 1. \"Chicken Strips\": 280,\n 2. \"French Fries\": 140,\n 3. \"Hamburger\": 160,\n 4. \"Hotdog\": 120,\n 5. \"Large Drink\": 100,\n 6. \"Medium Drink\": 75,\n 7. \"Milk Shake\": 85,\n 8. \"Salad\": 125,\n 9. \"Small Drink\": 60\n'''\ndef take_orders():\n take = True\n while take:\n print(menu)\n order = input(\"Please entre your order\")\n calculate_order_total(order)\n repeat = input(\"Do you want to place another order ? 
Y/N\")\n if repeat == 'n' or repeat =='N':\n take = False\n\nif __name__ == \"__main__\":\n take_orders()", "sub_path": "menucalculator/menucalculator.py", "file_name": "menucalculator.py", "file_ext": "py", "file_size_in_byte": 909, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "json.load", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "629242202", "text": "# -*- coding: utf-8 -*-\nimport dataiku\nfrom dataiku.customrecipe import *\nimport pandas as pd, numpy as np\nfrom dataiku import pandasutils as pdu\nfrom google.cloud import translate\n\n#Get config\ncredential = get_input_names_for_role('folder_of_credentials')[0]\ncredential = dataiku.Folder(str(credential))\ncredentials_path = credential.get_path()+credential.list_paths_in_partition()[0]\ntranslate_client = translate.Client.from_service_account_json(credentials_path)\n\ncolumn_to_translate = get_recipe_config()[\"column_to_translate\"]\ntarget = get_recipe_config()[\"target_language\"]\ninput_data_path=get_input_names_for_role('dataset_to_translate')[0]\noutput_data_path = get_output_names_for_role('dataset_translated')[0]\n\n####################\n# INIT GOOGLE CLIENT\n####################\n# The target language\n\ndef translate_from_google(row,column_to_translate):\n text = row[column_to_translate]\n translation = translate_client.translate(\n text,\n target_language=target)\n row[column_to_translate+'_translated_'+target] = translation['translatedText'] \n row[column_to_translate+'_detectedSourceLanguage'] = translation['detectedSourceLanguage'] \n return row\n\n####################\n# MANIPULATE DATA\n####################\n\ninput_data= dataiku.Dataset(input_data_path)\ninput_data_df = input_data.get_dataframe()\n\ninput_data_df[column_to_translate+'_translated_'+target]=''\ninput_data_df[column_to_translate+'_detectedSourceLanguage']=''\n\ninput_data_df= input_data_df.apply(translate_from_google, column_to_translate=column_to_translate, axis=1)\ninput_data_translated_df = input_data_df\n\noutput_translated = dataiku.Dataset(output_data_path)\noutput_translated.write_with_schema(input_data_translated_df)", "sub_path": "google-translate-plugin/custom-recipes/google-translate-plugin-python/recipe.py", "file_name": "recipe.py", "file_ext": "py", "file_size_in_byte": 1712, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "dataiku.Folder", "line_number": 10, "usage_type": "call"}, {"api_name": "google.cloud.translate.Client.from_service_account_json", "line_number": 12, "usage_type": "call"}, {"api_name": "google.cloud.translate.Client", "line_number": 12, "usage_type": "attribute"}, {"api_name": "google.cloud.translate", "line_number": 12, "usage_type": "name"}, {"api_name": "dataiku.Dataset", "line_number": 37, "usage_type": "call"}, {"api_name": "dataiku.Dataset", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "501893989", "text": "from os import path\nfrom setuptools import setup\n\nHERE = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(HERE, 'README.md'), encoding='utf-8') as fp:\n long_description = fp.read()\n\n# Get the list of required packages\nwith open(path.join(HERE, 'requirements.txt'), encoding='utf-8') as fp:\n requirements = [req.rstrip() for req in fp.readlines()]\n\nsetup(\n name=\"imageai\",\n version='2.0.1',\n description='A flexible Computer Vision and Deep Learning library for 
applications and systems.',\n url=\"https://moses.specpal.science\",\n author='Moses Olafenwa and John Olafenwa',\n author_email='guymodscientist@gmail.com',\n license='MIT',\n packages=['imageai'],\n\n long_description=long_description,\n long_description_content_type='text/markdown',\n\n install_requires=requirements,\n\n zip_safe=False,\n\n # Classifiers help users find your project by categorizing it.\n # For a list of valid classifiers, see https://pypi.org/classifiers/\n classifiers=[ # Optional\n # How mature is this project? Common values are\n # 3 - Alpha ; 4 - Beta ; 5 - Production/Stable\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: MIT License',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n\n # Specify the Python versions you support HERE. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n)", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1815, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.abspath", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "297499040", "text": "\n\"\"\"Functions for importing and exporting data.\n\"\"\"\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nimport os\nfrom textwrap import dedent\nfrom glob import glob\n\nimport pandas as pd\nimport numpy as np\nimport math\n\ntry:\n import xarray as xr\n has_xarray = True\nexcept ImportError:\n has_xarray = False\n\nclass ImpExper(object):\n ds = None\n\n def __enter__(self):\n if self.ds is not None:\n self.ds = self.ds.__enter__()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type is None:\n self.finish()\n\n if self.ds is not None:\n self.ds.__exit__(exc_type, exc_val, exc_tb)\n\n def finish(self):\n pass\n\nclass Exporter(ImpExper):\n def remove_static(self, list_name):\n pass\n\n def remove_series(self, list_name, attr):\n pass\n\nclass Importer(ImpExper):\n pass\n\nclass ExporterCSV(Exporter):\n def __init__(self, csv_folder_name, encoding):\n self.csv_folder_name = csv_folder_name\n self.encoding = encoding\n\n #make sure directory exists\n if not os.path.isdir(csv_folder_name):\n logger.warning(\"Directory {} does not exist, creating it\"\n .format(csv_folder_name))\n os.mkdir(csv_folder_name)\n\n def save_attributes(self, attrs):\n name = attrs.pop('name')\n df = pd.DataFrame(attrs, index=pd.Index([name], name='name'))\n fn = os.path.join(self.csv_folder_name, \"network.csv\")\n df.to_csv(fn, encoding=self.encoding)\n\n def save_snapshots(self, snapshots):\n fn = os.path.join(self.csv_folder_name, \"snapshots.csv\")\n snapshots.to_csv(fn, encoding=self.encoding)\n\n def 
save_investment_periods(self, investment_periods):\n fn = os.path.join(self.csv_folder_name, \"investment_periods.csv\")\n investment_periods.to_csv(fn, encoding=self.encoding)\n\n def save_static(self, list_name, df):\n fn = os.path.join(self.csv_folder_name, list_name + \".csv\")\n df.to_csv(fn, encoding=self.encoding)\n\n def save_series(self, list_name, attr, df):\n fn = os.path.join(self.csv_folder_name, list_name + \"-\" + attr + \".csv\")\n df.to_csv(fn, encoding=self.encoding)\n\n def remove_static(self, list_name):\n fns = glob(os.path.join(self.csv_folder_name, list_name) + \"*.csv\")\n if fns:\n for fn in fns: os.unlink(fn)\n logger.warning(\"Stale csv file(s) {} removed\".format(', '.join(fns)))\n\n def remove_series(self, list_name, attr):\n fn = os.path.join(self.csv_folder_name, list_name + \"-\" + attr + \".csv\")\n if os.path.exists(fn):\n os.unlink(fn)\n\ndef import_from_csv_folder(network, csv_folder_name, encoding=None, skip_time=False):\n \"\"\"\n Import network data from CSVs in a folder.\n The CSVs must follow the standard form, see ``pypsa/examples``.\n Parameters\n ----------\n csv_folder_name : string\n Name of folder\n encoding : str, default None\n Encoding to use for UTF when reading (ex. 'utf-8'). `List of Python standard encodings\n `_\n skip_time : bool, default False\n Skip reading in time dependent attributes\n Examples\n ----------\n >>> network.import_from_csv_folder(csv_folder_name)\n \"\"\"\n\n basename = os.path.basename(csv_folder_name)\n with ImporterCSV(csv_folder_name, encoding=encoding) as importer:\n _import_from_importer(network, importer, basename=basename, skip_time=skip_time)\n\nclass ImporterCSV(Importer):\n\n def __init__(self, csv_folder_name, encoding):\n self.csv_folder_name = csv_folder_name\n self.encoding = encoding\n\n assert os.path.isdir(csv_folder_name), f\"Directory {csv_folder_name} does not exist.\"\n\n def get_attributes(self):\n fn = os.path.join(self.csv_folder_name, \"network.csv\")\n if not os.path.isfile(fn):\n return None\n \n return dict(pd.read_csv(fn, encoding=self.encoding).iloc[0])\n\n def get_snapshots(self):\n fn = os.path.join(self.csv_folder_name, \"snapshots.csv\")\n if not os.path.isfile(fn): return None\n df = pd.read_csv(fn, index_col=0, encoding=self.encoding, parse_dates=True)\n if \"snapshot\" in df:\n df[\"snapshot\"] = pd.to_datetime(df.snapshot)\n return df\n\n def get_investment_periods(self):\n fn = os.path.join(self.csv_folder_name, \"investment_periods.csv\")\n if not os.path.isfile(fn): return None\n return pd.read_csv(fn, index_col=0, encoding=self.encoding)\n\n def get_static(self, list_name):\n fn = os.path.join(self.csv_folder_name, list_name + \".csv\")\n return (pd.read_csv(fn, index_col=0, encoding=self.encoding)\n if os.path.isfile(fn) else None)\n\n def get_series(self, list_name):\n for fn in os.listdir(self.csv_folder_name):\n if fn.startswith(list_name+\"-\") and fn.endswith(\".csv\"):\n attr = fn[len(list_name)+1:-4]\n df = pd.read_csv(os.path.join(self.csv_folder_name, fn),\n index_col=0, encoding=self.encoding, parse_dates=True)\n yield attr, df\n\ndef _import_from_importer(network, importer, basename, skip_time=False):\n \"\"\"\n Import network data from importer.\n Parameters\n ----------\n skip_time : bool\n Skip importing time\n \"\"\"\n\n # If network.csv exists, attributes loaded as dict from csv:\n # name,now,srid,pypsa_version, e.g. 
AC-DC,now,4326,0.10.0\n\n attrs = importer.get_attributes()\n\n current_pypsa_version = [int(s) for s in network.pypsa_version.split(\".\")]\n pypsa_version = None\n\n if attrs is not None:\n network.name = attrs.pop('name')\n try:\n pypsa_version = [int(s) for s in attrs.pop(\"pypsa_version\").split(\".\")]\n except KeyError:\n pypsa_version = None\n\n for attr, val in attrs.items():\n setattr(network, attr, val)\n\n ##https://docs.python.org/3/tutorial/datastructures.html#comparing-sequences-and-other-types\n if pypsa_version is None or pypsa_version < current_pypsa_version:\n logger.warning(dedent(\"\"\"\n Importing PyPSA from older version of PyPSA than current version {}.\n Please read the release notes at https://pypsa.org/doc/release_notes.html\n carefully to prepare your network for import.\n \"\"\").format(network.pypsa_version))\n\n importer.pypsa_version = pypsa_version\n importer.current_pypsa_version = current_pypsa_version\n\n # if there is snapshots.csv, read in snapshot data\n df = importer.get_snapshots()\n\n if df is not None:\n\n # check if imported snapshots have MultiIndex\n snapshot_levels = set([\"period\", \"snapshot\"]).intersection(df.columns)\n if snapshot_levels:\n df.set_index(sorted(snapshot_levels), inplace=True)\n network.set_snapshots(df.index)\n\n cols = ['objective', 'generators', 'stores']\n if not df.columns.intersection(cols).empty:\n network.snapshot_weightings = df.reindex(index=network.snapshots,\n columns=cols)\n elif \"weightings\" in df.columns:\n network.snapshot_weightings = df[\"weightings\"].reindex(network.snapshots)\n\n network.set_snapshots(df.index)\n\n # read in investment period weightings\n periods = importer.get_investment_periods()\n\n if periods is not None:\n network._investment_periods = periods.index\n\n network._investment_period_weightings = (\n periods.reindex(network.investment_periods))\n\n\n imported_components = []\n\n # now read in other components; make sure buses and carriers come first\n for component in [\"Bus\", \"Carrier\"] + sorted(network.all_components - {\"Bus\", \"Carrier\", \"SubNetwork\"}):\n list_name = network.components[component][\"list_name\"]\n\n df = importer.get_static(list_name)\n if df is None:\n if component == \"Bus\":\n logger.error(\"Error, no buses found\")\n return\n else:\n continue\n\n import_components_from_dataframe(network, df, component)\n\n if not skip_time:\n for attr, df in importer.get_series(list_name):\n df.set_index(network.snapshots, inplace=True)\n import_series_from_dataframe(network, df, component, attr)\n\n logger.debug(getattr(network,list_name))\n\n imported_components.append(list_name)\n\n logger.info(\"Imported network{} has {}\".format(\" \" + basename, \", \".join(imported_components)))\n\ndef export_to_csv_folder(network, csv_folder_name, encoding=None, export_standard_types=False):\n \"\"\"\n Export network and components to a folder of CSVs.\n Both static and series attributes of all components are exported, but only\n if they have non-default values.\n If ``csv_folder_name`` does not already exist, it is created.\n Static attributes are exported in one CSV file per component,\n e.g. ``generators.csv``.\n Series attributes are exported in one CSV file per component per\n attribute, e.g. ``generators-p_set.csv``.\n Parameters\n ----------\n csv_folder_name : string\n Name of folder to which to export.\n encoding : str, default None\n Encoding to use for UTF when reading (ex. 'utf-8'). 
`List of Python\n standard encodings\n `_\n export_standard_types : boolean, default False\n If True, then standard types are exported too (upon reimporting you\n should then set \"ignore_standard_types\" when initialising the network).\n Examples\n --------\n >>> network.export_to_csv_folder(csv_folder_name)\n \"\"\"\n\n basename = os.path.basename(csv_folder_name)\n with ExporterCSV(csv_folder_name=csv_folder_name, encoding=encoding) as exporter:\n _export_to_exporter(network, exporter, basename=basename,\n export_standard_types=export_standard_types)\n\ndef _export_to_exporter(network, exporter, basename, export_standard_types=False):\n \"\"\"\n Export to exporter.\n Both static and series attributes of components are exported, but only\n if they have non-default values.\n Parameters\n ----------\n exporter : Exporter\n Initialized exporter instance\n basename : str\n Basename, used for logging\n export_standard_types : boolean, default False\n If True, then standard types are exported too (upon reimporting you\n should then set \"ignore_standard_types\" when initialising the netowrk).\n \"\"\"\n\n #exportable component types\n #what about None???? - nan is float?\n allowed_types = (float, int, bool, str) + tuple(np.sctypeDict.values())\n\n #first export network properties\n attrs = dict((attr, getattr(network, attr))\n for attr in dir(network)\n if (not attr.startswith(\"__\") and\n isinstance(getattr(network,attr), allowed_types)))\n exporter.save_attributes(attrs)\n\n #now export snapshots\n if isinstance(network.snapshot_weightings.index, pd.MultiIndex):\n network.snapshot_weightings.index.rename([\"period\", \"snapshot\"], inplace=True)\n else:\n network.snapshot_weightings.index.rename(\"snapshot\", inplace=True)\n snapshots = network.snapshot_weightings.reset_index()\n exporter.save_snapshots(snapshots)\n\n # export investment period weightings\n investment_periods = network.investment_period_weightings\n exporter.save_investment_periods(investment_periods)\n\n exported_components = []\n for component in network.all_components - {\"SubNetwork\"}:\n\n list_name = network.components[component][\"list_name\"]\n attrs = network.components[component][\"attrs\"]\n\n df = network.df(component)\n pnl = network.pnl(component)\n\n if not export_standard_types and component in network.standard_type_components:\n df = df.drop(network.components[component][\"standard_types\"].index)\n\n # first do static attributes\n df.index.name = \"name\"\n if df.empty:\n exporter.remove_static(list_name)\n continue\n\n col_export = []\n for col in df.columns:\n # do not export derived attributes\n if col in [\"sub_network\", \"r_pu\", \"x_pu\", \"g_pu\", \"b_pu\"]:\n continue\n if col in attrs.index and pd.isnull(attrs.at[col, \"default\"]) and pd.isnull(df[col]).all():\n continue\n if (col in attrs.index\n and df[col].dtype == attrs.at[col, 'dtype']\n and (df[col] == attrs.at[col, \"default\"]).all()):\n continue\n\n col_export.append(col)\n\n exporter.save_static(list_name, df[col_export])\n\n #now do varying attributes\n for attr in pnl:\n if attr not in attrs.index:\n col_export = pnl[attr].columns\n else:\n default = attrs.at[attr, \"default\"]\n\n if pd.isnull(default):\n col_export = pnl[attr].columns[(~pd.isnull(pnl[attr])).any()]\n else:\n col_export = pnl[attr].columns[(pnl[attr] != default).any()]\n\n if len(col_export) > 0:\n df = pnl[attr].reset_index()[col_export]\n exporter.save_series(list_name, attr, df)\n else:\n exporter.remove_series(list_name, attr)\n\n 
exported_components.append(list_name)\n\n logger.info(\"Exported network {} has {}\".format(basename, \", \".join(exported_components)))\n\ndef import_components_from_dataframe(network, dataframe, cls_name):\n \"\"\"\n Import components from a pandas DataFrame.\n If columns are missing then defaults are used.\n If extra columns are added, these are left in the resulting component dataframe.\n Parameters\n ----------\n dataframe : pandas.DataFrame\n A DataFrame whose index is the names of the components and\n whose columns are the non-default attributes.\n cls_name : string\n Name of class of component, e.g. ``\"Line\",\"Bus\",\"Generator\", \"StorageUnit\"``\n Examples\n --------\n >>> import pandas as pd\n >>> buses = ['Berlin', 'Frankfurt', 'Munich', 'Hamburg']\n >>> network.import_components_from_dataframe(\n pd.DataFrame({\"v_nom\" : 380, \"control\" : 'PV'},\n\t\t\tindex=buses),\n\t\t\t\"Bus\")\n >>> network.import_components_from_dataframe(\n pd.DataFrame({\"carrier\" : \"solar\", \"bus\" : buses, \"p_nom_extendable\" : True},\n\t\t\tindex=[b+\" PV\" for b in buses]),\n\t\t\t\"Generator\")\n See Also\n --------\n pypsa.Network.madd\n \"\"\"\n\n attrs = network.components[cls_name][\"attrs\"]\n\n static_attrs = attrs[attrs.static].drop(\"name\")\n non_static_attrs = attrs[~attrs.static]\n\n # Clean dataframe and ensure correct types\n dataframe = pd.DataFrame(dataframe)\n dataframe.index = dataframe.index.astype(str)\n\n for k in static_attrs.index:\n if k not in dataframe.columns:\n dataframe[k] = static_attrs.at[k, \"default\"]\n else:\n if static_attrs.at[k, \"type\"] == 'string':\n dataframe[k] = dataframe[k].replace({np.nan: \"\"})\n\n dataframe[k] = dataframe[k].astype(static_attrs.at[k, \"typ\"])\n\n #check all the buses are well-defined\n for attr in [\"bus\", \"bus0\", \"bus1\"]:\n if attr in dataframe.columns:\n missing = dataframe.index[~dataframe[attr].isin(network.buses.index)]\n if len(missing) > 0:\n logger.warning(\"The following %s have buses which are not defined:\\n%s\",\n cls_name, missing)\n\n non_static_attrs_in_df = non_static_attrs.index.intersection(dataframe.columns)\n old_df = network.df(cls_name)\n new_df = dataframe.drop(non_static_attrs_in_df, axis=1)\n if not old_df.empty:\n new_df = pd.concat((old_df, new_df), sort=False)\n\n if not new_df.index.is_unique:\n logger.error(\"Error, new components for {} are not unique\".format(cls_name))\n return\n\n setattr(network, network.components[cls_name][\"list_name\"], new_df)\n\n #now deal with time-dependent properties\n\n pnl = network.pnl(cls_name)\n\n for k in non_static_attrs_in_df:\n #If reading in outputs, fill the outputs\n pnl[k] = pnl[k].reindex(columns=new_df.index,\n fill_value=non_static_attrs.at[k, \"default\"])\n pnl[k].loc[:,dataframe.index] = dataframe.loc[:,k].values\n\n setattr(network,network.components[cls_name][\"list_name\"]+\"_t\",pnl)\n\ndef import_series_from_dataframe(network, dataframe, cls_name, attr):\n \"\"\"\n Import time series from a pandas DataFrame.\n Parameters\n ----------\n dataframe : pandas.DataFrame\n A DataFrame whose index is ``network.snapshots`` and\n whose columns are a subset of the relevant components.\n cls_name : string\n Name of class of component\n attr : string\n Name of time-varying series attribute\n Examples\n --------\n >>> import numpy as np\n >>> network.set_snapshots(range(10))\n >>> network.import_series_from_dataframe(\n pd.DataFrame(np.random.rand(10,4),\n columns=network.generators.index,\n\t\t\t 
index=range(10)),\n\t\t\t\"Generator\",\n\t\t\t\"p_max_pu\")\n See Also\n --------\n pypsa.Network.madd()\n \"\"\"\n\n df = network.df(cls_name)\n pnl = network.pnl(cls_name)\n list_name = network.components[cls_name][\"list_name\"]\n\n diff = dataframe.columns.difference(df.index)\n if len(diff) > 0:\n logger.warning(f\"Components {diff} for attribute {attr} of {cls_name} \"\n f\"are not in main components dataframe {list_name}\")\n\n attrs = network.components[cls_name]['attrs']\n expected_attrs = attrs[lambda ds: ds.type.str.contains('series')].index\n if attr not in expected_attrs:\n pnl[attr] = dataframe\n return\n\n attr_series = attrs.loc[attr]\n default = attr_series.default\n columns = dataframe.columns\n\n diff = network.snapshots.difference(dataframe.index)\n if len(diff):\n logger.warning(f\"Snapshots {diff} are missing from {attr} of {cls_name}.\"\n f\" Filling with default value '{default}'\")\n dataframe = dataframe.reindex(network.snapshots, fill_value=default)\n\n if not attr_series.static:\n pnl[attr] = pnl[attr].reindex(columns=df.index.union(columns), fill_value=default)\n else:\n pnl[attr] = pnl[attr].reindex(columns=(pnl[attr].columns.union(columns)))\n\n pnl[attr].loc[network.snapshots, columns] = dataframe.loc[network.snapshots, columns]", "sub_path": "pypsa/io TEST.py", "file_name": "io TEST.py", "file_ext": "py", "file_size_in_byte": 18693, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.Index", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": 
"attribute"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 134, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 146, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 150, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path", "line_number": 153, "usage_type": "attribute"}, {"api_name": "textwrap.dedent", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 278, "usage_type": "call"}, {"api_name": "os.path", "line_number": 278, "usage_type": "attribute"}, {"api_name": "numpy.sctypeDict.values", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.sctypeDict", "line_number": 301, "usage_type": "attribute"}, {"api_name": "pandas.MultiIndex", "line_number": 311, "usage_type": "attribute"}, {"api_name": "pandas.isnull", "line_number": 345, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 363, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 364, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 413, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 421, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 437, "usage_type": "call"}]} +{"seq_id": "580329585", "text": "\nimport requests\nimport json\n# requestlist = requests.get('https://github.com/PyCQA/bandit')\n\n# GET /repos/:owner/:repo/pulls\n\n\n\n# GET /repos/????/????/pulls\n\n# owner = \n# repo = \n\nrequestdata = requests.get('https://api.github.com/repos/PyCQA/bandit/pulls?state=open')\n\njsondata = (requestdata.text)\n\n#print(jsondata)\n\nloadeddata = json.loads(jsondata)\n\n\nfor id in loadeddata:\n print(id['title'])\n print(id['created_at']+' '+id['user']['login'])\n print(id['url'])\n print()\n", "sub_path": "HackerRank Java/src/PullRequestList.py", "file_name": "PullRequestList.py", "file_ext": "py", "file_size_in_byte": 487, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": 
"requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "87877803", "text": "import discord\nimport asyncio\nimport datetime\nfrom discord.ext import commands\nfrom discord import Embed\nfrom pathlib import Path\nimport sys, traceback\nimport os\ntry:\n import config\nexcept ImportError:\n print(\"Couldn't import config.py\")\n\nbot = commands.Bot(command_prefix=os.getenv('prefix'), description='I ban people who deserves so...')\n\nstartup_extensions = [\"moderation\",\n \"info\"]\n\n@bot.event\nasync def on_ready():\n print(\"[Info] Bot startup done.\")\n channel = bot.get_channel(int(os.getenv('botlog')))\n await channel.send(\"**[Info]** Bot startup done.\")\n print(\"\\n\")\n await bot.change_presence(game=discord.Game(name=\"with the banhammer\"))\n\n@bot.event\nasync def on_command_error(ctx: commands.Context, error):\n if isinstance(error, commands.NoPrivateMessage):\n await ctx.send(\"This command cannot be used in private messages\")\n elif isinstance(error, commands.BotMissingPermissions):\n await ctx.send(embed=Embed(color=discord.Color.red(), description=\"I need the permission `Ban Members` to sync the bans!\"))\n elif isinstance(error, commands.MissingPermissions):\n await ctx.send(embed=Embed(color=discord.Color.red(), description=\"You are missing the permission `Ban Members`!\"))\n elif isinstance(error, commands.CheckFailure):\n return\n elif isinstance(error, commands.CommandOnCooldown):\n return\n elif isinstance(error, commands.MissingRequiredArgument):\n return\n elif isinstance(error, commands.BadArgument):\n return\n elif isinstance(error, commands.CommandNotFound):\n return\n else:\n await ctx.send(\"Something went wrong while executing that command... 
Sorry!\")\n channel = bot.get_channel(int(os.getenv('botlog')))\n await channel.send(\"**[ERROR]** %s\" % error)\n\n@bot.event\nasync def on_guild_join(guild):\n channel = bot.get_channel(int(os.getenv('botlog')))\n print(\"[Info] Joined a new guild (`%s` - `%s`)\" % (guild.name, guild.id))\n await channel.send(\"**[Info]** Joined a new guild (`%s` - `%s`)\" % (guild.name, guild.id))\n banguild = bot.get_guild(int(os.getenv('banlistguild')))\n ban_list = await banguild.bans()\n for BanEntry in ban_list:\n await guild.ban(BanEntry.user, reason=f\"WatchDog - Global Ban\")\n\n@bot.event\nasync def on_message(message:discord.Message):\n if message.author.bot:\n return\n ctx:commands.Context = await bot.get_context(message)\n if message.content.startswith(os.getenv('prefix')):\n if ctx.command is not None:\n print(\"[Command] %s (%s) just used the %s command in the guild %s (%s)\" % (ctx.author.name, ctx.author.id, ctx.invoked_with, ctx.guild.name, ctx.guild.id))\n channel = bot.get_channel(int(os.getenv('botlog')))\n await channel.send(\"**[Command]** `%s` (%s) used the `%s` command in the guild `%s` (%s), in the channel `%s` (%s)\" % (ctx.author.name, ctx.author.id, ctx.invoked_with, ctx.guild.name, ctx.guild.id, ctx.channel.name, ctx.channel.id))\n await bot.invoke(ctx)\n else:\n return\n\nif __name__ == '__main__':\n for extension in startup_extensions:\n try:\n bot.load_extension(f\"cogs.{extension}\")\n except Exception as e:\n print(f\"[ERROR] Failed to load extention {extension}.\", e)\n\nbot.run(os.getenv('token'))\n", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 3342, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "discord.ext.commands.Bot", "line_number": 14, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 14, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 14, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 22, "usage_type": "call"}, {"api_name": "discord.Game", "line_number": 25, "usage_type": "call"}, {"api_name": "discord.ext.commands.Context", "line_number": 28, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 28, "usage_type": "name"}, {"api_name": "discord.ext.commands.NoPrivateMessage", "line_number": 29, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 29, "usage_type": "name"}, {"api_name": "discord.ext.commands.BotMissingPermissions", "line_number": 31, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 31, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 32, "usage_type": "call"}, {"api_name": "discord.Color.red", "line_number": 32, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 32, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.MissingPermissions", "line_number": 33, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 33, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 34, "usage_type": "call"}, {"api_name": "discord.Color.red", "line_number": 34, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 34, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.CheckFailure", "line_number": 35, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 35, "usage_type": "name"}, {"api_name": "discord.ext.commands.CommandOnCooldown", "line_number": 37, "usage_type": 
"attribute"}, {"api_name": "discord.ext.commands", "line_number": 37, "usage_type": "name"}, {"api_name": "discord.ext.commands.MissingRequiredArgument", "line_number": 39, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 39, "usage_type": "name"}, {"api_name": "discord.ext.commands.BadArgument", "line_number": 41, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 41, "usage_type": "name"}, {"api_name": "discord.ext.commands.CommandNotFound", "line_number": 43, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 43, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 47, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 52, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 55, "usage_type": "call"}, {"api_name": "discord.Message", "line_number": 61, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Context", "line_number": 64, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 64, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 65, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 68, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "327578316", "text": "import tweepy \r\nimport yfinance as yf \r\nimport os \r\n \r\n# Authenticate to Twitter \r\n# Use real key values instead of placeholders \r\napi_key = \"your_api_key\" \r\napi_secret_key =\"your_api_secret_key\" \r\naccess_token = \"your_access_token\" \r\naccess_token_secret= \"your_secret_access_token\" \r\n \r\n# Call the variables using tweepy \r\nauth = tweepy.OAuthHandler(api_key, api_secret_key) \r\nauth.set_access_token(access_token, access_token_secret) \r\n \r\n# Retrieve the stock data \r\napi = tweepy.API(auth) \r\ndata = yf.download(tickers='JW-A', period='1D', interval='1D') \r\n \r\nrow = data.iloc[0] \r\nstatus = \"The open price of Wiley's stock is %s. The close price of Wiley's stock is %s. 
Volume recorded is %s.\" %(str(round(row['Open'], 2)),str(round(row['Close'], 2)),str(int(row['Volume']))) \r\napi.update_status(status) ", "sub_path": "post_stock.py", "file_name": "post_stock.py", "file_ext": "py", "file_size_in_byte": 805, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "tweepy.OAuthHandler", "line_number": 13, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 17, "usage_type": "call"}, {"api_name": "yfinance.download", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "376053311", "text": "from typing import List\nfrom collections import Counter\n\nclass Solution:\n # https://leetcode.com/problems/permutations-ii/solution/\n # Backtracking with Groups of Numbers\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n ans = []\n self.helper(nums, [], Counter(nums), ans)\n return ans\n\n def helper(self, nums, element, counter, ans):\n if len(element) == len(nums):\n ans.append(element[::])\n return\n for num in counter:\n if counter[num] > 0:\n counter[num] -= 1\n element.append(num)\n self.helper(nums, element, counter, ans)\n element.pop()\n counter[num] += 1\n\n def permuteUnique2(self, nums: List[int]) -> List[List[int]]:\n results = []\n\n def backtrack(comb, counter):\n if len(comb) == len(nums):\n # make a deep copy of the resulting permutation,\n # since the permutation would be backtracked later.\n results.append(list(comb))\n return\n\n for num in counter:\n if counter[num] > 0:\n # add this number into the current combination\n comb.append(num)\n counter[num] -= 1\n # continue the exploration\n backtrack(comb, counter)\n # revert the choice for the next exploration\n comb.pop()\n counter[num] += 1\n\n backtrack([], Counter(nums))\n\n return results\n\n\nif __name__ == '__main__':\n solution = Solution()\n n = [1, 2, 1]\n result = solution.permuteUnique(n)\n print(result)\n\n result = solution.permuteUnique2(n)\n print(result)", "sub_path": "Backtracking/47-PermutationsII.py", "file_name": "47-PermutationsII.py", "file_ext": "py", "file_size_in_byte": 1778, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "typing.List", "line_number": 7, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 9, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 24, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "399040773", "text": "import pymongo\n\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n\nmydb = myclient[\"Case-Base\"]\n\nmycol = mydb[\"cases\"]\n\nmydict = { \"classification\": \"1\", \"cancer\": \"1\" }\n\nx = mycol.insert_one(mydict)\n\nprint(myclient.list_database_names())", "sub_path": "CBR/db.py", "file_name": "db.py", "file_ext": "py", "file_size_in_byte": 249, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pymongo.MongoClient", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "477311081", "text": "from numpy import *\nimport matplotlib.pyplot as plt\n\ndef convertCoordinate(pX, pY):\n x = pX/631.0\n y = (548.0 - pY)* 0.5 * sqrt(3.0) / 548\n return x, y\ndef coordinates():\n pXY = 
{'A':[0,548],'B':[315.5,0],'C':[631,548],'D':[16,525],'E':[27,504],'F':[169,540],'G':[173,519],'H':[285,381],'I':[270,540],'J':[354,483],'K':[458,492],'L':[429,202],'M':[317,548],'N':[394,548],'P':[173,548],'Q':[266,548],'DF1':[39, 514],'DF2':[68, 509],'DF3':[100, 513],'DF4':[122, 519],'DF5':[147, 529],'EG1':[45, 503],'EG2':[64, 501],'EG3':[96, 503],'EG4':[129, 508],'EG5':[156, 514],'GH1':[181, 493],'GH2':[195, 468],'GH3':[209, 449],'GH4':[228, 428],'GH5':[260, 398],'GH6':[177,503],'GH7':[180,498],'GH8':[222,435],'GH9':[269,392],'HL1':[293, 365],'HL2':[314, 326],'HL3':[342, 283],'HL4':[381, 239],'HL5':[417, 210],'HL6':[340,286],'HL7':[343,282],'HL8':[307,337],'HL9':[293,367] }\n coors = {}\n for key in pXY.keys():\n pX, pY = pXY.get(key)\n x, y = convertCoordinate(pX, pY)\n coors[key] = [x, y]\n return coors\ndef getLinearInfo(p1, p2):\n coors = coordinates()\n p1 = coors[p1]\n p2 = coors[p2]\n k = (p2[1] - p1[1]) / (p2[0] - p1[0])\n b = p2[1] - k * p2[0]\n return {'k':k,'b':b, 'range':[min([p1[0],p2[0]]), max([p1[0],p2[0]])]}\ndef getCurve(p1, p2, power=2):\n # power是指拟合函数的最高次项,power=3是指拟合三次函数\n coors = coordinates()\n xList = [coors[p1][0], coors[p2][0]]\n yList = [coors[p1][1], coors[p2][1]]\n for key in coors.keys():\n if (p1 in key) and (p2 in key):\n xList.append(coors[key][0])\n yList.append(coors[key][1])\n xArr = array(xList)\n yArr = array(yList)\n tmp = [xArr ** 2, xArr, ones(len(xArr))]\n keys = ['kk','k','b']\n if power > 2:\n for i in range(3, power+1):\n tmp.insert(0, xArr**i)\n keys.insert(0, keys[0] + 'k')\n A = vstack(tmp).T\n res = linalg.lstsq(A, yArr)[0]\n paraDict = {}\n for i in range(len(res)):\n paraDict[keys[i]] = res[i]\n paraDict['range'] = [min(xList), max(xList)]\n return paraDict\ndef borders():\n AB = getLinearInfo('A', 'B')\n BC = getLinearInfo('B', 'C')\n AC = getLinearInfo('A', 'C')\n FG = getLinearInfo('F', 'G')\n FP = getLinearInfo('F', 'P')\n IQ = getLinearInfo('I', 'Q')\n FI = getLinearInfo('F', 'I')\n GI = getLinearInfo('G', 'I')\n IJ = getLinearInfo('I', 'J')\n MJ = getLinearInfo('M', 'J')\n HJ = getLinearInfo('H', 'J')\n HK = getLinearInfo('H', 'K')\n JK = getLinearInfo('J', 'K')\n JM = getLinearInfo('J', 'M')\n NK = getLinearInfo('N', 'K')\n CK = getLinearInfo('C', 'K')\n S_DF = getCurve('D', 'F', 3)\n S_EG = getCurve('E', 'G', 7)\n S_GH = getCurve('G', 'H', 3)\n S_HL = getCurve('H', 'L', 3)\n bordersDict = {'AB':AB, 'BC':BC, 'AC':AC, 'FG':FG, 'FP':FP, 'IQ':IQ, 'FI':FI, 'GI':GI, 'IJ':IJ, 'MJ':MJ, 'HJ':HJ, 'HK':HK, 'JK':JK, 'JM':JM, 'NK':NK, 'CK':CK, 'S_DF':S_DF, 'S_EG':S_EG, 'S_GH':S_GH, 'S_HL':S_HL}\n return bordersDict\ndef blocks():\n coors = coordinates()\n blocksDict = {}\n bordersList = []\n keys = []\n for i in range(11):\n keys.append('block'+str(i + 1))\n for i in range(len(keys)):\n bordersDict = borders()\n # criterion:\n # more: more\n # less: less and equal\n # for block1\n if i == 0:\n AB = bordersDict['AB']\n AB['criterion'] = 'less'\n AB['range'] = [coors['E'][0], coors['B'][0]]\n BC = bordersDict['BC']\n BC['criterion'] = 'less'\n BC['range'] = [coors['B'][0], coors['L'][0]]\n S_EG = bordersDict['S_EG']\n S_EG['criterion'] = 'more'\n S_GH = bordersDict['S_GH']\n S_GH['criterion'] = 'more'\n S_HL = bordersDict['S_HL']\n S_HL['criterion'] = 'more'\n bordersList = [AB, BC, S_EG, S_GH, S_HL]\n # for block2\n elif i == 1:\n BC = bordersDict['BC']\n BC['criterion'] = 'less'\n BC['range'] = [coors['L'][0], coors['C'][0]]\n S_HL = bordersDict['S_HL']\n S_HL['criterion'] = 'less'\n HK = bordersDict['HK']\n HK['criterion'] = 'more'\n CK = 
bordersDict['CK']\n CK['criterion'] = 'more'\n bordersList = [BC, S_HL, HK, CK]\n # for block3\n elif i == 2:\n S_GH = bordersDict['S_GH']\n S_GH['criterion'] = 'less'\n HJ = bordersDict['HJ']\n HJ['criterion'] = 'less'\n IJ = bordersDict['IJ']\n IJ['criterion'] = 'more'\n GI = bordersDict['GI']\n GI['criterion'] = 'more'\n bordersList = [S_GH, HJ, IJ, GI]\n # for block4\n elif i == 3:\n HJ = bordersDict['HJ']\n HJ['criterion'] = 'more'\n HK = bordersDict['HK']\n HK['criterion'] = 'less'\n JK = bordersDict['JK']\n JK['criterion'] = 'more'\n bordersList = [HJ, HK, JK]\n # for block5\n elif i == 4:\n AC = bordersDict['AC']\n AC['criterion'] = 'more'\n AC['range'] = [coors['N'][0], coors['C'][0]]\n NK = bordersDict['NK']\n NK['criterion'] = 'less'\n CK = bordersDict['CK']\n CK['criterion'] = 'less'\n bordersList = [AC, NK, CK]\n # for block6\n elif i == 5:\n AC = bordersDict['AC']\n AC['criterion'] = 'more'\n AC['range'] = [coors['M'][0], coors['N'][0]]\n JM = bordersDict['JM']\n JM['criterion'] = 'less'\n JK = bordersDict['JK']\n JK['criterion'] = 'less'\n NK = bordersDict['NK']\n NK['criterion'] = 'more'\n bordersList = [AC, JM, JK, NK]\n # for block7\n elif i == 6:\n IQ = bordersDict['IQ']\n IQ['criterion'] = 'less'\n IJ = bordersDict['IJ']\n IJ['criterion'] = 'less'\n JM = bordersDict['JM']\n JM['criterion'] = 'more'\n AC = bordersDict['AC']\n AC['criterion'] = 'more'\n AC['range'] = [coors['Q'][0], coors['M'][0]]\n bordersList = [IQ, IJ, JM, AC]\n # for block8\n elif i == 7:\n AC = bordersDict['AC']\n AC['criterion'] = 'more'\n AC['range'] = [coors['A'][0], coors['P'][0]]\n AB = bordersDict['AB']\n AB['criterion'] = 'less'\n AB['range'] = [coors['A'][0], coors['D'][0]]\n S_DF = bordersDict['S_DF']\n S_DF['criterion'] = 'less'\n FP = bordersDict['FP']\n FP['criterion'] = 'less'\n bordersList = [AC, AB, S_DF, FP]\n # for block9\n elif i == 8:\n FG = bordersDict['FG']\n FG['criterion'] = 'less'\n GI = bordersDict['GI']\n GI['criterion'] = 'less'\n FI = bordersDict['FI']\n FI['criterion'] = 'more'\n bordersList = [FG, GI, FI]\n # for block10\n elif i == 9:\n AB = bordersDict['AB']\n AB['criterion'] = 'less'\n AB['range'] = [coors['D'][0], coors['E'][0]]\n S_EG = bordersDict['S_EG']\n S_EG['criterion'] = 'less'\n FG = bordersDict['FG']\n FG['criterion'] = 'more'\n S_DF = bordersDict['S_DF']\n S_DF['criterion'] = 'more'\n bordersList = [AB, S_EG, FG, S_DF]\n # for block11\n elif i == 10:\n FP = bordersDict['FP']\n FP['criterion'] = 'more'\n FI = bordersDict['FI']\n FI['criterion'] = 'less'\n IQ = bordersDict['IQ']\n IQ['criterion'] = 'more'\n AC = bordersDict['AC']\n AC['criterion'] = 'more'\n AC['range'] = [coors['P'][0], coors['Q'][0]]\n bordersList = [FP, FI, IQ, AC]\n blocksDict[keys[i]] = bordersList\n return blocksDict\ndef decideBlock(x, y):\n blocksDict = blocks()\n keys = []\n for i in range(11):\n keys.append('block'+str(i + 1))\n for key in keys:\n block = blocksDict[key]\n resList = []\n for border in block:\n if (x >= border['range'][0]) and (x <= border['range'][1]):\n # print(border)\n resY = 0.0\n for k in border.keys():\n if 'k' in k:\n resY += border[k] * (x**len(k))\n if 'b' in k:\n resY += border['b']\n if ((border['criterion'] == 'more') and (round(y,6) >= resY)) or ((border['criterion'] == 'less') and (round(y,6) <= resY * 1.00001)):\n resList.append(True)\n else:\n resList.append(False)\n if (False not in resList) and (len(resList) >= 2):\n return keys.index(key) + 1\ndef plotBorders():\n ax = plt.subplot(111)\n bordersDict = borders()\n for key in bordersDict.keys():\n 
border = bordersDict[key]\n x = arange(0, 1.0, 0.001)\n yList = []\n for e in x:\n y = 0.0\n for a in border.keys():\n if 'k' in a:\n y += border[a] * (e ** len(a))\n if 'b' in a:\n y += border[a]\n yList.append(y)\n yArr = array(yList)\n plt.plot(x, yArr, label=\"Border=%s\" % (key,), linewidth=0.5)\n plt.xlim(0, 1.0)\n plt.ylim(0, 0.866)\n plt.legend()\n leg = plt.legend(loc=2, ncol=2)\n leg.get_frame().set_alpha(0)\n plt.show()\n\n# plotBorders()\n\n# x,y = convertCoordinate(195,543)\n# print('x',x,'y',y)\n\n# x = 0.523317108369\n# y = 0.825638987404\n# res = decideBlock(x,y)\n# borY = sqrt(3) * (1 - x)\n# print('result = ',res,'border = ',borY)\n", "sub_path": "python_src/ternaryUtil.py", "file_name": "ternaryUtil.py", "file_ext": "py", "file_size_in_byte": 9779, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "matplotlib.pyplot.subplot", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 250, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 250, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 251, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 251, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}]} +{"seq_id": "412222894", "text": "import numpy as np\nimport torch.nn as nn\n\nfrom params import FEATURE_VALUE2ID, FEATURE_NUMBER\nfrom data.data_parser import Parser\nfrom data.data_splitter import Splitter\n\n\nclass DataGenerator(object):\n def __init__(self, splitter: Splitter, sample_space: int):\n self.label_size = splitter.get_label_size()\n self.data = splitter.load()\n self.name = splitter.name\n self.total_batch = 0\n self.sample_space = sample_space\n\n def generate(self, batch_size: int, mode: str):\n data = self.data[mode]\n np.random.shuffle(data)\n\n total_batch = (len(data) - 1) // batch_size + 1\n for batch_id in range(total_batch):\n begin = batch_id * batch_size\n end = (batch_id + 1) * batch_size\n batch_data = data[begin: end]\n\n batch_input, batch_label = [], []\n for per_data in batch_data:\n features = np.array(Parser.parse_train_file(per_data['path'])[::self.sample_space])\n # for col in range(features.shape[1]):\n # for row in range(features.shape[0]):\n # features[row, col] = FEATURE_VALUE2ID[col][features[row, col]]\n batch_input.append(features)\n batch_label.append(per_data['label'])\n yield np.array(batch_input), np.array(batch_label)\n\n def get_label_size(self) -> int:\n return self.label_size\n\n def get_total_batch(self, batch_size: int, mode: str) -> int:\n return (len(self.data[mode]) - 1) // batch_size + 1\n\n\nif __name__ == '__main__':\n splitter = Splitter(\n label_list=[0, 2, 7, 17],\n is_slice_data=False,\n is_create_negative_sample=True\n )\n splitter.split()\n generator = 
DataGenerator(splitter, 10)\n for idx, data in enumerate(generator.generate(batch_size=20)):\n print(idx, data[0].shape, data[1].shape)\n", "sub_path": "data/data_generator.py", "file_name": "data_generator.py", "file_ext": "py", "file_size_in_byte": 1894, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "data.data_splitter.Splitter", "line_number": 10, "usage_type": "name"}, {"api_name": "data.data_parser", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.random.shuffle", "line_number": 19, "usage_type": "call"}, {"api_name": "data.data_parser", "line_number": 19, "usage_type": "argument"}, {"api_name": "numpy.random", "line_number": 19, "usage_type": "attribute"}, {"api_name": "data.data_parser", "line_number": 21, "usage_type": "argument"}, {"api_name": "data.data_parser", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "data.data_parser.Parser.parse_train_file", "line_number": 29, "usage_type": "call"}, {"api_name": "data.data_parser.Parser", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "data.data_splitter.Splitter", "line_number": 45, "usage_type": "call"}, {"api_name": "data.data_parser", "line_number": 52, "usage_type": "name"}, {"api_name": "data.data_parser", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "379219377", "text": "from django import template\n\nregister = template.Library()\n\n@register.filter\ndef obj_rev_msg_flg(key, val):\n '''目標・振り返り作成のメッセージを出すか判定する'''\n if val == \"1\":\n # 目標:目標入力済\n # 振り返り:目標入力済&振り返り入力済\n return False\n else:\n # 目標:未入力\n # 振り返り:目標入力済&振り返り未入力\n if key[:1] == \"T\" and key[2:] == \"R\":\n # 対象が今年・月・週・日かつ振り返りの場合はメッセージを出さない\n return False\n else:\n return True\n\n\n@register.filter\ndef create_obj_rev_msg(key):\n '''目標・振り返り作成のメッセージを作成する'''\n kind_dic = {\"Y\":\"年\",\"M\":\"月\",\"W\":\"週\",\"D\":\"日\"}\n or_dic = {\"O\":\"目標\",\"R\":\"振返り\"}\n prev_this_dic = {\"T\":\"今\",\"P\":\"前\"}\n return \"%s%sの%sが未作成です。\" % (prev_this_dic[key[:1]], kind_dic[key[1:2]], or_dic[key[2:]])\n\n@register.filter\ndef create_obj_rev_link(key, target_date):\n '''目標・振り返り作成のリンクを作成する'''\n return \"/objectives/objrev/%s/%s\" % (key, target_date)\n\n@register.filter\ndef get_checkbox(key):\n if key == \"1\":\n return \"fas fa-check-circle\"\n else:\n return \"far fa-circle\"\n\n@register.filter\ndef get_btn_class(key):\n if key == \"1\":\n return \"btn btn-success\"\n else:\n return \"btn btn-outline-secondary\"", "sub_path": "templatetags/cmn_filter.py", "file_name": "cmn_filter.py", "file_ext": "py", "file_size_in_byte": 1488, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.template.Library", "line_number": 3, "usage_type": "call"}, {"api_name": "django.template", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "612865451", "text": "\nfrom spa.serverside import CSocketProServer\nimport sys\nfrom ctypes import *\nfrom sys import platform as os\n\n_ussLib_ = None\n_IsWin_ = (os == \"win32\")\nif _IsWin_:\n _ussLib_ = WinDLL('ustreamfile.dll')\nelse:\n _ussLib_ = CDLL('libustreamfile.so')\n\n# void WINAPI SetRootDirectory(const wchar_t *pathRoot);\nSetRootDirectory = _ussLib_.SetRootDirectory\nSetRootDirectory.argtypes = [c_wchar_p]\nSetRootDirectory.restype = None\n\nwith CSocketProServer() as server:\n handle = 
CSocketProServer.DllManager.AddALibrary('ustreamfile')\n if handle:\n SetRootDirectory('C:\\\\boost_1_60_0\\\\stage\\\\lib64')\n ok = server.Run(20901)\n if not ok:\n print('Error message = ' + CSocketProServer.ErrorMessage)\n print('Read a line to shutdown the application ......')\n line = sys.stdin.readline()\n", "sub_path": "tutorials/python/remote_file/rf_server/program.py", "file_name": "program.py", "file_ext": "py", "file_size_in_byte": 804, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.platform", "line_number": 8, "usage_type": "name"}, {"api_name": "spa.serverside.CSocketProServer", "line_number": 19, "usage_type": "call"}, {"api_name": "spa.serverside.CSocketProServer.DllManager.AddALibrary", "line_number": 20, "usage_type": "call"}, {"api_name": "spa.serverside.CSocketProServer.DllManager", "line_number": 20, "usage_type": "attribute"}, {"api_name": "spa.serverside.CSocketProServer", "line_number": 20, "usage_type": "name"}, {"api_name": "spa.serverside.CSocketProServer.ErrorMessage", "line_number": 25, "usage_type": "attribute"}, {"api_name": "spa.serverside.CSocketProServer", "line_number": 25, "usage_type": "name"}, {"api_name": "sys.stdin.readline", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 27, "usage_type": "attribute"}]} +{"seq_id": "416435846", "text": "from splinter import Browser\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\nimport datetime as dt\nimport time\n# import warnings\n\n# warnings.filterwarnings('ignore')\n\n\n\ndef scrape_all():\n\n # Initiate headless driver for deployment\n executable_path = {'executable_path': 'chromedriver'}\n browser = Browser('chrome', **executable_path)\n \n news_title, news_paragraph = mars_news(browser)\n\n # Run all scraping functions and store in dictionary.\n data = {\n \"news_title\": news_title,\n \"news_paragraph\": news_paragraph,\n \"featured_image\": featured_image(browser),\n \"hemispheres\": hemispheres(browser),\n \"weather\": twitter_weather(browser),\n \"facts\": mars_facts(),\n \"last_modified\": dt.datetime.now()\n }\n\n # Stop webdriver and return data\n browser.quit()\n return data\n\n\ndef mars_news(browser):\n url = \"https://mars.nasa.gov/news/\"\n browser.visit(url)\n #write your code here\n # Optional delay for loading the page\n browser.is_element_present_by_css(\"ul.item_list li.slide\", wait_time=1)\n # HTML Object\n html = browser.html\n # Parse HTML with Beautiful Soup\n soup = bs(html, 'html.parser')\n # Retrieve the latest element that contains news title and news_paragraph\n new_soup = soup.select_one(\"ul.item_list li.slide\")\n news_title = new_soup.find('div', class_='content_title').find('a').text\n news_paragraph = new_soup.find('div', class_='article_teaser_body').text\n \n return news_title, news_paragraph\n\n\ndef featured_image(browser):\n url = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n browser.visit(url)\n\n # HTML Object \n html_image = browser.html\n\n # Parse HTML with Beautiful Soup\n soup = bs(html_image, \"html.parser\")\n\n # Retrieve background-image url from style tag \n image_url = soup.find('article')['style'].replace('background-image: url(','').replace(');', '')[1:-1]\n\n # Website Url \n main_url = \"https://www.jpl.nasa.gov\"\n\n # Concatenate website url with scrapped route\n image_url = main_url + image_url\n\n return image_url\n\n\ndef hemispheres(browser):\n\n # A way to break up long strings\n url = (\n 
\"https://astrogeology.usgs.gov/search/\"\n \"results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n )\n\n browser.visit(url)\n\n html = browser.html\n\n # Parse HTML with Beautiful Soup\n soup = bs(html, \"html.parser\")\n\n # Create dictionary to store titles & links to images\n hemisphere_image_urls = []\n\n # Retrieve all elements that contain image information\n results = soup.find(\"div\", class_ = \"result-list\" )\n hemispheres = results.find_all(\"div\", class_=\"item\")\n\n # Iterate through each image\n for hemisphere in hemispheres:\n title = hemisphere.find(\"h3\").text\n title = title.replace(\"Enhanced\", \"\")\n end_link = hemisphere.find(\"a\")[\"href\"]\n image_link = \"https://astrogeology.usgs.gov/\" + end_link \n browser.visit(image_link)\n html = browser.html\n soup = bs(html, \"html.parser\")\n downloads = soup.find(\"div\", class_=\"downloads\")\n image_url = downloads.find(\"a\")[\"href\"]\n hemisphere_image_urls.append({\"title\": title, \"img_url\": image_url})\n\n return hemisphere_image_urls\n\n\ndef twitter_weather(browser):\n url = \"https://twitter.com/marswxreport?lang=en\"\n browser.visit(url)\n\n # wait couple of seconds to load the page. Otherwise search for weather tweets returns null.\n time.sleep(2)\n\n html = browser.html\n soup = bs(html, \"html.parser\")\n\n # Find all elements that contain tweets \n latest_tweets = soup.find_all('div', class_='css-901oao r-hkyrab r-1qd0xha r-a023e6 r-16dba41 r-ad9z0x r-bcqeeo r-bnwqim r-qvutc0')\n # # Retrieve all elements that contain news title in the specified range\n # # Look for entries that display weather related words to exclude non weather related tweets \n\n for tweet in latest_tweets:\n if 'sol' and 'pressure' in tweet.text:\n mars_weather = tweet.text\n break\n else: \n pass\n \n return mars_weather\n\ndef mars_facts():\n try:\n df = pd.read_html(\"http://space-facts.com/mars/\")[0]\n except BaseException:\n return None\n\n df.columns = [\"Description\", \"Value\"]\n # df.set_index(\"description\", inplace=True)\n\n # Add some bootstrap styling to \n return df.to_html(classes=\"table table-striped\", index=False)\n\n\nif __name__ == \"__main__\":\n\n # If running as script, print scraped data\n print(scrape_all())", "sub_path": "Mission_to_Mars/app/scrape_mars.py", "file_name": "scrape_mars.py", "file_ext": "py", "file_size_in_byte": 4562, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "splinter.Browser", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 45, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 62, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 89, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 106, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 119, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 122, "usage_type": "call"}, {"api_name": "pandas.read_html", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "25184733", "text": "import pygame\nheight = 800\nwidth = 600\ndisplay = pygame.display.set_mode((height, width)) #design game window\n\t\t#r g b\nblack = (0, 0, 0)\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblue = (0, 0, 255)\nwhite = (255, 255, 255)\ngrey = (122, 122, 122)\n\nx = height / 2\nx_speed 
= 0\ny = width / 2\ny_speed = 0\nsize = 20\ngame_on = True\n#gameloop\nwhile game_on:\n\tfor event in pygame.event.get():\n\t\t# print(event)\n\t\tif event.type == pygame.QUIT:\n\t\t\tpygame.quit()\n\t\t\tquit()\n\t\tif event.type == pygame.KEYDOWN:\n\t\t\tif event.key == pygame.K_RIGHT:\n\t\t\t\tx_speed = size\n\n\t\t\tif event.key == pygame.K_LEFT:\n\t\t\t\tx_speed = -size\t\n\n\t\t\tif event.key == pygame.K_UP:\n\t\t\t\ty_speed = -size\n\n\t\t\tif event.key == pygame.K_DOWN:\n\t\t\t\ty_speed = size\n\n\tx += x_speed\n\ty += y_speed\n\tdisplay.fill(black)\n\tdisplay.fill(red, rect=[x, y, size, size])\n\tpygame.display.update()\n\tpygame.time.Clock().tick(40)\n\n", "sub_path": "tut3.py", "file_name": "tut3.py", "file_ext": "py", "file_size_in_byte": 861, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pygame.display.set_mode", "line_number": 4, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 4, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 44, "usage_type": "attribute"}]} +{"seq_id": "497137892", "text": "\"\"\" iframe-eventsource transport \"\"\"\nimport asyncio\nfrom aiohttp import web, hdrs\nfrom sockjs.protocol import STATE_CLOSING, close_frame\nfrom sockjs.exceptions import SessionIsAcquired\n\nfrom .utils import session_cookie\nfrom .xhrstreaming import StreamingTransport\n\n\nclass EventsourceTransport(StreamingTransport):\n\n    @asyncio.coroutine\n    def send_blob(self, blob):\n        blob = b''.join((b'data: ', blob, b'\\r\\n\\r\\n'))\n        yield from self.response.write(blob)\n\n        self.size += len(blob)\n        if self.size > self.maxsize:\n            yield from self.manager.release(self.session)\n            self.waiter.set_result(True)\n\n    def process(self):\n        headers = list(\n            ((hdrs.CONTENT_TYPE, 'text/event-stream; charset=UTF-8'),\n             (hdrs.CACHE_CONTROL,\n              'no-store, no-cache, must-revalidate, max-age=0')) +\n            session_cookie(self.request))\n\n        # open sequence (sockjs protocol)\n        resp = self.response = web.StreamResponse(headers=headers)\n        resp.start(self.request)\n        resp.write(b'\\r\\n')\n\n        # get session\n        session = self.session\n\n        # session was interrupted\n        if session.interrupted:\n            self.send_blob(close_frame(1002, b\"Connection interrupted\"))\n\n        # session is closed\n        elif session.state == STATE_CLOSING:\n            yield from self.session._remote_closed()\n            self.send_blob(close_frame(3000, b'Go away!'))\n\n        else:\n            # acquire session\n            try:\n                yield from self.manager.acquire(self.session, self)\n            except SessionIsAcquired:\n                yield from self.send_blob(\n                    close_frame(2010, b\"Another connection still 
open\"))\n else:\n yield from self.waiter\n\n return resp\n", "sub_path": "sockjs/transports/eventsource.py", "file_name": "eventsource.py", "file_ext": "py", "file_size_in_byte": 1813, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "xhrstreaming.StreamingTransport", "line_number": 11, "usage_type": "name"}, {"api_name": "asyncio.coroutine", "line_number": 13, "usage_type": "attribute"}, {"api_name": "aiohttp.hdrs.CONTENT_TYPE", "line_number": 25, "usage_type": "attribute"}, {"api_name": "aiohttp.hdrs", "line_number": 25, "usage_type": "name"}, {"api_name": "aiohttp.hdrs.CACHE_CONTROL", "line_number": 26, "usage_type": "attribute"}, {"api_name": "aiohttp.hdrs", "line_number": 26, "usage_type": "name"}, {"api_name": "utils.session_cookie", "line_number": 28, "usage_type": "call"}, {"api_name": "aiohttp.web.StreamResponse", "line_number": 31, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 31, "usage_type": "name"}, {"api_name": "sockjs.protocol.close_frame", "line_number": 40, "usage_type": "call"}, {"api_name": "sockjs.protocol.STATE_CLOSING", "line_number": 43, "usage_type": "name"}, {"api_name": "sockjs.protocol.close_frame", "line_number": 45, "usage_type": "call"}, {"api_name": "sockjs.exceptions.SessionIsAcquired", "line_number": 51, "usage_type": "name"}, {"api_name": "sockjs.protocol.close_frame", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "321859277", "text": "from perceptron import Perceptron\nimport numpy as np\nfrom utils import gens\nfrom time import gmtime, strftime\nfrom os import getcwd\n\ndef build_adjacency_list (shape):\n adj_list = [[] for node_id in xrange(sum(shape))]\n \n if len(shape) == 1: return adj_list\n \n node_start = 0\n for size_cur, size_next in zip(shape[:-1], shape[1:]):\n node_finish = node_start + size_cur\n for node_id in xrange(node_start, node_finish):\n adj_list[node_id] = range(node_finish, node_finish+size_next)\n node_start = node_finish\n return adj_list\n\ndef build_reverse_adjacency_list (shape):\n r_adj_list = [[] for node_id in xrange(sum(shape))]\n if len(shape) == 1: return r_adj_list\n \n node_start = sum(shape)-1\n \n r_shape = shape[::-1]\n for size_prev, size_cur in zip(r_shape[1:], r_shape[:-1]):\n node_finish = node_start - size_cur\n for node_id in range(node_start, node_finish, -1):\n r_adj_list[node_id] = range(node_finish-size_prev + 1, node_finish+1)\n node_start = node_finish\n return r_adj_list\n\nclass NeuralNetwork (object):\n \n def __init__ (self, number_of_inputs, shape, filename = None):\n\n \n self.shape = shape\n \n self.num_inputs = number_of_inputs\n \n self.node_list = [Perceptron(node_id, size) for node_id, size in gens.id_inputs(number_of_inputs, shape)]\n \n self.adj_list = build_adjacency_list(shape)\n self.r_adj_list = build_reverse_adjacency_list(shape)\n \n self.node_output = [None for node_id in self.node_list]\n \n self.input_ids = [node_id for node_id in gens.id_empty(self.r_adj_list)]\n self.output_ids = [node_id for node_id in gens.id_empty(self.adj_list)]\n \n if filename is None:\n time_str = strftime(\"%H-%M-%S_%d-%b-%Y\", gmtime())\n #self.f = open(getcwd()+'/'+time_str+'.csv', 'w') # self enforcing csv convention\n else: self.f = open(filename + '.csv', 'w')\n \n def feed_forward (self, X):\n for node_id, edges in enumerate(self.r_adj_list):\n if len(edges) == 0: input_value = X\n else: input_value = [self.node_output[edge_id] for edge_id in edges]\n \n self.node_output[node_id] = 
self.node_list[node_id].output(input_value)\n \n def output (self, X):\n self.feed_forward(X)\n return np.array([self.node_output[node_id] for node_id in self.output_ids])\n", "sub_path": "src/nn/neural_network.py", "file_name": "neural_network.py", "file_ext": "py", "file_size_in_byte": 2487, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "perceptron.Perceptron", "line_number": 43, "usage_type": "call"}, {"api_name": "utils.gens.id_inputs", "line_number": 43, "usage_type": "call"}, {"api_name": "utils.gens", "line_number": 43, "usage_type": "name"}, {"api_name": "utils.gens.id_empty", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.gens", "line_number": 50, "usage_type": "name"}, {"api_name": "utils.gens.id_empty", "line_number": 51, "usage_type": "call"}, {"api_name": "utils.gens", "line_number": 51, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 54, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "454662445", "text": "import secret\nimport platform\nimport os\nfrom selenium import webdriver\nimport time\n\ndriver = webdriver.PhantomJS(executable_path=\"D:/工作/tophatter/phantomjs-2.1.1-windows/bin/phantomjs\")\nprint(123)\ndriver.get(\"https://tophatter.com/\")\n\n# 添加cookie\ncookie_list = {}\nfor part in secret.cookie.split('; '):\n kv = part.split('=', 1)\n # d = {kv[0]: kv[1]}\n cookie_list[kv[0]] = kv[1]\n\nfor item in cookie_list:\n driver.add_cookie({\n 'domain': '.tophatter.com',\n 'name': item,\n 'value': cookie_list[item],\n 'path': '/',\n 'expires': None,\n 'httponly': False,\n 'secure': False,\n })\n# driver.add_cookie(cookie_list)\ndriver.get(\"https://tophatter.com/\")\n\nif not os.path.exists('home_products'):\n os.makedirs('home_products')\n\nwhile True:\n print(driver.page_source)\n time.sleep(15)\n # with open('home_products/{}.html'.format(time.strftime(\"%Y-%m-%d %H.%M.%S\", time.localtime())), 'w', encoding='utf-8') as f:\n # f.write(driver.execute_script(\n # \"return document.documentElement.outerHTML\"))\n print(time.strftime(\"%Y-%m-%d %H.%M.%S\", time.localtime()))\n driver.refresh()\n", "sub_path": "4.py", "file_name": "4.py", "file_ext": "py", "file_size_in_byte": 1166, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "selenium.webdriver.PhantomJS", "line_number": 7, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 7, "usage_type": "name"}, {"api_name": "secret.cookie.split", "line_number": 13, "usage_type": "call"}, {"api_name": "secret.cookie", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 40, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "305106740", "text": "from keras.models import load_model\r\nfrom keras.preprocessing import image\r\nimport numpy as np\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\n\r\n\r\n# dimensions of our images\r\nimg_width, img_height = 64, 64\r\n\r\n# load the model we saved\r\nmodel = 
load_model('model.h5')\r\nmodel.compile(loss='binary_crossentropy',\r\n optimizer='adam',\r\n metrics=['accuracy'])\r\n\r\nmypath = \"predict/\"\r\nonlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\r\nprint(onlyfiles)\r\n\r\n\r\n# predicting images\r\nyes_counter = 0 \r\nno_counter = 0\r\nfor file in onlyfiles:\r\n img = image.load_img(mypath+file, target_size=(img_width, img_height))\r\n x = image.img_to_array(img)\r\n x = np.expand_dims(x, axis=0)\r\n \r\n images = np.vstack([x])\r\n classes = model.predict_classes(images, batch_size=10)\r\n classes = classes[0][0]\r\n \r\n if classes == 0:\r\n print(file + \": \" + 'Congratulations, You dont have brain tumor')\r\n no_counter += 1\r\n else:\r\n print(file + \": \" + 'Sorry, You Have brain bumor')\r\n yes_counter += 1\r\nprint(\"Brain_Tumor_Yes :\",yes_counter)\r\nprint(\"Brain_Tumor_No :\",no_counter)", "sub_path": "predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 1155, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "keras.models.load_model", "line_number": 12, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 26, "usage_type": "name"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "62581812", "text": "#!/usr/bin/python3\nimport struct\nimport sys\nimport pprint\nimport zlib\nfrom .resource import ResourceID, ResourceFilter\nfrom collections import namedtuple\n\nclass FormatException(Exception):\n pass\n\n# TODO: also store a reference to the DBPF that this entry is from\nclass IndexEntry(namedtuple(\"IndexEntry\", 'id offset size size_decompressed compression')):\n @property\n def deleted(self):\n return self.compression[0] == 0xFFE0\n\nfoo = [None]\nclass DBPFFile:\n \"\"\"A Sims4 DBPF file. 
This is the format in Sims4 packages, worlds, etc\"\"\"\n _CONST_TYPE = 1\n _CONST_GROUP = 2\n _CONST_INSTAMCE_EX = 4\n \n def __init__(self, name, prescan_index=True):\n self.file = open(name, \"rb\")\n self._index_cache = None\n self._read_header()\n if prescan_index:\n self._index_cache = list(self.scan_index(full_entries=True))\n def _read_header(self):\n\n self.file.seek(0)\n buf = self.file.read(96)\n\n if buf[0:4] != b\"DBPF\":\n raise FormatException(\"Wrong magic\")\n self.file_version = struct.unpack_from(\"=II\", buf, 4)\n if self.file_version != (2,1):\n raise FormatException(\"Don't know how to handle anything other than DBPF v2.1\")\n\n # TODO: check the accuracy of this; it's based on code I had\n # lying around for Sims3 DBPF files\n self._index_count, self._index_size, self._index_vsn, self._index_off = struct.unpack_from(\"=I4xI12xII\", buf, 36)\n\n def _get_dword(self, dword=struct.Struct(\"=I\")):\n \"\"\"This is only ever intended to be called with no arguments; the\n dword kwarg is a function static\"\"\"\n return dword.unpack(self.file.read(4))[0]\n def scan_index(self, filter=None, full_entries=False):\n \"\"\"Iterate over the items that match a filter\"\"\"\n if full_entries:\n xform = lambda x: x\n else:\n xform = lambda x: x.id\n if self._index_cache is not None:\n if filter is None:\n for x in self._index_cache:\n yield xform(x)\n else:\n for x in self._index_cache:\n if filter.match(x.id):\n yield xform(x)\n return\n if self._index_off == 0:\n if self._index_count == 0:\n # Empty DBPF file.\n return\n else:\n raise FormatException(\"Missing index\")\n self.file.seek(self._index_off)\n flags = self._get_dword()\n\n if flags & self._CONST_TYPE:\n entry_type = self._get_dword()\n if flags & self._CONST_GROUP:\n entry_group = self._get_dword()\n if flags & self._CONST_INSTAMCE_EX:\n entry_instance_ex = self._get_dword()\n\n for n in range(self._index_count):\n if not flags & self._CONST_TYPE:\n entry_type = self._get_dword()\n if not flags & self._CONST_GROUP:\n entry_group = self._get_dword()\n if not flags & self._CONST_INSTAMCE_EX:\n entry_instance_ex = self._get_dword()\n entry_instance = self._get_dword()\n entry_offset = self._get_dword()\n entry_size = self._get_dword()\n entry_size_decompressed = self._get_dword()\n if entry_size & 0x80000000:\n entry_compressed = struct.unpack(\"=HH\", self.file.read(4))\n else:\n entry_compressed = (0,1)\n entry_size = entry_size & 0x7FFFFFFF\n rid = ResourceID(entry_group, (entry_instance_ex << 32) | entry_instance, entry_type)\n if filter is None or filter.match(rid):\n # The process of reading the index may be interleaved\n # with reading the contents of the file. 
This way, we\n # don't lose the file pointer\n cur_pos = self.file.tell()\n if full_entries:\n yield IndexEntry(rid, entry_offset, entry_size,\n entry_size_decompressed, entry_compressed)\n else:\n yield rid\n self.file.seek(cur_pos)\n\n \n def __getitem__(self, item):\n if isinstance(item, int):\n item = self._index_cache[item]\n elif not isinstance(item, IndexEntry):\n # It must be a filter\n itemlist = self.scan_index(item, full_entries=True)\n try:\n item = next(itemlist)\n except StopIteration:\n raise KeyError(\"No item found\")\n try:\n next(itemlist)\n except StopIteration:\n pass\n else:\n raise KeyError(\"More than one item found\")\n # At this point, we know that the item is an IndexEntry;\n # hopefully it is one that refers to this file ;-)\n self.file.seek(item.offset)\n ibuf = self.file.read(item.size)\n\n if item.compression[0] == 0:\n return ibuf # uncompressed\n elif item.compression[0] == 0xFFFE:\n # BUG: I'm guessing \"streamable compression\" is the same\n # as RefPack, with a limited buffer size. This may or may\n # not be true, and even if it is, I'd need to know the\n # size of the buffer to do anything sensible.\n return decodeRefPack(ibuf)\n elif item.compression[0] == 0xFFFF:\n return decodeRefPack(ibuf)\n elif item.deleted:\n raise KeyError(\"Deleted file\")\n elif item.compression[0] == 0x5A42:\n # BUG: Not sure if the gzip header is needed. If it is,\n # change -15 in the next line to 15\n return zlib.decompress(ibuf, 15, item.size_decompressed)\n\ndef decodeRefPack(ibuf):\n \"\"\"Decode the DBPF compression. ibuf must quack like a bytes\"\"\"\n # Based on http://simswiki.info/wiki.php?title=Sims_3:DBPF/Compression\n # Sims4 compression has the first two bytes swapped\n \n iptr = optr = 0\n flags = ibuf[0]\n if ibuf[1] != 0xFB:\n raise FormatException(\"Invalid compressed data\")\n iptr = 2\n osize = 0 # output size\n for _ in range(4 if flags & 0x80 else 3):\n osize = (osize << 8) | ibuf[iptr]\n iptr += 1\n\n obuf = bytearray(osize)\n while iptr < len(ibuf):\n numPlaintext = numToCopy = copyOffset = 0\n # Copyoffset is 0-indexed back from obuf[optr]\n # I.e., copyoffset=0 ==> copying starts at obuf[optr-1]\n \n # Read a control code\n cc0 = ibuf[iptr]; iptr+=1\n if cc0 <= 0x7F:\n cc1 = ibuf[iptr]; iptr+=1\n cc = (cc0,cc1)\n numPlaintext = cc0 & 0x03\n numToCopy = ((cc0 & 0x1C) >> 2) + 3\n copyOffset = ((cc0 & 0x60) << 3) + cc1\n elif cc0 <= 0xBF:\n cc1 = ibuf[iptr]; iptr+=1\n cc2 = ibuf[iptr]; iptr+=1\n cc = (cc0,cc1,cc2)\n numPlaintext = (cc1 & 0xC0) >> 6\n numToCopy = (cc0 & 0x3F) + 4\n copyOffset = ((cc1 & 0x3F) << 8) + cc2\n elif cc0 <= 0xDF:\n cc1 = ibuf[iptr]; iptr+=1\n cc2 = ibuf[iptr]; iptr+=1\n cc3 = ibuf[iptr]; iptr+=1\n cc = (cc0,cc1,cc2,cc3)\n numPlaintext = cc0 & 0x03\n numToCopy = ((cc0 & 0x0C) << 6) + cc3 + 5\n copyOffset = ((cc0 & 0x10) << 12) + (cc1 << 8) + cc2\n elif cc0 <= 0xFB:\n cc = (cc0,)\n numPlaintext = ((cc0 & 0x1F) << 2) + 4\n numToCopy = 0\n else:\n cc = (cc0,)\n numPlaintext = cc0 & 3\n numToCopy = 0\n\n # Copy from source\n obuf[optr:optr+numPlaintext] = ibuf[iptr:iptr+numPlaintext]\n iptr += numPlaintext\n optr += numPlaintext\n\n # Copy from output\n for _ in range(numToCopy):\n obuf[optr] = obuf[optr - 1 - copyOffset]\n optr += 1\n # Done decompressing\n return bytes(obuf)\n\n \n", "sub_path": "lib/s4py/dbpf.py", "file_name": "dbpf.py", "file_ext": "py", "file_size_in_byte": 7957, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "collections.namedtuple", 
"line_number": 13, "usage_type": "call"}, {"api_name": "struct.unpack_from", "line_number": 38, "usage_type": "call"}, {"api_name": "struct.unpack_from", "line_number": 44, "usage_type": "call"}, {"api_name": "struct.Struct", "line_number": 46, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 93, "usage_type": "call"}, {"api_name": "resource.ResourceID", "line_number": 97, "usage_type": "call"}, {"api_name": "zlib.decompress", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "1869997", "text": "from django.test import TestCase\n\n# Create your tests here.\nfrom blog.forms import CommentForm, BloggerSignUpForm, DeleteUserForm\n\n\n\"\"\"\nCOMMENT FORM\n\"\"\"\n\nclass CommentFormTest(TestCase):\n\n def test_commentform_comment_field_label(self):\n form = CommentForm()\n self.assertTrue(\n form.fields['comment'].label == 'Comment' or\n form.fields['comment'].label == None\n )\n\n def test_commentform_comment_valid(self):\n comment = 'This is a comment'\n form = CommentForm(data={'comment': comment})\n self.assertTrue(form.is_valid())\n\n def test_commentform_blank_invalid(self):\n comment = ''\n form = CommentForm(data={'comment': comment})\n self.assertFalse(form.is_valid())\n\n\"\"\"\nBLOGGER SIGN UP FORM\n\"\"\"\n\nclass BloggerSignUpFormTest(TestCase):\n\n \"\"\"\n LABELS\n \"\"\"\n\n def test_signupform_first_name_label(self):\n form = BloggerSignUpForm()\n self.assertTrue(\n form.fields['first_name'].label == 'First name' or\n form.fields['first_name'].label == None\n )\n\n def test_signupform_last_name_label(self):\n form = BloggerSignUpForm()\n self.assertTrue(\n form.fields['last_name'].label == 'Last name' or\n form.fields['last_name'].label == None\n )\n\n def test_signupform_nickname_label(self):\n form = BloggerSignUpForm()\n self.assertTrue(\n form.fields['nickname'].label == 'Nickname' or\n form.fields['nickname'].label == None\n )\n\n def test_signupform_bio_label(self):\n form = BloggerSignUpForm()\n self.assertTrue(\n form.fields['bio'].label == 'Bio' or \n form.fields['bio'].label == None\n )\n\n \"\"\"\n VALIDATION\n \"\"\"\n\n def test_signupform_valid(self):\n form = BloggerSignUpForm(\n data={\n 'first_name': 'John',\n 'last_name': 'Doe',\n 'nickname': 'johnnyboy',\n 'bio': \"John's biography\"\n }\n )\n self.assertTrue(form.is_valid())\n\n\"\"\"\nDELETE USER FORM\n\"\"\"\n\nclass DeleteUserFormTest(TestCase):\n\n \"\"\"\n LABELS\n \"\"\"\n\n def test_deleteuserform_username_label(self):\n form = DeleteUserForm()\n self.assertTrue(\n form.fields['username'].label == 'Username' or\n form.fields['username'].label == None\n \n )\n", "sub_path": "blog/tests/test_forms.py", "file_name": "test_forms.py", "file_ext": "py", "file_size_in_byte": 2425, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.test.TestCase", "line_number": 11, "usage_type": "name"}, {"api_name": "blog.forms.CommentForm", "line_number": 14, "usage_type": "call"}, {"api_name": "blog.forms.CommentForm", "line_number": 22, "usage_type": "call"}, {"api_name": "blog.forms.CommentForm", "line_number": 27, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 34, "usage_type": "name"}, {"api_name": "blog.forms.BloggerSignUpForm", "line_number": 41, "usage_type": "call"}, {"api_name": "blog.forms.BloggerSignUpForm", "line_number": 48, "usage_type": "call"}, {"api_name": "blog.forms.BloggerSignUpForm", "line_number": 55, "usage_type": "call"}, {"api_name": "blog.forms.BloggerSignUpForm", 
"line_number": 62, "usage_type": "call"}, {"api_name": "blog.forms.BloggerSignUpForm", "line_number": 73, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 87, "usage_type": "name"}, {"api_name": "blog.forms.DeleteUserForm", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "521177120", "text": "#!/usr/bin/python\r\n\r\n#coding=utf-8\r\n\r\n\r\nimport cgitb\r\ncgitb.enable()\r\n\r\nimport cgi\r\nform = cgi.FieldStorage()\r\n\r\n\"\"\"\r\nexecfile('product_functions.py');\r\nform=CgiForm();\r\nform.fromUrl('http://localhost/cgi-bin/cgi_main.py?workspace=food-groceries');\r\nform.fromUrl('http://localhost/cgi-bin/cgi_main.py?workspace=blog-electronics');\r\nform.fromUrl('http://localhost/cgi-bin/cgi_main.py');\r\nform.fromUrl('http://localhost/cgi-bin/cgi_main.py?topic_id=92b93920d6a4007084b0ef87fc8d44a7%2D0');\r\n\r\n\"\"\"\r\nworkspace=form.getvalue('workspace');\r\nif workspace==None: workspace=\"\";\r\n\r\n\r\nimport os; \r\nPATH_CGI_DIR=os.getcwd()+\"/\";\r\nif PATH_CGI_DIR.find('cgi-bin/')<0: PATH_CGI_DIR+=\"cgi-bin/\";\r\n\r\nexecfile(PATH_CGI_DIR+\"load_data.py\");\r\n\r\n\r\nimport time;\r\nfrom datetime import datetime;\r\nworkspaces=[('blog-electronics','Home Electronics'), \r\n\t\t\t('food-groceries', 'Food & Groceries'), \r\n\t\t\t('health-vitamin', 'Health & Vitamins'),\r\n\t\t\t('music-blog', 'Book/Music/Video'),\r\n\t\t\t('health-beauty', 'Health & Beauty'),\r\n\t\t\t];\r\n\r\ntitle=\"Trending Topics\"\r\n\r\n\r\ntopic_id=form.getvalue('topic_id'); \r\nshow_multiple_stories=False;\r\n\r\nnextPageUrl='';\r\nif workspace:\r\n\tnextPageUrl+='workspace='+workspace;\r\n\r\ncurPage=1;\r\ntry:\r\n\tcurPage=int(form.getvalue('page'));\r\nexcept:\r\n\tpass;\r\n\r\nperPage=50; #the max number of entries per page.\r\nmaxCount=0; #the max number of entries in solr, in order to do the pagination.\r\ntotalPage=0; #total page.\r\npageStartDate='';\r\npageEndDate='';\r\n\r\nentries=[];\r\ntop_products=defaultdict(list); \r\nif topic_id: \r\n\t# ------------ show one topics ------------\r\n\tp,s=solr_query_var({'id':topic_id, }, perpage=50, page=0, to_print=0);\r\n\tif p:\r\n\t\tworkspace=p[0]['category'].split('__')[0];\r\n\t\tcategory_id=p[0]['category'];\r\n\t\tprodname=p[0]['name'];\r\n\t\tfor p1 in p: \r\n\t\t\tentries.append(decode_post_data(p1['data_t']));\r\n\t\t\tentries[-1]['id']=p1['id'];\r\n\t\t\tif 'date_dt' in p1.keys(): \r\n\t\t\t\tentries[-1]['date_dt']=strptime(p1['date_dt'], '%Y-%m-%dT%H:%M:%SZ');\r\n\t\t\telse: #for old data without date_dt, use updated date\r\n\t\t\t\tentries[-1]['date_dt']=entries[-1]['date'];\r\n\t\tif show_multiple_stories: \r\n\t\t\tp,s=solr_query_var({'category':category_id, 'name':prodname}, perpage=50, page=0, to_print=0);\r\n\t\t\tfor p1 in p: \r\n\t\t\t\tentries.append(decode_post_data(p1['data_t']));\r\n\t\t\t\tentries[-1]['id']=p1['id'];\r\nelse:\r\n\t# ------------ list all topics ------------\r\n\tfor w,n in workspaces: \r\n\t\tif not workspace or workspace == w: \r\n\t\t\tp,s=solr_query_var({'category':w+'__top_products', }, perpage=50, page=curPage-1, to_print=0, params='sort=date_dt+desc');\r\n\t\t\tif len(p):\r\n\t\t\t\tpageStartDate=p[0]['date_s'];\r\n\t\t\t\tpageEndDate=p[-1]['date_s'];\r\n\t\t\tif int(s['response']['numFound']) > maxCount:\r\n\t\t\t\tmaxCount = int(s['response']['numFound']);\r\n\t\t\tfor p1 in p: \r\n\t\t\t\ttop_products[w].append(decode_post_data(p1['data_t']));\r\n\t\t\t\ttop_products[w][-1]['id']=p1['id'];\r\n\t\t\t\tif 'date_dt' in p1.keys(): 
\r\n\t\t\t\t\ttop_products[w][-1]['date_dt']=strptime(p1['date_dt'], '%Y-%m-%dT%H:%M:%SZ');\r\n\t\t\t\telse: #for old data without date_dt, use updated date\r\n\t\t\t\t\ttop_products[w][-1]['date_dt']=top_products[w][-1]['date'];\r\n\t#\r\n\tblack_list=['doubleclick.net', 'logo', '/ad/', '125x125'];\r\n\timg_min_size=200;\r\n\tadded_entries=[];\r\n\tfor w,n in workspaces:\r\n\t\tfor t in top_products[w]:\r\n\t\t\tif t['product'].lower() not in added_entries and ( \\\r\n\t\t\t\t\t(t['type_image']['imgwidth'] and int(t['type_image']['imgwidth'])>=img_min_size and t['type_image']['imgheight'] and int(t['type_image']['imgheight'])>=img_min_size) \\\r\n\t\t\t\t\tor w in ['music-blog', 'helium-entertainment', ]): \r\n\t\t\t\tif not sum([t['type_image']['img'].find(k)>=0 for k in black_list]) and t['type_image']['img'][-4:].lower() not in ['.gif',]:\r\n\t\t\t\t\tadded_entries.append(t['product'].lower()); \r\n\t\t\t\t\tentries.append(t); \r\n\r\nentries=sorted([k for k in entries if k['date_dt'] and isinstance(k['date_dt'],datetime) ], key=lambda k: k['date_dt'], reverse=True);\r\n#entries=sorted(entries, key=lambda k: k['date_dt'], reverse=True);\r\n\r\n\r\n\r\n# ------------ html page head -----------------\r\nprint(\"content-type: text/html\\n\\n\"); \r\n\r\nprint(\"\"\"\r\n\r\n\r\n\r\n\t\r\n\t\r\n\t\r\n\t\r\n\t\t\r\n\t\"\"\"+title+\"\"\"\r\n\t\r\n\t \r\n\t\r\n\r\n\t\r\n\t\r\n \t\r\n\t\r\n \r\n\r\n\r\n\t\r\n\t
\r\n\r\n\r\n\t\t
\r\n\r\n

\"\"\"+title+\"\"\"\r\n \r\n \r\n \r\n \r\n\r\n \r\n \r\n \r\n

\r\n \r\n\r\n\r\n
\r\n
    \r\n\t \"\"\"); \r\n\r\n# ------------ page menu -----------------\r\nprint(\"\"\"\r\n\t\t\t
  • \r\n\t\t\t\t\r\n\t\t\t\tFront Page\r\n\t\t\t
  • \r\n\t\"\"\" % ('selected'*(not workspace)) ); \r\n\r\nfor w,n in workspaces: \r\n\tprint(\"\"\"\r\n\t\t
  • \r\n\t\t\r\n\t\t%(n)s\r\n\t\t
  • \r\n\t\t\"\"\"%{'w':w, 'n': n, 'sel':'selected'*(w==workspace) } ); \r\n\r\nprint(\"\"\"
\r\n
\r\n\r\n\t
\r\n\t\r\n\"\"\");\r\n\r\n# ----------------------pagination area--------------------------------\r\nif topic_id:\r\n\tpass;\r\nelse:\r\n\tif(curPage<=0):\r\n\t\tcurPage=1;\r\n\t\t\r\n\tif maxCount % perPage==0:\r\n\t\ttotalPage = maxCount/perPage;\r\n\telse:\r\n\t\ttotalPage = maxCount/perPage + 1;\r\n\t\t\r\n\tif curPage>totalPage:\r\n\t\tcurPage=totalPage;\r\n\t\t\r\n\ttsp = time.strptime(pageStartDate, '%Y-%m-%d')\r\n\tdate_object=datetime(tsp[0],tsp[1],tsp[2]);\r\n\tprint(\"\"\"\r\n\t
\r\n\t\t
%(pageStartDate)s
\r\n\t\t
\r\n\t\"\"\"%{'pageStartDate':date_object.strftime('%b %d %Y'), });\r\n\tif curPage-1<=0:\r\n\t\tprint(\"\"\"\r\n\t\t\t\t<<Previous\r\n\t\t\t\t\"\"\");\r\n\telse:\r\n\t\tprint(\"\"\"\r\n\t\t\t\t<<Previous\r\n\t\t\"\"\"%{'previousPage':curPage-1, 'nextPageUrl':nextPageUrl,});\r\n\t\r\n\t\r\n\tprint(\"\"\"\r\n\t\t\t\t%(curPage)s\r\n\t\t\"\"\"%{'curPage':curPage,});\r\n\t\r\n\t\r\n\tif curPage>=totalPage:\r\n\t\tprint(\"\"\"\r\n\t\t\t\tNext>>\r\n\t\t\t\t\"\"\");\r\n\telse:\r\n\t\tprint(\"\"\"\r\n\t\t\t\tNext>>\r\n\t\t\"\"\"%{'nextPage':curPage+1, 'nextPageUrl':nextPageUrl,});\r\n\t\t\r\n\tprint(\"\"\"\r\n\t\t\t
\r\n\t
\r\n\t\"\"\");\r\n#----------------end of pagination-------------------------------------------\r\n\t\r\n\t\r\n\r\n# ------------ page content -----------------\r\n\r\nprint(\"\"\"\r\n\t\t
\r\n\t\t\t
\r\n\t\t\t
\r\n\t\"\"\");\r\n\r\nif topic_id: \r\n\t# ------------ show one topics ------------\r\n\tsummarizer=text_summarizer();\r\n\tshown_articles=[];\r\n\tfor i, entry in enumerate(entries): \r\n\t\tnew_articles=[headline['article']['id'] for headline in entry['headlines'][0:2] if headline['article']['id'] not in shown_articles];\r\n\t\tif not new_articles: continue; \r\n\t\tshown_articles+=new_articles;\r\n\t\t#\r\n\t\tprint('
'); \r\n\t\tprint(\"\"\" \");\r\n\t\t#\r\n\t\t# --- show some headline titles ---\r\n\t\thtml=\"\"\"
\r\n\t\t\t%(date)s \r\n\t\t\t\"\"\"%{'date':entry['date_dt'].strftime('%Y-%m-%d') };\r\n\t\tfor headline in entry['headlines'][0:2]: \r\n\t\t\tarticle=headline['article'];\r\n\t\t\tsentences=[];\r\n\t\t\tif headline['r'] and 'isentence0' in headline['r'][0].keys(): \r\n\t\t\t\tfor r in headline['r']: r['isentence']=r['isentence0']; \r\n\t\t\t\tblog_text2=[];\r\n\t\t\t\tpost,s=solr_query_var({'id':article['id']}); \r\n\t\t\t\tif post: \r\n\t\t\t\t\tpost_data1=decode_post_data(post[0]['data_t']); \r\n\t\t\t\t\tblog_text2=[' '+' '.join([p[0] for p in pos])+' ' for pos in post_data1['blog_pos']];\r\n\t\t\t\tsentences=summarizer.get_ranked_article_sentences(blog_text2, headline['r'], 100);\r\n\t\t\t#\r\n\t\t\thtml+=\"\"\"
\r\n\t\t\t\t\t\t\t %(story)s \r\n\t\t\t\t\t
\r\n\t\t\t\t\t\t— %(title)s: \r\n\t\t\t\t\t\t%(source)s \r\n\t\t\t\t\t\t%(date)s\r\n\t\t\t\t\t
\r\n\t\t\t\t\t
\r\n\t\t\t\t\t\"\"\"%{\r\n\t\t\t\t'story': '

'.join(sentences[0:2]),\r\n\t\t\t\t'source': article['source'],\r\n\t\t\t\t'title': escape_html(article['title']), \r\n\t\t\t\t'url': article['url'], \r\n\t\t\t\t'date': article['date'].strftime(\"%Y-%m-%d\")\r\n\t\t\t\t};\r\n\t\thtml+=\"

\";\r\n\t\tprint(html);\r\n\t\t#\r\n\t\tprint(\"
\");\r\n\t#\r\nelse: #no topic_id, list trending topics\r\n\t# ------------ list trending topics ------------\r\n\tfor i, entry in enumerate(entries): \r\n\t\tif i%4==0: \r\n\t\t\tif i==0: \r\n\t\t\t\tprint('
'); \r\n\t\t\telse:\r\n\t\t\t\tprint('
');\r\n\t\tprint(\"\"\"
\r\n\t\t\t\r\n\t\t\t
\r\n\t\t\t\t\"\"\"+escape_html(entry['product'])+\"\"\"\r\n\t\t\t
\r\n\t\t\t
\r\n\t\t\t\"\"\");\r\n\t\tif 'type_image' in entry.keys() and entry['type_image']['img']: \r\n\t\t\tprint(\"\"+escape_html(entry[\"); \r\n\t\telse:\r\n\t\t\twords=entry['summary'].split(); \r\n\t\t\tif len(words)>7: words=words[:7]+['...']; \r\n\t\t\tprint(\" %s \"%' '.join(words)); \r\n\t\tprint(\"\"\"\r\n\t\t\t
\r\n\t\t\t
\r\n\t\t\t\"\"\");\r\n\t\t#product recommendation\r\n\t\tif 'prod_tags' in entry.keys() and entry['prod_tags']:\r\n\t\t\tif i%4==3:\r\n\t\t\t\tprint('
');\r\n\t\t\telse:\r\n\t\t\t\tprint('
');\r\n\t\t\tF_name, F_subcat, F_url, F_img, F_price, F_price0, F_seller=range(7);\r\n\t\t\tprint('
');\r\n\t\t\tfor tag in entry['prod_tags'][:4]:\r\n\t\t\t\tif tag['products']:\r\n\t\t\t\t\tprint('' %{\r\n\t\t\t\t\t\t\t\t'url': tag['products'][0][F_url], \r\n\t\t\t\t\t\t\t\t'name': tag['products'][0][F_name], \r\n\t\t\t\t\t\t\t\t'price': tag['products'][0][F_price], \r\n\t\t\t\t\t\t\t\t'img': tag['products'][0][F_img],\r\n\t\t\t\t\t\t\t} );\r\n\t\t\tprint('

%(price)s
');\r\n\t\t\tprint(' '); \r\n\t\tprint(\"\"\"\r\n\t\t\t \r\n\t\t\t\"\"\");\r\n\t#\r\n\tprint(' ');\r\n\r\nprint(\"\"\"
\r\n\t \r\n\t \r\n\t \r\n\t \r\n\t\"\"\");\r\n\t\r\n\r\n# ------------ page bottom -----------------\r\n\r\nprint(\"\"\"\r\n\t \r\n\t\r\n\t\r\n\t\"\"\"); \r\n\r\n", "sub_path": "cgi-bin/cgi_main.py", "file_name": "cgi_main.py", "file_ext": "py", "file_size_in_byte": 13658, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "cgitb.enable", "line_number": 7, "usage_type": "call"}, {"api_name": "cgi.FieldStorage", "line_number": 10, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 114, "usage_type": "argument"}, {"api_name": "time.time", "line_number": 158, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 224, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 225, "usage_type": "call"}]} +{"seq_id": "276507477", "text": "import base64\nimport json\nimport logging\nimport re\nimport socket\nimport time\nfrom telnetlib import Telnet\nfrom threading import Thread\nfrom typing import Optional, Union\n\nfrom paho.mqtt.client import Client, MQTTMessage\nfrom . import bluetooth, utils\nfrom .miio_fix import Device\nfrom .unqlite import Unqlite, SQLite\nfrom .utils import GLOBAL_PROP\n\n_LOGGER = logging.getLogger(__name__)\n\nRE_NWK_KEY = re.compile(r'lumi send-nwk-key (0x.+?) {(.+?)}')\n\n\nclass Gateway3(Thread):\n pair_model = None\n pair_payload = None\n\n def __init__(self, host: str, token: str, config: dict, ble: bool = True,\n zha: bool = False):\n super().__init__(daemon=True)\n\n self.host = host\n self.zha = zha\n\n self.miio = Device(host, token)\n\n self.mqtt = Client()\n self.mqtt.on_connect = self.on_connect\n self.mqtt.on_disconnect = self.on_disconnect\n self.mqtt.on_message = self.on_message\n self.mqtt.connect_async(host)\n\n self.ble = GatewayBLE(self) if ble else None\n\n self.debug = config['debug'] if 'debug' in config else ''\n self.default_devices = config['devices']\n\n self.devices = {}\n self.updates = {}\n self.setups = {}\n\n @property\n def device(self):\n return self.devices['lumi.0']\n\n def add_update(self, did: str, handler):\n \"\"\"Add handler to device update event.\"\"\"\n self.updates.setdefault(did, []).append(handler)\n\n def add_setup(self, domain: str, handler):\n \"\"\"Add hass device setup funcion.\"\"\"\n self.setups[domain] = handler\n\n def run(self):\n \"\"\"Main loop\"\"\"\n while True:\n # if not telnet - enable it\n if not self._check_port(23) and not self._enable_telnet():\n time.sleep(30)\n continue\n\n devices = self._get_devices_v3()\n if devices:\n self.setup_devices(devices)\n break\n\n # start bluetooth read loop\n if self.ble:\n self.ble.start()\n\n while True:\n # if not telnet - enable it\n if not self._check_port(23) and not self._enable_telnet():\n time.sleep(30)\n continue\n\n if not self.zha:\n # if not mqtt - enable it\n if not self._mqtt_connect() and not self._enable_mqtt():\n time.sleep(60)\n continue\n\n self.mqtt.loop_forever()\n\n elif not self._check_port(8888) and not self._enable_zha():\n time.sleep(60)\n continue\n\n else:\n # ZHA works fine, check every 60 seconds\n time.sleep(60)\n\n def _check_port(self, port: int):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n return s.connect_ex((self.host, port)) == 0\n finally:\n s.close()\n\n def _mqtt_connect(self) -> bool:\n try:\n self.mqtt.reconnect()\n return True\n except:\n return False\n\n def _miio_connect(self) -> bool:\n try:\n 
self.miio.send_handshake()\n return True\n except:\n _LOGGER.debug(f\"{self.host} | Can't send handshake\")\n return False\n\n def _get_devices_v1(self) -> Optional[list]:\n \"\"\"Load devices via miio protocol.\"\"\"\n _LOGGER.debug(f\"{self.host} | Read devices\")\n try:\n devices = {}\n\n # endless loop protection\n for _ in range(16):\n # load only 8 device per part\n part = self.miio.send('get_device_list', retry_count=10)\n if len(part) == 0:\n return []\n\n for item in part:\n devices[item['num']] = {\n 'did': item['did'],\n 'mac': f\"0x{item['did'][5:]}\",\n 'model': item['model'],\n }\n\n if part[0]['total'] == len(devices):\n break\n\n devices = list(devices.values())\n for device in devices:\n desc = utils.get_device(device['model'])\n # skip unknown model\n if desc is None:\n continue\n # get xiaomi param names\n params = [p[1] for p in desc['params'] if p[1] is not None]\n # skip if don't have retain params\n if not params:\n continue\n # load param values\n values = self.miio.send('get_device_prop',\n [device['did']] + params)\n # get hass param names\n params = [p[2] for p in desc['params'] if p[1] is not None]\n\n data = dict(zip(params, values))\n # fix some param values\n for k, v in data.items():\n if k in ('temperature', 'humidity'):\n data[k] = v / 100.0\n elif v in ('on', 'open'):\n data[k] = 1\n elif v in ('off', 'close'):\n data[k] = 0\n\n device['init'] = data\n\n device = self.miio.info()\n devices.append({\n 'did': 'lumi.0',\n 'mac': device.mac_address, # wifi mac!!!\n 'model': device.model\n })\n\n return devices\n\n except Exception as e:\n _LOGGER.exception(f\"{self.host} | Get devices: {e}\")\n return None\n\n def _get_devices_v2(self) -> Optional[list]:\n \"\"\"Load device list via Telnet.\n\n Device desc example:\n mac: '0x158d0002c81234'\n shortId: '0x0691'\n manuCode: '0x115f'\n model: 'lumi.sensor_ht'\n did: 'lumi.158d0002c81234'\n devType: 0\n appVer: 2\n hardVer: 0\n devID: 770\n status: 0\n model_ver: 2\n \"\"\"\n _LOGGER.debug(f\"{self.host} | Read devices\")\n try:\n telnet = Telnet(self.host)\n telnet.read_until(b\"login: \")\n telnet.write(b\"admin\\r\\n\")\n telnet.read_until(b'\\r\\n# ') # skip greeting\n\n telnet.write(b\"cat /data/zigbee/coordinator.info\\r\\n\")\n telnet.read_until(b'\\r\\n') # skip command\n raw = telnet.read_until(b'# ')\n device = json.loads(raw[:-2])\n device.update({\n 'did': 'lumi.0',\n 'model': 'lumi.gateway.mgl03',\n 'host': self.host\n })\n\n devices = [device]\n\n telnet.write(b\"cat /data/zigbee/device.info\\r\\n\")\n telnet.read_until(b'\\r\\n') # skip command\n raw = telnet.read_until(b'# ')\n raw = json.loads(raw[:-2])\n devices += raw['devInfo']\n telnet.close()\n\n return devices\n except Exception as e:\n _LOGGER.exception(f\"Can't read devices: {e}\")\n return None\n\n def _get_devices_v3(self):\n \"\"\"Load device list via Telnet.\"\"\"\n _LOGGER.debug(f\"{self.host} | Read devices\")\n try:\n telnet = Telnet(self.host, timeout=5)\n telnet.read_until(b\"login: \")\n telnet.write(b\"admin\\r\\n\")\n telnet.read_until(b'\\r\\n# ') # skip greeting\n\n # read coordinator info\n telnet.write(b\"cat /data/zigbee/coordinator.info\\r\\n\")\n telnet.read_until(b'\\r\\n') # skip command\n raw = telnet.read_until(b'# ')\n\n device = json.loads(raw[:-2])\n devices = [{\n 'did': 'lumi.0',\n 'model': 'lumi.gateway.mgl03',\n 'mac': device['mac'],\n 'type': 'gateway'\n }]\n\n if self.zha:\n return devices\n\n # https://github.com/AlexxIT/XiaomiGateway3/issues/14\n # fw 1.4.6_0012 and below have one zigbee_gw.db file\n # fw 
1.4.6_0030 have many json files in this folder\n telnet.write(b\"cat /data/zigbee_gw/* | base64\\r\\n\")\n telnet.read_until(b'\\r\\n') # skip command\n raw = telnet.read_until(b'# ')\n raw = base64.b64decode(raw)\n if raw.startswith(b'unqlite'):\n db = Unqlite(raw)\n data = db.read_all()\n else:\n raw = re.sub(br'}\\s+{', b',', raw)\n data = json.loads(raw)\n\n # data = {} or data = {'dev_list': 'null'}\n dev_list = json.loads(data.get('dev_list', 'null')) or []\n _LOGGER.debug(f\"{self.host} | Load {len(dev_list)} zigbee devices\")\n\n for did in dev_list:\n model = data[did + '.model']\n desc = utils.get_device(model)\n\n # skip unknown model\n if desc is None:\n _LOGGER.debug(f\"{did} has an unsupported modell: {model}\")\n continue\n\n retain = json.loads(data[did + '.prop'])['props']\n _LOGGER.debug(f\"{self.host} | {did} {model} retain: {retain}\")\n\n params = {\n p[2]: retain.get(p[1])\n for p in desc['params']\n if p[1] is not None\n }\n\n device = {\n 'did': did,\n 'mac': '0x' + data[did + '.mac'],\n 'model': data[did + '.model'],\n 'type': 'zigbee',\n 'zb_ver': data[did + '.version'],\n 'init': utils.fix_xiaomi_props(params),\n 'online': retain.get('alive', 1) == 1\n }\n devices.append(device)\n\n return devices\n\n except (ConnectionRefusedError, socket.timeout):\n return None\n\n except Exception as e:\n _LOGGER.debug(f\"Can't read devices: {e}\")\n return None\n\n def _enable_telnet(self):\n _LOGGER.debug(f\"{self.host} | Try enable telnet\")\n try:\n resp = self.miio.send(\"enable_telnet_service\")\n return resp[0] == 'ok'\n except Exception as e:\n _LOGGER.debug(f\"Can't enable telnet: {e}\")\n return False\n\n def _enable_mqtt(self):\n _LOGGER.debug(f\"{self.host} | Try run public MQTT\")\n try:\n telnet = Telnet(self.host)\n telnet.read_until(b\"login: \")\n telnet.write(b\"admin\\r\\n\")\n telnet.read_until(b\"\\r\\n# \") # skip greeting\n\n # enable public mqtt\n telnet.write(b\"killall mosquitto\\r\\n\")\n telnet.read_until(b\"\\r\\n\") # skip command\n time.sleep(.5) # it's important to wait\n telnet.write(b\"mosquitto -d\\r\\n\")\n telnet.read_until(b\"\\r\\n\") # skip command\n time.sleep(.5) # it's important to wait\n\n # fix CPU 90% full time bug\n telnet.write(b\"killall zigbee_gw\\r\\n\")\n telnet.read_until(b\"\\r\\n\") # skip command\n time.sleep(.5) # it's important to wait\n\n telnet.close()\n return True\n except Exception as e:\n _LOGGER.debug(f\"Can't run MQTT: {e}\")\n return False\n\n def _enable_zha(self):\n _LOGGER.debug(f\"{self.host} | Try enable ZHA\")\n try:\n check_socat = \\\n \"(md5sum /data/socat | grep 92b77e1a93c4f4377b4b751a5390d979)\"\n download_socat = \\\n \"(curl -o /data/socat http://pkg.musl.cc/socat/\" \\\n \"mipsel-linux-musln32/bin/socat && chmod +x /data/socat)\"\n run_socat = \"/data/socat tcp-l:8888,reuseaddr,fork /dev/ttyS2\"\n\n telnet = Telnet(self.host, timeout=5)\n telnet.read_until(b\"login: \")\n telnet.write(b\"admin\\r\\n\")\n telnet.read_until(b\"\\r\\n# \") # skip greeting\n\n # download socat and check md5\n telnet.write(f\"{check_socat} || {download_socat}\\r\\n\".encode())\n raw = telnet.read_until(b\"\\r\\n# \")\n if b\"Received\" in raw:\n _LOGGER.debug(f\"{self.host} | Downloading socat\")\n\n telnet.write(f\"{check_socat} && {run_socat} &\\r\\n\".encode())\n telnet.read_until(b\"\\r\\n# \")\n\n telnet.write(\n b\"killall daemon_app.sh; killall Lumi_Z3GatewayHost_MQTT\\r\\n\")\n telnet.read_until(b\"\\r\\n# \")\n\n telnet.close()\n return True\n\n except Exception as e:\n _LOGGER.debug(f\"Can't enable ZHA: 
{e}\")\n return False\n\n def on_connect(self, client, userdata, flags, rc):\n _LOGGER.debug(f\"{self.host} | MQTT connected\")\n self.mqtt.subscribe('#')\n\n self.process_gw_message({'online': True})\n\n def on_disconnect(self, client, userdata, rc):\n _LOGGER.debug(f\"{self.host} | MQTT disconnected\")\n # force end mqtt.loop_forever()\n self.mqtt.disconnect()\n\n self.process_gw_message({'online': False})\n\n def on_message(self, client: Client, userdata, msg: MQTTMessage):\n if 'mqtt' in self.debug:\n _LOGGER.debug(f\"[MQ] {msg.topic} {msg.payload.decode()}\")\n\n if msg.topic == 'zigbee/send':\n payload = json.loads(msg.payload)\n self.process_message(payload)\n elif msg.topic.endswith('/heartbeat'):\n payload = json.loads(msg.payload)\n self.process_gw_message(payload)\n elif self.pair_model and msg.topic.endswith('/commands'):\n self.process_pair(msg.payload)\n\n def setup_devices(self, devices: list):\n \"\"\"Add devices to hass.\"\"\"\n for device in devices:\n desc = utils.get_device(device['model'])\n if not desc:\n _LOGGER.debug(f\"Unsupported model: {device}\")\n continue\n\n _LOGGER.debug(f\"{self.host} | Setup Zigbee device {device}\")\n\n device.update(desc)\n\n # update params from config\n default_config = self.default_devices.get(device['mac']) or \\\n self.default_devices.get(device['did'])\n if default_config:\n device.update(default_config)\n\n self.devices[device['did']] = device\n\n for param in device['params']:\n domain = param[3]\n if not domain:\n continue\n\n # wait domain init\n while domain not in self.setups:\n time.sleep(1)\n\n attr = param[2]\n self.setups[domain](self, device, attr)\n\n def setup_mesh_devices(self, devices: list):\n for device in devices:\n desc = bluetooth.get_device(device['model'], 'Mesh')\n device.update(desc)\n\n _LOGGER.debug(f\"{self.host} | Setup Mesh device {device}\")\n\n # update params from config\n default_config = self.default_devices.get(device['did'])\n if default_config:\n device.update(default_config)\n\n device['online'] = False\n\n self.devices[device['did']] = device\n\n # wait domain init\n while 'light' not in self.setups:\n time.sleep(1)\n\n self.setups['light'](self, device, 'light')\n\n def process_message(self, data: dict):\n if data['cmd'] == 'heartbeat':\n # don't know if only one item\n assert len(data['params']) == 1, data\n\n data = data['params'][0]\n pkey = 'res_list'\n elif data['cmd'] == 'report':\n pkey = 'params' if 'params' in data else 'mi_spec'\n elif data['cmd'] in ('write_rsp', 'read_rsp'):\n pkey = 'results'\n else:\n _LOGGER.warning(f\"Unsupported cmd: {data}\")\n return\n\n did = data['did']\n\n # skip without callback\n if did not in self.updates:\n return\n\n device = self.devices[did]\n payload = {}\n\n # convert codes to names\n for param in data[pkey]:\n if param.get('error_code', 0) != 0:\n continue\n\n prop = param['res_name'] if 'res_name' in param else \\\n f\"{param['siid']}.{param['piid']}\"\n\n if prop in GLOBAL_PROP:\n prop = GLOBAL_PROP[prop]\n else:\n prop = next((p[2] for p in device['params']\n if p[0] == prop), prop)\n\n if prop in ('temperature', 'humidity', 'pressure'):\n payload[prop] = param['value'] / 100.0\n elif prop == 'battery' and param['value'] > 1000:\n # xiaomi light sensor\n payload[prop] = round((min(param['value'], 3200) - 2500) / 7)\n elif prop == 'alive':\n # {'res_name':'8.0.2102','value':{'status':'online','time':0}}\n device['online'] = (param['value']['status'] == 'online')\n elif prop == 'angle':\n # xiaomi cube 100 points = 360 degrees\n payload[prop] 
= param['value'] * 4\n elif prop == 'duration':\n # xiaomi cube\n payload[prop] = param['value'] / 1000.0\n elif prop in ('consumption', 'power'):\n payload[prop] = round(param['value'], 2)\n else:\n payload[prop] = param['value']\n\n _LOGGER.debug(f\"{self.host} | {device['did']} {device['model']} <= \"\n f\"{payload}\")\n\n for handler in self.updates[did]:\n handler(payload)\n\n if 'added_device' in payload:\n # {'did': 'lumi.fff', 'mac': 'fff', 'model': 'lumi.sen_ill.mgl01',\n # 'version': '21', 'zb_ver': '3.0'}\n device = payload['added_device']\n device['mac'] = '0x' + device['mac']\n device['type'] = 'zigbee'\n device['init'] = payload\n self.setup_devices([device])\n\n def process_gw_message(self, payload: json):\n _LOGGER.debug(f\"{self.host} | gateway <= {payload}\")\n\n if 'lumi.0' not in self.updates:\n return\n\n if 'networkUp' in payload:\n payload = {\n 'network_pan_id': payload['networkPanId'],\n 'radio_tx_power': payload['radioTxPower'],\n 'radio_channel': payload['radioChannel'],\n }\n elif 'online' in payload:\n self.device['online'] = payload['online']\n\n for handler in self.updates['lumi.0']:\n handler(payload)\n\n def process_pair(self, raw: bytes):\n # get shortID and eui64 of paired device\n if b'lumi send-nwk-key' in raw:\n # create model response\n payload = f\"0x18010105000042{len(self.pair_model):02x}\" \\\n f\"{self.pair_model.encode().hex()}\"\n m = RE_NWK_KEY.search(raw.decode())\n self.pair_payload = json.dumps({\n 'sourceAddress': m[1],\n 'eui64': '0x' + m[2],\n 'profileId': '0x0104',\n 'clusterId': '0x0000',\n 'sourceEndpoint': '0x01',\n 'destinationEndpoint': '0x01',\n 'APSCounter': '0x01',\n 'APSPlayload': payload\n }, separators=(',', ':'))\n\n # send model response \"from device\"\n elif b'zdo active ' in raw:\n mac = self.device['mac'][2:].upper()\n self.mqtt.publish(f\"gw/{mac}/MessageReceived\", self.pair_payload)\n\n def process_ble_event(self, raw: Union[bytes, str]):\n data = json.loads(raw[10:])['params'] \\\n if isinstance(raw, bytes) else json.loads(raw)\n\n _LOGGER.debug(f\"{self.host} | Process BLE {data}\")\n\n did = data['dev']['did']\n if did not in self.devices:\n mac = data['dev']['mac'].replace(':', '').lower() \\\n if 'mac' in data['dev'] else \\\n 'ble_' + did.replace('blt.3.', '')\n self.devices[did] = device = {\n 'did': did, 'mac': mac, 'init': {}, 'type': 'bluetooth'}\n pdid = data['dev'].get('pdid')\n desc = bluetooth.get_device(pdid, 'BLE')\n device.update(desc)\n\n # update params from config\n default_config = self.default_devices.get(did)\n if default_config:\n device.update(default_config)\n\n else:\n device = self.devices[did]\n\n if isinstance(data['evt'], list):\n # check if only one\n assert len(data['evt']) == 1, data\n payload = bluetooth.parse_xiaomi_ble(data['evt'][0])\n elif isinstance(data['evt'], dict):\n payload = bluetooth.parse_xiaomi_ble(data['evt'])\n else:\n payload = None\n\n if payload is None:\n _LOGGER.debug(f\"Unsupported BLE {data}\")\n return\n\n # init entities if needed\n for k in payload.keys():\n if k in device['init']:\n continue\n\n device['init'][k] = payload[k]\n\n domain = bluetooth.get_ble_domain(k)\n if not domain:\n continue\n\n # wait domain init\n while domain not in self.setups:\n time.sleep(1)\n\n self.setups[domain](self, device, k)\n\n if did in self.updates:\n for handler in self.updates[did]:\n handler(payload)\n\n def process_mesh_data(self, raw: Union[bytes, list]):\n data = json.loads(raw[10:])['params'] \\\n if isinstance(raw, bytes) else raw\n\n _LOGGER.debug(f\"{self.host} 
| Process Mesh {data}\")\n\n data = bluetooth.parse_xiaomi_mesh(data)\n for did, payload in data.items():\n device = self.devices.get(did)\n if not device:\n _LOGGER.warning(\"Unknown mesh device, reboot Hass may helps\")\n return\n\n if did in self.updates:\n for handler in self.updates[did]:\n handler(payload)\n\n def send(self, device: dict, data: dict):\n # convert hass prop to lumi prop\n params = [{\n 'res_name': next(p[0] for p in device['params'] if p[2] == k),\n 'value': v\n } for k, v in data.items()]\n\n payload = {\n 'cmd': 'write',\n 'did': device['did'],\n 'params': params,\n }\n\n _LOGGER.debug(f\"{self.host} | {device['did']} {device['model']} => \"\n f\"{payload}\")\n\n payload = json.dumps(payload, separators=(',', ':')).encode()\n self.mqtt.publish('zigbee/recv', payload)\n\n def send_telnet(self, *args: str):\n try:\n telnet = Telnet(self.host, timeout=5)\n telnet.read_until(b\"login: \")\n telnet.write(b\"admin\\r\\n\")\n telnet.read_until(b\"\\r\\n# \") # skip greeting\n\n for command in args:\n telnet.write(command.encode() + b'\\r\\n')\n telnet.read_until(b\"\\r\\n\") # skip command\n\n telnet.close()\n\n except Exception as e:\n _LOGGER.exception(f\"Telnet command error: {e}\")\n\n def send_mqtt(self, cmd: str):\n if cmd == 'publishstate':\n mac = self.device['mac'][2:].upper()\n self.mqtt.publish(f\"gw/{mac}/publishstate\")\n\n def send_mesh(self, device: dict, data: dict):\n did = device['did']\n payload = bluetooth.pack_xiaomi_mesh(did, data)\n return self.miio.send('set_properties', payload)\n\n def get_device(self, mac: str) -> Optional[dict]:\n for device in self.devices.values():\n if device.get('mac') == mac:\n return device\n return None\n\n\nclass GatewayBLE(Thread):\n devices_loaded = False\n\n def __init__(self, gw: Gateway3):\n super().__init__(daemon=True)\n self.gw = gw\n\n def run(self):\n _LOGGER.debug(f\"{self.gw.host} | Start BLE \")\n while True:\n try:\n telnet = Telnet(self.gw.host, timeout=5)\n telnet.read_until(b\"login: \")\n telnet.write(b\"admin\\r\\n\")\n telnet.read_until(b\"\\r\\n# \") # skip greeting\n\n if not self.devices_loaded:\n self._get_devices(telnet)\n self.devices_loaded = True\n\n telnet.write(b\"killall silabs_ncp_bt; \"\n b\"silabs_ncp_bt /dev/ttyS1 1\\r\\n\")\n telnet.read_until(b\"\\r\\n\") # skip command\n\n while True:\n raw = telnet.read_until(b\"\\r\\n\")\n\n if 'bluetooth' in self.gw.debug:\n _LOGGER.debug(f\"[BT] {raw}\")\n\n if b'_async.ble_event' in raw:\n self.gw.process_ble_event(raw)\n elif b'properties_changed' in raw:\n self.gw.process_mesh_data(raw)\n\n except (ConnectionRefusedError, ConnectionResetError, EOFError,\n socket.timeout):\n pass\n except Exception as e:\n _LOGGER.exception(f\"Bluetooth loop error: {e}\")\n\n time.sleep(30)\n\n def _get_devices(self, telnet: Telnet):\n # read bluetooth db\n telnet.write(b\"cat /data/miio/mible_local.db | base64\\r\\n\")\n telnet.read_until(b'\\r\\n') # skip command\n raw = telnet.read_until(b'# ')\n raw = base64.b64decode(raw)\n\n db = SQLite(raw)\n tables = db.read_page(0)\n device_page = next(table[3] - 1 for table in tables\n if table[1] == 'mesh_device')\n rows = db.read_page(device_page)\n if not rows:\n return\n\n devices = [{\n 'did': row[0],\n 'mac': row[1].replace(':', ''),\n 'model': row[2],\n 'type': 'bluetooth'\n } for row in rows]\n self.gw.setup_mesh_devices(devices)\n\n\ndef is_gw3(host: str, token: str) -> Optional[str]:\n try:\n device = Device(host, token)\n info = device.info()\n if info.model != 'lumi.gateway.mgl03':\n raise 
Exception(f\"Wrong device model: {info.model}\")\n except Exception as e:\n return str(e)\n\n return None\n", "sub_path": "custom_components/xiaomi_gateway3/gateway3.py", "file_name": "gateway3.py", "file_ext": "py", "file_size_in_byte": 26209, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 19, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 22, "usage_type": "name"}, {"api_name": "miio_fix.Device", "line_number": 33, "usage_type": "call"}, {"api_name": "paho.mqtt.client.Client", "line_number": 35, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 67, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 88, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 94, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 99, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 102, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 102, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 102, "usage_type": "attribute"}, {"api_name": "utils.get_device", "line_number": 148, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 123, "usage_type": "name"}, {"api_name": "telnetlib.Telnet", "line_number": 206, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 214, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 226, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 188, "usage_type": "name"}, {"api_name": "telnetlib.Telnet", "line_number": 239, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 249, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 266, "usage_type": "call"}, {"api_name": "unqlite.Unqlite", "line_number": 268, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 271, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 272, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 275, "usage_type": "call"}, {"api_name": "utils.get_device", "line_number": 280, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 287, "usage_type": "call"}, {"api_name": "utils.fix_xiaomi_props", "line_number": 302, "usage_type": "call"}, {"api_name": "socket.timeout", "line_number": 309, "usage_type": "attribute"}, {"api_name": "telnetlib.Telnet", "line_number": 328, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 336, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 339, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 344, "usage_type": "call"}, {"api_name": "telnetlib.Telnet", "line_number": 362, "usage_type": "call"}, {"api_name": "paho.mqtt.client.Client", "line_number": 400, "usage_type": "name"}, {"api_name": "paho.mqtt.client.MQTTMessage", "line_number": 400, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 405, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 408, "usage_type": "call"}, {"api_name": "utils.get_device", "line_number": 416, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 440, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 463, "usage_type": "call"}, {"api_name": "utils.GLOBAL_PROP", "line_number": 499, "usage_type": "name"}, {"api_name": 
"utils.GLOBAL_PROP", "line_number": 500, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 564, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 580, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 581, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 582, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 631, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 639, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 640, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 672, "usage_type": "call"}, {"api_name": "telnetlib.Telnet", "line_number": 677, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 701, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 708, "usage_type": "name"}, {"api_name": "telnetlib.Telnet", "line_number": 719, "usage_type": "call"}, {"api_name": "socket.timeout", "line_number": 744, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 749, "usage_type": "call"}, {"api_name": "telnetlib.Telnet", "line_number": 751, "usage_type": "name"}, {"api_name": "base64.b64decode", "line_number": 756, "usage_type": "call"}, {"api_name": "unqlite.SQLite", "line_number": 758, "usage_type": "call"}, {"api_name": "miio_fix.Device", "line_number": 777, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 775, "usage_type": "name"}]} +{"seq_id": "527418575", "text": "import numpy as np\nimport pandas as pd\nimport time\nimport os\nimport torch\nfrom torch import autograd\nimport torchvision\nfrom torchvision import transforms\nimport torchvision.datasets as dsets\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nfrom torchvision.utils import save_image\nfrom dcgan_network import Generator, Discriminator\nimport pdb\n\n\n# parameters\nimage_size = 64\nbatch_size = 128\ndata_dir = \"generative-dog-images/data/\"\noutput_dir = \"output/\"\n\nG_input_dim = 100\nG_output_dim = 3\nG_num_filters = [1024, 512, 256, 128]\nD_input_dim = 3\nD_output_dim = 1\nD_num_filters = [128, 256, 512, 1024]\n\nlearning_rate = 1e-4\nG_beta_1, G_beta_2 = 0.5, 0.999\nD_beta_1, D_beta_2 = 0.5, 0.999\nlambda_gp = 10\nuse_Adam = True\ncuda_ = True if torch.cuda.is_available() else False\nclip_value = 0.01\nn_critic = 5\nnum_epochs = 800\nsample_interval = 100\n\nTensor = torch.cuda.FloatTensor if cuda_ else torch.FloatTensor\n\n\n# write results\ndef write_out_result(generator, noise, epoch_nums, iteration_nums,\n save=False, save_dir='dogs_results/', show=False, fig_size=(5, 5)):\n generator.eval()\n noise = Variable(noise.cuda(), volatile=True)\n # pdb.set_trace()\n gen_img = denormalization(generator(noise))\n generator.train()\n\n n_rows = np.sqrt(noise.size()[0]).astype(np.int32)\n n_cols = (noise.size()[0]//n_rows).astype(np.int32)\n fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=fig_size)\n for ax, img in zip(axes.flatten(), gen_img):\n ax.axis('off')\n ax.set_adjustable('box-forced')\n # intensity rescale\n img = (((img - img.min()) * 255) / (img.max() - img.min())).cpu().data.numpy().transpose(1, 2, 0).astype(np.uint8)\n ax.imshow(img, cmap=None, aspect='equal')\n plt.subplots_adjust(wspace=0.0, hspace=0.0)\n title = 'Epoch {0}'.format(epoch_nums + 1)\n fig.text(0.5, 0.04, title, ha='center')\n\n # save figure\n if save:\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n save_filename = save_dir + 'dogs_wgangp_epoch_{:d}_iteration_{:d}'.format((epoch_nums+1),\n 
(iteration_nums+1)) + '.png'\n plt.savefig(save_filename)\n\n if show:\n plt.show()\n else:\n plt.close()\n\n\ndef denormalization(in_img):\n out_img = (in_img+1.0)/2.0\n return out_img.clamp(min=0.0, max=1.0)\n\n\n# Calculates the gradient penalty loss for wgan-gp\ndef compute_gradient_penalty(D, real_samples, fake_samples):\n # random weights term for interpolation between real and fake samples\n alpha = Tensor(np.random.random((real_samples.size(0), 1, 1, 1)))\n # get random interpolation between real and fake samples\n interpolations = (alpha*real_samples + (1-alpha)*fake_samples)\n interpolations.requires_grad = True\n # pdb.set_trace()\n D_interpolations = D(interpolations)\n D_interpolations = D_interpolations.view(-1, 1)\n fake = Variable(Tensor(real_samples.shape[0], 1).fill_(1.0), requires_grad=False)\n if cuda_:\n fake.cuda()\n gradients = autograd.grad(outputs=D_interpolations, inputs=interpolations, grad_outputs=fake,\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n else:\n gradients = autograd.grad(outputs=D_interpolations, inputs=interpolations, grad_outputs=fake,\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n gradient_penalty = ((gradients.view(gradients.size()[0], -1).norm(2, 1) - 1) ** 2).mean()\n return gradient_penalty\n\n\n# dog data set\ntransform = transforms.Compose([transforms.Resize(64),\n transforms.CenterCrop(64),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])\n\ndogs_data = dsets.ImageFolder(root=data_dir,\n transform=transform)\ndata_loader = torch.utils.data.DataLoader(dataset=dogs_data,\n batch_size=batch_size,\n shuffle=True)\n# pdb.set_trace()\n# Models\nG = Generator(G_input_dim, G_num_filters, G_output_dim)\nG.weights_init(mean=0.0, std=0.02)\nD = Discriminator(D_input_dim, D_num_filters, D_output_dim)\nD.weights_init(mean=0.0, std=0.02)\nif cuda_:\n G.cuda()\n D.cuda()\n\n# optimizers\nif use_Adam:\n G_optimizer = torch.optim.Adam(G.parameters(), lr=learning_rate, betas=(G_beta_1, G_beta_2))\n D_optimizer = torch.optim.Adam(D.parameters(), lr=learning_rate, betas=(D_beta_1, D_beta_2))\nelse:\n G_optimizer = torch.optim.RMSprop(G.parameters(), lr=learning_rate)\n D_optimizer = torch.optim.RMSprop(D.parameters(), lr=learning_rate)\n\n# training process\nnum_rows, num_cols = 10, 10\nnum_test_samples = num_cols*num_rows\nfixed_noise = torch.randn(num_test_samples, G_input_dim).view(-1, G_input_dim, 1, 1)\n\nfor epoch in range(num_epochs):\n G_losses, D_losses = [], []\n G.train()\n n_iter = 0\n for batch_ndx, (real_images, _) in enumerate(data_loader):\n print(batch_ndx)\n # configure input\n mini_batch = real_images.size()[0]\n\n # sample noise as the generator input\n z_ = torch.randn(mini_batch, G_input_dim).view(-1, G_input_dim, 1, 1)\n if cuda_:\n real_images = real_images.cuda()\n z_ = z_.cuda()\n z_ = Variable(z_)\n D_optimizer.zero_grad()\n\n D_real = D(real_images)\n D_real_loss = -torch.mean(D_real)\n\n fake_images = G(z_).squeeze().detach()\n D_fake = D(fake_images)\n D_fake_loss = torch.mean(D_fake)\n\n # gradient penalty\n gradient_penalty = compute_gradient_penalty(D=D, real_samples=real_images, fake_samples=fake_images)\n D_loss = D_real_loss + D_fake_loss + lambda_gp*gradient_penalty\n D_loss.backward()\n D_optimizer.step()\n\n # train the generator every n_critic iterations\n if (batch_ndx+1) % n_critic == 0:\n G_optimizer.zero_grad()\n gen_imgs = G(z_)\n G_loss = -torch.mean(D(gen_imgs))\n G_loss.backward()\n G_optimizer.step()\n G_losses.append(G_loss.item())\n 
D_losses.append(D_loss.item())\n\n if (batch_ndx + 1) % sample_interval == 0:\n print(\"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\"\n % (epoch + 1, num_epochs, batch_ndx + 1 % len(data_loader), len(data_loader), D_loss.item(),\n G_loss.item()))\n if not os.path.exists(\"images/\"):\n os.makedirs(\"images/\")\n save_image(gen_imgs.data[:sample_interval], \"images/%d_%d.png\" % (epoch + 1, n_iter),\n nrow=np.int8(np.sqrt(sample_interval)), normalize=True)\n # pdb.set_trace()\n write_out_result(generator=G, noise=fixed_noise, epoch_nums=epoch, iteration_nums=n_iter,\n save=True, save_dir=output_dir, show=False, fig_size=(num_rows, num_cols))\n n_iter += 1\n\n\nif not os.path.exists('output_images/'):\n os.mkdir('output_images/')\nimage_batch_size = 200\nnum_images = 10000\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\npdb.set_trace()\nfor i_batch in range(0, num_images, image_batch_size):\n gen_z = torch.randn(image_batch_size, G_input_dim, device=device).view(-1, G_input_dim, 1, 1)\n if cuda_:\n gen_z.cuda()\n gen_z = Variable(gen_z)\n gen_images = G(gen_z)\n images = gen_images.to(\"cpu\").clone().detach()\n images = images.numpy()\n for i_image in range(gen_images.size(0)):\n save_image(gen_images[i_image, :, :, :], os.path.join('output_images/', f'image_{i_batch+i_image:05d}.png'))\n\n\nimport shutil\nshutil.make_archive('images', 'zip', 'output_images/')\n\n\n", "sub_path": "Generative-Dog-Images/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7846, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "torch.cuda.is_available", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.cuda", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 54, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 60, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 88, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", 
"line_number": 95, "usage_type": "call"}, {"api_name": "torch.autograd.grad", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 98, "usage_type": "name"}, {"api_name": "torch.autograd.grad", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 101, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 108, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 108, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 108, "usage_type": "call"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 109, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 109, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 110, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 110, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 111, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 111, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 113, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 113, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 115, "usage_type": "attribute"}, {"api_name": "dcgan_network.Generator", "line_number": 120, "usage_type": "call"}, {"api_name": "dcgan_network.Discriminator", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 130, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 131, "usage_type": "attribute"}, {"api_name": "torch.optim.RMSprop", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 133, "usage_type": "attribute"}, {"api_name": "torch.optim.RMSprop", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 134, "usage_type": "attribute"}, {"api_name": "torch.randn", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path", "line_number": 185, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 186, "usage_type": "call"}, {"api_name": "torchvision.utils.save_image", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.int8", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path", "line_number": 195, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 199, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 199, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 199, 
"usage_type": "attribute"}, {"api_name": "pdb.set_trace", "line_number": 200, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 202, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 205, "usage_type": "call"}, {"api_name": "torchvision.utils.save_image", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path", "line_number": 210, "usage_type": "attribute"}, {"api_name": "shutil.make_archive", "line_number": 214, "usage_type": "call"}]} +{"seq_id": "501353885", "text": "def main():\r\n from bioformers.utilize.Bert import BertSeqClassification\r\n from torch.utils.data import DataLoader\r\n from transformers import DataCollatorWithPadding\r\n from pytorch_lightning import Trainer\r\n from pytorch_lightning.loggers import WandbLogger\r\n import os\r\n import warnings\r\n from pytorch_lightning.callbacks import ModelCheckpoint\r\n\r\n from transformers import BertModel, BertTokenizer\r\n import torch\r\n\r\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\n os.environ[\"MKL_THREADING_LAYER\"] = \"GNU\"\r\n os.environ[\"TOKENIZERS_PARALLELISM\"] = \"true\"\r\n\r\n #######################################\r\n num_cpu = 16\r\n lr = '6.918e-12'\r\n wandb_name = f\"puffinCaller_classify_domain_1893_with8Functional-{lr}-mxepch24\"\r\n num_labels = 1893\r\n max_epch = 24\r\n gpus = '0, 1, 2, 3'\r\n\r\n data_folder = \"tensor_datasets\"\r\n strat_train_name = \"puffinCaller_with8Functional_balanced_strat_train.pt\"\r\n strat_val_name = \"puffinCaller_with8Functional_balanced_strat_val.pt\"\r\n\r\n model_folder = \"puffinCaller/ckpt\"\r\n save_checkpoint_name = \"puffinCaller_with8Functional_balanced_labels_maxepch24.ckpt\"\r\n\r\n ###-dont need to touch-###\r\n save_checkpoint_path = f\"/mnt/storage/grid/home/eric/hmm2bert/models/{model_folder}/{save_checkpoint_name}\"\r\n strat_train_path = f\"/mnt/storage/grid/home/eric/hmm2bert/data_prep/{data_folder}/{strat_train_name}\"\r\n strat_val_path = f\"/mnt/storage/grid/home/eric/hmm2bert/data_prep/{data_folder}/{strat_val_name}\"\r\n ###\r\n\r\n #######################################\r\n\r\n #load tokenizer and wandb logger\r\n\r\n wandb_logger = WandbLogger(name=wandb_name, project=\"hmm_reBERT\")\r\n tokenizer = BertTokenizer.from_pretrained(\"Rostlab/prot_bert\", do_lower_case=False)\r\n\r\n #load train and test tensors and instantiate pytorch lightning wrapper for the huggingface model with the base pretrained protbert model\r\n\r\n encoded_train = torch.load(strat_train_path)\r\n encoded_test = torch.load(strat_val_path)\r\n bsc = BertSeqClassification(pretrained_dir=\"Rostlab/prot_bert\", use_adafactor=True, num_labels=num_labels)\r\n\r\n #setup checkpoint callback\r\n\r\n checkpoint_callback = ModelCheckpoint(\r\n monitor='val_loss_epoch',\r\n dirpath=f'/mnt/storage/grid/home/eric/hmm2bert/models/{model_folder}',\r\n filename='puffinCaller_with8Functional_balanced_best_loss',\r\n save_top_k=3,\r\n mode='min'\r\n )\r\n\r\n #setup data collator, trainer, and dataloader for train and val dataset\r\n\r\n data_collator = DataCollatorWithPadding(tokenizer=tokenizer)\r\n trainer = Trainer(\r\n max_epochs=max_epch,\r\n gpus=gpus,\r\n auto_lr_find=False,\r\n logger=wandb_logger,\r\n accelerator=\"ddp\",\r\n callbacks=[checkpoint_callback]\r\n )\r\n warnings.filterwarnings(\"ignore\")\r\n\r\n train_dl = DataLoader(encoded_train, batch_size=4, num_workers=num_cpu, collate_fn=data_collator, shuffle=True)\r\n eval_dl = 
DataLoader(encoded_test, batch_size=4, num_workers=num_cpu, collate_fn=data_collator, shuffle=False)\r\n\r\n #train and save classifier as checkpoint\r\n\r\n trainer.fit(bsc, train_dataloader=train_dl, val_dataloaders=eval_dl)\r\n #trainer.save_checkpoint(save_checkpoint_path)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()", "sub_path": "scripts/train_classify_domain.py", "file_name": "train_classify_domain.py", "file_ext": "py", "file_size_in_byte": 3293, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pytorch_lightning.loggers.WandbLogger", "line_number": 43, "usage_type": "call"}, {"api_name": "transformers.BertTokenizer.from_pretrained", "line_number": 44, "usage_type": "call"}, {"api_name": "transformers.BertTokenizer", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 49, "usage_type": "call"}, {"api_name": "bioformers.utilize.Bert.BertSeqClassification", "line_number": 50, "usage_type": "call"}, {"api_name": "pytorch_lightning.callbacks.ModelCheckpoint", "line_number": 54, "usage_type": "call"}, {"api_name": "transformers.DataCollatorWithPadding", "line_number": 64, "usage_type": "call"}, {"api_name": "pytorch_lightning.Trainer", "line_number": 65, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "455128340", "text": "from functools import partial\nimport numpy as np\nimport collections\nfrom scipy.stats import binom_test\nimport copy\nimport math\nimport os\n\nimport ray\nfrom ray import tune\nfrom ray.tune.logger import pretty_print\nfrom ray.rllib.agents.trainer import with_common_config\nfrom ray.rllib.execution.rollout_ops import ParallelRollouts, ConcatBatches\nfrom ray.rllib.execution.train_ops import TrainOneStep\nfrom ray.rllib.execution.metric_ops import StandardMetricsReporting\nfrom ray.rllib.agents.trainer_template import build_trainer\nfrom ray.rllib.optimizers import SyncSamplesOptimizer\nfrom ray.rllib.rollout import DefaultMapping\nfrom ray.rllib.agents.callbacks import DefaultCallbacks\nfrom ray.rllib.evaluation.worker_set import WorkerSet\nfrom ray.rllib.evaluation.metrics import collect_episodes\nfrom ray.rllib.env import MultiAgentEnv\nfrom ray.rllib.utils.space_utils import flatten_to_single_ndarray\nfrom ray.tune.registry import get_trainable_cls\n\nfrom RLC.capture_chess_rllib.policies import PolicyGradient, PolicyRandom\nfrom RLC.capture_chess_rllib.environment import CaptureChessEnv\nfrom RLC.utils import dotdict\n\n# Define a trainer using our own defined policy.\n# The trainer will run multiple workers and update the policy given by the ruling underneath.\n# (this is the standard PG as implemented in RLLib)\n# Evaluation of a policy can be done with the function rollout in rollout.py.\n# https://github.com/ray-project/ray/blob/master/rllib/examples/rollout_worker_custom_workflow.py\n\n\ndef execution_plan(workers, config):\n # enable with \"use_exec_api\": True.\n # Collects experiences in parallel from multiple 
RolloutWorker actors.\n rollouts = ParallelRollouts(workers, mode=\"bulk_sync\")\n\n # Combine experiences batches until we hit `train_batch_size` in size.\n # Then, train the policy on those experiences and update the workers.\n train_op = rollouts \\\n .combine(ConcatBatches(\n min_batch_size=config[\"train_batch_size\"])) \\\n .for_each(TrainOneStep(workers))\n\n # Add on the standard episode reward, etc. metrics reporting. This returns\n # a LocalIterator[metrics_dict] representing metrics for each train step.\n return StandardMetricsReporting(train_op, workers, config)\n\n\nclass MyCallbacks(DefaultCallbacks):\n def on_episode_end(self, worker, base_env,\n policies, episode,\n **kwargs):\n episode.custom_metrics[\"winner\"] = base_env.get_unwrapped()[\n 0].determine_winner()\n\n\n# Because we need to set the setting to True on default, we will override the parameters.\nDEFAULT_CONFIG = with_common_config({\n # No remote workers by default.\n \"num_workers\": 0,\n # Learning rate.\n \"lr\": 0.0004,\n # Use the execution plan API instead of policy optimizers.\n \"use_exec_api\": True,\n \"callbacks\": MyCallbacks,\n})\n\n# Define the trainer.\n# From the _setup() function in trainer.py, we can see how the env is setup.\n# The main function is _train() in trainer_template.py.\n# Here we can see how the execution_plan or other training is called.\nPGTrainer = build_trainer(\n name=\"PolicyGradientTrainer\",\n default_config=DEFAULT_CONFIG,\n default_policy=PolicyGradient,\n execution_plan=execution_plan,\n)\n\n\nclass InfoNumberRounds():\n def __init__(self, min_, max_, step):\n self.min = min_\n self.max = max_\n self.step = step\n\n\ndef self_play_workflow(config):\n \"\"\"\n Expects in config:\n checkpoint\n checkpoint to load from (None if new)\n trainer\n trainer to use\n model\n model to use in learning\n percentage_equal: float\n The maximal allowed percentage that equal opponents get game results. (see binomial test)\n lr_schedule: List of lr\n Learning rates to use. Will use first to last and update each time the model gets worse.\n training_rounds\n Rounds of training\n evaluation_rounds\n Rounds of evaluation\n\n 1. Generate a large batch of self-play games.\n 2. Train.\n 3. Test the updated bot against the previous version.\n 4. If the bot is measurably stronger, switch to this new version.\n 5. If the bot is about the same strength, generate more games and train again.\n 6. 
If the bot gets significantly weaker, adjust the optimizer settings and retrain.\n \"\"\"\n ##########################################\n # Set config of trainer and evaluators\n ##########################################\n check_dir = 'logs'\n log_file = 'logs/logs.txt'\n if os.path.exists(log_file):\n os.remove(log_file)\n\n if config.get(\"evaluation_num_episodes\", None) is None:\n config[\"evaluation_num_episodes\"] = 1\n trainer_fn = get_trainable_cls(config[\"trainer\"])\n lr_idx = 0\n\n def select_policy_train(agent_id):\n if agent_id == \"player1\":\n return np.random.choice([\"learning_white\", \"previous_white\", \"random\"], 1,\n p=[.6, .3, .1])[0]\n else:\n return np.random.choice([\"learning_black\", \"previous_black\", \"random\"], 1,\n p=[.6, .3, .1])[0]\n\n def select_policy_eval(learning_player, agent_id):\n if learning_player == \"player1\":\n if agent_id == \"player1\":\n return \"learning_white\"\n else:\n return \"previous_black\"\n else:\n if agent_id == \"player2\":\n return \"learning_black\"\n else:\n return \"previous_white\"\n\n trainer_config = copy.deepcopy(config)\n # remove self-play parameters\n trainer_config.pop(\"trainer\")\n trainer_config.pop(\"percentage_equal\")\n trainer_config.pop(\"model\")\n trainer_config.pop(\"training_rounds\")\n trainer_config.pop(\"evaluation_rounds\")\n trainer_config.pop(\"checkpoint\", None)\n trainer_config.pop(\"lr_schedule\", None)\n trainer_config.pop(\"evaluation_interval\", None)\n\n trainer_config[\"lr\"] = config[\"lr_schedule\"][lr_idx]\n trainer_config[\"multiagent\"] = {\n \"policies_to_train\": [\"learning_white\", \"learning_black\"],\n \"policies\": {\n \"random\": (PolicyRandom, config[\"env\"].observation_space, config[\"env\"].action_space,\n {}),\n \"learning_white\": (None, config[\"env\"].observation_space, config[\"env\"].action_space,\n {\n \"model\": config[\"model\"]\n }),\n \"learning_black\": (None, config[\"env\"].observation_space, config[\"env\"].action_space,\n {\n \"model\": config[\"model\"]\n }),\n \"previous_white\": (None, config[\"env\"].observation_space, config[\"env\"].action_space,\n {\n \"model\": config[\"model\"]\n }),\n \"previous_black\": (None, config[\"env\"].observation_space, config[\"env\"].action_space,\n {\n \"model\": config[\"model\"]\n }),\n },\n \"policy_mapping_fn\": select_policy_train,\n }\n trainer_config[\"train_batch_size\"] = 2 * config[\"train_batch_size\"]\n\n eval_config_player1 = copy.deepcopy(trainer_config)\n eval_config_player1[\"multiagent\"][\"policy_mapping_fn\"] = partial(\n select_policy_eval, \"player1\")\n eval_config_player1[\"multiagent\"][\"policies_to_train\"] = []\n\n eval_config_player2 = copy.deepcopy(trainer_config)\n eval_config_player2[\"multiagent\"][\"policy_mapping_fn\"] = partial(\n select_policy_eval, \"player2\")\n eval_config_player2[\"multiagent\"][\"policies_to_train\"] = []\n\n ##########################################\n # Run train / evaluation rounds\n ##########################################\n\n def update_for_next_loop(total_rounds, rounds, reset=False):\n done = False\n if reset:\n next_num_rounds = rounds.min\n else:\n if (total_rounds >= rounds.max):\n done = True\n next_num_rounds = rounds.step\n\n return done, next_num_rounds\n\n ray.init()\n\n trainer = trainer_fn(\n env=trainer_config[\"env\"], config=trainer_config)\n evaluator_player1 = trainer_fn(\n env=eval_config_player1[\"env\"], config=eval_config_player1)\n evaluator_player2 = trainer_fn(\n env=eval_config_player1[\"env\"], config=eval_config_player2)\n\n 
total_rounds_training = 0\n done, training_rounds = update_for_next_loop(\n total_rounds_training, config[\"training_rounds\"], True)\n prev_it_state = config.get(\"checkpoint\", None)\n prev_state = prev_it_state\n while not done:\n ##########################################\n # Train\n ##########################################\n try:\n if prev_it_state is not None:\n trainer.restore(prev_it_state)\n for _ in range(training_rounds):\n trainer.train()\n state = trainer.save(check_dir)\n # trainer.stop()\n\n total_rounds_training += training_rounds\n except Exception:\n trainer.stop()\n with open(log_file, 'a') as f:\n f.write(\"Model failed, updating optimizer\\n\")\n lr_idx += 1\n if lr_idx < len(config[\"lr_schedule\"]):\n trainer_config[\"lr\"] = config[\"lr_schedule\"][lr_idx]\n trainer = trainer_fn(\n env=trainer_config[\"env\"], config=trainer_config)\n total_rounds_training = 0\n done, training_rounds = update_for_next_loop(\n total_rounds_training, config[\"training_rounds\"], True)\n prev_it_state = prev_state\n else:\n done = True\n continue # try again.\n\n ##########################################\n # Evaluate\n ##########################################\n try:\n total_eval_rounds = 0\n comparison_wrt_equal = 1\n eval_results1 = []\n eval_results2 = []\n # maximal evaluation rounds determined by training, does not make sense to evaluate more than training rounds.\n eval_info = InfoNumberRounds(config[\"evaluation_rounds\"].min, min(\n config[\"evaluation_rounds\"].max, total_rounds_training), config[\"evaluation_rounds\"].step)\n done_eval, eval_rounds = update_for_next_loop(\n total_eval_rounds, eval_info, True)\n while not done_eval:\n num_episodes = eval_rounds * config[\"evaluation_num_episodes\"]\n\n evaluator_player1.restore(state)\n eval_results1.extend(own_evaluation(\n evaluator_player1, eval_rounds))\n num_pos = sum(x == 1 for x in eval_results1)\n num_neg = sum(x == -1 for x in eval_results1)\n comparison_wrt_equal1 = binom_test(\n num_pos, num_pos + num_neg, 0.5)\n with open(log_file, 'a') as f:\n f.write(\n f'results1: trained agent wins: {num_pos} previous agent wins: {num_neg} remises: {sum(x == 0 for x in eval_results1)} \\n')\n f.write(\n f'chance result for equal opponents: {comparison_wrt_equal1} \\n')\n\n evaluator_player2.restore(state)\n eval_results2.extend(own_evaluation(\n evaluator_player2, eval_rounds))\n num_pos = sum(x == 1 for x in eval_results2)\n num_neg = sum(x == -1 for x in eval_results2)\n comparison_wrt_equal2 = binom_test(\n num_neg, num_pos + num_neg, 0.5)\n with open(log_file, 'a') as f:\n f.write(\n f'results2: trained agent wins: {num_neg} previous agent wins: {num_pos} remises: {sum(x == 0 for x in eval_results2)} \\n')\n f.write(\n f'chance result for equal opponents: {comparison_wrt_equal2} \\n')\n\n total_eval_rounds += eval_rounds\n\n done_eval, eval_rounds = update_for_next_loop(\n total_eval_rounds, eval_info)\n if config[\"percentage_equal\"] > comparison_wrt_equal1 or config[\"percentage_equal\"] > comparison_wrt_equal2:\n # one of players improved\n done_eval = True\n except Exception:\n with open(log_file, 'a') as f:\n f.write(\"Model failed, need to update optimizer\\n\")\n # trigger update optimizer\n comparison_wrt_equal1 = 0\n comparison_wrt_equal2 = 0\n eval_results1 = [-1]\n eval_results2 = [1]\n\n ##########################################\n # Update policy\n ##########################################\n\n if config[\"percentage_equal\"] > comparison_wrt_equal1 or config[\"percentage_equal\"] > 
comparison_wrt_equal2:\n # results differ enough\n if sum(x == 1 for x in eval_results1) > sum(x == -1 for x in eval_results1) and sum(x == -1 for x in eval_results2) > sum(x == 1 for x in eval_results2):\n with open(log_file, 'a') as f:\n f.write(\"Model improved\\n\")\n total_rounds_training = 0\n done, training_rounds = update_for_next_loop(\n total_rounds_training, config[\"training_rounds\"], True)\n # reupdate previous\n key_previous_val_learning_white = {}\n for (k, v), (k2, v2) in zip(trainer.get_policy(\"previous_white\").get_weights().items(),\n trainer.get_policy(\"learning_white\").get_weights().items()):\n key_previous_val_learning_white[k] = v2\n key_previous_val_learning_black = {}\n for (k, v), (k2, v2) in zip(trainer.get_policy(\"previous_black\").get_weights().items(),\n trainer.get_policy(\"learning_black\").get_weights().items()):\n key_previous_val_learning_black[k] = v2\n # set weights\n trainer.set_weights({\"previous_white\": key_previous_val_learning_white,\n \"previous_black\": key_previous_val_learning_black,\n # no change\n \"learning_white\": trainer.get_policy(\"learning_white\").get_weights(),\n \"learning_black\": trainer.get_policy(\"learning_black\").get_weights(),\n })\n if prev_state is not None:\n trainer.delete_checkpoint(prev_state)\n trainer.delete_checkpoint(state)\n\n prev_it_state = trainer.save(check_dir)\n prev_state = prev_it_state\n elif sum(x == 1 for x in eval_results1) < sum(x == -1 for x in eval_results1) and sum(x == -1 for x in eval_results2) < sum(x == 1 for x in eval_results2):\n with open(log_file, 'a') as f:\n f.write(\"Model got worse, updating optimizer\\n\")\n trainer.stop()\n lr_idx += 1\n if lr_idx < len(config[\"lr_schedule\"]):\n trainer_config[\"lr\"] = config[\"lr_schedule\"][lr_idx]\n trainer = trainer_fn(\n env=trainer_config[\"env\"], config=trainer_config)\n total_rounds_training = 0\n done, training_rounds = update_for_next_loop(\n total_rounds_training, config[\"training_rounds\"], True)\n prev_it_state = prev_state\n else:\n done = True\n else:\n with open(log_file, 'a') as f:\n f.write(\n \"One player improved one got worse, trying more learning iterations.\\n\")\n done, training_rounds = update_for_next_loop(\n total_rounds_training, config[\"training_rounds\"])\n prev_it_state = state\n else:\n with open(log_file, 'a') as f:\n f.write(\"Unable to evaluate, trying more learning iterations.\\n\")\n done, training_rounds = update_for_next_loop(\n total_rounds_training, config[\"training_rounds\"])\n prev_it_state = state\n\n trainer.restore(prev_it_state)\n trainer.save()\n print(\"Checkpoint and trainer saved at: \", trainer.logdir)\n with open(log_file, 'a') as f:\n f.write(f'Checkpoint and trainer saved at: {trainer.logdir} \\n')\n\n\ndef own_rollout(agent,\n num_episodes):\n results = []\n if hasattr(agent, \"workers\") and isinstance(agent.workers, WorkerSet):\n env = agent.workers.local_worker().env\n multiagent = isinstance(env, MultiAgentEnv)\n if agent.workers.local_worker().multiagent:\n policy_agent_mapping = agent.config[\"multiagent\"][\n \"policy_mapping_fn\"]\n\n policy_map = agent.workers.local_worker().policy_map\n state_init = {p: m.get_initial_state() for p, m in policy_map.items()}\n else:\n raise NotImplementedError(\"Multi-Agent only\")\n\n action_init = {\n p: flatten_to_single_ndarray(m.action_space.sample())\n for p, m in policy_map.items()\n }\n\n episodes = 0\n while episodes < num_episodes:\n mapping_cache = {} # in case policy_agent_mapping is stochastic\n obs = env.reset()\n prev_actions 
= DefaultMapping(\n lambda agent_id: action_init[mapping_cache[agent_id]])\n prev_rewards = collections.defaultdict(lambda: 0.)\n done = False\n while not done and (episodes < num_episodes):\n action_dict = {}\n for agent_id, a_obs in obs.items():\n if a_obs is not None:\n policy_id = mapping_cache.setdefault(\n agent_id, policy_agent_mapping(agent_id))\n\n a_action = agent.compute_action(\n a_obs,\n prev_action=prev_actions[agent_id],\n prev_reward=prev_rewards[agent_id],\n policy_id=policy_id)\n a_action = flatten_to_single_ndarray(a_action)\n action_dict[agent_id] = a_action\n prev_actions[agent_id] = a_action\n\n action = action_dict\n next_obs, reward, done, info = env.step(action)\n done = done[\"__all__\"]\n\n # update\n for agent_id, r in reward.items():\n prev_rewards[agent_id] = r\n obs = next_obs\n\n if done:\n episodes += 1\n # specific function for alternate game.\n results.append(env.determine_winner())\n\n return results\n\n\ndef own_evaluation(agent, num_rounds):\n results = []\n num_episodes = num_rounds * agent.config[\"evaluation_num_episodes\"]\n if agent.config[\"num_workers\"] == 0:\n for _ in range(num_episodes):\n agent.evaluation_workers.local_worker().sample()\n else:\n while len(results) < num_episodes:\n # Calling .sample() runs exactly one episode per worker due to how the\n # eval workers are configured.\n ray.get([\n w.sample.remote()\n for w in agent.workers.remote_workers()\n ])\n\n episodes, _ = collect_episodes(None,\n agent.workers.remote_workers(),\n [])\n\n for episode in episodes:\n for key, winner in episode.custom_metrics.copy().items():\n results.append(winner)\n\n return results[:num_episodes]\n", "sub_path": "RLC/capture_chess_rllib/game_internal.py", "file_name": "game_internal.py", "file_ext": "py", "file_size_in_byte": 19784, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "ray.rllib.execution.rollout_ops.ParallelRollouts", "line_number": 40, "usage_type": "call"}, {"api_name": "ray.rllib.execution.rollout_ops.ConcatBatches", "line_number": 45, "usage_type": "call"}, {"api_name": "ray.rllib.execution.train_ops.TrainOneStep", "line_number": 47, "usage_type": "call"}, {"api_name": "ray.rllib.execution.metric_ops.StandardMetricsReporting", "line_number": 51, "usage_type": "call"}, {"api_name": "ray.rllib.agents.callbacks.DefaultCallbacks", "line_number": 54, "usage_type": "name"}, {"api_name": "ray.rllib.agents.trainer.with_common_config", "line_number": 63, "usage_type": "call"}, {"api_name": "ray.rllib.agents.trainer_template.build_trainer", "line_number": 77, "usage_type": "call"}, {"api_name": "RLC.capture_chess_rllib.policies.PolicyGradient", "line_number": 80, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 123, "usage_type": "call"}, {"api_name": "ray.tune.registry.get_trainable_cls", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 132, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 135, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 150, "usage_type": "call"}, {"api_name": "RLC.capture_chess_rllib.policies.PolicyRandom", "line_number": 165, "usage_type": "name"}, 
{"api_name": "copy.deepcopy", "line_number": 188, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 189, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 193, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 194, "usage_type": "call"}, {"api_name": "ray.init", "line_number": 213, "usage_type": "call"}, {"api_name": "scipy.stats.binom_test", "line_number": 278, "usage_type": "call"}, {"api_name": "scipy.stats.binom_test", "line_number": 291, "usage_type": "call"}, {"api_name": "ray.rllib.evaluation.worker_set.WorkerSet", "line_number": 388, "usage_type": "argument"}, {"api_name": "ray.rllib.env.MultiAgentEnv", "line_number": 390, "usage_type": "argument"}, {"api_name": "ray.rllib.utils.space_utils.flatten_to_single_ndarray", "line_number": 401, "usage_type": "call"}, {"api_name": "ray.rllib.rollout.DefaultMapping", "line_number": 409, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 411, "usage_type": "call"}, {"api_name": "ray.rllib.utils.space_utils.flatten_to_single_ndarray", "line_number": 425, "usage_type": "call"}, {"api_name": "ray.get", "line_number": 456, "usage_type": "call"}, {"api_name": "ray.rllib.evaluation.metrics.collect_episodes", "line_number": 461, "usage_type": "call"}]} +{"seq_id": "615462007", "text": "from django.conf.urls import url, include\nfrom . import views\nimport debug_toolbar\n\nurlpatterns = [\n\turl(r'^$', views.MainPageAuth.as_view(), name='main'),\n\turl(r'^my_profile/(?P[0-9]+)/$', views.MyProfile.as_view(), name='my_profile'),\n\turl(r'^new', views.NewPublication.as_view(), name='new'),\n\turl(r'^publication/(?P[0-9]+)/$', views.FullPublication.as_view(), name='full'),\n\turl(r'^publication/(?P[0-9]+)/edit/$', views.EditPublication.as_view(), name='edit'),\n\turl(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),\n\turl(r'^delete/$', views.delete, name='delete'),\n\turl(r'^edit/(?P[0-9]+)/$', views.edit, name='edit_profile'),\n\turl(r'^search/$', views.search, name='search'),\n\turl(r'^rating/$', views.rating, name='rating'),\n\turl(r'^my_profile/(?P[0-9]+)/accept/(?P\\w+)/$', views.accept, name='accept'),\n\turl(r'^debug/', include(debug_toolbar.urls)),\n]", "sub_path": "multiblog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 17, "usage_type": "call"}, {"api_name": "debug_toolbar.urls", "line_number": 17, "usage_type": 
"attribute"}]} +{"seq_id": "495879048", "text": "from aiohttp.web import middleware\nfrom aiohttp_session import get_session\nfrom ioa.database import DatabaseContext\nfrom models import UserDAO\nfrom view.model.user import AnonymousUser, User\nfrom aiohttp.web import middleware\nimport time\nimport logging\n\n@middleware\nasync def caculate_execute_time_middleware(request, handler):\n start = time.time()\n request['request_began_at'] = time.time()\n request['now'] = lambda: time.time()\n response = await handler(request)\n request['executed_time'] = time.time() - start\n # logging.info(\"request duration %.4f\" % (time.time() - start, ))\n return response\n\n@middleware\nasync def user_middleware(request, handler):\n logging.info(\"executing middleware user\")\n session = request['session']\n\n if 'user_id' in session:\n user_id = session['user_id']\n async with DatabaseContext.default.engine.acquire() as connection:\n dao = UserDAO(connection)\n user_table = await dao.find(user_id)\n if not user_table:\n user = AnonymousUser()\n else:\n user = User()\n user.id = user_table['id']\n user.username = user_table['username']\n else:\n user = AnonymousUser()\n request['user'] = user\n print(2)\n # print(request['aiohttp_jinja2_context'])\n # request['aiohttp_jinja2_context'].update(user = user)\n request['aiohttp_jinja2_context'] = {\n 'user': user,\n 'request': request\n }\n response = await handler(request)\n return response\n\n@middleware\nasync def session_to_request_middleware(request, handler):\n session = await get_session(request)\n request['session'] = session\n \n return await handler(request)\n\ndef setup_middleware(app):\n app.middlewares.append(session_to_request_middleware)\n app.middlewares.append(user_middleware)\n # app.middlewares.append(caculate_execute_time_middleware)\n pass", "sub_path": "middlewares.py", "file_name": "middlewares.py", "file_ext": "py", "file_size_in_byte": 1929, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "time.time", "line_number": 12, "usage_type": "call"}, {"api_name": "time.time", "line_number": 13, "usage_type": "call"}, {"api_name": "time.time", "line_number": 14, "usage_type": "call"}, {"api_name": "time.time", "line_number": 16, "usage_type": "call"}, {"api_name": "aiohttp.web.middleware", "line_number": 10, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 22, "usage_type": "call"}, {"api_name": "ioa.database.DatabaseContext.default.engine.acquire", "line_number": 27, "usage_type": "call"}, {"api_name": "ioa.database.DatabaseContext.default", "line_number": 27, "usage_type": "attribute"}, {"api_name": "ioa.database.DatabaseContext", "line_number": 27, "usage_type": "name"}, {"api_name": "models.UserDAO", "line_number": 28, "usage_type": "call"}, {"api_name": "view.model.user.AnonymousUser", "line_number": 31, "usage_type": "call"}, {"api_name": "view.model.user.User", "line_number": 33, "usage_type": "call"}, {"api_name": "view.model.user.AnonymousUser", "line_number": 37, "usage_type": "call"}, {"api_name": "aiohttp.web.middleware", "line_number": 20, "usage_type": "name"}, {"api_name": "aiohttp_session.get_session", "line_number": 51, "usage_type": "call"}, {"api_name": "aiohttp.web.middleware", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "363558561", "text": "\"\"\"Offer time listening automation rules.\"\"\"\nimport logging\n\nimport voluptuous as vol\n\nfrom homeassistant.const import CONF_PLATFORM\nfrom homeassistant.core import 
callback\nfrom homeassistant.helpers import config_validation as cv\nfrom homeassistant.helpers.event import async_track_time_change\n\n# mypy: allow-untyped-defs, no-check-untyped-defs\n\nCONF_HOURS = \"hours\"\nCONF_MINUTES = \"minutes\"\nCONF_SECONDS = \"seconds\"\n\n_LOGGER = logging.getLogger(__name__)\n\nTRIGGER_SCHEMA = vol.All(\n vol.Schema(\n {\n vol.Required(CONF_PLATFORM): \"time_pattern\",\n CONF_HOURS: vol.Any(vol.Coerce(int), vol.Coerce(str)),\n CONF_MINUTES: vol.Any(vol.Coerce(int), vol.Coerce(str)),\n CONF_SECONDS: vol.Any(vol.Coerce(int), vol.Coerce(str)),\n }\n ),\n cv.has_at_least_one_key(CONF_HOURS, CONF_MINUTES, CONF_SECONDS),\n)\n\n\nasync def async_attach_trigger(hass, config, action, automation_info):\n \"\"\"Listen for state changes based on configuration.\"\"\"\n hours = config.get(CONF_HOURS)\n minutes = config.get(CONF_MINUTES)\n seconds = config.get(CONF_SECONDS)\n\n # If larger units are specified, default the smaller units to zero\n if minutes is None and hours is not None:\n minutes = 0\n if seconds is None and minutes is not None:\n seconds = 0\n\n @callback\n def time_automation_listener(now):\n \"\"\"Listen for time changes and calls action.\"\"\"\n hass.async_run_job(\n action, {\"trigger\": {\"platform\": \"time_pattern\", \"now\": now}}\n )\n\n return async_track_time_change(\n hass, time_automation_listener, hour=hours, minute=minutes, second=seconds\n )\n", "sub_path": "homeassistant/components/automation/time_pattern.py", "file_name": "time_pattern.py", "file_ext": "py", "file_size_in_byte": 1659, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "voluptuous.All", "line_number": 19, "usage_type": "call"}, {"api_name": "voluptuous.Schema", "line_number": 20, "usage_type": "call"}, {"api_name": "voluptuous.Required", "line_number": 22, "usage_type": "call"}, {"api_name": "homeassistant.const.CONF_PLATFORM", "line_number": 22, "usage_type": "argument"}, {"api_name": "voluptuous.Any", "line_number": 23, "usage_type": "call"}, {"api_name": "voluptuous.Coerce", "line_number": 23, "usage_type": "call"}, {"api_name": "voluptuous.Any", "line_number": 24, "usage_type": "call"}, {"api_name": "voluptuous.Coerce", "line_number": 24, "usage_type": "call"}, {"api_name": "voluptuous.Any", "line_number": 25, "usage_type": "call"}, {"api_name": "voluptuous.Coerce", "line_number": 25, "usage_type": "call"}, {"api_name": "homeassistant.helpers.config_validation.has_at_least_one_key", "line_number": 28, "usage_type": "call"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 28, "usage_type": "name"}, {"api_name": "homeassistant.core.callback", "line_number": 44, "usage_type": "name"}, {"api_name": "homeassistant.helpers.event.async_track_time_change", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "354715671", "text": "import os\nimport sys\n\nfrom telethon import TelegramClient\nfrom telethon.tl.types import InputMessagesFilterPhotos\nfrom telethon.tl.types import InputMessagesFilterPhotoVideo\nimport socks\nimport async_timeout\n\nproxy = (socks.SOCKS5, \"localhost\", 1080)\napi_id = 123456789\napi_hash = \"17cbxxxxxxxx6ec8d79ba5f1031ee\"\nclient = TelegramClient('anon', api_id, api_hash, proxy=proxy).start()\n\n\nasync def video():\n messages = await client.get_messages(url, None, filter=InputMessagesFilterPhotoVideo)\n for video_ in messages:\n filename = video_path + \"/\" + str(video_.id) 
+ \".mp4\"\n await client.download_media(video_, filename)\n\n\nasync def photo():\n messages = await client.get_messages(url, None, filter=InputMessagesFilterPhotos)\n for photos in messages:\n filename = img_path + \"/\" + str(photos.id) + \".jpg\"\n await client.download_media(photos, filename)\n client.disconnect()\n\n\nif sys.argv[1] == \"i\":\n url = input(\"输入TG频道的链接:\")\n img_path = \"./img\"\n os.mkdir(\"./img\")\n with client:\n client.loop.run_until_complete(photo())\nelif sys.argv[1] == \"v\":\n url = input(\"输入TG频道的链接:\")\n video_path = \"./video\"\n os.mkdir(\"./video\")\n with client:\n client.loop.run_until_complete(video())\n", "sub_path": "telegram_download.py", "file_name": "telegram_download.py", "file_ext": "py", "file_size_in_byte": 1280, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "socks.SOCKS5", "line_number": 10, "usage_type": "attribute"}, {"api_name": "telethon.TelegramClient", "line_number": 13, "usage_type": "call"}, {"api_name": "telethon.tl.types.InputMessagesFilterPhotoVideo", "line_number": 17, "usage_type": "name"}, {"api_name": "telethon.tl.types.InputMessagesFilterPhotos", "line_number": 24, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "3829020", "text": "import nltk\r\nfrom nltk.tokenize import RegexpTokenizer\r\n\r\ndef percentageOfwords(text):\r\n \r\n count=0\r\n \r\n sentences = nltk.sent_tokenize(text)\r\n no_of_sen = len(sentences)\r\n\r\n tokenizer = RegexpTokenizer(r'\\w+')\r\n words = tokenizer.tokenize(text)\r\n no_of_words = len(words)\r\n #print no_of_words\r\n \r\n for word in words:\r\n if len(word) >= 6:\r\n count+=1\r\n\r\n #print count\n if no_of_words != 0:\r\n return float(count*100)/no_of_words\n else:\n return 0.5;\r\n \r\n", "sub_path": "Software/Software/percentageOfWordsWithSixAndMoreLetters.py", "file_name": "percentageOfWordsWithSixAndMoreLetters.py", "file_ext": "py", "file_size_in_byte": 507, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "nltk.sent_tokenize", "line_number": 8, "usage_type": "call"}, {"api_name": "nltk.tokenize.RegexpTokenizer", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "17688274", "text": "from app.boss_admin_api import bossadminapi\n\nfrom flask import request, g\nimport json\n\nfrom app.models.LoginModel.admin import AdvancedLabel\nfrom app.models.LoginModel.tokens import TokenOA\nfrom app.models.base import db\n\n\n\n@bossadminapi.before_request\ndef apis():\n headers = request.headers.get('Authorization')\n if not headers:\n data = {\n 'status': 401,\n 'message': 'error',\n }\n data = json.dumps(data, default=lambda o: o.__dict__)\n return data, 401, {\"ContentType\": \"application/json\"}\n\n value = TokenOA.verify_password(headers)\n\n if not value:\n data = {\n 'status': 401,\n 'message': 'error',\n }\n data = json.dumps(data, default=lambda o: o.__dict__)\n return data, 401, {\"ContentType\": \"application/json\"}\n elif value.grade < 9:\n data = {\n 'status': 402,\n 'message': 'error',\n }\n data = json.dumps(data, default=lambda o: o.__dict__)\n return data, 402, {\"ContentType\": \"application/json\"}\n g.user_message = 
value\n\n\n@bossadminapi.route('/boss_admin/advanced_label', methods=['POST'])\ndef boss_admin_advanced_label():\n if request.method == 'POST':\n label = db.session.query(AdvancedLabel).all()\n label = advanced_label_data(label)\n data = {\n 'status': 200,\n 'message': 'ok',\n 'label': label\n\n }\n data = json.dumps(data, default=lambda o: o.__dict__)\n return data, 200, {\"ContentType\": \"application/json\"}\n\ndef advanced_label_data(value):\n lists = []\n for x in value:\n v = {\n 'uid': x.al_uid,\n 'label': x.al_label\n }\n lists.append(v)\n return lists", "sub_path": "app/boss_admin_api/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 1745, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "flask.request.headers.get", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 20, "usage_type": "call"}, {"api_name": "app.models.LoginModel.tokens.TokenOA.verify_password", "line_number": 23, "usage_type": "call"}, {"api_name": "app.models.LoginModel.tokens.TokenOA", "line_number": 23, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 30, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.g.user_message", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 39, "usage_type": "name"}, {"api_name": "app.boss_admin_api.bossadminapi.before_request", "line_number": 12, "usage_type": "attribute"}, {"api_name": "app.boss_admin_api.bossadminapi", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "app.models.base.db.session.query", "line_number": 45, "usage_type": "call"}, {"api_name": "app.models.LoginModel.admin.AdvancedLabel", "line_number": 45, "usage_type": "argument"}, {"api_name": "app.models.base.db.session", "line_number": 45, "usage_type": "attribute"}, {"api_name": "app.models.base.db", "line_number": 45, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 53, "usage_type": "call"}, {"api_name": "app.boss_admin_api.bossadminapi.route", "line_number": 42, "usage_type": "call"}, {"api_name": "app.boss_admin_api.bossadminapi", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "505267951", "text": "# -*- coding: utf-8 -*-\nimport pymongo\n\n\nclass ExdbspiderPipeline(object):\n def process_item(self, item, spider):\n item['exploit_lang'] = item['exploit_lang'].replace(\"language-\",\"\")\n for key in item.keys():\n if not isinstance(item[key],dict):\n item[key] = item[key].strip()\n\n if item['metrics']:\n for key in item['metrics']:\n if item['metrics'][key]:\n item['metrics'][key] = item['metrics'][key].strip()\n if item['exploit_link']:\n item['exploit_id'] = item['exploit_link'].split(\"/\")[-1]\n if \" - \" in item['title']:\n item['application'], item['attack'] = item['title'].split(\" - \")[0:2]\n else:\n item['application'], item['attack'] = [\"\",\"\"]\n if 'mdi-check' in item['verified']:\n item['verified'] = 'V'\n else:\n item['verified'] = 'NV'\n return item\n\nclass MongoPipeline(object):\n def __init__(self, mongo_db, mongo_collection):\n self.mongo_db = mongo_db\n self.mongo_collection = mongo_collection\n\n 
@classmethod\n def from_crawler(cls, crawler):\n return cls(\n mongo_db = crawler.settings.get('MONGO_DB', 'lists'),\n mongo_collection = crawler.settings.get('MONGO_COLLECTION')\n )\n\n def connect_db(self):\n self.client = pymongo.MongoClient(host='localhost',port=27017)\n self.db = self.client[self.mongo_db]\n\n def process_item(self, item, spider):\n self.connect_db()\n self.db[self.mongo_collection].insert(dict(item))\n return item\n", "sub_path": "exdbspider/exdbspider/pipelines.py", "file_name": "pipelines.py", "file_ext": "py", "file_size_in_byte": 1592, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pymongo.MongoClient", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "495158551", "text": "from selenium import webdriver\nfrom requests import post, get\nfrom time import sleep\nfrom sys import stdout\nfrom json import loads\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nfrom email.mime.text import MIMEText\nimport smtplib\n\ncredit_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7,\n 1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.9,\n 2, 2.4, 2.5, 2.6, 3, 3.5, 4, 4.4, 4.5,\n 5, 5.5, 6, 6.5, 7, 7.5, 8, 8.5, 9, 9.5,\n 10, 11, 11.5, 12, 14.5, 15, 17.5]\n\nfail_num = 0\nsuccess_num = 0\nfail_list = []\ncourse_info_list = []\ncourse_id = ''\ncourse_name = ''\ncourse_timeout = 0\n\nheader = {\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Connection': 'keep-alive',\n 'Content-Length': '57',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Host': 'study.foton.com.cn',\n 'Origin': 'http://study.foton.com.cn',\n 'Referer': 'http://study.foton.com.cn/els/flash/elnFlvPlayer.swf?v=4.0.2',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36',\n 'X-Requested-With': 'XMLHttpRequest'}\n\ndata_single = {\n 'courseId': 'PTC035903',\n 'playTime': '9999'\n}\n\ndata_double = {\n 'courseId': ' ',\n 'scoId': ' ',\n 'progress_measure': '100',\n 'session_time': '60:01',\n 'location': '3601'\n}\n\nselect_video_data = {\n 'courseId': '',\n 'scoId': '',\n 'firstLoad': 'true',\n 'Location': '0',\n 'elsSign': ''\n}\n\ncookie = {}\n\ntemplate_url = \"http://study.foton.com.cn/els/html/coursestudyrecord/coursestudyrecord.studyCheck.do?courseId={}&scoId={}\"\nprogress_url = \"http://study.foton.com.cn/els/html/courseStudyItem/courseStudyItem.saveProgress.do\"\nprecent_url = \"http://study.foton.com.cn/els/html/courseStudyItem/courseStudyItem.saveCoursePrecent.do\"\ntemplate_watch_url = \"http://study.foton.com.cn/els/html/courseStudyItem/courseStudyItem.learn.do?courseId={}&vb_server=&willGoStep=COURSE_COURSE_STUDY\"\ntemplate_play_url = \"http://study.foton.com.cn/els/html/studyCourse/studyCourse.enterCourse.do?courseId={}&studyType=STUDY\"\n\n# 保存视频播放进度\nsave_progress_api = \"http://study.foton.com.cn/els/html/courseStudyItem/courseStudyItem.saveProgress.do\"\n# 同步刷新记录\nupdate_time_api = \"http://study.foton.com.cn/els/html/courseStudyItem/courseStudyItem.updateTimestepByUserTimmer.do\"\n# 获取课程包含的小节信息\nload_course_api = \"http://study.foton.com.cn/els/html/courseStudyItem/courseStudyItem.loadCourseItemTree.do\"\n# 选课接口,包含location信息\nselect_resource_api = 
\"http://study.foton.com.cn/els/html/courseStudyItem/courseStudyItem.selectResource.do?vbox_server=&fromNetWorkSetting=false\"\n# 确认选课接口\nstudy_check_api_tmp = \"http://study.foton.com.cn/els/html/coursestudyrecord/coursestudyrecord.studyCheck.do?courseId={}&scoId={}\"\n# 查看小节学习进度\nscols_complate_api_tmp = \"http://study.foton.com.cn/els/html/courseStudyItem/courseStudyItem.scoIsComplate.do?courseId={}&processType=THREESCREEN\"\n# sever酱推送接口\nnotification_api_tmp = 'https://api.telegram.org/bot{}/sendMessage?chat_id={}&parse_mode=Markdown&text='\n\n\ndef push_notification(ntfc):\n try:\n get(notification_api+\"福田大学云学习进度提示:\\n\"+ntfc)\n except:\n print(\"推送到TG时出错\")\n\n\ndef show_time():\n print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))\n\n\ndef open_broswer():\n driver.implicitly_wait(20)\n driver.get(\"http://study.foton.com.cn\")\n\n\ndef login():\n print(\"登录中...\")\n sleep(10)\n usrname = driver.find_element_by_id('loginName')\n usrname.click()\n usrname.send_keys(username)\n\n passwd = driver.find_element_by_id('password')\n passwd.click()\n passwd.send_keys(password)\n\n sleep(2)\n\n ele = driver.find_element_by_class_name('login_Btn')\n ele.click()\n\n\ndef login_ok():\n if driver.current_url == \"http://study.foton.com.cn/os/html/index.init.do\":\n # 登录成功\n return True\n else:\n return False\n\n\ndef load_course():\n global select_credit\n with open('./course_data.txt', 'r', encoding='utf-8') as f:\n line = f.readline()\n line = f.readline()\n while line:\n line_list = line.strip().split(',')\n credit = line_list[-2]\n if credit == select_credit:\n course_info_list.append(line)\n line = f.readline()\n\n\ndef select_video(course_id, video_id):\n select_video_data['courseId'] = course_id\n select_video_data['scoId'] = video_id\n select_video_data['elsSign'] = cookie['eln_session_id']\n study_check_api = study_check_api_tmp.format(course_id, video_id)\n sleep(5)\n try:\n post(select_resource_api, headers=header, cookies=cookie, data=select_video_data, timeout=(15, 15))\n post(study_check_api, headers=header, cookies=cookie, data={'elsSign': cookie['eln_session_id']}, timeout=(15, 15))\n except:\n pass\n\n\ndef get_completed_video_list(course_id):\n \"\"\"\n 获取已经完成的视频列表\n \"\"\"\n completed_list = []\n scols_complate_api = scols_complate_api_tmp.format(course_id)\n try:\n cmpls = post(scols_complate_api, headers=header, cookies=cookie,\n data={'elsSign': cookie['eln_session_id']}, timeout=(15, 15))\n except:\n print(\"获取视频完成列表出错\")\n else:\n if len(cmpls.text) != 0:\n completed_list = loads(cmpls.text)\n return completed_list\n\n\ndef course_finished(completed_list, vid_list):\n \"\"\"\n 判断课程是否学习完毕\n \"\"\"\n if len(completed_list) == len(vid_list):\n return True\n else:\n return False\n\n\ndef video_finished(course_id, video_id, course_name, video_name):\n \"\"\"\n 判断视频是否播放完毕\n \"\"\"\n data_double['courseId'] = course_id\n data_double['scoId'] = video_id\n completed_list = get_completed_video_list(course_id)\n\n # print(course_id)\n # print(video_id)\n # print(completed_list)\n\n if video_id in completed_list:\n return True\n try:\n r = post(save_progress_api, headers=header, cookies=cookie, data=data_double, timeout=(15, 15))\n except:\n print(\"获取视频播放进度时出错\")\n else:\n r_data = r.text\n\n print(r.text)\n\n if len(r_data) != 0:\n\n # print(r_data)\n\n try:\n r_dict = loads(r_data)\n except:\n print(\"HTTP Status 500 服务器内部错误\")\n else:\n if 'completed' in r_dict:\n if r_dict['completed'] == 'true':\n return True\n else:\n show_time()\n 
print(\"{}视频播放进度{}%,{}课程学习进度{}%\".format(video_name, r_dict['completeRate'],\n course_name, r_dict['courseProgress']))\n progress = \"{} 视频播放进度{}%,{} 课程学习进度{}%\".format(video_name, r_dict['completeRate'],\n course_name, r_dict['courseProgress'])\n push_notification(progress)\n return False\n else:\n return False\n else:\n return False\n\n\ndef pre_test():\n sleep(15)\n form = driver.find_element_by_id(\"coursePretestForm\")\n sleep(5)\n try:\n h2 = form.find_element_by_tag_name(\"h2\")\n except:\n form_choice = driver.find_element_by_class_name(\"form_choice\")\n # choice_type_list = form_choice.find_elements_by_class_name(\"choice_type\")\n # for choice_type in choice_type_list:\n # if choice_type.text == \"判断题\":\n # 判断题/选择题(单选,多选)都只需要循环点question-item的第一个选项就行了\n question_item_list = form_choice.find_elements_by_class_name(\"question-item\")\n for question_item in question_item_list:\n p_list = question_item.find_elements_by_tag_name(\"p\")\n span = p_list[1].find_element_by_tag_name(\"span\")\n button = span.find_element_by_tag_name(\"input\")\n button.click()\n sleep(1)\n from_confirm = driver.find_element_by_class_name(\"from_confirm\")\n submit_button = from_confirm.find_element_by_id(\"coursePretestSubmit\")\n submit_button.click()\n sleep(5)\n next_step = driver.find_element_by_id(\"upCoursePretestGoNextBtn\")\n next_step.click()\n sleep(5)\n learn()\n else:\n if h2.text == \"(无试题)\":\n button = driver.find_element_by_class_name(\"from_confirm\").find_element_by_tag_name(\"button\")\n button.click()\n sleep(5)\n learn()\n\n\ndef is_finished():\n global success_num\n sleep(15)\n try:\n # 通过课程进度判断课程是否学习完成,span.text == '100'说明已经学完了\n # span = driver.find_element_by_id(\"studyProgress\")\n span = WebDriverWait(driver, 3, 0.5).until(\n EC.presence_of_element_located((By.ID, 'studyProgress')))\n except:\n try:\n # 通过是否显示课程评估页面判断课程是否学习完成\n # h1 = driver.find_element_by_class_name(\"main_title\")\n h1 = WebDriverWait(driver, 3, 0.5).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'main_title')))\n except:\n # 不显示课程评估,没学完,开始学习\n learn()\n else:\n span = h1.find_element_by_tag_name('span')\n if span.text == \"课程评估\":\n success_num += 1\n print(\"恭喜你!课程《{}》 已经完成学习,已成功学习 {} 门\".format(course_name, success_num))\n info = \"《{}》课程全部视频学习完毕.学习成功{}门.共{}门.学习进度{}\".format(course_name, success_num,\n len(course_info_list),\n str(round((success_num+fail_num) / len(course_info_list),\n 4) * 100) + \"%\")\n push_notification(info)\n elif span.text == \"课前测试\":\n pre_test()\n elif span.text == \"课��测试\":\n success_num += 1\n print(\"恭喜你!课程《{}》 已经完成学习,已成功学习 {} 门\".format(course_name, success_num))\n info = \"《{}》课程全部视频学习完毕.学习成功{}门.共{}门.学习进度{}\".format(course_name, success_num,\n len(course_info_list),\n str(round((success_num+fail_num) / len(course_info_list),\n 4) * 100) + \"%\")\n push_notification(info)\n\n else:\n if span.text == '100':\n success_num += 1\n print(\"恭喜你!课程《{}》 已经完成学习,已成功学习 {} 门\".format(course_name, success_num))\n info = \"《{}》课程全部视频学习完毕.学习成功{}门.共{}门.学习进度{}\".format(course_name, success_num,\n len(course_info_list),\n str(round((success_num+fail_num) / len(course_info_list),\n 4) * 100) + \"%\")\n push_notification(info)\n else:\n # 开始学习\n learn()\n\n\ndef get_cookie():\n cookie_list = driver.get_cookies()\n for single_cookie in cookie_list:\n cookie[single_cookie['name']] = single_cookie['value']\n\n\ndef learn():\n global success_num\n global fail_num\n global course_timeout\n sleep(15)\n play_button = WebDriverWait(driver, 15, 0.5).until(\n 
EC.presence_of_element_located((By.ID, 'courseRp_sel')))\n play_button.click()\n sleep(5)\n get_cookie()\n data_single['courseId'] = course_id\n data_double['courseId'] = course_id\n\n try:\n # ele存在说明是双分屏或者三分屏\n # ele = driver.find_element_by_id('vodtree')\n ele = WebDriverWait(driver, 3, 0.5).until(\n EC.presence_of_element_located((By.ID, 'vodtree')))\n except:\n # ele不存在说明是单分屏\n sleep(0.1)\n try:\n r = post(precent_url, headers=header, cookies=cookie, data=data_single, timeout=(15, 15))\n except:\n fail_num += 1\n print(\"课程《{}》学习失败,已学习失败{}门课程\".format(course_name, fail_num))\n fail_list.append(course_name)\n else:\n r_data = r.text\n if len(r_data) != 0:\n r_dict = loads(r_data)\n if 'completed' in r_dict:\n if r_dict['completed'] == 'true':\n success_num += 1\n print(\"恭喜你!课程《{}》 已经完成学习,已成功学习 {} 门\".format(course_name, success_num))\n info = \"《{}》课程全部视频学习完毕.学习成功{}门.共{}门.学习进度{}\".format(course_name, success_num,\n len(course_info_list),\n str(round(\n (success_num+fail_num) / len(course_info_list),\n 4) * 100) + \"%\")\n push_notification(info)\n else:\n fail_num += 1\n print(\"课程《{}》学习失败,已学习失败{}门课程\".format(course_name, fail_num))\n fail_list.append(course_name)\n else:\n # 双/三分屏\n completed_video_list = get_completed_video_list(course_id)\n vid_list = []\n title_list = []\n\n div_list = ele.find_elements_by_tag_name('div')\n for div in div_list[1:]:\n try:\n a = div.find_element_by_tag_name('a')\n except:\n pass\n else:\n video_id = a.get_attribute('data-id')\n vid_list.append(video_id)\n video_title = a.get_attribute('title')\n title_list.append(video_title)\n print(\"正在爬取 {} 视频数据...\".format(video_title))\n stdout.flush()\n\n print(\"课程《{}》所有视频数据爬取完成!开始学习\".format(course_name))\n if course_finished(completed_video_list, vid_list):\n show_time()\n print(\"《{}》课程全部视频学习完毕\".format(course_name))\n success_num += 1\n info = \"《{}》课程全部视频学习完毕.学习成功{}门.共{}门.学习进度{}\".format(course_name, success_num, len(course_info_list),\n str(round((success_num+fail_num) / len(course_info_list),\n 4) * 100) + \"%\")\n push_notification(info)\n else:\n for index, vid in enumerate(vid_list):\n t = 0\n sleep(1)\n video_title = title_list[index]\n data_double['scoId'] = vid\n video_url = template_url.format(course_id, vid)\n print(\"开始学习 {} 视频\".format(video_title))\n select_video(course_id, vid)\n while True:\n if video_finished(course_id, vid, course_name, video_title):\n show_time()\n print(\"{} 视频学习完毕\".format(video_title))\n sleep(1)\n break\n else:\n post(update_time_api, headers=header, cookies=cookie,\n data={'elsSign': cookie['eln_session_id']}, timeout=(15, 15))\n sleep(180)\n t += 1\n if t > 30:\n print(\"{} 视频学习超时\".format(video_title))\n course_timeout = 1\n break\n completed_video_list = get_completed_video_list(course_id)\n if course_finished(completed_video_list, vid_list):\n show_time()\n success_num += 1\n print(\"《{}》课程全部视频学习完毕\".format(course_name))\n info = \"《{}》课程全部视频学习完毕.学习成功{}门.共{}门.学习进度{}\".format(course_name, success_num, len(course_info_list),\n str(round((success_num+fail_num) / len(course_info_list),\n 4) * 100) + \"%\")\n '''\n msg = MIMEText(info, 'plain', 'utf-8')\n server.sendmail(from_addr, [to_addr], msg.as_string())\n '''\n push_notification(info)\n sleep(1)\n elif course_timeout == 1:\n show_time()\n fail_num += 1\n fail_list.append(course_name)\n course_timeout = 0\n print(\"《{}》课程学习超时\".format(course_name))\n info = \"《{}》课程学习超时\".format(course_name)\n push_notification(info)\n\n\ndef end_study():\n print(\"本次学习结束,共{}门课程\".format(len(course_info_list)))\n 
print(\"学习成功{}门,学习失败{}门\".format(success_num, fail_num))\n end_info = \"本次学习结束,共{}门课程.学习成功{}门,学习失败{}门,请检查学习失败的课程是否已经完成选课,未进行选课的课程无法学习。\"\\\n .format(len(course_info_list), success_num, fail_num)\n if fail_num > 0:\n print(\"学习失败的课程有{}\".format(fail_list))\n print(\"请检查学习失败的课程是否已经完成选课,未进行选课的课程无法学习。\")\n push_notification(end_info)\n\n\nif __name__ == \"__main__\":\n print(\"课程学分:\" + str(credit_list))\n select_credit = input(\"请输入要学习的课程的学分:\")\n if float(select_credit) not in credit_list:\n print(\"输入错误。告辞\")\n exit(0)\n else:\n print(\"正在打开浏览器...\")\n load_course()\n # driver = webdriver.Firefox()\n options = webdriver.FirefoxOptions()\n options.add_argument('-headless')\n driver = webdriver.Firefox(options=options)\n open_broswer()\n username = input(\"请输入用户名:\")\n password = input(\"请输入密码:\")\n bot_token = input(\"请输入TelegramBotToken: \")\n bot_chatID = input(\"请输入chat_id: \")\n notification_api = notification_api_tmp.format(bot_token, bot_chatID)\n login()\n while not login_ok():\n driver.refresh()\n sleep(2)\n login()\n print(\"登录成功\")\n for c, course_info in enumerate(course_info_list):\n course_line_list = course_info.strip().split(',')\n course_id = course_line_list[-3]\n course_name = course_line_list[-4]\n play_url = template_play_url.format(course_id)\n driver.switch_to.window(driver.window_handles[0])\n driver.get(play_url)\n if c % 100 == 0:\n get_cookie()\n sleep(1)\n print(\"开始学习《{}》:\".format(course_name))\n try:\n is_finished()\n except:\n fail_num += 1\n fail_list.append(course_name)\n print(\"{} 学习失败,请检查《{}》是否已选课!\".format(course_name, course_name))\n info = \"{} 学习失败, 请检查《{}》是否已选课!\".format(course_name, course_name)\n push_notification(info)\n driver.quit()\n end_study()\n", "sub_path": "ServerStudyBotTG.py", "file_name": "ServerStudyBotTG.py", "file_ext": "py", "file_size_in_byte": 20773, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "requests.get", "line_number": 87, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 93, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 93, "usage_type": "call"}, {"api_name": "time.time", "line_number": 93, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 103, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 112, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 144, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 146, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 147, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 159, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 165, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 194, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 207, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 229, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 231, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 246, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 250, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 253, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 259, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 265, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 269, "usage_type": "call"}, {"api_name": 
"selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 270, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 270, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 270, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 270, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 275, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 276, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 276, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 276, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 276, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 325, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 326, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 327, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 327, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 327, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 327, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 329, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 337, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 338, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 338, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 338, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 338, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 341, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 343, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 351, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 384, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 384, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 398, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 408, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 411, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 413, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 432, "usage_type": "call"}, {"api_name": "selenium.webdriver.FirefoxOptions", "line_number": 464, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 464, "usage_type": "name"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 466, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 466, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 476, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 488, "usage_type": "call"}]} +{"seq_id": "262461937", "text": "import requests\r\nimport json\r\nimport sys\r\n\r\n# runs through entire blockchain to determine amount belonging to \"user\"\r\n# this script allows user to enter, 
save, or change the id used\r\n# will return the balance for that user and all transactions involving said user\r\n\r\nif __name__ == '__main__':\r\n # What is the server address? IE `python3 miner.py https://server.com/api/`\r\n if len(sys.argv) > 1:\r\n node = sys.argv[1]\r\n else:\r\n node = \"http://localhost:5000\"\r\n\r\n # placeholder for balance we will calculate by running through chain\r\n balance = 0\r\n\r\n # transactions involving user\r\n transactions = []\r\n\r\n # Load ID\r\n # change to user input\r\n # f = open(\"my_id.txt\", \"r\")\r\n # user_id = f.read()\r\n # print(\"ID is\", id)\r\n # f.close()\r\n\r\n user_id = input('Please enter user id')\r\n\r\n blockchain = requests.get(url=node + \"/chain\").json()\r\n\r\n chain = blockchain['chain']\r\n\r\n for block in chain:\r\n for transaction in block['transactions']:\r\n if transaction['sender'] == user_id:\r\n transactions.append(transaction)\r\n balance -= transaction['amount']\r\n if transaction['recipient'] == user_id:\r\n transactions.append(transaction)\r\n balance += transaction['amount']\r\n\r\n if len(transactions) > 0:\r\n print('Total amount attributable to entered user id is ', balance)\r\n print('')\r\n print('Below are all transactions involving the user ')\r\n print('')\r\n print(json.dumps(transactions, sort_keys=True, indent=4))\r\n else:\r\n print('Balance is 0, no transactions found')\r\n", "sub_path": "basic_wallet_p/basic_wallet.py", "file_name": "basic_wallet.py", "file_ext": "py", "file_size_in_byte": 1651, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "394116279", "text": "# encoding: utf-8\nfrom django import forms\n\nfrom apps.account.models import Persona\nfrom .models import Socio, Parcela, Acopio, DetalleAcopio\n\n\nclass PersonaForm(forms.ModelForm):\n\n class Meta:\n model = Persona\n fields = ['first_name', 'last_name', 'identity_type',\n 'identity_num', 'photo', 'email', 'birth_date']\n labels = {\n 'first_name': 'Nombre: ',\n 'last_name': 'Apellidos: ',\n 'identity_type': 'Tipo de Identidad: ',\n 'identity_num': 'Número: ',\n 'photo': 'Fotografía: ',\n 'email': 'E-mail: ',\n 'birth_date': 'Fecha de Nacimiento: ',\n }\n widgets = {\n 'first_name': forms.TextInput(attrs={'class': 'form-control'}),\n 'last_name': forms.TextInput(attrs={'class': 'form-control'}),\n 'identity_num': forms.TextInput(attrs={'class': 'form-control'}),\n 'email': forms.TextInput(attrs={'class': 'form-control'}),\n }\n\n\nclass SocioForm(forms.ModelForm):\n\n class Meta:\n model = Socio\n fields = ['cod_socio', 'direccion', 'ciudad',\n 'estado']\n labels = {\n 'cod_socio': 'Codigo de Socio: ',\n 'direccion': 'Dirección: ',\n 'ciudad': 'Distrito: ',\n 'estado': 'Estado: ',\n }\n # widgets = {\n # 'first_name': forms.TextInput(attrs={'class': 'form-control'}),\n # 'last_name': forms.TextInput(attrs={'class': 'form-control'}),\n # 'identity_num': forms.TextInput(attrs={'class': 'form-control'}),\n # 'email': forms.TextInput(attrs={'class': 'form-control'}),\n # }\n\n\nclass ParcelaForm(forms.ModelForm):\n\n class Meta:\n model = Parcela\n fields = ['codigo', 'ubicacion', 'area_cultivo', 'area_desarrollo',\n 'prod_estimado_tn', 'prod_estimado_kg', 
'total_parcelas',\n 'socio']\n\n\nclass AcopioForm(forms.ModelForm):\n\n class Meta:\n model = Acopio\n fields = ['socio', 'estado', 'n_ticket']\n labels = {\n 'socio': 'Proveedor ',\n 'estado': 'Pagado?: ',\n 'n_ticket': 'N° Ticket: ',\n }\n\n\nclass DetalleAcopioForm(forms.ModelForm):\n\n class Meta:\n model = DetalleAcopio\n fields = ['parcela', 'acopio', 'kilos', 'precio_uni', 'total_pagar']\n labels = {\n 'parcela': 'Parcela: ',\n 'acopio': 'Acopio:',\n 'kilos': 'Cantidad en Kg.: ',\n 'precio_uni': 'Acopiado a: ',\n 'total_pagar': 'Total a pagar: ',\n }\n", "sub_path": "apps/acopio/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 2605, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.forms.ModelForm", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 8, "usage_type": "name"}, {"api_name": "apps.account.models.Persona", "line_number": 11, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 24, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 24, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 25, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 25, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 26, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 26, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 27, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 27, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 31, "usage_type": "name"}, {"api_name": "models.Socio", "line_number": 34, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 51, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 51, "usage_type": "name"}, {"api_name": "models.Parcela", "line_number": 54, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 60, "usage_type": "name"}, {"api_name": "models.Acopio", "line_number": 63, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 72, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 72, "usage_type": "name"}, {"api_name": "models.DetalleAcopio", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "567879336", "text": "import datetime\nimport requests\n\n################################################################################\n# Config\nHASS_API_PASSWORD = \"{{homeassistant_http.api_password}}\"\nHASS_HOST = \"http://0.0.0.0:{{homeassistant_port}}\"\nPROMETHEUS_HOST = \"http://0.0.0.0:{{prometheus_port}}\"\nDEBUG = True\n\n################################################################################\n# Utility Functions\n\n\ndef debug(msg):\n if DEBUG:\n print(msg)\n\n\ndef create_sensor(sensor_type, sensor_name, payload):\n debug(\"Making API call to Homeassistant to install sensor {0}.{1}\".format(sensor_type, sensor_name))\n headers = {\"x-ha-access\": HASS_API_PASSWORD, \"Content-Type\": \"application/json\"}\n url = \"{0}/api/states/{1}.{2}\".format(HASS_HOST, sensor_type, sensor_name)\n resp = requests.post(url, json=payload, headers=headers, timeout=2)\n debug(\"DONE 
({0})\".format(resp))\n\n\n################################################################################\n# SENSORS\ndebug(\"Fetching sensors from prometheus...\")\nresp = requests.get(PROMETHEUS_HOST + \"/api/v1/query?query={homeassistant='yes'}\", timeout=2)\n\ndebug(\"Adding sensors from homeassistant...\")\ndata = resp.json()['data']['result']\nfor item in data:\n sensor_type = item['metric'].get(\"homeassistant_sensor_type\", \"sensor\")\n sensor_state = item['value'][1]\n if sensor_type == \"binary_sensor\":\n sensor_state = \"on\" if int(sensor_state) >= 1 else \"off\"\n payload = {\n \"state\": \"{}\".format(sensor_state),\n \"attributes\": {\n \"friendly_name\": item['metric']['__name__'],\n \"source\": \"prometheus\",\n \"type\": \"prometheus-sensor\",\n \"timestamp\": item['value'][0]\n }\n }\n create_sensor(sensor_type, item['metric']['__name__'], payload)\n\n\n################################################################################\n# ALERTS\ndebug(\"Fetching alerts from prometheus...\")\nresp = requests.get(PROMETHEUS_HOST + \"/api/v1/rules\", timeout=2)\ndata = resp.json()['data']\n\ndebug(\"Adding alerts as sensors from homeassistant...\")\naggregate_state = \"on\"\ntimestamp = datetime.datetime.now().timestamp()\nfor group in data['groups']:\n for rule in group['rules']:\n if len(rule['alerts']) > 0:\n rule_alerts = rule['alerts'][0]['state']\n else:\n rule_alerts = None\n\n if rule_alerts:\n sensor_state = \"off\"\n aggregate_state = \"off\"\n else:\n sensor_state = \"on\"\n\n sensor_name = rule['name'].replace(\".\", \"_\").replace(\"-\", \"_\")\n payload = {\n \"state\": \"{}\".format(sensor_state),\n \"attributes\": {\n \"friendly_name\": sensor_name,\n \"source\": \"prometheus\",\n \"type\": \"prometheus-alert\",\n \"timestamp\": timestamp\n }\n }\n create_sensor(\"binary_sensor\", sensor_name, payload)\n\ndebug(\"Adding aggregate alert sensor to homeassistant...\")\npayload['state'] = aggregate_state\npayload['attributes']['friendly_name'] = \"prometheus_aggregate\"\ncreate_sensor(\"binary_sensor\", \"prometheus_aggregate\", payload)\n", "sub_path": "roles/homeassistant/templates/prom2hass.py", "file_name": "prom2hass.py", "file_ext": "py", "file_size_in_byte": 3119, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "requests.post", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 60, "usage_type": "attribute"}]} +{"seq_id": "138467795", "text": "from hasoffers import Hasoffers\nfrom kpi_notificator import celery_app\n\nfrom django.conf import settings\nfrom stats.models import Offer\n\n\n@celery_app.task\ndef update_active_offers():\n\n api = Hasoffers(network_token=settings.HASOFFERS_NETWORK_TOKEN,\n network_id=settings.HASOFFERS_NETWORK_ID,\n proxies=settings.PROXIES)\n\n params = dict(\n fields=['id', 'name'],\n contain=['OfferCategory'],\n filters={'status': 'active'},\n limit=10000\n )\n resp = api.Offer.findAll(**params)\n\n for offer in resp.extract_all():\n offer_categories_id = (\n list(\n map(\n int,\n list(dict(offer.OfferCategory).keys()))))\n is_incent = bool(set(offer_categories_id) & set(settings.INCENT_CATEGORIES))\n\n try:\n db_offer = Offer.objects.get(pk=offer.id)\n if db_offer.incent != is_incent:\n 
db_offer.incent = is_incent\n db_offer.save()\n except Offer.DoesNotExist:\n continue\n", "sub_path": "workers/loaders/tasks/update_active_offers.py", "file_name": "update_active_offers.py", "file_ext": "py", "file_size_in_byte": 1075, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "hasoffers.Hasoffers", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.settings.HASOFFERS_NETWORK_TOKEN", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.settings.HASOFFERS_NETWORK_ID", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.settings.PROXIES", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 13, "usage_type": "name"}, {"api_name": "django.conf.settings.INCENT_CATEGORIES", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 29, "usage_type": "name"}, {"api_name": "stats.models.Offer.objects.get", "line_number": 32, "usage_type": "call"}, {"api_name": "stats.models.Offer.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "stats.models.Offer", "line_number": 32, "usage_type": "name"}, {"api_name": "stats.models.Offer.DoesNotExist", "line_number": 36, "usage_type": "attribute"}, {"api_name": "stats.models.Offer", "line_number": 36, "usage_type": "name"}, {"api_name": "kpi_notificator.celery_app.task", "line_number": 8, "usage_type": "attribute"}, {"api_name": "kpi_notificator.celery_app", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "72229156", "text": "#\n# Modified standard toolbar for image manipulation tasks and image WM awareness\n#\n# Copied from:\n# matplotlib.backend_bases.NavigationToolbar2\n# matplotlib.backends.backend_tkagg.NavigationToolbar2TkAgg\n#\n# If one can live with the dual function pan/zoom, it should be possible to subclass this.\n#\n\n\nimport matplotlib\nimport matplotlib.cbook as cbook\nfrom matplotlib.figure import Figure\nfrom matplotlib.widgets import SubplotTool\nimport matplotlib.backends.windowing as windowing\nfrom matplotlib.backends.backend_tkagg import ToolTip, FigureCanvasTkAgg, FigureManagerTkAgg\n\nimport numpy as np\n\nimport six\nimport Tkinter as Tk\n\nimport WM\n\nimport os.path\nrcParams = matplotlib.rcParams\n\nclass Cursors:\n # this class is only used as a simple namespace\n HAND, POINTER, SELECT_REGION, MOVE = list(range(4))\ncursors = Cursors()\n\ncursord = {\n \"none\" : \"arrow\",\n \"slice\" : \"fleur\",\n \"bvalue\" : \"fleur\",\n \"pan\" : \"hand2\",\n \"crosshair\": \"tcross\",\n \"ZOOM\": \"tcross\",\n }\n\nclass NavigationToolbar2(object):\n \"\"\"\n Base class for the navigation cursor, version 2\n\n backends must implement a canvas that handles connections for\n 'button_press_event' and 'button_release_event'. 
See\n :meth:`FigureCanvasBase.mpl_connect` for more information\n\n\n They must also define\n\n :meth:`save_figure`\n save the current figure\n\n :meth:`set_cursor`\n if you want the pointer icon to change\n\n :meth:`_init_toolbar`\n create your toolbar widget\n\n :meth:`draw_rubberband` (optional)\n draw the zoom to rect \"rubberband\" rectangle\n\n :meth:`press` (optional)\n whenever a mouse button is pressed, you'll be notified with\n the event\n\n :meth:`release` (optional)\n whenever a mouse button is released, you'll be notified with\n the event\n\n :meth:`dynamic_update` (optional)\n dynamically update the window while navigating\n\n :meth:`set_message` (optional)\n display message\n\n :meth:`set_history_buttons` (optional)\n you can change the history back / forward buttons to\n indicate disabled / enabled state.\n\n That's it, we'll do the rest!\n \"\"\"\n\n # list of toolitems to add to the toolbar, format is:\n # (\n # text, # the text of the button (often not visible to users)\n # tooltip_text, # the tooltip shown on hover (where possible)\n # image_file, # name of the image for the button (without the extension)\n # name_of_method, # name of the method in NavigationToolbar2 to call\n # )\n toolitems = (\n ('Sources', 'Select sources', 'move', 'wm_sources'),\n ('View', 'Select view', 'move', 'wm_view'),\n ('Source', 'Select source', 'move', 'source'),\n ('Axis', 'Toggle Axis', 'move', 'axis'),\n ('Crosshair', 'Show a point in space', 'move', 'crosshair'),\n ('B-value', 'Pan through b-values', 'move', 'bvalue'),\n ('Slice', 'Pan through slices', 'move', 'slice'),\n ('Home', 'Reset original view', 'home', 'home'),\n ('Back', 'Back to previous view', 'back', 'back'),\n ('Forward', 'Forward to next view', 'forward', 'forward'),\n (None, None, None, None),\n ('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),\n ('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),\n (None, None, None, None),\n ('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),\n ('Save', 'Save the figure', 'filesave', 'save_figure'),\n #('Debug', 'bind debug info here', 'filesave', 'debug'),\n )\n\n def __init__(self, canvas, Winst):\n self.canvas = canvas\n self.Winst = Winst\n canvas.toolbar = self\n # a dict from axes index to a list of view limits\n self._views = cbook.Stack()\n self._views2 = cbook.Stack()\n self._positions = cbook.Stack() # stack of subplot positions\n\n\n self._idPress = None\n self._idRelease = None\n self._idDrag = self.canvas.mpl_connect(\n 'motion_notify_event', self.mouse_move)\n self._ids_zoom = [] # Zoom equivalent for drag ID (mouse move, key press, key release)\n\n self._last_axis = None # Axis where the button was pressed\n self._xypress = None # Press info for the zoom rectangle\n\n self._active = None # ID string denoting active mode\n self._button_pressed = None # Save the mouse button pressed at canvas\n self._zoom_mode = None # Key pressed in zoom mode, to limit to y/x zooming\n\n self._lastCursor = None # GUI mouse cursor\n self.mode = '' # GUI status message\n self._init_toolbar()\n\n\n\n # a mode string for the status bar\n self.set_history_buttons()\n\n # ACTIVATION FUNCTIONS\n # General activation function called from the GUI\n def activate(self, mode_id, *args):\n \"\"\" General activation function for toolbar button press.\n Set the active mode and connect corresponding callbacks.\n \"\"\"\n\n if self._active == mode_id:\n self._active = None\n else:\n self._active = mode_id\n if self._idPress is not None:\n self._idPress = 
self.canvas.mpl_disconnect(self._idPress)\n self.mode = ''\n\n if self._idRelease is not None:\n self._idRelease = self.canvas.mpl_disconnect(self._idRelease)\n self.mode = ''\n\n if self._active:\n self._idPress = self.canvas.mpl_connect('button_press_event',\n getattr(self, 'press_' + mode_id))\n self._idRelease = self.canvas.mpl_connect('button_release_event',\n getattr(self, 'release_' + mode_id))\n self.mode = mode_id\n self.canvas.widgetlock(self)\n else:\n self.canvas.widgetlock.release(self)\n\n for a in self.canvas.figure.get_axes():\n a.set_navigate_mode(self._active)\n\n self.set_message(self.mode)\n # just get rid of these altogether\n def pan(self, *args): self.activate('pan')\n def slice(self, *args): self.activate('slice')\n def crosshair(self, *args): self.activate('crosshair')\n def bvalue(self, *args): self.activate('bvalue')\n def axis(self, *args): self.activate('axis')\n def wm_sources(self, *args): self.activate('sources')\n def wm_view(self, *args): self.activate('view')\n def source(self, *args): self.activate('source')\n\n\n # CANVAS MOUSE PRESS\n # Called when mouse button is pressed\n def press(self, event): pass\n # General function to call when a mouse press occurs in canvas\n def press_canvas(self, mode_string, event, implements = (1,)):\n\n if event.button in implements:\n self._button_pressed = event.button\n else:\n self._button_pressed = None\n return\n\n # push the current view to define home if stack is empty\n if self._views.empty():\n self.push_current()\n\n x, y = event.x, event.y\n for i, a in enumerate(self.canvas.figure.get_axes()):\n if x is not None and y is not None and a.in_axes(event) and a.get_navigate() and a.can_pan():\n\n self._last_axis = a\n self.canvas.mpl_disconnect(self._idDrag)\n # Can replaced by a subclass like structure\n a2 = self.Winst.get_adapter(a)\n if self._button_pressed == 1:\n getattr(a2, 'start_'+self._active)(x, y, event.button)\n self._idDrag = self.canvas.mpl_connect('motion_notify_event', getattr(self,'drag_'+self._active))\n elif self._button_pressed == 3:\n #TODO: seperate zoom/pan into two buttons to get rid of the first if\n if self._active == 'pan':\n getattr(a2, 'start_'+self._active)(x, y, event.button)\n self._idDrag = self.canvas.mpl_connect('motion_notify_event', getattr(self,'drag_'+self._active))\n else:\n getattr(self, self._active+'_menu')(event)\n\n\n self.press(event)\n # Connect to drag_mode\n def press_pan(self, event): self.press_canvas('pan', event, implements = (1,3))\n def press_slice(self, event): self.press_canvas('slice', event, implements = (1,))\n def press_crosshair(self, event): self.press_canvas('crosshair', event, implements = (1,))\n def press_bvalue(self, event): self.press_canvas('bvalue', event, implements = (1,))\n # Call mode_menu\n def press_axis(self, event): self.press_canvas('axis', event, implements = (3,))\n def press_sources(self, event): self.press_canvas('sources', event, implements = (3,))\n def press_view(self, event): self.press_canvas('view', event, implements = (3,))\n def press_source(self, event): self.press_canvas('source', event, implements = (3,))\n\n\n # CANVAS MOUSE DRAG\n # General function to call when a mouse drag occurs in canvas\n def drag_canvas(self, event):\n a = self._last_axis\n a2 = self.Winst.get_adapter(a)\n getattr(a2, 'drag_'+self._active)(event)\n self.display_message(event)\n self.dynamic_update()\n # When the tool is not active\n def mouse_move(self, event):\n if not event.inaxes or not self._active:\n if self._lastCursor != \"none\":\n 
self.set_cursor(\"none\")\n self._lastCursor = \"none\"\n else:\n if self._lastCursor != self._active:\n if cursord.has_key(self._active):\n self.set_cursor(self._active)\n else:\n self.set_cursor(\"none\")\n self._lastCursor = self._active\n self.display_message(event)\n\n def display_message(self, event):\n if event.inaxes and event.inaxes.get_navigate():\n try:\n a2 = self.Winst.get_adapter(event.inaxes)\n if self._active in ('crosshair', None):\n s = a2.get_display_str(event.xdata, event.ydata)\n elif self._active in ('slice','bvalue','pan'):\n s = a2.get_display_minor_str(event.xdata, event.ydata)\n else:\n s = event.inaxes.format_coord(event.xdata, event.ydata)\n except (ValueError, OverflowError):\n pass\n else:\n if len(self.mode):\n self.set_message('%s, %s' % (self.mode, s))\n else:\n self.set_message(s)\n else:\n self.set_message(self.mode)\n\n\n # Drag functions\n def drag_pan(self, event): self.drag_canvas(event)\n def drag_slice(self, event): self.drag_canvas(event)\n def drag_bvalue(self, event): self.drag_canvas(event)\n def drag_crosshair(self, event): self.drag_canvas(event)\n # Menu functions, overwritten in GUI's subclass\n def axis_menu(self, event): pass\n def sources_menu(self, event): pass\n def view_menu(self, event): pass\n def source_menu(self, event): pass\n\n\n # CANVAS MOUSE RELEASE\n # Called when mouse button is released\n def release(self, event): pass\n # General function to call when a mouse release occurs in canvas\n def release_canvas(self, event):\n if self._button_pressed is None:\n return\n self.canvas.mpl_disconnect(self._idDrag)\n self._idDrag = self.canvas.mpl_connect('motion_notify_event', self.mouse_move)\n\n if not self._last_axis:\n return\n a = self._last_axis\n a2 = self.Winst.get_adapter(a)\n # Menus do not implement this\n if hasattr(a2,'end_'+self._active):\n getattr(a2, 'end_'+self._active)()\n\n self._button_pressed = None\n self._last_axis = None\n\n self.push_current()\n self.release(event)\n self.draw()\n # just get rid of these altogether\n def release_pan(self, event): self.release_canvas(event)\n def release_slice(self, event): self.release_canvas(event)\n def release_crosshair(self, event): self.release_canvas(event)\n def release_bvalue(self, event): self.release_canvas(event)\n def release_axis(self, event): self.release_canvas(event)\n def release_sources(self, event): self.release_canvas(event)\n def release_view(self, event): self.release_canvas(event)\n def release_source(self, event): self.release_canvas(event)\n\n def get_method(self, a, f):\n if hasattr(a,f): return getattr(a, f)\n else:\n a2 = self.Winst.get_adapter(a)\n if hasattr(a2,f): return getattr(a2, f)\n else: return None\n\n # The black sheep\n def zoom(self, *args):\n \"\"\"Activate zoom to rect mode\"\"\"\n if self._active == 'ZOOM':\n self._active = None\n else:\n self._active = 'ZOOM'\n\n if self._idPress is not None:\n self._idPress = self.canvas.mpl_disconnect(self._idPress)\n self.mode = ''\n\n if self._idRelease is not None:\n self._idRelease = self.canvas.mpl_disconnect(self._idRelease)\n self.mode = ''\n\n if self._active:\n self._idPress = self.canvas.mpl_connect('button_press_event',\n self.press_zoom)\n self._idRelease = self.canvas.mpl_connect('button_release_event',\n self.release_zoom)\n self.mode = 'zoom rect'\n self.canvas.widgetlock(self)\n else:\n self.canvas.widgetlock.release(self)\n\n for a in self.canvas.figure.get_axes():\n a.set_navigate_mode(self._active)\n\n self.set_message(self.mode)\n def press_zoom(self, event):\n \"\"\"the press 
mouse button in zoom to rect mode callback\"\"\"\n # If we're already in the middle of a zoom, pressing another\n # button works to \"cancel\"\n if self._ids_zoom != []:\n for zoom_id in self._ids_zoom:\n self.canvas.mpl_disconnect(zoom_id)\n self.release(event)\n self.draw()\n self._xypress = None\n self._button_pressed = None\n self._ids_zoom = []\n return\n\n if event.button == 1:\n self._button_pressed = 1\n elif event.button == 3:\n self._button_pressed = 3\n else:\n self._button_pressed = None\n return\n\n x, y = event.x, event.y\n\n # push the current view to define home if stack is empty\n if self._views.empty():\n self.push_current()\n\n self._xypress = []\n for i, a in enumerate(self.canvas.figure.get_axes()):\n if (x is not None and y is not None and a.in_axes(event) and\n a.get_navigate() and a.can_zoom()):\n self._xypress.append((x, y, a, i, a.viewLim.frozen(),\n a.transData.frozen()))\n\n id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)\n id2 = self.canvas.mpl_connect('key_press_event',\n self._switch_on_zoom_mode)\n id3 = self.canvas.mpl_connect('key_release_event',\n self._switch_off_zoom_mode)\n\n self._ids_zoom = id1, id2, id3\n self._zoom_mode = event.key\n\n self.press(event)\n def drag_zoom(self, event):\n \"\"\"the drag callback in zoom mode\"\"\"\n\n if self._xypress:\n x, y = event.x, event.y\n lastx, lasty, a, ind, lim, trans = self._xypress[0]\n\n # adjust x, last, y, last\n x1, y1, x2, y2 = a.bbox.extents\n x, lastx = max(min(x, lastx), x1), min(max(x, lastx), x2)\n y, lasty = max(min(y, lasty), y1), min(max(y, lasty), y2)\n\n if self._zoom_mode == \"x\":\n x1, y1, x2, y2 = a.bbox.extents\n y, lasty = y1, y2\n elif self._zoom_mode == \"y\":\n x1, y1, x2, y2 = a.bbox.extents\n x, lastx = x1, x2\n\n self.draw_rubberband(event, x, y, lastx, lasty)\n def release_zoom(self, event):\n \"\"\"the release mouse button callback in zoom to rect mode\"\"\"\n for zoom_id in self._ids_zoom:\n self.canvas.mpl_disconnect(zoom_id)\n self._ids_zoom = []\n\n if not self._xypress:\n return\n\n last_a = []\n\n for cur_xypress in self._xypress:\n x, y = event.x, event.y\n lastx, lasty, a, ind, lim, trans = cur_xypress\n # ignore singular clicks - 5 pixels is a threshold\n if abs(x - lastx) < 5 or abs(y - lasty) < 5:\n self._xypress = None\n self.release(event)\n self.draw()\n return\n\n x0, y0, x1, y1 = lim.extents\n\n # zoom to rect\n inverse = a.transData.inverted()\n lastx, lasty = inverse.transform_point((lastx, lasty))\n x, y = inverse.transform_point((x, y))\n Xmin, Xmax = a.get_xlim()\n Ymin, Ymax = a.get_ylim()\n\n # detect twinx,y axes and avoid double zooming\n twinx, twiny = False, False\n if last_a:\n for la in last_a:\n if a.get_shared_x_axes().joined(a, la):\n twinx = True\n if a.get_shared_y_axes().joined(a, la):\n twiny = True\n last_a.append(a)\n\n if twinx:\n x0, x1 = Xmin, Xmax\n else:\n if Xmin < Xmax:\n if x < lastx:\n x0, x1 = x, lastx\n else:\n x0, x1 = lastx, x\n if x0 < Xmin:\n x0 = Xmin\n if x1 > Xmax:\n x1 = Xmax\n else:\n if x > lastx:\n x0, x1 = x, lastx\n else:\n x0, x1 = lastx, x\n if x0 > Xmin:\n x0 = Xmin\n if x1 < Xmax:\n x1 = Xmax\n\n if twiny:\n y0, y1 = Ymin, Ymax\n else:\n if Ymin < Ymax:\n if y < lasty:\n y0, y1 = y, lasty\n else:\n y0, y1 = lasty, y\n if y0 < Ymin:\n y0 = Ymin\n if y1 > Ymax:\n y1 = Ymax\n else:\n if y > lasty:\n y0, y1 = y, lasty\n else:\n y0, y1 = lasty, y\n if y0 > Ymin:\n y0 = Ymin\n if y1 < Ymax:\n y1 = Ymax\n\n if self._button_pressed == 1:\n if self._zoom_mode == \"x\":\n a.set_xlim((x0, x1))\n elif 
self._zoom_mode == \"y\":\n a.set_ylim((y0, y1))\n else:\n a.set_xlim((x0, x1))\n a.set_ylim((y0, y1))\n elif self._button_pressed == 3:\n if a.get_xscale() == 'log':\n alpha = np.log(Xmax / Xmin) / np.log(x1 / x0)\n rx1 = pow(Xmin / x0, alpha) * Xmin\n rx2 = pow(Xmax / x0, alpha) * Xmin\n else:\n alpha = (Xmax - Xmin) / (x1 - x0)\n rx1 = alpha * (Xmin - x0) + Xmin\n rx2 = alpha * (Xmax - x0) + Xmin\n if a.get_yscale() == 'log':\n alpha = np.log(Ymax / Ymin) / np.log(y1 / y0)\n ry1 = pow(Ymin / y0, alpha) * Ymin\n ry2 = pow(Ymax / y0, alpha) * Ymin\n else:\n alpha = (Ymax - Ymin) / (y1 - y0)\n ry1 = alpha * (Ymin - y0) + Ymin\n ry2 = alpha * (Ymax - y0) + Ymin\n\n if self._zoom_mode == \"x\":\n a.set_xlim((rx1, rx2))\n elif self._zoom_mode == \"y\":\n a.set_ylim((ry1, ry2))\n else:\n a.set_xlim((rx1, rx2))\n a.set_ylim((ry1, ry2))\n\n self.draw()\n self._xypress = None\n self._button_pressed = None\n\n self._zoom_mode = None\n\n self.push_current()\n self.release(event)\n def _switch_on_zoom_mode(self, event):\n self._zoom_mode = event.key\n self.mouse_move(event)\n def _switch_off_zoom_mode(self, event):\n self._zoom_mode = None\n self.mouse_move(event)\n\n\n # GUI\n def _init_toolbar(self):\n \"\"\"\n This is where you actually build the GUI widgets (called by\n __init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,\n ``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard\n across backends (there are ppm versions in CVS also).\n\n You just need to set the callbacks\n\n home : self.home\n back : self.back\n forward : self.forward\n hand : self.pan\n zoom_to_rect : self.zoom\n filesave : self.save_figure\n\n You only need to define the last one - the others are in the base\n class implementation.\n\n \"\"\"\n raise NotImplementedError\n def set_message(self, s): pass\n def draw_rubberband(self, event, x0, y0, x1, y1): pass\n def save_figure(self, *args): raise NotImplementedError\n def set_cursor(self, cursor): pass\n def set_history_buttons(self): pass\n # Draw the canvas when idle\n def dynamic_update(self): pass\n\n\n # Undo/Redo\n def back(self, *args):\n \"\"\"move back up the view lim stack\"\"\"\n self._views2.back()\n self._views.back()\n self._positions.back()\n self.set_history_buttons()\n self._update_view()\n def forward(self, *args):\n \"\"\"Move forward in the view lim stack\"\"\"\n self._views2.forward()\n self._views.forward()\n self._positions.forward()\n self.set_history_buttons()\n self._update_view()\n def home(self, *args):\n \"\"\"Restore the original view\"\"\"\n self._views2.home()\n self._views.home()\n self._positions.home()\n self.set_history_buttons()\n self._update_view()\n def push_current(self):\n \"\"\"push the current view limits and position onto the stack\"\"\"\n vws = []\n lims = []\n pos = []\n for a in self.canvas.figure.get_axes():\n a2 = self.Winst.get_adapter(a)\n vws.append(a2.view_to_tuple())\n xmin, xmax = a.get_xlim()\n ymin, ymax = a.get_ylim()\n lims.append((xmin, xmax, ymin, ymax))\n # Store both the original and modified positions\n pos.append((\n a.get_position(True).frozen(),\n a.get_position().frozen()))\n self._views2.push(vws)\n self._views.push(lims)\n self._positions.push(pos)\n self.set_history_buttons()\n # Reset the stack\n def update(self):\n self._views2.clear()\n self._views.clear()\n self._positions.clear()\n self.set_history_buttons()\n # Set the view from current stack position\n def _update_view(self):\n wvs = self._views2()\n if wvs is None:\n return\n lims = self._views()\n if lims is None:\n 
return\n pos = self._positions()\n if pos is None:\n return\n for i, a in enumerate(self.canvas.figure.get_axes()):\n a2 = self.Winst.get_adapter(a)\n a2.view_from_tuple(wvs[i])\n xmin, xmax, ymin, ymax = lims[i]\n a.set_xlim((xmin, xmax))\n a.set_ylim((ymin, ymax))\n # Restore both the original and modified positions\n a.set_position(pos[i][0], 'original')\n a.set_position(pos[i][1], 'active')\n\n self.canvas.draw_idle()\n\n\n def debug(self, *args):\n self.Winst.print_debug()\n\n #TODO: in this case can just replace calls here with self.canvas.draw_idle()\n def draw(self):\n \"\"\"Redraw the canvases, update the locators\"\"\"\n for a in self.canvas.figure.get_axes():\n xaxis = getattr(a, 'xaxis', None)\n yaxis = getattr(a, 'yaxis', None)\n locators = []\n if xaxis is not None:\n locators.append(xaxis.get_major_locator())\n locators.append(xaxis.get_minor_locator())\n if yaxis is not None:\n locators.append(yaxis.get_major_locator())\n locators.append(yaxis.get_minor_locator())\n\n for loc in locators:\n loc.refresh()\n self.canvas.draw_idle()\n\n\n\n\nclass NavigationToolbar2TkAgg(NavigationToolbar2, Tk.Frame):\n \"\"\"\n Public attributes\n\n canvas - the FigureCanvas (gtk.DrawingArea)\n win - the gtk.Window\n \"\"\"\n def __init__(self, canvas, window, Winst):\n self.canvas = canvas\n self.window = window\n self._idle = True\n #Tk.Frame.__init__(self, master=self.canvas._tkcanvas)\n NavigationToolbar2.__init__(self, canvas, Winst)\n\n def destroy(self, *args):\n del self.message\n Tk.Frame.destroy(self, *args)\n\n def set_message(self, s):\n self.message.set(s)\n\n def draw_rubberband(self, event, x0, y0, x1, y1):\n height = self.canvas.figure.bbox.height\n y0 = height-y0\n y1 = height-y1\n try: self.lastrect\n except AttributeError: pass\n else: self.canvas._tkcanvas.delete(self.lastrect)\n self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)\n\n #self.canvas.draw()\n\n def release(self, event):\n try: self.lastrect\n except AttributeError: pass\n else:\n self.canvas._tkcanvas.delete(self.lastrect)\n del self.lastrect\n\n def set_cursor(self, cursor):\n self.window.configure(cursor=cursord[cursor])\n #self.window.configure(cursor=\"tcross\")\n\n def _Button(self, text, file, command, extension='.ppm'):\n img_file = os.path.join(rcParams['datapath'], 'images', file + extension)\n im = Tk.PhotoImage(master=self, file=img_file)\n b = Tk.Button(\n master=self, text=text, padx=2, pady=2, image=im, command=command)\n b._ntimage = im\n b.pack(side=Tk.LEFT)\n return b\n\n def _init_toolbar(self):\n xmin, xmax = self.canvas.figure.bbox.intervalx\n height, width = 50, xmax-xmin\n Tk.Frame.__init__(self, master=self.window,\n width=int(width), height=int(height),\n borderwidth=2)\n\n self.update() # Make axes menu\n\n for text, tooltip_text, image_file, callback in self.toolitems:\n if text is None:\n # spacer, unhandled in Tk\n pass\n else:\n button = self._Button(text=text, file=image_file,\n command=getattr(self, callback))\n if tooltip_text is not None:\n ToolTip.createToolTip(button, tooltip_text)\n\n self.message = Tk.StringVar(master=self)\n self._message_label = Tk.Label(master=self, textvariable=self.message)\n self._message_label.pack(side=Tk.RIGHT)\n self.pack(side=Tk.BOTTOM, fill=Tk.X)\n\n\n def configure_subplots(self):\n toolfig = Figure(figsize=(6,3))\n window = Tk.Tk()\n canvas = FigureCanvasTkAgg(toolfig, master=window)\n toolfig.subplots_adjust(top=0.9)\n tool = SubplotTool(self.canvas.figure, toolfig)\n canvas.show()\n 
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)\n\n def save_figure(self, *args):\n import tkFileDialog\n import tkMessageBox\n filetypes = self.canvas.get_supported_filetypes().copy()\n default_filetype = self.canvas.get_default_filetype()\n\n # Tk doesn't provide a way to choose a default filetype,\n # so we just have to put it first\n default_filetype_name = filetypes[default_filetype]\n del filetypes[default_filetype]\n\n sorted_filetypes = list(six.iteritems(filetypes))\n sorted_filetypes.sort()\n sorted_filetypes.insert(0, (default_filetype, default_filetype_name))\n\n tk_filetypes = [\n (name, '*.%s' % ext) for (ext, name) in sorted_filetypes]\n\n # adding a default extension seems to break the\n # asksaveasfilename dialog when you choose various save types\n # from the dropdown. Passing in the empty string seems to\n # work - JDH!\n #defaultextension = self.canvas.get_default_filetype()\n defaultextension = ''\n initialdir = rcParams.get('savefig.directory', '')\n initialdir = os.path.expanduser(initialdir)\n initialfile = self.canvas.get_default_filename()\n fname = tkFileDialog.asksaveasfilename(\n master=self.window,\n title='Save the figure',\n filetypes=tk_filetypes,\n defaultextension=defaultextension,\n initialdir=initialdir,\n initialfile=initialfile,\n )\n\n if fname == \"\" or fname == ():\n return\n else:\n if initialdir == '':\n # explicitly missing key or empty str signals to use cwd\n rcParams['savefig.directory'] = initialdir\n else:\n # save dir for next time\n rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))\n try:\n # This method will handle the delegation to the correct type\n self.canvas.print_figure(fname)\n except Exception as e:\n tkMessageBox.showerror(\"Error saving file\", str(e))\n\n def set_active(self, ind):\n self._ind = ind\n self._active = [ self._axes[i] for i in self._ind ]\n\n def update(self):\n _focus = windowing.FocusManager()\n self._axes = self.canvas.figure.axes\n naxes = len(self._axes)\n #if not hasattr(self, \"omenu\"):\n # self.set_active(range(naxes))\n # self.omenu = AxisMenu(master=self, naxes=naxes)\n #else:\n # self.omenu.adjust(naxes)\n NavigationToolbar2.update(self)\n\n def dynamic_update(self):\n 'update drawing area only if idle'\n # legacy method; new method is canvas.draw_idle\n self.canvas.draw_idle()\n\n def axis_menu(self, event):\n \"\"\"Show a popup menu to choose the axis\"\"\"\n pmenu = Tk.Menu(self)\n\n x = event.guiEvent.x_root\n y = event.guiEvent.y_root\n\n a = self._last_axis\n a2 = self.Winst.get_adapter(a)\n\n for i in range(3):\n axes_string = WM.MINOR_AXES[i][0]+WM.MINOR_AXES[i][1]\n # Calling the redraw is necessary *immediately*,\n # a bug with with internal state variables or event handling..?\n def f (i=i):\n a2.change_projection(i)\n self.dynamic_update()\n pmenu.add_command(label=axes_string, compound=Tk.LEFT, command=f)\n\n pmenu.tk_popup(int(x),int(y),0)\n\n def sources_menu(self, event):\n \"\"\"Show a popup menu to choose the WM sources\"\"\"\n select_sources_menu = Tk.Menu(self)\n\n x = event.guiEvent.x_root\n y = event.guiEvent.y_root\n\n source_menu = []\n for i in range(len(self.Winst.sources)):\n source_menu.append(Tk.Menu(self, tearoff=0))\n select_sources_menu.add_cascade(label=str(i), menu=source_menu[-1])\n for j in range(len(self.Winst.all_sources)):\n def f (i=i,j=j):\n self.Winst.change_source(i,j)\n self.canvas.draw_idle()\n source_menu[-1].add_radiobutton(label=str(j)+\" (\"+str(self.Winst.all_sources[j])+\")\", command=f )\n\n 
select_sources_menu.tk_popup(int(x),int(y),0)\n\n def view_menu(self, event):\n \"\"\"Show a popup menu to choose the WM view\"\"\"\n smenu = Tk.Menu(self)\n\n x = event.guiEvent.x_root\n y = event.guiEvent.y_root\n\n a = self._last_axis\n a2 = self.Winst.get_adapter(a)\n\n wm_views = WM.TEMPLATES.keys()\n for i,wm_view in zip(range(len(wm_views)),wm_views):\n def f (wm_view=wm_view,a=a):\n self.Winst.change_template(wm_view, a)\n self.canvas.draw()\n smenu.add_command(label=wm_view, compound=Tk.LEFT, command=f)\n\n smenu.tk_popup(int(x),int(y),0)\n\n def source_menu(self, event):\n \"\"\"Show a popup menu to choose the view's image source\"\"\"\n smenu = Tk.Menu(self)\n\n x = event.guiEvent.x_root\n y = event.guiEvent.y_root\n\n a = self._last_axis\n a2 = self.Winst.get_adapter(a)\n\n sources = self.Winst.get_sources()\n for i,source in zip(range(len(sources)),sources):\n def f (i=i):\n a2.change_source(i)\n self.canvas.draw()\n smenu.add_command(label=str(i)+\" (\"+str(source)+\")\", compound=Tk.LEFT, command=f)\n\n smenu.tk_popup(int(x),int(y),0)\n\n\nFigureCanvas = FigureCanvasTkAgg\nFigureManager = FigureManagerTkAgg", "sub_path": "custom_toolbar.py", "file_name": "custom_toolbar.py", "file_ext": "py", "file_size_in_byte": 33336, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "matplotlib.rcParams", "line_number": 27, "usage_type": "attribute"}, {"api_name": "matplotlib.cbook.Stack", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.cbook", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.cbook.Stack", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.cbook", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.cbook.Stack", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.cbook", "line_number": 121, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 525, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 533, "usage_type": "call"}, {"api_name": "Tkinter.Frame", "line_number": 691, "usage_type": "attribute"}, {"api_name": "Tkinter.Frame.destroy", "line_number": 707, "usage_type": "call"}, {"api_name": "Tkinter.Frame", "line_number": 707, "usage_type": "attribute"}, {"api_name": "os.path.path.join", "line_number": 735, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 735, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 735, "usage_type": "name"}, {"api_name": "Tkinter.PhotoImage", "line_number": 736, "usage_type": "call"}, {"api_name": "Tkinter.Button", "line_number": 737, "usage_type": "call"}, {"api_name": "Tkinter.LEFT", "line_number": 740, "usage_type": "attribute"}, {"api_name": "Tkinter.Frame.__init__", "line_number": 746, "usage_type": "call"}, {"api_name": "Tkinter.Frame", "line_number": 746, "usage_type": "attribute"}, {"api_name": "matplotlib.backends.backend_tkagg.ToolTip.createToolTip", "line_number": 760, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_tkagg.ToolTip", "line_number": 760, "usage_type": "name"}, {"api_name": "Tkinter.StringVar", "line_number": 762, "usage_type": "call"}, {"api_name": "Tkinter.Label", "line_number": 763, "usage_type": "call"}, {"api_name": "Tkinter.RIGHT", "line_number": 764, "usage_type": "attribute"}, {"api_name": "Tkinter.BOTTOM", "line_number": 765, "usage_type": "attribute"}, {"api_name": "Tkinter.X", "line_number": 765, "usage_type": "attribute"}, {"api_name": "matplotlib.figure.Figure", 
"line_number": 769, "usage_type": "call"}, {"api_name": "Tkinter.Tk", "line_number": 770, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 771, "usage_type": "call"}, {"api_name": "matplotlib.widgets.SubplotTool", "line_number": 773, "usage_type": "call"}, {"api_name": "Tkinter.TOP", "line_number": 775, "usage_type": "attribute"}, {"api_name": "Tkinter.BOTH", "line_number": 775, "usage_type": "attribute"}, {"api_name": "six.iteritems", "line_number": 788, "usage_type": "call"}, {"api_name": "os.path.path.expanduser", "line_number": 802, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 802, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 802, "usage_type": "name"}, {"api_name": "tkFileDialog.asksaveasfilename", "line_number": 804, "usage_type": "call"}, {"api_name": "os.path.path.dirname", "line_number": 821, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 821, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 821, "usage_type": "name"}, {"api_name": "six.text_type", "line_number": 821, "usage_type": "call"}, {"api_name": "tkMessageBox.showerror", "line_number": 826, "usage_type": "call"}, {"api_name": "matplotlib.backends.windowing.FocusManager", "line_number": 833, "usage_type": "call"}, {"api_name": "matplotlib.backends.windowing", "line_number": 833, "usage_type": "name"}, {"api_name": "Tkinter.Menu", "line_number": 850, "usage_type": "call"}, {"api_name": "WM.MINOR_AXES", "line_number": 859, "usage_type": "attribute"}, {"api_name": "Tkinter.LEFT", "line_number": 865, "usage_type": "attribute"}, {"api_name": "Tkinter.Menu", "line_number": 871, "usage_type": "call"}, {"api_name": "Tkinter.Menu", "line_number": 878, "usage_type": "call"}, {"api_name": "Tkinter.Menu", "line_number": 890, "usage_type": "call"}, {"api_name": "WM.TEMPLATES.keys", "line_number": 898, "usage_type": "call"}, {"api_name": "WM.TEMPLATES", "line_number": 898, "usage_type": "attribute"}, {"api_name": "Tkinter.LEFT", "line_number": 903, "usage_type": "attribute"}, {"api_name": "Tkinter.Menu", "line_number": 909, "usage_type": "call"}, {"api_name": "Tkinter.LEFT", "line_number": 922, "usage_type": "attribute"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 927, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureManagerTkAgg", "line_number": 928, "usage_type": "name"}]} +{"seq_id": "501342698", "text": "\"\"\"Mysql wrappers to execute mysql statements.\nThe default behaviour depends on the configuration module which contains the\ndatabase settings to use.\n\"\"\"\n\nfrom contextlib import contextmanager\nfrom functools import wraps\nimport logging\n\nimport MySQLdb\nfrom helot_common import configuration\n\n\n@contextmanager\ndef db_connection(host=None, user=None, passwd=None, db=None,\n connect_to_db=True):\n \"\"\"Auto closing db connection context manager.\n Yields an active db connection which will be closed automatically.\n :parameter connect_to_db: (boolean) True to connect to the database.\n Otherwise it will no connect to a specific database, something that can be\n useful in the case of a database creation.\n \"\"\"\n params = {\n 'host': host or configuration.mysql.host,\n 'user': user or configuration.mysql.user,\n 'passwd': passwd or configuration.mysql.passwd\n }\n\n if connect_to_db:\n params['db'] = db or configuration.mysql.db\n\n db_conn = MySQLdb.connect(**params)\n yield db_conn\n 
db_conn.close()\n\n\n@contextmanager\ndef db_cursor(db_conn):\n \"\"\"Auto closing db cursor context manager.\n Yields an live db cursor which will be closed automatically.\n :parameter db_conn: The db connection to use for the creation of the cursor.\n \"\"\"\n cur = db_conn.cursor()\n yield cur\n cur.close()\n\n\n@contextmanager\ndef make_query_executor(*args, **kwargs):\n \"\"\"Context manager providing a function to execute sql queries.\n Yields a query executor function.\n \"\"\"\n with db_connection(*args, **kwargs) as db_conn, db_cursor(db_conn) as cur:\n def execute_query(sql):\n cur.execute(sql)\n col_names = [col_data[0] for col_data in cur.description]\n for row in cur.fetchall():\n row_data = _RawData()\n for i, cell in enumerate(row):\n setattr(row_data, col_names[i], cell)\n yield row_data\n\n yield execute_query\n\n\n@contextmanager\ndef make_non_query_executor(*args, **kwargs):\n \"\"\"Context manager providing a function to execute non query statements.\n Yields a non query executor function.\n :parameter use_db: (boolean) True to connect to the database. Otherwise it\n will no connect to a specific database, something that can be useful in the\n case of a database creation.\n \"\"\"\n with db_connection(*args, **kwargs) as db_conn, db_cursor(db_conn) as cur:\n def execute_non_query(sql):\n try:\n cur.execute(sql)\n db_conn.commit()\n except Exception as ex:\n logging.exception(ex)\n db_conn.rollback()\n\n yield execute_non_query\n\n\nclass _RawData(object):\n \"\"\"Used to create the object to encapsulate the data of retrieved row.\"\"\"\n\n\ndef query_executor_user(function_to_decorate):\n \"\"\"Decorates a function adding an execute query function argument.\n When decorates a function its signature must contain an argument called\n execute_query which will receive a sql executor to use for queries.\n :parameter function_to_decorate: The function to decorate.\n :returns : The decorated function containing the execute_query argument.\n \"\"\"\n\n @wraps(function_to_decorate)\n def decorator(*args, **kargs):\n with make_query_executor() as execute_query:\n kargs['execute_query'] = execute_query\n return function_to_decorate(*args, **kargs)\n\n return decorator\n\n\ndef execute_query(sql, **kwargs):\n \"\"\"Simplest way to execute a query.\n Opens and closes a new connection and cursor every time called.\n :param sql: (str) The sql statement to execute.\n :param kwargs: The connection settings.\n Yields a sequence of rows coming from the execution of the query.\n \"\"\"\n with make_query_executor(**kwargs) as executor:\n for row in executor(sql):\n yield row\n", "sub_path": "helot_mysql/wrappers.py", "file_name": "wrappers.py", "file_ext": "py", "file_size_in_byte": 3917, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "helot_common.configuration.mysql", "line_number": 24, "usage_type": "attribute"}, {"api_name": "helot_common.configuration", "line_number": 24, "usage_type": "name"}, {"api_name": "helot_common.configuration.mysql", "line_number": 25, "usage_type": "attribute"}, {"api_name": "helot_common.configuration", "line_number": 25, "usage_type": "name"}, {"api_name": "helot_common.configuration.mysql", "line_number": 26, "usage_type": "attribute"}, {"api_name": "helot_common.configuration", "line_number": 26, "usage_type": "name"}, {"api_name": "helot_common.configuration.mysql", "line_number": 30, "usage_type": "attribute"}, {"api_name": "helot_common.configuration", "line_number": 30, 
"usage_type": "name"}, {"api_name": "MySQLdb.connect", "line_number": 32, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 14, "usage_type": "name"}, {"api_name": "contextlib.contextmanager", "line_number": 37, "usage_type": "name"}, {"api_name": "contextlib.contextmanager", "line_number": 48, "usage_type": "name"}, {"api_name": "logging.exception", "line_number": 80, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 66, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "424228232", "text": "'''\nThis script shows how to send emails via Python when you detect changes via SNMP\n'''\n\ndef send_mail(recipient, subject, message, sender):\n '''\n Simple function to help simplify sending SMTP email\n\n Assumes a mailserver is available on localhost\n '''\n\n import smtplib\n from email.mime.text import MIMEText\n\n message = MIMEText(message)\n message['Subject'] = subject\n message['From'] = sender\n message['To'] = recipient\n\n # Create SMTP connection object to localhost\n smtp_conn = smtplib.SMTP('localhost')\n\n # Send the email\n smtp_conn.sendmail(sender, recipient, message.as_string())\n\n # Close SMTP connection\n\n smtp_conn.quit()\n\n return True\n\n\nif __name__ == '__main__':\n\t\n\trecipient = 'nilekani.raunaq@gmail.com'\n\tsubject = 'Test message'\n\tmessage = '''\n This is a fictional message\n\nRegards,\nRon\n'''\t\n", "sub_path": "email_helper.py", "file_name": "email_helper.py", "file_ext": "py", "file_size_in_byte": 859, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "email.mime.text.MIMEText", "line_number": 15, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "465844263", "text": "#!/usr/bin/env python3\n\nimport utils, open_color, arcade\n\nutils.check_version((3,7))\n#set the screen width and height\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nSCREEN_TITLE = \"Smiley Face Example\"\n#set a class that can that the smile face to track the mouse movement\nclass Faces(arcade.Window):\n \"\"\" Our custom Window Class\"\"\"\n\n def __init__(self):\n \"\"\" Initializer \"\"\"\n # Call the parent class initializer\n super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n\n # Show the mouse cursor\n #if true, you can see the mouse cursor while it is on the window\n self.set_mouse_visible(True)\n #set the face's location\n self.x = SCREEN_WIDTH / 2\n self.y = SCREEN_HEIGHT / 2\n #set the background color to white\n arcade.set_background_color(open_color.white)\n\n def on_draw(self):\n \"\"\" Draw the face \"\"\"\n arcade.start_render()\n\n face_x,face_y = (self.x,self.y)\n #set each dot's position using self.x and self.y\n smile_x,smile_y = (face_x + 0,face_y - 10)\n eye1_x,eye1_y = (face_x - 30,face_y + 20) \n eye2_x,eye2_y = (face_x + 30,face_y + 20)\n catch1_x,catch1_y = (face_x - 25,face_y + 25) \n catch2_x,catch2_y = (face_x + 35,face_y + 25) \n #draw the face using the variable that we set for prior\n arcade.draw_circle_filled(face_x, face_y, 100, open_color.yellow_3)\n arcade.draw_circle_outline(face_x, face_y, 100, open_color.black,4)\n arcade.draw_ellipse_filled(eye1_x,eye1_y,15,25,open_color.black)\n arcade.draw_ellipse_filled(eye2_x,eye2_y,15,25,open_color.black)\n arcade.draw_circle_filled(catch1_x,catch1_y,3,open_color.gray_2)\n arcade.draw_circle_filled(catch2_x,catch2_y,3,open_color.gray_2)\n 
arcade.draw_arc_outline(smile_x,smile_y,60,50,open_color.black,190,350,4)\n\n\n def on_mouse_motion(self, x, y, dx, dy):\n \"\"\" Handle Mouse Motion \"\"\"\n #when we turn the mouse cursor on, we can see the face moving around the window.\n self.x = x\n self.y = y\n\n\n\nwindow = Faces()\narcade.run()", "sub_path": "main5.py", "file_name": "main5.py", "file_ext": "py", "file_size_in_byte": 2110, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "utils.check_version", "line_number": 5, "usage_type": "call"}, {"api_name": "arcade.Window", "line_number": 11, "usage_type": "attribute"}, {"api_name": "arcade.set_background_color", "line_number": 26, "usage_type": "call"}, {"api_name": "open_color.white", "line_number": 26, "usage_type": "attribute"}, {"api_name": "arcade.start_render", "line_number": 30, "usage_type": "call"}, {"api_name": "arcade.draw_circle_filled", "line_number": 40, "usage_type": "call"}, {"api_name": "open_color.yellow_3", "line_number": 40, "usage_type": "attribute"}, {"api_name": "arcade.draw_circle_outline", "line_number": 41, "usage_type": "call"}, {"api_name": "open_color.black", "line_number": 41, "usage_type": "attribute"}, {"api_name": "arcade.draw_ellipse_filled", "line_number": 42, "usage_type": "call"}, {"api_name": "open_color.black", "line_number": 42, "usage_type": "attribute"}, {"api_name": "arcade.draw_ellipse_filled", "line_number": 43, "usage_type": "call"}, {"api_name": "open_color.black", "line_number": 43, "usage_type": "attribute"}, {"api_name": "arcade.draw_circle_filled", "line_number": 44, "usage_type": "call"}, {"api_name": "open_color.gray_2", "line_number": 44, "usage_type": "attribute"}, {"api_name": "arcade.draw_circle_filled", "line_number": 45, "usage_type": "call"}, {"api_name": "open_color.gray_2", "line_number": 45, "usage_type": "attribute"}, {"api_name": "arcade.draw_arc_outline", "line_number": 46, "usage_type": "call"}, {"api_name": "open_color.black", "line_number": 46, "usage_type": "attribute"}, {"api_name": "arcade.run", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "377196365", "text": "#!/usr/bin/python3\n# coding: utf-8\n\nfrom unittest import TestCase\nimport requests\n\nbase_url = 'http://localhost:5000/user/'\n\n\nclass TestAppUser(TestCase):\n # def test_info(self):\n # url = base_url + \"info/\"\n # resp = requests.get(url)\n # print(resp)\n\n def test_read(self):\n print(\"*\" * 50)\n url = base_url + 'user_read/'\n data = {\n \"user_phone\": 11111111111,\n 'info_id': 2\n }\n resp = requests.post(url, json=data)\n print(resp)\n print(\"-\" * 50)\n", "sub_path": "test/test_login.py", "file_name": "test_login.py", "file_ext": "py", "file_size_in_byte": 542, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "640152876", "text": "import PySimpleGUI as sg\nimport datetime\n\nsg.theme('Dark Blue 3') # please make your windows colorful\n\nlayout = [[sg.Text('Your typed chars appear here:'), sg.Text(size=(12, 1), key='-OUTPUT-')],\n [sg.Input(key='-IN-')],\n [sg.Button('Show'), sg.Button('Exit'), sg.Text(size=(20, 1), key='-_time_-')]]\n\nwindow = sg.Window('Window Title', layout)\n\n\ndef getTime():\n\treturn datetime.datetime.now().strftime('%H:%M:%S')\n\nwhile True: # Event Loop\n\t# event, values = window.read()\n\tevent, 
values = window.read(timeout=0)\n\t# print(event, values)\n\t# window['-_time_-'].update(str(datetime.datetime.now()))\n\tif event == sg.WIN_CLOSED or event == 'Exit':\n\t\tbreak\n\tif event == 'Show':\n\t\t# change the \"output\" element to be the value of \"input\" element\n\t\twindow['-OUTPUT-'].update(values['-IN-'])\n\n\n\twindow[\"-_time_-\"].update(str(datetime.datetime.now()))\n\t# window.FindElement('-_time_-').update(str(datetime.datetime.now()))\n\n\t# window.FindElement('-_time_-').Update(getTime())\n\nwindow.close()\n", "sub_path": "GUI/GUI_experimentation/eg_demo_code_testing.py", "file_name": "eg_demo_code_testing.py", "file_ext": "py", "file_size_in_byte": 1003, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "PySimpleGUI.theme", "line_number": 4, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 6, "usage_type": "call"}, {"api_name": "PySimpleGUI.Input", "line_number": 7, "usage_type": "call"}, {"api_name": "PySimpleGUI.Button", "line_number": 8, "usage_type": "call"}, {"api_name": "PySimpleGUI.Text", "line_number": 8, "usage_type": "call"}, {"api_name": "PySimpleGUI.Window", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "attribute"}, {"api_name": "PySimpleGUI.WIN_CLOSED", "line_number": 21, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "attribute"}]} +{"seq_id": "42373021", "text": "from typing import DefaultDict\nimport click\nimport sys\nimport asyncio\nfrom functools import wraps\n\nfrom rich import box\nfrom rich.console import Console\nfrom rich.progress import track\nfrom rich.table import Table\n\nfrom .whatsmydns import Client, QueryTimeoutException\nfrom . 
import __version__\n\nCLI_HELP = \"\"\"\nValidate a domain against a list of global DNS servers.\n\nchkdns is powered by whatsmydns.net!\n\"\"\"\n\nTYPE_CHOICES = [\"A\", \"AAAA\", \"CNAME\", \"MX\", \"NS\", \"PTR\", \"SOA\", \"SRV\", \"TXT\", \"CAA\"]\n\n\ndef coro(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n return asyncio.run(f(*args, **kwargs))\n\n return wrapper\n\n\n@click.command(help=CLI_HELP, no_args_is_help=True)\n@click.option(\n \"--type\",\n default=\"A\",\n show_default=True,\n type=click.Choice(TYPE_CHOICES),\n help=\"The type of record to query for.\",\n)\n@click.option(\"--host\", required=True, help=\"A valid hostname.\")\n@click.version_option(__version__)\n@coro\nasync def cli(type, host):\n\n console = Console()\n\n dns = Client(use_mock=False)\n servers = dns.get_servers()\n\n table = Table(box=box.SIMPLE, leading=1, expand=True)\n table.add_column(\"\", no_wrap=True)\n table.add_column(\"location\", no_wrap=True)\n table.add_column(\"provider\", no_wrap=True)\n table.add_column(\"result\", justify=\"center\", no_wrap=True)\n table.add_column(\"response\", no_wrap=True)\n\n stats = DefaultDict(int)\n servers_count = len(servers)\n\n for server in track(\n sorted(servers, key=lambda server: server[\"provider\"]),\n description=f\"Checking DNS propagation for {host}\",\n transient=True,\n ):\n\n try:\n response = await dns.query(server[\"id\"], type, host)\n data = response[\"data\"][0]\n\n if data[\"rcode\"] == \"NOERROR\":\n stats[\"success\"] += 1\n result = \"✅\"\n elif data[\"rcode\"] == \"SERVFAIL\":\n result = \"❌\"\n stats[\"fail\"] += 1\n else:\n result = \"❓\"\n stats[\"unknown\"] += 1\n\n answer = \", \".join(answer.split()[-1] for answer in data[\"answers\"])\n except QueryTimeoutException:\n\n result = \"⏳\"\n answer = \"DNS query timed out.\"\n stats[\"timeout\"] += 1\n\n except Exception:\n console.print_exception()\n sys.exit(1)\n\n table.add_row(\n server[\"flag\"], server[\"location\"], server[\"provider\"], result, answer\n )\n\n score = stats[\"success\"] / servers_count * 100\n table.caption = f\"{score:.2f}% of servers responded successfully for {host}.\"\n\n console.print(table)\n", "sub_path": "src/chkdns/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2624, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "asyncio.run", "line_number": 27, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 25, "usage_type": "call"}, {"api_name": "rich.console.Console", "line_number": 45, "usage_type": "call"}, {"api_name": "whatsmydns.Client", "line_number": 47, "usage_type": "call"}, {"api_name": "rich.table.Table", "line_number": 50, "usage_type": "call"}, {"api_name": "rich.box.SIMPLE", "line_number": 50, "usage_type": "attribute"}, {"api_name": "rich.box", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.DefaultDict", "line_number": 57, "usage_type": "call"}, {"api_name": "rich.progress.track", "line_number": 60, "usage_type": "call"}, {"api_name": "whatsmydns.QueryTimeoutException", "line_number": 81, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 89, "usage_type": "call"}, {"api_name": "click.command", "line_number": 32, "usage_type": "call"}, {"api_name": "click.option", "line_number": 33, "usage_type": "call"}, {"api_name": "click.Choice", "line_number": 37, "usage_type": "call"}, {"api_name": "click.option", "line_number": 40, "usage_type": "call"}, {"api_name": "click.version_option", "line_number": 41, 
"usage_type": "call"}]} +{"seq_id": "152035899", "text": "#script to complete full processing of DTS files using matching sections.\n#This uses xarray interpolation method to produce a consitently sized output.\n#Robert Law, Scott Polar Research Institute, University of Cambridge, 2020. rl491@cam.ac.uk\n\nimport os\nimport sys\nimport glob\nimport pickle\nimport datetime\nimport numpy as np \nimport pandas as pd\nimport xarray as xr\nimport matplotlib.pyplot as plt \nimport matplotlib.dates as mdates\nfrom scipy.interpolate import interp1d\nfrom dtscalibration import read_silixa_files, DataStore\n\n#change working directory to the location of the python file\nos.chdir(os.path.dirname(sys.argv[0]))\n\n#inputs\nfile_dir1 = \"channel_1\" #input file path 1 (channel 1)\nfile_dir2 = \"channel_3\" #input file path 2 (channel 3)\nexport_dir = \"processed_data\" #export processed data\nexport_file = 'ch1_end_processed.nc' #processed stokes and cummulative attenuation data \navg_time = '6H'\navg_time_float = 6\navg_isel = int(96/6) #this sets the number for averaging \nz_num = 11078 #number of z valus to interpolate to. It's convinient down the line if this is an even number\nusr_gamma = (476.53, 0) \n\n#CHANNEL 1 values below\nint_ref_loc = slice(-22., -5.) #(m) internal reference spool location\nbh_down_ref = 211.94 #(m) downwards start for borehole, determined from apex of temperature drop just after entrance\nbh_up_ref = 2327.90 #(m) upwards end of borehole, determined from apex of temperature drop just before exit\nbh_down_splice = 1267.88 #(m) downwards end of borehole, determined as point where temperature begins to rise (even slightly) before splice\nbh_splice_mid = 1269.41 #(m) determined as the location of peak temperature disturbance from the splice\nbh_up_splice = 1270.92 #(m) upwards start of borehole, determined as point where temperature begins to rise (even slightly) after splice\nbh_depth = 1042.95 #(m) from Sam Doyle BH19c depth email thread\nbh_depth_dts = 1062. #(m) BH19c depth from DTS with refractive index error\nstart_cut = -23. #(m) cut outside the DTS so connector losses do not need to be accounted for\nend_cut = bh_down_splice - 0.5 #bh_up_ref + 100 #(m) cut at the surface\nstart_bh = 204.5 #(m) depth at which borehole begins\ntemp_zone_loc = (1000.*(bh_depth_dts/bh_depth) + start_bh, 1010.*(bh_depth_dts/bh_depth) + start_bh) #location of temperate zone\nmatching_section = (bh_up_splice + bh_down_splice - temp_zone_loc[0], bh_up_splice + bh_down_splice - temp_zone_loc[1]) #set matching section to = reference section (if this doesn't throw an error...)\n\n#constants (i.e. things that definitely won't change unless some seriously strange shit happens)\nT0 = 273.15 #(K) 0 degrees C in Kelvin\nTtr = 273.16 #(K) triple point temperature of water\nptr = 611.73 #(Pa) triple point pressure of water\ng = 9.81 #(m/s^2) gravitational acceleration\n\n#parameters (i.e. 
things that could change)\nccc = 9.14e-8 #(K/MPa) Clausius-Clapeyron constant \nslope = 0 #(degrees) slope under borehole\nrho_ice = 910 #(kg/m^3) ice density \n\n#obtain paths for directories within channel directorys and create export path\ndirects = [x[0] for x in os.walk(file_dir1)]\ndirects = sorted(directs[1:]) #remove the first value as this is just the parent directory and sort \nexport_path = os.path.join(export_dir, export_file)\n\n#convert to slice\ntemp_zone_loc = slice(temp_zone_loc[0], temp_zone_loc[1]) \nmatching_section = slice(matching_section[1], matching_section[0]) \n\nsections = {\n 'pmpTemperature': [temp_zone_loc]} #pressure melting point \n #'referenceTemperature': [int_ref_loc]} #internal coil\n \n\n#labels\nst_label = 'st'\nast_label = 'ast'\nrst_label = 'rst'\nrast_label = 'rast'\n\ndirects = directs[-2:-1]\n\nfor i, directory in enumerate(directs):\n\n #print out\n files = sorted(glob.glob(os.path.join(directory, '*.xml')))\n now = datetime.datetime.now()\n print(now.strftime(\"%H:%M:%S-\"), 'Processing', str(len(files)), 'files in directory', directory, '...')\n print(directory)\n #sys.exit()\n\n #create datastore\n ds_in = read_silixa_files(\n directory = directory,\n timezone_netcdf='UTC',\n file_ext='*.xml')\n\n #obtain channel information\n #xml_name = ds_in['filename'].values[0]\n #channel = int(xml_name[8])\n\n #cut to section of interest\n ds_in = ds_in.sel(x = slice(start_cut, end_cut))\n\n #extract z values\n #array dimensions: (z,y,x) = (data_fields, z, t)\n z_values = ds_in.x.values\n\n #Clausius-Clapeyron calculation\n p_ice = rho_ice*g*((z_values - start_bh)*(bh_depth/bh_depth_dts))*np.cos(np.deg2rad(slope))\n T_pmp_cc = Ttr - ccc*(p_ice - ptr)\n T_pmp_cc_rz = T_pmp_cc[np.argmax(z_values > ((temp_zone_loc.start+temp_zone_loc.stop)/2))] - T0 #reference zone temperature\n\n #create time series for ref_pmp_T\n nd_cols = np.size(ds_in['st'].values,1) #columns in array\n ds_in['pmpTemperature'] = (('time',), np.ones(nd_cols)*T_pmp_cc_rz)\n\n #set sections\n ds_in.sections = sections\n\n #resample before calibration\n ds_in = ds_in.resample_datastore(how='mean', time=avg_time, keep_attrs=True)\n\n ds_in.acquisitionTime = avg_time_float*60*60\n\n #extract t values\n t_values = ds_in.time.values\n\n #interpolate if needed \n if len(z_values) != z_num:\n\n #create z values for interpolation\n zi = np.linspace(z_values[0], z_values[-1], z_num)\n\n #interp datastore\n ds_in = ds_in.interp(x = zi, method='slinear')\n\n #set z_values for outgoing dataset\n z_values = zi\n\n #obtain variance\n st_var, resid = ds_in.variance_stokes(st_label=st_label)\n ast_var, _ = ds_in.variance_stokes(st_label=ast_label)\n rst_var, _ = ds_in.variance_stokes(st_label=rst_label)\n rast_var, _ = ds_in.variance_stokes(st_label=rast_label)\n\n now = datetime.datetime.now()\n print(now.strftime(\"%H:%M:%S-\"), 'Performing calibration...')\n\n #perform calibration\n ds_in.calibration_double_ended(\n st_var=st_var,\n ast_var=ast_var,\n rst_var=rst_var,\n rast_var=rast_var,\n method='wls',\n solver='sparse',\n fix_gamma=usr_gamma)\n #transient_asym_att_x=[bh_splice_mid],\n #matching_sections=[(temp_zone_loc, matching_section, True)])\n\n #confidence intervals\n #ds_in.conf_int_double_ended(\n # p_val='p_val',\n # p_cov='p_cov',\n # st_var=st_var,\n # ast_var=ast_var,\n # rst_var=rst_var,\n # rast_var=rast_var,\n # store_tempvar='_var',\n # conf_ints=[2.5, 50., 97.5],\n # mc_sample_size=5000,\n # store_ta='talpha') # <- choose a much larger sample size\n\n #averaging (see example 2. 
https://github.com/dtscalibration/python-dts-calibration/blob/master/examples/notebooks/16Averaging_temperatures.ipynb)\n ds_in.average_double_ended(\n st_var=st_var,\n ast_var=ast_var,\n rst_var=rst_var,\n rast_var=rast_var,\n conf_ints=[2.5, 97.5],\n mc_sample_size=5000, # <- choose a much larger sample size\n ci_avg_time_flag1=False,\n ci_avg_time_flag2=True,\n ci_avg_time_isel=np.arange(len(ds_in.time.values) - avg_isel, len(ds_in.time.values), 1),\n ci_avg_time_sel=None)\n\n print('creating outgoing array...')\n print(ds_in)\n\n #creat DataSet. Setting z coord here results in lots of errors, so it is input after the loop\n print('tmpw_central')\n tmpw_avg_array = ds_in.tmpw_avg2\n print(tmpw_avg_array.values)\n tmpw_avg = tmpw_avg_array.values\n print('tmpw_25')\n tmpw_25_array = ds_in.tmpw_mc_avg2.isel(CI=0)\n tmpw_25 = tmpw_25_array.values\n print('tmpw_975')\n tmpw_975_array = ds_in.tmpw_mc_avg2.isel(CI=1)\n tmpw_975 = tmpw_975_array.values\n\n #remove nans and outlying values\n print('remove nans')\n tmpw_25[np.isnan(tmpw_25)] = 30\n tmpw_avg[np.isnan(tmpw_avg)] = 30\n tmpw_975[np.isnan(tmpw_975)] = 30\n\n print('remove outliers')\n tmpw_25[(tmpw_25 > 20) | (tmpw_25 < -30)] = 30\n tmpw_avg[(tmpw_avg > 20) | (tmpw_avg < -30)] = 30\n tmpw_975[(tmpw_975 > 20) | (tmpw_975 < -30)] = 30\n\n ds = xr.Dataset(\n data_vars = { 'tmpw_25':(('z'), tmpw_25),\n 'tmpw':(('z'), tmpw_avg),\n 'tmpw_975':(('z'), tmpw_975)})\n\n print(ds)\n #ds['tmpw_central'].isel(t=-1).plot(linewidth=0.7, figsize=(12, 8))\n #plt.show()\n\n #concatenate DataArray if not first loop iteration\n if i == 0:\n\n ds_out = ds\n\n else: \n\n print(ds_out)\n print(ds)\n #ds_out = xr.merge([ds_out, ds])\n ds_out = xr.concat([ds_out, ds], dim = 't')\n\n#put in z coords\nds_out.coords['z'] = z_values\n\nprint(ds_out)\n\n#save output\nds_out.to_netcdf(export_path)\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "2_end_profile.py", "file_name": "2_end_profile.py", "file_ext": "py", "file_size_in_byte": 8892, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.chdir", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 84, "usage_type": "attribute"}, {"api_name": "dtscalibration.read_silixa_files", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 130, "usage_type": "call"}, 
{"api_name": "datetime.datetime.now", "line_number": 144, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 144, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 204, "usage_type": "call"}, {"api_name": "xarray.Dataset", "line_number": 211, "usage_type": "call"}, {"api_name": "xarray.concat", "line_number": 230, "usage_type": "call"}]} +{"seq_id": "569070612", "text": "from datetime import datetime\nfrom datetime import timedelta\nimport Crypto\nfrom Crypto.Cipher import AES\nfrom Crypto.PublicKey import RSA\n\ndef calculate_timeresult(timediff_list):\n sum = timediff_list[0] - timediff_list[0]\n for diff in timediff_list:\n sum += diff\n average = sum / len(timediff_list)\n return average.total_seconds() * 1000, sum.total_seconds() * 1000\n\ndef read_rsa_publickey():\n fp = open(\"./data/rsa-publickey.txt\")\n key = fp.read()\n fp.close()\n publickey = RSA.importKey(key)\n return publickey\n\ndef read_rsa_privatekey():\n fp = open(\"./data/rsa-privatekey.txt\")\n key = fp.read()\n fp.close()\n privatekey = RSA.importKey(key)\n return privatekey\n\ndef read_aes_key():\n fp = open(\"./data/aes-key.txt\")\n key = fp.read()\n fp.close()\n aeskey = AES.new(key)\n return aeskey\n\ndef read_textcontent():\n fp = open(\"./data/content.txt\")\n content = fp.read()\n fp.close()\n content = bytes(content, \"utf-8\")\n return content\n ", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1007, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "Crypto.PublicKey.RSA.importKey", "line_number": 18, "usage_type": "call"}, {"api_name": "Crypto.PublicKey.RSA", "line_number": 18, "usage_type": "name"}, {"api_name": "Crypto.PublicKey.RSA.importKey", "line_number": 25, "usage_type": "call"}, {"api_name": "Crypto.PublicKey.RSA", "line_number": 25, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 32, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "388768950", "text": "from random import randint\nfrom time import strftime\nfrom flask import Flask, render_template, flash, request\nfrom wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField\nimport whoosh_retrieval\nimport pandas as pd\nfrom random import sample\nimport whoosh_retrieval\n\nDEBUG = True\napp = Flask(__name__)\napp.config.from_object(__name__)\napp.config['SECRET_KEY'] = 'SjdnUends821Jsdlkvxh391ksdODnejdDw'\n\nclass ReusableForm(Form):\n name = TextField('Business:', validators=[validators.required()])\n\ndef get_time():\n time = strftime(\"%Y-%m-%dT%H:%M\")\n return time\n\ndef write_to_disk(business):\n data = open('file.log', 'a')\n timestamp = get_time()\n data.write('DateStamp={}, Name={} \\n'.format(timestamp, business))\n data.close()\n\n\nreview_data = pd.read_csv('business_review_users_tags.csv')\nbusiness_tags = review_data.drop_duplicates(subset=['business_id'])\nbusiness_tags = business_tags[['business_name', 'tks_output']]\nBusinesses = business_tags['business_name'].tolist()\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef hello():\n form = ReusableForm(request.form)\n business = \"\"\n #print(form.errors)\n if request.method == 'POST':\n 
business=request.form['business']\n\n write_to_disk(business)\n if business in Businesses:\n flash('Reviews for Selected Business: {}'.format(business))\n\n else:\n flash('Error: No Reviews to Display')\n\n if business != \"\" and business in Businesses:\n selectBusinessData = business_tags.loc[business_tags['business_name'] == business]\n listselectBusinessData = selectBusinessData['tks_output'].tolist()[0]\n listselectBusinessData = listselectBusinessData.replace('[', '')\n listselectBusinessData = listselectBusinessData.replace(']', '')\n listselectBusinessData = listselectBusinessData.split(',')\n\n final_tags = []\n for tag in listselectBusinessData:\n temp = tag.replace('\\'', '').strip()\n\n final_tags.append(temp)\n\n selectBusinessTags = sample(final_tags,10) if len(final_tags) > 10 else final_tags\n tagLen = len(selectBusinessTags)\n reviews = {}\n for t in selectBusinessTags:\n reviews[t] = whoosh_retrieval.search_review(t, business)\n\n else:\n tagLen = 0\n selectBusinessTags = []\n reviews = {}\n return render_template('./index.html', form=form, tagLen=tagLen, Tags=selectBusinessTags, businessLen=len(Businesses), Businesses=Businesses, reviews=reviews)\n\nif __name__ == \"__main__\":\n app.run()", "sub_path": "Task 2/Review Retrieval/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2567, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "wtforms.Form", "line_number": 15, "usage_type": "name"}, {"api_name": "wtforms.TextField", "line_number": 16, "usage_type": "call"}, {"api_name": "wtforms.validators.required", "line_number": 16, "usage_type": "call"}, {"api_name": "wtforms.validators", "line_number": 16, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 47, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 62, "usage_type": "call"}, {"api_name": "whoosh_retrieval.search_review", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "36795143", "text": "def index(i, j, cols):\n if i < 0 or j < 0 or i >= cols or j >= cols:\n return -1\n return i*cols + j\n\nclass Cell:\n def __init__(self, i, j):\n self.i = i\n self.j = j\n self.walls = [True for i in range(4)]\n self.visited = False\n\n def checkNeighbors(self, grid, cols):\n from random import randint\n ind = index(self.i, self.j-1, cols)\n allNeighbors = []\n if ind != -1:\n top = grid[index(self.i, self.j-1, cols)]\n allNeighbors.append(top)\n ind = index(self.i+1, self.j, cols)\n if ind != -1:\n right = grid[index(self.i+1, self.j, cols)]\n allNeighbors.append(right)\n ind = index(self.i, self.j+1, cols)\n if ind != -1:\n bottom = grid[index(self.i, self.j+1, cols)]\n 
allNeighbors.append(bottom)\n ind = index(self.i-1, self.j, cols)\n if ind != -1:\n left = grid[index(self.i-1, self.j, cols)]\n allNeighbors.append(left)\n neighbors = []\n for neigh in allNeighbors:\n if not neigh.visited and neigh:\n neighbors.append(neigh)\n nNeighbors = len(neighbors)\n if nNeighbors > 0:\n r = randint(0, nNeighbors-1)\n return neighbors[r]\n # elif nNeighbors == 0:\n\n\n\n def show(self, screen, w):\n import pygame\n white = (255, 255, 255)\n x = self.i*w\n y = self.j*w\n if self.visited:\n surf = pygame.Surface((w, w))\n surf.fill((255, 0, 255, 100))\n screen.blit(surf, (x, y))\n\n if self.walls[0]:\n pygame.draw.line(screen, white, (x , y), (x+w, y))\n if self.walls[1]:\n pygame.draw.line(screen, white, (x+w, y), (x+w, y+w))\n if self.walls[2]:\n pygame.draw.line(screen, white, (x+w, y+w), (x, y+w))\n if self.walls[3]:\n pygame.draw.line(screen, white, (x, y+w), (x, y))\n\n\n def highlight(self, screen, w):\n import pygame\n white = (255, 255, 255)\n x = self.i*w\n y = self.j*w\n if self.visited:\n surf = pygame.Surface((w, w))\n surf.fill((0,0,255, 100))\n screen.blit(surf, (x, y))\n\n if self.walls[0]:\n pygame.draw.line(screen, white, (x , y), (x+w, y))\n if self.walls[1]:\n pygame.draw.line(screen, white, (x+w, y), (x+w, y+w))\n if self.walls[2]:\n pygame.draw.line(screen, white, (x+w, y+w), (x, y+w))\n if self.walls[3]:\n pygame.draw.line(screen, white, (x, y+w), (x, y))\n\ndef removeWall(a, b):\n x = a.i - b.i\n if x == 1:\n a.walls[3] = False\n b.walls[1] = False\n elif x == -1:\n a.walls[1] = False\n b.walls[3] = False\n y = a.j - b.j\n if y == -1:\n a.walls[2] = False\n b.walls[0] = False\n elif y == 1:\n a.walls[0] = False\n b.walls[2] = False\n\ndef createGrid(cols, rows):\n grid = []\n for i in range(rows):\n for j in range(cols):\n grid.append(Cell(i, j))\n return grid\n\ndef showGrid(grid, screen, w):\n for cell in grid:\n cell.show(screen, w)\n\ndef start():\n import pygame\n from pygame.time import delay\n pygame.init()\n cols = 20\n rows = 20\n w = 10\n WIDTH = w*cols\n HEIGHT = w*rows\n screen = pygame.display.set_mode((WIDTH, HEIGHT))\n grid = createGrid(cols, rows)\n current = grid[0]\n stack = []\n # Running\n running = True\n while running:\n screen.fill(0)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n current.visited = True\n next = current.checkNeighbors(grid, cols)\n if next:\n next.visited = True\n stack.append(current)\n removeWall(current, next)\n current = next\n elif len(stack) > 0:\n current = stack.pop()\n\n showGrid(grid, screen, w)\n current.highlight(screen, w)\n running = False\n for i in grid:\n if i.visited == False:\n running = True\n pygame.display.update()\n current = grid[0]\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if pygame.key.get_pressed()[pygame.K_w] and current.j > 0:\n current = grid[index(current.i, current.j-1, cols)]\n if pygame.key.get_pressed()[pygame.K_a] and current.i > 0:\n current = grid[index(current.i-1, current.j, cols)]\n if pygame.key.get_pressed()[pygame.K_s] and current.j < cols-1:\n current = grid[index(current.i, current.j+1, cols)]\n if pygame.key.get_pressed()[pygame.K_d] and current.i < cols-1:\n current = grid[index(current.i+1, current.j, cols)]\n showGrid(grid, screen, w)\n current.highlight(screen, w)\n pygame.display.update()\n delay(100)\n", "sub_path": "src/functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 5027, "program_lang": "python", 
"lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "random.randint", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 59, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 79, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 81, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 81, "usage_type": "attribute"}, {"api_name": "{'randint': 'random.randint', 'pygame': 'pygame'}", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 113, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 119, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 119, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 127, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 127, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 146, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 146, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 150, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 150, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 153, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 153, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 153, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 155, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 155, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 155, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 157, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 157, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 157, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 159, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 163, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 163, "usage_type": "attribute"}, 
{"api_name": "pygame.time.delay", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "615062930", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('quizter', '0014_auto_20160403_2138'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Quiz',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),\n ('question', models.CharField(verbose_name='Вопрос', max_length=200)),\n ('answer', models.CharField(verbose_name='Ответ', max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='QuizAnswer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),\n ('answer', models.ForeignKey(verbose_name='Ответ', to='quizter.Quiz')),\n ],\n ),\n ]\n", "sub_path": "quizter/migrations/0015_quiz_quizanswer.py", "file_name": "0015_quiz_quizanswer.py", "file_ext": "py", "file_size_in_byte": 968, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "182994387", "text": "\nimport json\nfrom datetime import datetime\n\nfrom sqlalchemy import (INT, JSON, TIMESTAMP, Boolean, Column, String,\n Table, func)\nfrom sqlalchemy.sql.expression import text\n\nfrom iris.database import BaseModel\n\n\nclass SupportEvents(BaseModel):\n\n def __init__(self, engine, metadata, role='reader'):\n table = Table(\n 'support_events',\n metadata,\n Column('id', INT, primary_key=True, autoincrement=True),\n Column('name', String),\n Column('description', String),\n Column('markdown_path', String),\n Column('attachment_hex', JSON),\n Column('attachment_type', INT),\n Column('is_deleted', Boolean),\n Column('update_at', TIMESTAMP, default=func.now())\n )\n super().__init__(engine, metadata, table, role)\n\n def get_events(self):\n KEYS = [col.key for col in self.table.columns]\n stmt = text('SELECT * FROM support_events WHERE is_deleted = 0 ORDER BY id ASC;')\n results = []\n for row in self.execute(stmt).fetchall():\n results.append(self.tuple_to_dict(row, KEYS))\n return results\n", "sub_path": 
"iris/database/support_events.py", "file_name": "support_events.py", "file_ext": "py", "file_size_in_byte": 1176, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "iris.database.BaseModel", "line_number": 12, "usage_type": "name"}, {"api_name": "sqlalchemy.Table", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.INT", "line_number": 18, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 19, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 20, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 21, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.JSON", "line_number": 22, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.INT", "line_number": 23, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 24, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.TIMESTAMP", "line_number": 25, "usage_type": "argument"}, {"api_name": "sqlalchemy.func.now", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 25, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.expression.text", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "612672903", "text": "# coding:utf8\n\nimport os\nfrom os.path import abspath\nfrom pyspark.sql import SparkSession\n\n# os.environ[\"SPARK_HOME\"] = \"D:/software/spark-2.4.2-bin-hadoop2.7\"\nwarehouse_location = abspath('/user/hive/warehouse')\n\n\ndef main():\n spark = SparkSession \\\n .builder \\\n .appName(\"Spark on Hive\") \\\n .master(\"local[*]\") \\\n .config(\"spark.sql.warehouse.dir\", warehouse_location) \\\n .enableHiveSupport() \\\n .getOrCreate()\n\n df = spark.sql('select * from dp_test.test limit 10')\n df.show(truncate=False)\n\n spark.stop()\n\n # 转为pandas的dataframe\n pd = df.toPandas()\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "spark/pyspark_sql/read_hive.py", "file_name": "read_hive.py", "file_ext": "py", "file_size_in_byte": 658, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.abspath", "line_number": 8, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 12, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "243283969", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nimport pytest\nimport requests\nimport xml.etree.ElementTree as ET\n\nfrom foist.pipeline import (extract_text, get_collection_names, get_pdf_url,\n get_record, get_record_list, is_in_fedora,\n is_thesis, parse_record_list)\n\n\ndef test_extract_text_returns_bytes(pdf):\n text = extract_text(pdf)\n assert type(text) == 
bytes\n\n\ndef test_get_collection_names_returns_correct_names():\n names = get_collection_names(['hdl_1721.1_7888', 'hdl_1721.1_7710',\n 'hdl_1721.1_7929', 'hdl_1721.1_7742',\n 'hdl_1721.1_102296', 'hdl_1721.1_102291',\n 'not_a_key'])\n assert names == {'Engineering Systems', 'Technology and Policy',\n 'Institute for Data, Systems, and Society'}\n\n\ndef test_get_pdf_url_succeeds(mets_xml):\n mets = ET.parse(mets_xml).getroot()\n pdf_url = get_pdf_url(mets)\n assert pdf_url == ('http://dspace.mit.edu/bitstream/1721.1/'\n '107085/1/971247903-MIT.pdf')\n\n\ndef test_get_record_succeeds(pipeline):\n '''Correctly-formed request should return XML response.\n '''\n dspace_oai_uri = 'http://example.com/oai/request?'\n dspace_oai_identifier = 'oai:dspace.mit.edu:1721.1/'\n identifier = '12345'\n metadata_format = 'mets'\n r = get_record(dspace_oai_uri, dspace_oai_identifier, identifier,\n metadata_format)\n assert '' in r\n\n\ndef test_get_record_list_succeeds(pipeline):\n '''Correctly-formed request should return XML response.\n '''\n dspace_oai_uri = 'http://example.com/oai/request?'\n metadata_format = 'mets'\n start_date = '2017-01-01'\n end_date = '2017-02-01'\n r = get_record_list(dspace_oai_uri, metadata_format, start_date=start_date,\n end_date=end_date)\n assert '' in r\n\n\ndef test_is_in_fedora_returns_true_for_ingested_item(fedora):\n handle = 'thesis'\n fedora_uri = 'http://example.com/rest/'\n assert is_in_fedora(handle, fedora_uri, 'theses') is True\n\n\ndef test_is_in_fedora_returns_false_for_uningested_item(fedora):\n handle = 'uningested_thesis'\n fedora_uri = 'http://example.com/rest/'\n assert is_in_fedora(handle, fedora_uri, 'theses') is False\n\n\ndef test_is_in_fedora_error_raises_error(fedora_errors):\n with pytest.raises(requests.exceptions.HTTPError):\n handle = 'no_auth'\n fedora_uri = 'http://example.com/rest/'\n is_in_fedora(handle, fedora_uri, 'theses')\n\n\ndef test_is_thesis_returns_true_for_thesis():\n set_specs = ['hdl_1721.1_7593']\n assert is_thesis(set_specs) is True\n\n\ndef test_is_thesis_returns_false_for_not_thesis():\n set_specs = ['i_am_not_a_thesis_set']\n assert is_thesis(set_specs) is False\n\n\ndef test_parse_record_list_returns_correct_json(record_list):\n json_records = parse_record_list(record_list)\n assert {'identifier': '108425', 'sets': ['hdl_1721.1_494'],\n 'handle': '1721.1-108425'} in json_records\n", "sub_path": "tests/test_pipeline.py", "file_name": "test_pipeline.py", "file_ext": "py", "file_size_in_byte": 3140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "foist.pipeline.extract_text", "line_number": 14, "usage_type": "call"}, {"api_name": "foist.pipeline.get_collection_names", "line_number": 19, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 28, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 28, "usage_type": "name"}, {"api_name": "foist.pipeline.get_pdf_url", "line_number": 29, "usage_type": "call"}, {"api_name": "foist.pipeline.get_record", "line_number": 41, "usage_type": "call"}, {"api_name": "foist.pipeline.get_record_list", "line_number": 53, "usage_type": "call"}, {"api_name": "foist.pipeline.is_in_fedora", "line_number": 61, "usage_type": "call"}, {"api_name": "foist.pipeline.is_in_fedora", "line_number": 67, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 71, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 71, "usage_type": "attribute"}, 
{"api_name": "foist.pipeline.is_in_fedora", "line_number": 74, "usage_type": "call"}, {"api_name": "foist.pipeline.is_thesis", "line_number": 79, "usage_type": "call"}, {"api_name": "foist.pipeline.is_thesis", "line_number": 84, "usage_type": "call"}, {"api_name": "foist.pipeline.parse_record_list", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "140291617", "text": "from flask import Flask\nfrom flask_bcrypt import Bcrypt\nfrom flask_login import LoginManager\nfrom flask_sqlalchemy import SQLAlchemy\n\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = \"956b91dcad619e1d5137ee2fc1e14bc2\"\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///database/site.db\"\ndb = SQLAlchemy(app)\nbcrypt = Bcrypt(app)\nlogin_manager = LoginManager(app)\nlogin_manager.login_view = \"login\"\nlogin_manager.login_message_category = \"info\"\n\n# Needs to be imported here to resolve circular imports.\n# from flaskblog.controller import routes\nfrom flaskblog.controller import ( # noqa Discable check\n general_controller,\n user_controller,\n posts_controller,\n)\n", "sub_path": "flaskblog/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 673, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 10, "usage_type": "call"}, {"api_name": "flask_bcrypt.Bcrypt", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_login.LoginManager", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "288321857", "text": "# coding=utf-8\nimport datetime\nfrom django.db import models\nfrom django.conf import settings\nfrom django.core.signing import Signer\nfrom djorm_pgfulltext.models import SearchManager\nfrom djorm_pgfulltext.fields import VectorField\nfrom urllib import quote_plus\nfrom django.utils.http import urlquote\nfrom django.utils.translation import ugettext_lazy as _\nfrom caching.base import CachingManager, CachingMixin\n\n\nNUMBER_OF_VOTES_CACHE_ENTRY = 'number_of_votes'\nRECENT_EVENTS_CACHE_ENTRY = 'recent_events_cache_entry'\n\n\nclass IsNull(models.Func):\n template = '%(expressions)s IS NULL'\n\n\nclass Category(CachingMixin, models.Model):\n\n name = models.CharField(max_length=255)\n\n objects = CachingManager()\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n ordering = [\"name\"]\n verbose_name_plural = _(\"categories\")\n\n\nclass SiteMode(CachingMixin, models.Model):\n show_question_votes = models.BooleanField(default=True, blank=True)\n show_total_votes = models.BooleanField(default=True, blank=True)\n allow_sorting_by_votes = models.BooleanField(default=True, blank=True)\n allow_voting_and_submitting_questions = models.BooleanField(default=True, blank=True)\n debate_time = models.DateTimeField(\n default=datetime.datetime(2099, 1, 1),\n help_text=\"Enter time that debate starts in timezone %s\" % settings.TIME_ZONE,\n )\n debate_state = models.CharField(max_length=5, null=True, blank=True)\n\n announcement_headline = models.CharField(max_length=255, null=True, blank=True)\n announcement_body = models.TextField(null=True, blank=True)\n announcement_link = models.URLField(null=True, blank=True)\n announcement_page_regex = models.CharField(max_length=255, null=True, blank=True)\n\n objects = CachingManager()\n\n\nclass Submission(models.Model):\n\n def user_display_name(self):\n return self.voter.user_display_name()\n\n category = 
models.ForeignKey(Category)\n idea = models.TextField(verbose_name=_('Question'))\n\n headline = models.TextField(null=False, blank=False)\n followup = models.TextField(null=True, blank=True)\n\n citation = models.CharField(max_length=2000, null=True, blank=True, db_index=True,\n verbose_name=_(\"Optional link to full proposal or reference\"))\n citation_verified = models.BooleanField(default=False, db_index=True)\n\n voter = models.ForeignKey(\"Voter\")\n created_at = models.DateTimeField(db_index=True)\n\n ip_address = models.CharField(max_length=255, db_index=True)\n\n editors_pick = models.BooleanField(default=False)\n approved = models.BooleanField(default=False, db_index=True)\n\n # if True, will not show up again in moderation list.\n moderated_removal = models.BooleanField(default=False, db_index=True)\n\n has_duplicates = models.BooleanField(default=False, db_index=True)\n\n duplicate_of = models.ForeignKey('opendebates.Submission', null=True, blank=True,\n related_name=\"duplicates\")\n\n votes = models.IntegerField(default=0, db_index=True)\n local_votes = models.IntegerField(default=0, db_index=True)\n score = models.FloatField(default=0, db_index=True)\n rank = models.FloatField(default=0, db_index=True)\n\n random_id = models.FloatField(default=0, db_index=True)\n\n search_index = VectorField()\n\n keywords = models.TextField(null=True, blank=True)\n\n objects = SearchManager(fields=[\"idea\", \"keywords\"],\n auto_update_search_field=True)\n\n source = models.CharField(max_length=255, null=True, blank=True)\n\n happened = models.DateField(null=True, blank=True)\n is_positive = models.BooleanField(default=False)\n\n class Meta:\n ordering = ['-happened']\n\n def get_recent_votes(self):\n timespan = datetime.datetime.now() - datetime.timedelta(1)\n return Vote.objects.filter(submission=self, created_at__gte=timespan).count()\n\n def get_duplicates(self):\n if not self.has_duplicates:\n return None\n return Submission.objects.select_related(\n \"voter\", \"category\", \"voter__user\").filter(\n approved=True, duplicate_of=self)\n\n def __unicode__(self):\n return self.idea\n\n @models.permalink\n def get_absolute_url(self):\n return \"vote\", [self.id]\n\n def my_tweet_text(self):\n params = {\n \"hashtag\": settings.SITE_THEME['HASHTAG'],\n }\n return _(u\"Vote for my progressive idea for @ThinkBigUS #%s(hashtag)s. \"\n \"30 leaders in Congress will see top ideas!\" % params)\n\n def tweet_text(self):\n text = settings.SITE_THEME['TWITTER_QUESTION_TEXT']\n if self.voter.twitter_handle:\n text += u\" h/t @%s\" % self.voter.twitter_handle\n return text\n\n def facebook_text(self):\n if len(self.idea) > 240:\n return self.idea[:240] + u'…'\n return self.idea\n\n def facebook_url(self):\n return u\"https://www.facebook.com/sharer/sharer.php?&u=%(idea_url)s\" % {\n \"idea_url\": quote_plus(self.really_absolute_url('fb')),\n }\n\n def reddit_url(self):\n return u\"//www.reddit.com/submit?url=%s\" % (quote_plus(self.really_absolute_url('reddit')),)\n\n def email_url(self):\n subject = settings.SITE_THEME['EMAIL_SUBJECT']\n body = settings.SITE_THEME['EMAIL_BODY'] % {\n \"url\": self.really_absolute_url('email'),\n }\n return u\"mailto:?subject=%s&body=%s\" % (urlquote(subject), urlquote(body))\n\n def sms_url(self):\n params = {\n \"url\": self.really_absolute_url('sms'),\n \"hashtag\": settings.SITE_THEME['HASHTAG'],\n }\n body = _(u\"Vote for my progressive idea for @OpenDebaters #%(hashtag)s. 
%(url)s\" % params)\n return u\"sms:;?body=%s\" % (quote_plus(body),)\n\n def really_absolute_url(self, source=None):\n url = settings.SITE_DOMAIN_WITH_PROTOCOL + self.get_absolute_url()\n if source is not None:\n url += '?source=share-%s-%s' % (source, self.id)\n return url\n\n def twitter_url(self):\n url_tmpl = u\"https://twitter.com/intent/tweet?url=\" + \\\n \"%(idea_url)s&text=%(tweet_text)s\"\n return url_tmpl % {\n \"idea_url\": quote_plus(self.really_absolute_url('tw')),\n \"tweet_text\": quote_plus(self.tweet_text()),\n }\n\n def twitter_title(self):\n # Vote on this question for the FL-Sen #OpenDebate!\n return settings.SITE_THEME['TWITTER_QUESTION_TITLE'].format(idea=self.idea)\n\n def twitter_description(self):\n # \"{idea}\" At 8pm EDT on 4/25, Jolly & Grayson answer top vote-getting questions at\n # bottom-up #OpenDebate hosted by [TBD], Open Debate Coalition, Progressive Change Institute\n return settings.SITE_THEME['TWITTER_QUESTION_DESCRIPTION'].format(idea=self.idea)\n\n def facebook_title(self):\n return settings.SITE_THEME['FACEBOOK_QUESTION_TITLE'].format(idea=self.idea)\n\n def facebook_description(self):\n return settings.SITE_THEME['FACEBOOK_QUESTION_DESCRIPTION'].format(idea=self.idea)\n\n\nclass ZipCode(CachingMixin, models.Model):\n zip = models.CharField(max_length=10, unique=True)\n city = models.CharField(max_length=255, null=True, blank=True)\n state = models.CharField(max_length=255, null=True, blank=True)\n\n objects = CachingManager()\n\n\nclass Voter(models.Model):\n\n def user_display_name(self):\n voter = self\n if voter.display_name:\n name = voter.display_name\n elif not voter.user:\n name = _(u\"Somebody\")\n else:\n user = voter.user\n name = u\"%s\" % user.first_name\n if user.last_name:\n name = u\"%s %s.\" % (name, user.last_name[0])\n if not name or not name.strip():\n name = _(u\"Somebody\")\n\n if voter.state:\n name = _(u\"%(name)s from %(state)s\" % {\"name\": name, \"state\": voter.state})\n return name\n\n email = models.EmailField(unique=True)\n zip = models.CharField(max_length=10, db_index=True)\n state = models.CharField(max_length=255, null=True, blank=True)\n\n user = models.OneToOneField(\n settings.AUTH_USER_MODEL, null=True, blank=True, related_name=\"voter\")\n\n source = models.CharField(max_length=255, null=True, blank=True)\n created_at = models.DateTimeField(auto_now_add=True)\n\n display_name = models.CharField(max_length=255, null=True, blank=True)\n twitter_handle = models.CharField(max_length=255, null=True, blank=True)\n\n unsubscribed = models.BooleanField(default=False)\n\n def __unicode__(self):\n return self.email\n\n def account_token(self):\n return Voter.make_account_token(self.email)\n\n @classmethod\n def make_account_token(cls, email):\n signer = Signer()\n value = signer.sign(email)\n return value\n\n\nclass Vote(models.Model):\n\n submission = models.ForeignKey(Submission)\n voter = models.ForeignKey(Voter)\n\n ip_address = models.CharField(max_length=255, db_index=True)\n request_headers = models.TextField(null=True, blank=True)\n\n original_merged_submission = models.ForeignKey(Submission, null=True, blank=True,\n related_name=\"votes_merged_elsewhere\")\n\n class Meta:\n unique_together = [(\"submission\", \"voter\")]\n\n created_at = models.DateTimeField(db_index=True)\n source = models.CharField(max_length=255, null=True, blank=True)\n\n\nclass Candidate(models.Model):\n first_name = models.CharField(max_length=255, null=True, blank=True)\n last_name = models.CharField(max_length=255, null=True, 
blank=True)\n current_title = models.CharField(max_length=255, null=True, blank=True)\n bio = models.TextField(default='', null=True, blank=True)\n website = models.URLField(null=True, blank=True, db_index=True)\n facebook = models.URLField(null=True, blank=True, db_index=True)\n twitter_handle = models.CharField(max_length=16, null=True, blank=True)\n display_name = models.CharField(max_length=255, null=True, blank=True,\n help_text=_(\"Defaults to first_name last_name.\"))\n\n created_at = models.DateTimeField(auto_now_add=True)\n\n def save(self, *args, **kwargs):\n if not self.display_name:\n self.display_name = u'{0} {1}'.format(self.first_name, self.last_name)\n super(Candidate, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return self.display_name\n\n\nclass Flag(models.Model):\n to_remove = models.ForeignKey(Submission, related_name='removal_flags')\n duplicate_of = models.ForeignKey(Submission, related_name='+',\n null=True, blank=True)\n voter = models.ForeignKey(Voter)\n reviewed = models.BooleanField(default=False)\n note = models.TextField(null=True, blank=True)\n\n class Meta:\n unique_together = [\n ('to_remove', 'voter'),\n ]\n", "sub_path": "opendebates/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 10960, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.db.models.Func", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "caching.base.CachingMixin", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "caching.base.CachingManager", "line_number": 26, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 33, "usage_type": "call"}, {"api_name": "caching.base.CachingMixin", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "call"}, {"api_name": "django.conf.settings.TIME_ZONE", "line_number": 43, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 45, "usage_type": "call"}, {"api_name": 
"django.db.models", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "caching.base.CachingManager", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 60, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 60, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 61, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 61, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 61, "usage_type": "call"}, {"api_name": "django.db.models.TextField", "line_number": 63, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 66, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 66, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 67, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 68, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 68, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 70, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 70, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 71, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 71, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 73, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 73, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 75, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 75, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 76, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 76, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 79, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 79, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 81, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 81, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 83, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 83, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 86, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 86, 
"usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 87, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 87, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 88, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 88, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 89, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 89, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 91, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 91, "usage_type": "name"}, {"api_name": "djorm_pgfulltext.fields.VectorField", "line_number": 93, "usage_type": "call"}, {"api_name": "django.db.models.TextField", "line_number": 95, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 95, "usage_type": "name"}, {"api_name": "djorm_pgfulltext.models.SearchManager", "line_number": 97, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 100, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 100, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 102, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 102, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 103, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 103, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 109, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 109, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 109, "usage_type": "call"}, {"api_name": "django.db.models.permalink", "line_number": 122, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 122, "usage_type": "name"}, {"api_name": "django.conf.settings.SITE_THEME", "line_number": 128, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 128, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 130, "usage_type": "call"}, {"api_name": "django.conf.settings.SITE_THEME", "line_number": 134, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 134, "usage_type": "name"}, {"api_name": "urllib.quote_plus", "line_number": 146, "usage_type": "call"}, {"api_name": "urllib.quote_plus", "line_number": 150, "usage_type": "call"}, {"api_name": "django.conf.settings.SITE_THEME", "line_number": 153, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 153, "usage_type": "name"}, {"api_name": "django.conf.settings.SITE_THEME", "line_number": 154, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 154, "usage_type": "name"}, {"api_name": "django.utils.http.urlquote", "line_number": 157, "usage_type": "call"}, {"api_name": "django.conf.settings.SITE_THEME", "line_number": 162, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 162, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 164, "usage_type": "call"}, {"api_name": "urllib.quote_plus", "line_number": 165, "usage_type": "call"}, {"api_name": "django.conf.settings.SITE_DOMAIN_WITH_PROTOCOL", "line_number": 168, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 168, "usage_type": "name"}, {"api_name": "urllib.quote_plus", "line_number": 177, 
"usage_type": "call"}, {"api_name": "urllib.quote_plus", "line_number": 178, "usage_type": "call"}, {"api_name": "django.conf.settings.SITE_THEME", "line_number": 183, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 183, "usage_type": "name"}, {"api_name": "django.conf.settings.SITE_THEME", "line_number": 188, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 188, "usage_type": "name"}, {"api_name": "django.conf.settings.SITE_THEME", "line_number": 191, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 191, "usage_type": "name"}, {"api_name": "django.conf.settings.SITE_THEME", "line_number": 194, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 194, "usage_type": "name"}, {"api_name": "caching.base.CachingMixin", "line_number": 197, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 197, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 197, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 198, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 198, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 199, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 199, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 200, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 200, "usage_type": "name"}, {"api_name": "caching.base.CachingManager", "line_number": 202, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 205, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 205, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 212, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 219, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 222, "usage_type": "call"}, {"api_name": "django.db.models.EmailField", "line_number": 225, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 225, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 226, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 226, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 227, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 227, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 229, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 229, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 230, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 230, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 232, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 232, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 233, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 233, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 235, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 235, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 236, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 236, 
"usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 238, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 238, "usage_type": "name"}, {"api_name": "django.core.signing.Signer", "line_number": 248, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 253, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 253, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 255, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 255, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 256, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 256, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 258, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 258, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 259, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 259, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 261, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 261, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 267, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 267, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 268, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 268, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 271, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 271, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 272, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 272, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 273, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 273, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 274, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 274, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 275, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 275, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 276, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 276, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 277, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 277, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 278, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 278, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 279, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 279, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 280, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 282, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 282, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 293, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 293, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 294, "usage_type": "call"}, 
{"api_name": "django.db.models", "line_number": 294, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 295, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 295, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 297, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 297, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 298, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 298, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 299, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 299, "usage_type": "name"}]} +{"seq_id": "559065604", "text": "from pyspark import SparkContext\nfrom pyspark.sql import SQLContext\nfrom operator import add\nimport pandas as pd\n\nsc = SparkContext()\nsqlcontext = SQLContext(sc)\n\n# PART 1\n# Loading Data\n# csv_path = 'hdfs://wolf.analytics.private/user/ktd5131/data/crime/Crimes_-_2001_to_present.csv'\ncsv_path = 'hdfs://wolf.analytics.private/user/ktd5131/data/sample_crime_data.csv'\nrddd = sc.textFile(csv_path)\n\n# Getting only the blocks data and placing it in the right format. Giving 0 to years different from last 3\nblocksRDD = rddd.map(lambda x: (x.split(\",\")[3], 1) if x.split(\",\")[17] in {'2018', '2019', '2020'} else (x.split(\",\")[3], 0))\n# Sum all the crimes per block\ncasesPerBlockRDD = blocksRDD.reduceByKey(add)\n# Sort by number\nsortedCrimesPerBlock = casesPerBlockRDD.sortBy(lambda x: x[1])\n# Collect\nresult = sortedCrimesPerBlock.collect()[-10:] # Get the 10 with highest number\n\nwith open(\"dimitrov_exercise2_part1_output.txt\", 'w') as output:\n for row in result:\n output.write(str(row) + '\\n')\n\n# COMMENTS:\n# Result i sin dimitrov_exercise2_part1_output.txt\n# ('064XX S DR MARTIN LUTHER KING JR DR', 571)\n# ('076XX S CICERO AVE', 572)\n# ('100XX W OHARE ST', 652)\n# ('006XX N MICHIGAN AVE', 665)\n# ('011XX S CANAL ST', 754)\n# ('0000X S STATE ST', 898)\n# ('0000X N STATE ST', 914)\n# ('0000X W TERMINAL ST', 942)\n# ('008XX N MICHIGAN AVE', 1071)\n# ('001XX N STATE ST', 2290)", "sub_path": "PySpark - RDDs, SparkSQL, Pipelines/dimitrov_exercise2_part1.py", "file_name": "dimitrov_exercise2_part1.py", "file_ext": "py", "file_size_in_byte": 1382, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pyspark.SparkContext", "line_number": 6, "usage_type": "call"}, {"api_name": "pyspark.sql.SQLContext", "line_number": 7, "usage_type": "call"}, {"api_name": "operator.add", "line_number": 18, "usage_type": "argument"}]} +{"seq_id": "439349014", "text": "#!/usr/bin/python3\n\n\"\"\"Binary genetic algorithm engine\"\"\"\n\n\nfrom typing import Any, Callable, List, Union, Tuple\n\nimport numpy as np\n\n\n_DEFAULT_RAND_GEN = np.random.Generator(np.random.pcg64.PCG64(None))\n\n\ndef generate(\n pop_size: int,\n chrom_length: int,\n threshold: float = 0.5,\n rand_generator: np.random.Generator = _DEFAULT_RAND_GEN\n) -> np.ndarray:\n \"\"\"\n Inputs:\n\n pop_size\n -- positive integer number\n -- total number of chromosomes in a generation\n\n chrom_length\n -- positive integer number\n -- number of bits in a chromosome\n -- length of a chromosome\n\n threshold\n -- real number between 0.0 and 1.0 (default is 0.5)\n -- values (from uniform distribution) lower than this number\n translate to True values in chromosomes\n\n rand_generator\n -- instance of Numpy Random 
Generator\n -- Generator with Numpy default BitGenerator (PCG64)\n and None as a seed value is used as default\n\n\n Each chromosome is represented as a fixed length 1D numpy array\n with random sequence of Boolean values.\n\n Population size must be smaller than the total number of unique\n chromosome sequences (binary patterns) that can be generated from\n a given number of bits.\n\n\n Returns nested (2D) numpy boolean array, the entire population\n of chromosomes (solution candidates).\n \"\"\"\n\n if pop_size >= (1 << chrom_length):\n raise ValueError('Population must be smaller than overall unique chromosome sequences.')\n\n return rand_generator.uniform(low=0.0, high=1.0, size=(pop_size, chrom_length)) < threshold\n\n\ndef select(\n population: np.ndarray,\n scores: np.ndarray,\n indexes: np.ndarray,\n rand_generator: np.random.Generator = _DEFAULT_RAND_GEN\n) -> np.ndarray:\n \"\"\"\n Inputs:\n\n population\n -- array of chromosomes\n -- each chromosome must be represented as 1D numpy boolean array\n\n scores\n -- array of fitness values corresponding to chromosomes\n -- each fitness value must be a non-negative real number\n\n indexes\n -- 1D numpy integer array\n -- indexes of chromosomes in the population (row index)\n\n rand_generator\n -- instance of Numpy Random Generator\n -- Generator with Numpy default BitGenerator (PCG64)\n and None as a seed value is used as default\n\n\n Selection is based on the roulette wheel method (fitness proportionate selection)\n where probability of a chromosome being selected is related to its fitness value.\n\n This does not guarantee that the best chromosome sequence (binary pattern)\n will be selected but helps to avoid local optima.\n\n\n Returns nested (2D) numpy boolean array, the entire next generation of chromosomes\n (solution candidates) chosen with repetition from a given population.\n \"\"\"\n\n probabilities = scores / np.sum(scores)\n\n indexes = rand_generator.choice(indexes, size=indexes.size, replace=True, p=probabilities)\n\n rand_generator.shuffle(indexes)\n\n return population[indexes]\n\n\ndef mutate(\n population: np.ndarray,\n mut_prob: float,\n rand_generator: np.random.Generator = _DEFAULT_RAND_GEN\n) -> np.ndarray:\n \"\"\"\n Inputs:\n\n population\n -- array of chromosomes\n -- each chromosome must be represented as 1D numpy boolean array\n\n mut_prob\n -- positive real number\n -- mutation rate\n -- probability that a bit will be inverted\n\n rand_generator\n -- instance of Numpy Random Generator\n -- Generator with Numpy default BitGenerator (PCG64)\n and None as a seed value is used as default\n\n\n Mutation can occur independently at every bit along each chromosome\n with uniform probability.\n\n\n Returns nested (2D) numpy boolean array, the entire population of chromosomes\n (solution candidates) with randomly altered bits.\n \"\"\"\n\n bits_to_mutate = rand_generator.uniform(low=0.0, high=1.0, size=population.shape) < mut_prob\n\n # Change only specific bits in chromosomes, using XOR\n return population ^ bits_to_mutate\n\n\ndef crossover(\n population: np.ndarray,\n crs_prob: float,\n bits: np.ndarray,\n rand_generator: np.random.Generator = _DEFAULT_RAND_GEN\n) -> np.ndarray:\n \"\"\"\n Inputs:\n\n population\n -- array of chromosomes\n -- each chromosome must be represented as 1D numpy boolean array\n -- number of chromosomes must be even\n\n crs_prob\n -- positive real number\n -- crossover (recombination) probability\n -- probability that a pair of chromosomes will exchange\n part of bit sequences\n\n 
bits\n -- 1D numpy integer array\n -- indexes of bits in a chromosome (column index)\n\n rand_generator\n -- instance of Numpy Random Generator\n -- Generator with Numpy default BitGenerator (PCG64)\n and None as a seed value is used as default\n\n\n Commute part of binary sequences between paired chromosomes.\n\n\n Returns nested (2D) numpy boolean array, the entire population of chromosomes\n (solution candidates) where random chromosome pairs swapped their binary pattern.\n \"\"\"\n\n # Each row represents a pair of chromosomes\n # Each column represents specific bit\n\n # Get the number of pairs and the length of sequences\n rows, cols = population.shape\n\n rows >>= 1 # rows //= 2\n\n # Select pairs of chromosomes for which sequences of bits will be exchanged\n pairs = rand_generator.uniform(low=0.0, high=1.0, size=(rows, 1)) < crs_prob\n\n # Each chromosome must contribute at least one bit\n\n # Set a single crossover bit for each pair of chromosomes\n breakpoints = rand_generator.integers(\n low=1,\n high=cols,\n size=(rows, 1),\n dtype=bits.dtype,\n endpoint=False\n )\n\n # Divide each sequence of bits into two parts\n positions = bits < breakpoints\n\n # Keep information of bit positions only for selected pairs\n positions &= pairs\n\n return np.concatenate((\n (population[0::2] & positions) | (population[1::2] & ~positions),\n (population[0::2] & ~positions) | (population[1::2] & positions)\n ), axis=0)\n\n\ndef run(\n fit_func: Callable[..., np.ndarray],\n crs_prob: float,\n mut_prob: float,\n chrom_length: int,\n pop_size: int,\n iterations: int,\n fit_args: Union[List[Any], Tuple, None] = None,\n threshold: float = 1.0\n) -> np.ndarray:\n \"\"\"\n Inputs:\n\n fit_func\n -- fitness function\n -- first positional argument must be a nested (2D) numpy boolean array\n -- must return 1D numpy array with real numbers between\n 0 (invalid sequence) and 1 (perfect fitness) and size\n equal to the number of rows of the given numpy array\n\n crs_prob\n -- positive real number\n -- crossover (recombination) probability\n -- probability that a pair of chromosomes will exchange\n part of bit sequences\n\n mut_prob\n -- positive real number\n -- mutation rate\n -- probability that a bit will be inverted\n\n chrom_length\n -- positive integer number\n -- number of bits in a chromosome\n -- length of a chromosome\n\n fit_args\n -- list, tuple, or None (default)\n -- additional argument(s) require by fitness function\n\n pop_size\n -- positive integer number (default is 100)\n -- must be even\n -- total number of chromosomes in a generation\n\n iterations\n -- positive integer number (default is 200)\n -- maximum number of generations\n\n threshold\n -- positive number less than or equal to 1\n -- minimum satisfactory fitness level\n -- higher value is more strict\n -- default is 1, which produces the highest fitness value after\n a given number of iterations (preferably a perfect fitness\n or an exact match)\n\n\n The fitness function operates on numpy arrays: for a given population\n of chromosomes it must return corresponding fitness values.\n\n This function always returns a chromosome with the largest fitness value.\n However, depending on the fitness function, it can solve both minimization\n and maximization problems.\n\n The highest fitted chromosome is returned even if the minimum fitness threshold\n condition is not satisfied within a given number of generations.\n\n\n Returns a chromosome (1D numpy boolean array).\n \"\"\"\n\n if pop_size & 1:\n raise ValueError('Total amount of 
chromosomes in a population must be an even number.')\n if (crs_prob <= 0) or (crs_prob >= 1):\n raise ValueError('Crossover probability must be a number between 0.0 and 1.0 (exclusive).')\n if (mut_prob <= 0) or (mut_prob >= 1):\n raise ValueError('Mutation probability must be a number between 0.0 and 1.0 (exclusive).')\n if 1 < threshold < 0:\n raise ValueError('Threshold must be a number between 0.0 and 1.0 (inclusive).')\n\n if fit_args is None:\n fit_args = []\n elif not isinstance(fit_args, (list, tuple)):\n raise TypeError('Additional fitness argument(s) must be placed in a list or tuple.')\n\n # Create initial population and calculate corresponding fitness values\n population = generate(pop_size, chrom_length)\n scores = fit_func(population, *fit_args)\n\n # Find the best candidate from current generation\n alpha = np.argmax(scores)\n alpha_chromosome = population[alpha]\n alpha_score = scores[alpha]\n\n # Enumerate chromosomes in the population (create rows index)\n indexes = np.arange(pop_size, dtype=np.uint)\n\n # Enumerate bits in a chromosome (create columns index)\n bits = np.arange(chrom_length, dtype=np.uint)\n\n # Create successive generations\n for _ in range(iterations):\n\n if alpha_score >= threshold:\n break\n\n # Recreate population\n population = select(population, scores, indexes)\n population = crossover(population, crs_prob, bits)\n population = mutate(population, mut_prob)\n\n # Recalculate fitness values\n scores = fit_func(population, *fit_args)\n\n alpha = np.argmax(scores)\n\n if alpha_score < scores[alpha]:\n alpha_chromosome = population[alpha]\n alpha_score = scores[alpha]\n\n return alpha_chromosome\n", "sub_path": "binary_genetic_algorithm.py", "file_name": "binary_genetic_algorithm.py", "file_ext": "py", "file_size_in_byte": 10142, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.random.Generator", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.random.pcg64.PCG64", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.random", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 109, "usage_type": "attribute"}, {"api_name": "numpy.random", "line_number": 111, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 148, "usage_type": "attribute"}, {"api_name": "numpy.random", "line_number": 149, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 150, "usage_type": "attribute"}, {"api_name": "typing.Callable", "line_number": 217, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 217, "usage_type": "attribute"}, {"api_name": 
"typing.Union", "line_number": 223, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 223, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 223, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 223, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.uint", "line_number": 312, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.uint", "line_number": 315, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 331, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 225, "usage_type": "attribute"}]} +{"seq_id": "642186643", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nproperty_test.py: testing file with property-based tests\nfor multifidelityfunctions package\n\"\"\"\n\n__author__ = 'Sander van Rijn'\n__email__ = 's.j.van.rijn@liacs.leidenuniv.nl'\n\n\nimport numpy as np\nfrom hypothesis import given\nfrom hypothesis.strategies import floats, integers, lists\nimport pytest\n\nfrom .utils import rescale, ValueRange\nimport multifidelityfunctions as mff\n\n\n@mff.row_vectorize\ndef quadratic(xx):\n return np.sqrt(np.sum(xx**2, axis=1))\n\n\nsimple_square = mff.MultiFidelityFunction('simple_square', [-1e8], [1e8], [quadratic, quadratic], ['high', 'low'])\n\n# TEST HELPERS -----------------------------------------------------------------\n\ndef rectangle_lists(n):\n return lists(lists(floats(min_value=0, max_value=1),\n min_size=n, max_size=n), min_size=1)\n\n\ndef _test_1d_array_input(func, x):\n y = func(x)\n\n assert isinstance(y, np.ndarray)\n assert y.ndim == 1\n\n\ndef _test_2d_array_input(func, x):\n y = func(x)\n\n assert isinstance(y, np.ndarray)\n assert y.ndim == 1\n assert np.all(np.isfinite(y))\n\n if len(x) > 1:\n assert np.allclose(func(x[0]), y[0])\n\n\ndef _test_single_function(f, x):\n X = rescale(np.array(x),\n range_in=ValueRange(0, 1),\n range_out=ValueRange(*f.bounds))\n for fidelity in f.functions:\n _test_2d_array_input(fidelity, X.tolist()) # list input TODO: make separate test for @row_vectorize decorator instead\n _test_2d_array_input(fidelity, X) # direct numpy input\n\n\n# TESTS ------------------------------------------------------------------------\n\n\n@given(integers(min_value=1, max_value=100).flatmap(rectangle_lists))\n@pytest.mark.parametrize(\"function\", [\n simple_square,\n mff.forrester,\n])\ndef test_nd_functions(function, x):\n _test_single_function(function, x)\n\n\n@given(rectangle_lists(n=2))\n@pytest.mark.parametrize(\"function\", [\n mff.bohachevsky,\n mff.booth,\n mff.branin,\n mff.currin,\n mff.himmelblau,\n mff.six_hump_camelback,\n mff.adjustable_branin(0.5),\n mff.adjustable_paciorek(0.5),\n])\ndef test_2d_functions(function, x):\n _test_single_function(function, x)\n\n\n@given(rectangle_lists(n=3))\n@pytest.mark.parametrize(\"function\", [\n mff.adjustable_hartmann3(0.5),\n])\ndef test_3d_functions(function, x):\n _test_single_function(function, x)\n\n\n@given(rectangle_lists(n=4))\n@pytest.mark.parametrize(\"function\", [\n mff.park91a,\n mff.park91b,\n])\ndef test_4d_functions(function, x):\n _test_single_function(function, x)\n\n\n@given(rectangle_lists(n=6))\n@pytest.mark.parametrize(\"function\", [\n mff.hartmann6,\n])\ndef test_6d_functions(function, x):\n _test_single_function(function, 
x)\n\n\n@given(rectangle_lists(n=8))\n@pytest.mark.parametrize(\"function\", [\n mff.borehole,\n])\ndef test_8d_functions(function, x):\n _test_single_function(function, x)\n\n\n@given(rectangle_lists(n=10))\n@pytest.mark.parametrize(\"function\", [\n mff.adjustable_trid(0.5),\n])\ndef test_10d_functions(function, x):\n _test_single_function(function, x)\n", "sub_path": "tests/property_test.py", "file_name": "property_test.py", "file_ext": "py", "file_size_in_byte": 3070, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.sqrt", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 24, "usage_type": "call"}, {"api_name": "multifidelityfunctions.row_vectorize", "line_number": 22, "usage_type": "attribute"}, {"api_name": "multifidelityfunctions.MultiFidelityFunction", "line_number": 27, "usage_type": "call"}, {"api_name": "hypothesis.strategies.lists", "line_number": 32, "usage_type": "call"}, {"api_name": "hypothesis.strategies.floats", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.all", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 51, "usage_type": "call"}, {"api_name": "utils.rescale", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "utils.ValueRange", "line_number": 56, "usage_type": "call"}, {"api_name": "utils.ValueRange", "line_number": 57, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 66, "usage_type": "call"}, {"api_name": "hypothesis.strategies.integers", "line_number": 66, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 67, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 67, "usage_type": "attribute"}, {"api_name": "multifidelityfunctions.forrester", "line_number": 69, "usage_type": "attribute"}, {"api_name": "hypothesis.given", "line_number": 75, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 76, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 76, "usage_type": "attribute"}, {"api_name": "multifidelityfunctions.bohachevsky", "line_number": 77, "usage_type": "attribute"}, {"api_name": "multifidelityfunctions.booth", "line_number": 78, "usage_type": "attribute"}, {"api_name": "multifidelityfunctions.branin", "line_number": 79, "usage_type": "attribute"}, {"api_name": "multifidelityfunctions.currin", "line_number": 80, "usage_type": "attribute"}, {"api_name": "multifidelityfunctions.himmelblau", "line_number": 81, "usage_type": "attribute"}, {"api_name": "multifidelityfunctions.six_hump_camelback", "line_number": 82, "usage_type": "attribute"}, {"api_name": "multifidelityfunctions.adjustable_branin", "line_number": 83, "usage_type": "call"}, {"api_name": "multifidelityfunctions.adjustable_paciorek", "line_number": 84, "usage_type": "call"}, {"api_name": "hypothesis.given", "line_number": 90, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 91, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 91, "usage_type": "attribute"}, {"api_name": "multifidelityfunctions.adjustable_hartmann3", "line_number": 92, "usage_type": "call"}, {"api_name": "hypothesis.given", 
"line_number": 98, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 99, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 99, "usage_type": "attribute"}, {"api_name": "multifidelityfunctions.park91a", "line_number": 100, "usage_type": "attribute"}, {"api_name": "multifidelityfunctions.park91b", "line_number": 101, "usage_type": "attribute"}, {"api_name": "hypothesis.given", "line_number": 107, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 108, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 108, "usage_type": "attribute"}, {"api_name": "multifidelityfunctions.hartmann6", "line_number": 109, "usage_type": "attribute"}, {"api_name": "hypothesis.given", "line_number": 115, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 116, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 116, "usage_type": "attribute"}, {"api_name": "multifidelityfunctions.borehole", "line_number": 117, "usage_type": "attribute"}, {"api_name": "hypothesis.given", "line_number": 123, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 124, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 124, "usage_type": "attribute"}, {"api_name": "multifidelityfunctions.adjustable_trid", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "38071175", "text": "import logging\nimport sys\n\nfrom contextlib import contextmanager\n\nLOG = logging.getLogger(__name__)\n\n\nclass Reporter(object):\n\n def __init__(self, name):\n self.name = name\n\n def setup(self):\n logger = logging.getLogger('taas')\n\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.DEBUG)\n\n fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(fmt)\n ch.setFormatter(formatter)\n\n logger.setLevel(logging.DEBUG)\n logger.addHandler(ch)\n\n return logger\n\n\n@contextmanager\ndef cleanup(stage):\n try:\n yield\n except (Exception, KeyboardInterrupt):\n LOG.error('Run failed', exc_info=True)\n finally:\n stage.destroy()\n\n\ndef retrieve(client, resource, name, **kwargs):\n director = getattr(client, '%ss' % resource)\n try:\n return director.create(name, **kwargs)\n except Exception:\n LOG.exception('Creation failed')\n sys.exit(1)\n", "sub_path": "taas/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 997, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 17, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 18, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 24, "usage_type": "attribute"}, {"api_name": "contextlib.contextmanager", "line_number": 30, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "335477936", "text": "\"\"\"\nModule for Artificial Neural Network (ANN) Prediction.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom .approximation import Approximation\n\n\nclass ANN(Approximation):\n \"\"\"\n Feed-Forward Artifical Neural Network (ANN).\n\n :param list layers: ordered list 
with the number of neurons of each hidden\n layer.\n :param torch.nn.modules.activation function: activation function at each\n layer. A single activation function can be passed or a list of them of\n length equal to the number of hidden layers.\n :param list stop_training: list with the maximum number of training\n iterations (int) and/or the desired tolerance on the training loss\n (float).\n :param torch.nn.Module loss: loss definition (Mean Squared if not given).\n :param torch.optim optimizer: the torch class implementing optimizer.\n Default value is `Adam` optimizer.\n :param float lr: the learning rate. Default is 0.001.\n :param float l2_regularization: the L2 regularization coefficient, it\n corresponds to the \"weight_decay\". Default is 0 (no regularization).\n :param int frequency_print: the frequency in terms of epochs of the print\n during the training of the network.\n :param boolean last_identity: Flag to specify if the last activation\n function is the identity function. In the case the user provides the\n entire list of activation functions, this attribute is ignored. Default\n value is True.\n\n :Example:\n >>> import ezyrb\n >>> import numpy as np\n >>> import torch.nn as nn\n >>> x = np.random.uniform(-1, 1, size =(4, 2))\n >>> y = np.array([np.sin(x[:, 0]), np.cos(x[:, 1]**3)]).T\n >>> ann = ezyrb.ANN([10, 5], nn.Tanh(), [20000,1e-5])\n >>> ann.fit(x, y)\n >>> y_pred = ann.predict(x)\n >>> print(y)\n >>> print(y_pred)\n >>> print(len(ann.loss_trend))\n >>> print(ann.loss_trend[-1])\n \"\"\"\n def __init__(self, layers, function, stop_training, loss=None,\n optimizer=torch.optim.Adam, lr=0.001, l2_regularization=0,\n frequency_print=10, last_identity=True):\n if loss is None:\n loss = torch.nn.MSELoss()\n\n if not isinstance(function, list): # Single activation function passed\n nl = len(layers) if last_identity else len(layers)+1\n function = [function] * nl\n\n if not isinstance(stop_training, list):\n stop_training = [stop_training]\n\n self.layers = layers\n self.function = function\n self.loss = loss\n self.stop_training = stop_training\n\n self.loss_trend = []\n self.model = None\n self.optimizer = optimizer\n\n self.frequency_print = frequency_print\n self.lr = lr\n self.l2_regularization = l2_regularization\n\n def _convert_numpy_to_torch(self, array):\n \"\"\"\n Converting data type.\n\n :param numpy.ndarray array: input array.\n :return: the tensorial counter-part of the input array.\n :rtype: torch.Tensor.\n \"\"\"\n return torch.from_numpy(array).float()\n\n def _convert_torch_to_numpy(self, tensor):\n \"\"\"\n Converting data type.\n\n :param torch.Tensor tensor: input tensor.\n :return: the vectorial counter-part of the input tensor.\n :rtype: numpy.ndarray.\n \"\"\"\n return tensor.detach().numpy()\n\n @staticmethod\n def _list_to_sequential(layers, functions):\n\n layers_torch = []\n inout_layers = [[layers[i], layers[i+1]] for i in range(len(layers)-1)]\n\n while True:\n if inout_layers:\n inp_d, out_d = inout_layers.pop(0)\n layers_torch.append(nn.Linear(inp_d, out_d))\n\n if functions:\n layers_torch.append(functions.pop(0))\n\n if not functions and not inout_layers:\n break\n\n return nn.Sequential(*layers_torch)\n\n def _build_model(self, points, values):\n \"\"\"\n Build the torch model.\n Considering the number of neurons per layer (self.layers), a\n feed-forward NN is defined:\n - activation function from layer i>=0 to layer i+1:\n self.function[i]; activation function at the output layer:\n Identity (by default).\n :param numpy.ndarray points: 
the coordinates of the given (training)\n points.\n :param numpy.ndarray values: the (training) values in the points.\n \"\"\"\n layers = self.layers.copy()\n layers.insert(0, points.shape[1])\n layers.append(values.shape[1])\n\n self.model = self._list_to_sequential(layers, self.function)\n\n def fit(self, points, values):\n \"\"\"\n Build the ANN given 'points' and 'values' and perform training.\n\n Training procedure information:\n - optimizer: Adam's method with default parameters (see, e.g.,\n https://pytorch.org/docs/stable/optim.html);\n - loss: self.loss (if none, the Mean Squared Loss is set by\n default).\n - stopping criterion: the fulfillment of the requested tolerance\n on the training loss compatibly with the prescribed budget of\n training iterations (if type(self.stop_training) is list); if\n type(self.stop_training) is int or type(self.stop_training) is\n float, only the number of maximum iterations or the accuracy\n level on the training loss is considered as the stopping rule,\n respectively.\n\n :param numpy.ndarray points: the coordinates of the given (training)\n points.\n :param numpy.ndarray values: the (training) values in the points.\n \"\"\"\n\n self._build_model(points, values)\n optimizer = self.optimizer(\n self.model.parameters(),\n lr=self.lr, weight_decay=self.l2_regularization)\n\n points = self._convert_numpy_to_torch(points)\n values = self._convert_numpy_to_torch(values)\n\n n_epoch = 1\n flag = True\n while flag:\n y_pred = self.model(points)\n\n loss = self.loss(y_pred, values)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n scalar_loss = loss.item()\n self.loss_trend.append(scalar_loss)\n\n for criteria in self.stop_training:\n if isinstance(criteria, int): # stop criteria is an integer\n if n_epoch == criteria:\n flag = False\n elif isinstance(criteria, float): # stop criteria is float\n if scalar_loss < criteria:\n flag = False\n\n if (flag is False or\n n_epoch == 1 or n_epoch % self.frequency_print == 0):\n print(f'[epoch {n_epoch:6d}]\\t{scalar_loss:e}')\n\n n_epoch += 1\n\n return optimizer\n\n def predict(self, new_point):\n \"\"\"\n Evaluate the ANN at given 'new_points'.\n\n :param array_like new_points: the coordinates of the given points.\n :return: the predicted values via the ANN.\n :rtype: numpy.ndarray\n \"\"\"\n new_point = self._convert_numpy_to_torch(np.array(new_point))\n y_new = self.model(new_point)\n return self._convert_torch_to_numpy(y_new)\n", "sub_path": "ezyrb/ann.py", "file_name": "ann.py", "file_ext": "py", "file_size_in_byte": 7395, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "approximation.Approximation", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.optim", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.nn.MSELoss", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 113, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 201, "usage_type": "call"}]} +{"seq_id": "93054254", "text": "from django.shortcuts import render, redirect\nfrom pure_pagination import Paginator, 
EmptyPage\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nfrom django.forms import ModelForm\nfrom .models import *\n\nRESULTS_PER_PAGE = getattr(settings, 'RESULTS_PER_PAGE', 20)\n\n\nclass NewsForm(ModelForm):\n class Meta:\n model = News\n exclude = ('reporter', 'content_type',)\n\nclass GameForm(ModelForm):\n def __init__(self, *args, **kwargs):\n super(GameForm, self).__init__(*args, **kwargs)\n ct = ContentType.objects.get(model='company')\n self.fields[\"company\"].queryset = Company.objects.filter(content_type=ct)\n\n class Meta:\n model = Game\n exclude = ('reporter', 'content_type', 'album', )\n\n# Create your views here.\n\ndef index(request):\n ct = ContentType.objects.get(model='news')\n news_list = News.objects.filter(content_type=ct).order_by('-created_date').select_related('reporter')#.prefetch_related('comments', 'related_to__a', 'related_to__a__content_type', 'related_from__b', 'related_from__b__content_type')\n\n paginator = Paginator(news_list, RESULTS_PER_PAGE)\n try:\n page = int(request.GET.get('page', 1))\n except ValueError:\n page = 1\n try:\n news_list = paginator.page(page)\n except EmptyPage:\n news_list = paginator.page(paginator.num_pages)\n\n return render(request, 'index.html', {\n 'news_list': news_list,\n })\n\ndef news(request, news_id):\n news = News.objects.filter(pk=news_id).select_related('reporter').prefetch_related('comments','comments__reporter', 'related_to', 'related_to__a', 'related_to__a__content_type', 'related_from', 'related_from__b', 'related_from__b__content_type')\n return render(request, 'news_item.html', {\n 'news': news[0],\n })\n\ndef news_modify(request, news_id = None):\n try:\n instance = News.objects.get(pk=news_id)\n except News.DoesNotExist:\n instance = None\n\n if request.method == 'POST': # If the form has been submitted...\n form = NewsForm(request.POST, instance=instance)\n if form.is_valid():\n news = form.save(commit=False)\n news.reporter = request.user\n news.save()\n return redirect('news', news.pk)\n else:\n form = NewsForm(instance=instance)\n\n return render(request, 'news_modify.html',{\n 'form': form,\n 'instance': instance,\n })\n\ndef games(request):\n games_list = Game.objects.all().order_by('-created_date').select_related('reporter','album','company')#.prefetch_related('comments')\n\n paginator = Paginator(games_list, RESULTS_PER_PAGE)\n try:\n page = int(request.GET.get('page', 1))\n except ValueError:\n page = 1\n try:\n games_list = paginator.page(page)\n except EmptyPage:\n games_list = paginator.page(paginator.num_pages)\n\n return render(request, 'games.html', {\n 'games_list': games_list,\n })\n\ndef game(request, game_id):\n game = Game.objects.filter(pk=game_id).select_related('reporter').prefetch_related('comments','comments__reporter', 'related_to__a', 'related_to__a__content_type', 'related_from__b', 'related_from__b__content_type')\n return render(request, 'game_item.html', {\n 'game': game[0],\n })\n\ndef game_modify(request, game_id = None):\n try:\n instance = Game.objects.get(pk=game_id)\n except Game.DoesNotExist:\n instance = None\n\n if request.method == 'POST': # If the form has been submitted...\n form = GameForm(request.POST, instance=instance)\n if form.is_valid():\n game = form.save(commit=False)\n game.reporter = request.user\n game.save()\n return redirect('game', game.pk)\n else:\n form = GameForm(instance=instance)\n\n return render(request, 'game_modify.html',{\n 'form': form,\n 'instance': instance,\n })\n\ndef companies(request):\n ct = 
ContentType.objects.get(model='company')\n comp_list = Company.objects.filter(content_type=ct).order_by('-created_date').select_related('reporter')#.prefetch_related('comments','games')\n\n paginator = Paginator(comp_list, RESULTS_PER_PAGE)\n try:\n page = int(request.GET.get('page', 1))\n except ValueError:\n page = 1\n try:\n comp_list = paginator.page(page)\n except EmptyPage:\n comp_list = paginator.page(paginator.num_pages)\n\n return render(request, 'companies.html', {\n 'comp_list': comp_list,\n })\n\ndef company(request, comp_id):\n comp = Company.objects.filter(pk=comp_id).select_related('reporter').prefetch_related('comments','comments__reporter', 'games__reporter')\n return render(request, 'company_item.html', {\n 'comp': comp[0]\n })\n", "sub_path": "gtdb/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4759, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.conf.settings", "line_number": 8, "usage_type": "argument"}, {"api_name": "django.forms.ModelForm", "line_number": 11, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 16, "usage_type": "name"}, {"api_name": "pure_pagination.Paginator", "line_number": 32, "usage_type": "call"}, {"api_name": "pure_pagination.EmptyPage", "line_number": 39, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 42, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 48, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 64, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 68, "usage_type": "call"}, {"api_name": "pure_pagination.Paginator", "line_number": 76, "usage_type": "call"}, {"api_name": "pure_pagination.EmptyPage", "line_number": 83, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 86, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 92, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 108, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 112, "usage_type": "call"}, {"api_name": "pure_pagination.Paginator", "line_number": 121, "usage_type": "call"}, {"api_name": "pure_pagination.EmptyPage", "line_number": 128, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 131, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "238966867", "text": "from typing import List, Dict\n\nimport attr\nfrom pydantic import BaseModel\n\nfrom .fridge import ProductInFridge, FridgeLogic, ProductUpdate\nfrom .recipe import Ingredient, Recipe\n\n\nclass ShoppingListBase(BaseModel):\n id: int = None\n items: Dict[str, float]\n\n\nclass ShoppingList(ShoppingListBase):\n fridge_id: int = None\n\n class Config:\n orm_mode = True\n\n\n@attr.s(auto_attribs=True)\nclass ShoppingListLogic:\n _fridge: FridgeLogic = None\n shopping_list: ShoppingList = None\n\n def create(self, recipes: List[Recipe]):\n self.shopping_list = ShoppingList(items={}, fridge_id=self._fridge.fridge.id)\n for recipe in recipes:\n for ingredient_in_recipe in recipe.ingredients:\n quantity_available_in_fridge = 0\n try:\n quantity_available_in_fridge = self._fridge.allocate_product(\n ProductInFridge(**ingredient_in_recipe.dict()))\n except StopIteration:\n pass\n # neglect possible mismatch of units for now\n if quantity_available_in_fridge < 
ingredient_in_recipe.quantity:\n self._add(ingredient_in_recipe, quantity_available_in_fridge)\n\n def update(self, products_changes: List[ProductUpdate]):\n for changed_product in products_changes:\n if changed_product.name in self.shopping_list.items:\n self.shopping_list.items[changed_product.name] -= changed_product.quantity\n self._fridge.allocate_product(changed_product)\n if self.shopping_list.items[changed_product.name] <= 0:\n del self.shopping_list.items[changed_product.name]\n\n def _add(self, ingredient_in_recipe: Ingredient, quantity_available_in_fridge: float = 0.0):\n quantity_to_buy = ingredient_in_recipe.quantity - quantity_available_in_fridge\n if ingredient_in_recipe.name in self.shopping_list.items:\n self.shopping_list.items[ingredient_in_recipe.name] += quantity_to_buy\n else:\n self.shopping_list.items[ingredient_in_recipe.name] = quantity_to_buy\n", "sub_path": "app/shopping_list/domain/shopping_list.py", "file_name": "shopping_list.py", "file_ext": "py", "file_size_in_byte": 2133, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pydantic.BaseModel", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 12, "usage_type": "name"}, {"api_name": "fridge.FridgeLogic", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "recipe.Recipe", "line_number": 27, "usage_type": "name"}, {"api_name": "recipe.ingredients", "line_number": 30, "usage_type": "attribute"}, {"api_name": "fridge.ProductInFridge", "line_number": 34, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 41, "usage_type": "name"}, {"api_name": "fridge.ProductUpdate", "line_number": 41, "usage_type": "name"}, {"api_name": "recipe.Ingredient", "line_number": 49, "usage_type": "name"}, {"api_name": "attr.s", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "364664349", "text": "from datetime import date\nfrom datetime import timedelta\nfrom ftw.builder import Builder\nfrom ftw.builder import create\nfrom ftw.testbrowser import browsing\nfrom ftw.testbrowser.pages.statusmessages import error_messages\nfrom opengever.tasktemplates.interfaces import IFromTasktemplateGenerated\nfrom opengever.testing import FunctionalTestCase\nfrom plone import api\nfrom plone.app.testing import TEST_USER_ID\nimport transaction\n\n\nclass TestTriggeringTaskTemplate(FunctionalTestCase):\n\n def setUp(self):\n super(TestTriggeringTaskTemplate, self).setUp()\n\n create(Builder('ogds_user').id(u'hugo.boss'))\n create(Builder('ogds_user').id(u'peter.meier'))\n self.dossier = create(Builder('dossier')\n .having(responsible=u'peter.meier'))\n self.templatedossier = create(Builder('templatedossier'))\n\n self.folder1 = create(Builder('tasktemplatefolder')\n .within(self.templatedossier)\n .titled(u'Mitberichtsverfahren')\n .in_state('tasktemplatefolder-state-activ'))\n\n create(Builder('tasktemplate')\n .within(self.folder1)\n .titled(u'Mitbericht FD')\n .having(preselected=True, deadline=17, issuer=u'current_user',\n responsible_client=u'interactive_users',\n responsible=u'current_user'))\n create(Builder('tasktemplate')\n .within(self.folder1)\n .titled(u'Mitbericht DI')\n .having(preselected=False, deadline=3, issuer=u'hugo.boss',\n responsible_client=u'client1',\n responsible=u'hugo.boss'))\n create(Builder('tasktemplate')\n .within(self.folder1)\n .titled(u'Mitbericht SD')\n .having(preselected=False, deadline=5, issuer=u'responsible',\n 
responsible_client=u'interactive_users',\n responsible=u'responsible'))\n\n def trigger_tasktemplatefolder(self, browser, folder='Mitberichtsverfahren', templates=[]):\n browser.login().open(self.dossier, view='add-tasktemplate')\n browser.fill({'Tasktemplatefolder': folder})\n browser.click_on('Continue')\n\n browser.fill({'Tasktemplates': templates})\n browser.click_on('Trigger')\n\n @browsing\n def test_redirects_back_and_show_statusmessage_when_no_active_tasktemplatefolder_exists(self, browser):\n api.content.transition(\n self.folder1,\n transition='tasktemplatefolder-transition-activ-inactiv')\n transaction.commit()\n\n browser.login().open(self.dossier, view='add-tasktemplate')\n self.assertEquals(self.dossier.absolute_url(), browser.url)\n self.assertEquals(\n ['Currently there are no active task template folders registered.'],\n error_messages())\n\n @browsing\n def test_all_active_tasktemplates_are_listed(self, browser):\n create(Builder('tasktemplatefolder')\n .titled(u'Einsprache abarbeiten'))\n create(Builder('tasktemplatefolder')\n .titled(u'Einb\\xfcrgerungsverfahren')\n .in_state('tasktemplatefolder-state-activ'))\n\n browser.login().open(self.dossier, view='add-tasktemplate')\n\n self.assertEquals(\n [u'Einb\\xfcrgerungsverfahren', 'Mitberichtsverfahren'],\n browser.css('#formfield-form-widgets-tasktemplatefolder').first.options)\n\n @browsing\n def test_step2_list_all_tasktemplates_of_the_selected_folder_and_preselects_them_correctly(self, browser):\n browser.login().open(self.dossier, view='add-tasktemplate')\n browser.fill({'Tasktemplatefolder': 'Mitberichtsverfahren'})\n browser.click_on('Continue')\n\n self.assertEquals(\n ['Mitbericht FD', 'Mitbericht DI', 'Mitbericht SD'],\n browser.css('#formfield-form-widgets-tasktemplates .option').text)\n\n self.assertEquals(\n ['Mitbericht FD'],\n browser.css('#formfield-form-widgets-tasktemplates input[checked]').getparents().text)\n\n @browsing\n def test_creates_main_task_assigned_to_current_user(self, browser):\n self.trigger_tasktemplatefolder(\n browser, templates=['Mitbericht FD', 'Mitbericht DI'])\n\n main_task = self.dossier.get('task-1')\n self.assertEquals(u'Mitberichtsverfahren', main_task.title)\n self.assertEquals(TEST_USER_ID, main_task.responsible)\n self.assertEquals(TEST_USER_ID, main_task.issuer)\n self.assertEquals('direct-execution', main_task.task_type)\n\n @browsing\n def test_sets_main_task_to_in_progress_state(self, browser):\n self.trigger_tasktemplatefolder(\n browser, templates=['Mitbericht FD', 'Mitbericht DI'])\n\n main_task = self.dossier.get('task-1')\n self.assertEquals('task-state-in-progress',\n api.content.get_state(main_task))\n\n @browsing\n def test_main_task_deadline_is_the_highest_template_deadline_plus_five(self, browser):\n self.trigger_tasktemplatefolder(\n browser, templates=['Mitbericht FD', 'Mitbericht DI'])\n\n self.assertEquals(date.today() + timedelta(days=17 + 5),\n self.dossier.get('task-1').deadline)\n\n @browsing\n def test_all_tasks_are_marked_with_marker_interface(self, browser):\n self.trigger_tasktemplatefolder(\n browser, templates=['Mitbericht FD', 'Mitbericht DI'])\n\n main_task = self.dossier.get('task-1')\n self.assertTrue(IFromTasktemplateGenerated.providedBy(main_task))\n\n for subtask in main_task.listFolderContents():\n self.assertTrue(IFromTasktemplateGenerated.providedBy(subtask))\n\n @browsing\n def test_creates_a_subtask_for_each_selected_template(self, browser):\n self.trigger_tasktemplatefolder(\n browser, templates=['Mitbericht FD', 'Mitbericht SD'])\n\n 
main_task = self.dossier.get('task-1')\n self.assertEquals(2, len(main_task.listFolderContents()))\n\n subtask1, subtask2 = main_task.listFolderContents()\n self.assertEquals('Mitbericht FD', subtask1.title)\n self.assertEquals('Mitbericht SD', subtask2.title)\n\n @browsing\n def test_replace_interactive_issuer(self, browser):\n self.trigger_tasktemplatefolder(\n browser,\n templates=['Mitbericht FD', 'Mitbericht DI', 'Mitbericht SD'])\n\n main_task = self.dossier.get('task-1')\n subtask1, subtask2, subtask3 = main_task.listFolderContents()\n\n # current_user\n self.assertEquals(TEST_USER_ID, subtask1.issuer)\n\n # not interactive\n self.assertEquals('hugo.boss', subtask2.issuer)\n\n # responsible\n self.assertEquals('peter.meier', subtask3.issuer)\n\n @browsing\n def test_replace_interactive_responsibles(self, browser):\n self.trigger_tasktemplatefolder(\n browser,\n templates=['Mitbericht FD', 'Mitbericht DI', 'Mitbericht SD'])\n\n main_task = self.dossier.get('task-1')\n subtask1, subtask2, subtask3 = main_task.listFolderContents()\n\n # current_user\n self.assertEquals(TEST_USER_ID, subtask1.responsible)\n self.assertEquals('client1', subtask1.responsible_client)\n\n # not interactive\n self.assertEquals('hugo.boss', subtask2.responsible)\n self.assertEquals('client1', subtask1.responsible_client)\n\n # responsible\n self.assertEquals('peter.meier', subtask3.responsible)\n self.assertEquals('client1', subtask1.responsible_client)\n\n @browsing\n def set_relateditems_on_every_subtask_when_selected(self, browser):\n doc1 = create(Builder('document')\n .within(self.dossier)\n .titled(u'Doc A'))\n doc2 = create(Builder('document')\n .within(self.dossier)\n .titled(u'Doc B'))\n\n self.trigger_tasktemplatefolder(\n browser, templates=['Mitbericht FD', 'Mitbericht SD'],\n documents=['Doc A', 'Doc B'])\n\n main_task = self.dossier.get('task-1')\n subtask1, subtask2 = main_task.listFolderContents()\n\n self.assertEquals([doc1, doc2], subtask1.relatedItems)\n self.assertEquals([doc1, doc2], subtask2.relatedItems)\n", "sub_path": "opengever/tasktemplates/tests/test_trigger.py", "file_name": "test_trigger.py", "file_ext": "py", "file_size_in_byte": 8346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "opengever.testing.FunctionalTestCase", "line_number": 14, "usage_type": "name"}, {"api_name": "ftw.builder.create", "line_number": 19, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 19, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 20, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 20, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 21, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 21, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 23, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 23, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 25, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 25, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 30, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 30, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 36, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 36, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 42, "usage_type": "call"}, 
{"api_name": "ftw.builder.Builder", "line_number": 42, "usage_type": "call"}, {"api_name": "plone.api.content.transition", "line_number": 59, "usage_type": "call"}, {"api_name": "plone.api.content", "line_number": 59, "usage_type": "attribute"}, {"api_name": "plone.api", "line_number": 59, "usage_type": "name"}, {"api_name": "transaction.commit", "line_number": 62, "usage_type": "call"}, {"api_name": "ftw.testbrowser.pages.statusmessages.error_messages", "line_number": 68, "usage_type": "call"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 57, "usage_type": "name"}, {"api_name": "ftw.builder.create", "line_number": 72, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 72, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 74, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 74, "usage_type": "call"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 70, "usage_type": "name"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 84, "usage_type": "name"}, {"api_name": "plone.app.testing.TEST_USER_ID", "line_number": 105, "usage_type": "argument"}, {"api_name": "plone.app.testing.TEST_USER_ID", "line_number": 106, "usage_type": "argument"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 98, "usage_type": "name"}, {"api_name": "plone.api.content.get_state", "line_number": 116, "usage_type": "call"}, {"api_name": "plone.api.content", "line_number": 116, "usage_type": "attribute"}, {"api_name": "plone.api", "line_number": 116, "usage_type": "name"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 109, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 123, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 123, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 123, "usage_type": "call"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 118, "usage_type": "name"}, {"api_name": "opengever.tasktemplates.interfaces.IFromTasktemplateGenerated.providedBy", "line_number": 132, "usage_type": "call"}, {"api_name": "opengever.tasktemplates.interfaces.IFromTasktemplateGenerated", "line_number": 132, "usage_type": "name"}, {"api_name": "opengever.tasktemplates.interfaces.IFromTasktemplateGenerated.providedBy", "line_number": 135, "usage_type": "call"}, {"api_name": "opengever.tasktemplates.interfaces.IFromTasktemplateGenerated", "line_number": 135, "usage_type": "name"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 126, "usage_type": "name"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 137, "usage_type": "name"}, {"api_name": "plone.app.testing.TEST_USER_ID", "line_number": 159, "usage_type": "argument"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 149, "usage_type": "name"}, {"api_name": "plone.app.testing.TEST_USER_ID", "line_number": 177, "usage_type": "argument"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 167, "usage_type": "name"}, {"api_name": "ftw.builder.create", "line_number": 190, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 190, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 193, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 193, "usage_type": "call"}, {"api_name": "ftw.testbrowser.browsing", "line_number": 188, "usage_type": "name"}]} +{"seq_id": "414474084", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass 
Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='BasicConfiguration',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('company_name', models.CharField(max_length=255, verbose_name=b'Nombre de la empresa')),\n ],\n ),\n migrations.CreateModel(\n name='Manufacturer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=255, verbose_name=b'Name')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='Supplier',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=255, verbose_name=b'Name')),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n", "sub_path": "kstore/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 1298, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "280133465", "text": "import plotly.graph_objs as go\nimport plotly\nfrom retrieve_data import DataLoader, RESOURCE\n\n\nclass Task1:\n def __init__(self):\n self.loader = DataLoader(RESOURCE.TEMPERATURE)\n self.data = {i: -1 for i in range(1, 49)}\n\n def _get_data(self, time):\n if self.data[time] != -1:\n return self.data[time]\n else:\n self.data[time] = self.loader.get_data(time)\n return self.data[time]\n\n def _get_df(self, time, height):\n data = self._get_data(time)\n df = data[height]\n return df\n\n def render(self, time, height):\n\n df = self._get_df(time, height)\n trace = go.Contour(\n z=df.values,\n colorbar=go.ColorBar(\n title='Degree Celcius'\n ),\n )\n data = [trace]\n\n layout = go.Layout(\n title='Temperatur for t={} and 
Altitude (z={}): {:.1f} km'.format(str(time).lstrip('0'), height, height*0.2),\n xaxis = dict(\n title='Longitude'\n ),\n yaxis = dict(\n title='Latitude'\n ),\n )\n\n fig = go.Figure(data=data, layout=layout)\n plotly.offline.iplot(fig)\n", "sub_path": "ex2/task1.py", "file_name": "task1.py", "file_ext": "py", "file_size_in_byte": 1216, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "retrieve_data.DataLoader", "line_number": 8, "usage_type": "call"}, {"api_name": "retrieve_data.RESOURCE.TEMPERATURE", "line_number": 8, "usage_type": "attribute"}, {"api_name": "retrieve_data.RESOURCE", "line_number": 8, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Contour", "line_number": 26, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 26, "usage_type": "name"}, {"api_name": "plotly.graph_objs.ColorBar", "line_number": 28, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 28, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 34, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 34, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 44, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 44, "usage_type": "name"}, {"api_name": "plotly.offline.iplot", "line_number": 45, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 45, "usage_type": "attribute"}]} +{"seq_id": "305242237", "text": "import base64\nfrom cryptography import fernet\nfrom aiohttp import web\nfrom aiohttp_session import setup, get_session\nfrom aiohttp_session.cookie_storage import EncryptedCookieStorage\n\n\ndef flash(request, message):\n request.setdefault('flash_outgoing', []).append(message)\n\n\ndef get_messages(request):\n return request.pop('flash_incoming')\n\n\nasync def flash_middleware(app, handler):\n async def process(request):\n session = await get_session(request)\n request['flash_incoming'] = session.pop('flash', [])\n response = await handler(request)\n session['flash'] = (request.get('flash_incoming', []) +\n request.get('flash_outgoing', []))\n return response\n return process\n\n\nasync def flash_handler(request):\n flash(request, 'You have just visited flash page')\n return web.HTTPFound('/')\n\n\nasync def handler(request):\n text = 'No flash messages yet'\n messages = get_messages(request)\n if messages:\n text = 'Messages: {}'.format(','.join(messages))\n return web.Response(text=text)\n\n\ndef make_app():\n app = web.Application()\n # secret_key must be 32 url-safe base64-encoded bytes\n fernet_key = fernet.Fernet.generate_key()\n secret_key = base64.urlsafe_b64decode(fernet_key)\n setup(app, EncryptedCookieStorage(secret_key))\n app.router.add_get('/', handler)\n app.router.add_get('/flash', flash_handler)\n # Install flash middleware\n app.middlewares.append(flash_middleware)\n return app\n\n\nweb.run_app(make_app())\n\n", "sub_path": "demo/flash_messages_example.py", "file_name": "flash_messages_example.py", "file_ext": "py", "file_size_in_byte": 1529, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "aiohttp_session.get_session", "line_number": 18, "usage_type": "call"}, {"api_name": "aiohttp.web.HTTPFound", "line_number": 29, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 29, "usage_type": "name"}, {"api_name": "aiohttp.web.Response", "line_number": 37, "usage_type": "call"}, 
{"api_name": "aiohttp.web", "line_number": 37, "usage_type": "name"}, {"api_name": "aiohttp.web.Application", "line_number": 41, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 41, "usage_type": "name"}, {"api_name": "cryptography.fernet.Fernet.generate_key", "line_number": 43, "usage_type": "call"}, {"api_name": "cryptography.fernet.Fernet", "line_number": 43, "usage_type": "attribute"}, {"api_name": "cryptography.fernet", "line_number": 43, "usage_type": "name"}, {"api_name": "base64.urlsafe_b64decode", "line_number": 44, "usage_type": "call"}, {"api_name": "aiohttp_session.setup", "line_number": 45, "usage_type": "call"}, {"api_name": "aiohttp_session.cookie_storage.EncryptedCookieStorage", "line_number": 45, "usage_type": "call"}, {"api_name": "aiohttp.web.run_app", "line_number": 53, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "477434438", "text": "# -*- coding: utf-8 -*-\nfrom openerp import models, fields, api\nfrom openerp.exceptions import ValidationError\nfrom openerp.tools.translate import _\nimport logging\nlogger = logging.getLogger(__name__)\n\n\n# Fundraising Studio groups\nclass FRSTzGruppeDetail(models.Model):\n _name = \"frst.zgruppedetail\"\n _rec_name = \"gruppe_lang\"\n\n # Compute a name based on - - \n # display_name = fields.Char(string=\"Group Name\",\n # compute=\"_compute_display_name\",\n # search=\"_search_display_name\",\n # readonly=True,\n # store=False)\n\n zgruppe_id = fields.Many2one(comodel_name=\"frst.zgruppe\", inverse_name='zgruppedetail_ids',\n string=\"Gruppenordner\",\n required=True, ondelete=\"cascade\", index=True)\n\n tabellentyp_id = fields.Selection(related=\"zgruppe_id.tabellentyp_id\", readonly=True, store=True)\n\n geltungsbereich = fields.Selection(string=\"Geltungsbereich\",\n selection=[('local', 'Local Group'),\n ('system', 'System Group')],\n default='system')\n gui_anzeige_profil = fields.Boolean(string=\"GuiAnzeigeProfil\",\n help=\"Show this group in the person profile view.\",\n default=True)\n\n gruppe_kurz = fields.Char(string=\"GruppeKurz\", required=True,\n help=\"gruppe_kurz is no longer in use - use gruppe_lang instead! \"\n \"The value of gruppe_lang will be copied to gruppe_kurz if gruppe_kurz is empty!\")\n gruppe_lang = fields.Char(string=\"GruppeLang\", required=True)\n gui_anzeigen = fields.Boolean(\"GuiAnzeigen\",\n help=\"If set this group is available for this instance\")\n active = fields.Boolean(string=\"Active\", compute=\"_compute_active\", store=True)\n\n # ATTENTION: \"gueltig_von\" und \"gueltig_bis\" is NOT IN USE for zGruppeDetail and may be removed in the future!\n #\n # But these fields ARE IN USE by the specific groups-for-models models like \"frst.persongruppe\"!\n # The fields are inherited through the abstract class \"frst.gruppestate\"\n #\n gueltig_von = fields.Date(string=\"GueltigVon\", required=True,\n default=lambda s: fields.Datetime.now()) # Not used -> Wird in Sicht integriert als Anlagedatum. 
Ist derzeit nicht als Anlagedatum gedacht!\n gueltig_bis = fields.Date(string=\"GueltigBis\", required=True,\n default=lambda s: fields.date(2099, 12, 31)) # Not used\n\n # PersonGruppe\n frst_persongruppe_ids = fields.One2many(comodel_name=\"frst.persongruppe\", inverse_name='zgruppedetail_id',\n string=\"PersonGruppe IDS\")\n frst_persongruppe_count = fields.Integer(string=\"Person Subscribers\",\n compute=\"cmp_frst_persongruppe_count\")\n\n # PersonEmailGruppe\n frst_personemailgruppe_ids = fields.One2many(comodel_name=\"frst.personemailgruppe\", inverse_name='zgruppedetail_id',\n string=\"PersonEmailGruppe IDS\")\n frst_personemailgruppe_count = fields.Integer(string=\"E-Mail Subscribers\",\n compute=\"cmp_frst_personemailgruppe_count\")\n\n # NEW SETTING FOR GROUPS / CHECKBOXES THAT MUST BE VALIDATE BY BY SOME SORT OF APPROVAL\n # HINT: This field is checked on group creation in abstract class frst.gruppestate > create()\n # approval_needed = fields.Boolean(\"Approval needed\",\n # default=False,\n # help=\"If this checkbox is set gueltig_von and gueltig_bis will be set to \"\n # \"the past date 09.09.1999 when the group is created to indicate that \"\n # \"an approval is needed before set the group to active.\")\n\n bestaetigung_erforderlich = fields.Boolean(string=\"Approval needed\",\n default=False,\n help=\"If this checkbox is set gueltig_von and gueltig_bis will be set \"\n \"to the past date 09.09.1999 when the group is created to indicate \"\n \"that an approval is needed before set the group to active.\")\n bestaetigung_typ = fields.Selection(selection=[('doubleoptin', 'DoubleOptIn'),\n ('phone_call', \"Phone Call\"),\n ('workflow', \"Fundraising Studio Workflow\"),\n ],\n string=\"Approval Type\", default='doubleoptin')\n\n # @api.multi\n # @api.depends('gruppe_lang', 'zgruppe_id')\n # def _compute_display_name(self):\n # tabellentyp_dict = dict(self.env['frst.zgruppe']._fields['tabellentyp_id'].selection)\n # for r in self:\n # r.display_name = \"%s (%s, %s)\" % (\n # r.gruppe_lang or r.gruppe_kurz,\n # tabellentyp_dict.get(r.zgruppe_id.tabellentyp_id, _('unknown')).upper() if r.zgruppe_id else _('unknown'),\n # r.sosync_fs_id if 'sosync_fs_id' in r._fields else _('unknown')\n # )\n\n @api.depends('gui_anzeigen')\n def _compute_active(self):\n for r in self:\n r.active = r.gui_anzeigen\n\n def _search_display_name(self, operator, value):\n return ['|',\n ('gruppe_lang', operator, value),\n ('sosync_fs_id', operator, value)\n ]\n\n @api.onchange('gruppe_lang', 'geltungsbereich')\n def onchange_gruppe_lang_geltungsbereich(self):\n for r in self:\n if r.gruppe_lang and not r.gruppe_kurz:\n r.gruppe_kurz = r.gruppe_lang\n if r.geltungsbereich == 'local':\n r.gui_anzeigen = True\n\n @api.multi\n def cmp_frst_persongruppe_count(self):\n for r in self:\n r.frst_persongruppe_count = len(self.frst_persongruppe_ids) or 0\n\n @api.multi\n def cmp_frst_personemailgruppe_count(self):\n for r in self:\n r.frst_personemailgruppe_count = len(self.frst_personemailgruppe_ids) or 0\n\n @api.model\n def create(self, vals):\n if vals.get('geltungsbereich') != 'local':\n assert self.env.user.has_group('base.sosync'), _(\"You can not create a system group!\")\n\n return super(FRSTzGruppeDetail, self).create(vals)\n\n @api.multi\n def write(self, vals):\n if self and vals and not self.env.user.has_group('base.sosync'):\n # Do not change the group folder (because of the tabellentyp_id)\n if 'zgruppe_id' in vals:\n raise ValidationError(_(\"You can not change the group folder (zgruppe_id). 
Please delete and \"\n \"recreate the group!\"))\n # Do not change the \"geltungsbereich\"\n if 'geltungsbereich' in vals:\n raise ValidationError(_(\"You can not change the 'geltungsbereich'. Please delete and \"\n \"recreate the group!\"))\n\n # Prevent the change of any relevant fields for system groups\n if any(r.geltungsbereich != 'local' for r in self):\n if any(f in vals for f in ['zgruppe_id', 'geltungsbereich', 'gui_anzeige_profil', 'gruppe_kurz',\n 'gruppe_lang', 'gui_anzeigen', 'gueltig_von', 'gueltig_bis', 'steuerung_bit',\n ]):\n raise ValidationError('You can not change system groups!')\n\n return super(FRSTzGruppeDetail, self).write(vals)\n\n @api.multi\n def unlink(self):\n if not self.env.user.has_group('base.sosync'):\n if any(r.geltungsbereich != 'local' for r in self):\n raise ValidationError('You can not delete system groups!')\n\n return super(FRSTzGruppeDetail, self).unlink()\n", "sub_path": "addons-own/fso_frst_groups/models/frst_zgruppedetail.py", "file_name": "frst_zgruppedetail.py", "file_ext": "py", "file_size_in_byte": 8378, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "openerp.models.Model", "line_number": 10, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 10, "usage_type": "name"}, {"api_name": "openerp.fields.Many2one", "line_number": 21, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 21, "usage_type": "name"}, {"api_name": "openerp.fields.Selection", "line_number": 25, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 25, "usage_type": "name"}, {"api_name": "openerp.fields.Selection", "line_number": 27, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 27, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 31, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 31, "usage_type": "name"}, {"api_name": "openerp.fields.Char", "line_number": 35, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 35, "usage_type": "name"}, {"api_name": "openerp.fields.Char", "line_number": 38, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 38, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 39, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 39, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 41, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 41, "usage_type": "name"}, {"api_name": "openerp.fields.Date", "line_number": 48, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 48, "usage_type": "name"}, {"api_name": "openerp.fields.Datetime.now", "line_number": 49, "usage_type": "call"}, {"api_name": "openerp.fields.Datetime", "line_number": 49, "usage_type": "attribute"}, {"api_name": "openerp.fields", "line_number": 49, "usage_type": "name"}, {"api_name": "openerp.fields.Date", "line_number": 50, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 50, "usage_type": "name"}, {"api_name": "openerp.fields.date", "line_number": 51, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 51, "usage_type": "name"}, {"api_name": "openerp.fields.One2many", "line_number": 54, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 54, "usage_type": "name"}, {"api_name": "openerp.fields.Integer", "line_number": 56, 
"usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 56, "usage_type": "name"}, {"api_name": "openerp.fields.One2many", "line_number": 60, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 60, "usage_type": "name"}, {"api_name": "openerp.fields.Integer", "line_number": 62, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 62, "usage_type": "name"}, {"api_name": "openerp.fields.Boolean", "line_number": 73, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 73, "usage_type": "name"}, {"api_name": "openerp.fields.Selection", "line_number": 78, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 78, "usage_type": "name"}, {"api_name": "openerp.api.depends", "line_number": 95, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 95, "usage_type": "name"}, {"api_name": "openerp.api.onchange", "line_number": 106, "usage_type": "call"}, {"api_name": "openerp.api", "line_number": 106, "usage_type": "name"}, {"api_name": "openerp.api.multi", "line_number": 114, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 114, "usage_type": "name"}, {"api_name": "openerp.api.multi", "line_number": 119, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 119, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 127, "usage_type": "call"}, {"api_name": "openerp.api.model", "line_number": 124, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 124, "usage_type": "name"}, {"api_name": "openerp.exceptions.ValidationError", "line_number": 136, "usage_type": "call"}, {"api_name": "openerp.tools.translate._", "line_number": 136, "usage_type": "call"}, {"api_name": "openerp.exceptions.ValidationError", "line_number": 140, "usage_type": "call"}, {"api_name": "openerp.tools.translate._", "line_number": 140, "usage_type": "call"}, {"api_name": "openerp.exceptions.ValidationError", "line_number": 148, "usage_type": "call"}, {"api_name": "openerp.api.multi", "line_number": 131, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 131, "usage_type": "name"}, {"api_name": "openerp.exceptions.ValidationError", "line_number": 156, "usage_type": "call"}, {"api_name": "openerp.api.multi", "line_number": 152, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 152, "usage_type": "name"}]} +{"seq_id": "405618423", "text": "from django.shortcuts import render\n\ndef welcome(request):\n return render(request, 'index.html', {'msg':'Hello World!'})\n\ndef restaurant_list(request):\n\n context = {\n \"my_list\": [{\n\n \"restaurant_name\" : \"White Robata\",\n \"food_type\" : \"Japanese food\"},\n {\"restaurant_name\" : \"Chipolte\",\n \"food_type\" : \"Mexican food\"},\n {\"restaurant_name\" : \"PF Changs\",\n \"food_type\" : \"Chinese food\"},]\n\n }\n return render(request, 'list.html', context)\n\n\ndef restaurant_detail(request):\n\n context = {\n \"my_object\": {\n\n \"restaurant_name\": \"Mishmash\",\n \"food_type\": \"Lebanese food\",\n \n\n }\n\n }\n return render(request, 'detail.html', context)\n", "sub_path": "restaurants/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 721, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.shortcuts.render", "line_number": 4, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": 
"django.shortcuts.render", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "650644619", "text": "'''\n\"INTEL CONFIDENTIAL\nCopyright 30-11-2012 Intel Corporation All Rights Reserved.\nThe source code contained or described herein and all documents related to the source code (\"Material\") are owned by Intel Corporation or its suppliers or\nlicensors.\nTitle to the Material remains with Intel Corporation or its suppliers and licensors.\nThe Material may contain trade secrets and proprietary and confidential information of Intel Corporation and its suppliers and licensors,\nand is protected by worldwide copyright and trade secret laws and treaty provisions.\nNo part of the Material may be used, copied, reproduced, modified, published, uploaded, posted, transmitted, distributed, or disclosed in any way without\nIntel's prior express written permission.\nNo license under any patent, copyright, trade secret or other intellectual property right is granted to or conferred upon you by disclosure or delivery\nof the Materials, either expressly, by implication, inducement, estoppel or otherwise.\nAny license under such intellectual property rights must be express and approved by Intel in writing.\nInclude any supplier copyright notices as supplier requires Intel to use.\nInclude supplier trademarks or logos as supplier requires Intel to use, preceded by an asterisk. An asterisked footnote can be added as follows:\n*Third Party trademarks are the property of their respective owners.\nUnless otherwise agreed by Intel in writing, you may not remove or alter this notice or any other notice embedded in Materials by Intel or Intel's suppliers\nor licensors in any way.\"\n'''\n\nimport os\nimport shutil\nimport platform\nimport logging\nimport re\n\nimport checks\nimport consts\nimport wrappers.wrapper as wrapper\nfrom utils import join, exec_with_timeout, rm_tree\n\nlog = logging.getLogger(__name__)\n\n\nUNEXPECTED = \"Unexpected extensions\"\n\napi_names = ['OCL_VERSION_1_0 API', 'OCL_VERSION_1_1 API', 'OCL_VERSION_1_2 API', 'OGL_VERSION_1_0 API',\n 'OGL_VERSION_1_1 API', 'OGL_VERSION_1_2 API', 'OGL_VERSION_1_3 API', 'OGL_VERSION_1_4 API',\n 'OGL_VERSION_1_5 API', 'OGL_VERSION_2_0 API', 'OGL_VERSION_2_1 API', 'OGL_VERSION_3_0 API',\n 'OGL_VERSION_3_1 API', 'OGL_VERSION_3_2 API', 'OGL_VERSION_3_3 API', 'OGL_VERSION_4_0 API',\n 'OGL_VERSION_4_1 API', 'OGL_VERSION_4_2 API', 'OGL_VERSION_4_3 API', 'OGL_VERSION_4_4 API']\n\n\ndef parse_api_functions(filename):\n lines = (open(filename, 'r')).readlines()\n ocl_version_1_0_api = {}\n ocl_version_1_1_api = {}\n ocl_version_1_2_api = {}\n ogl_version_1_0_api = {}\n ogl_version_1_1_api = {}\n ogl_version_1_2_api = {}\n ogl_version_1_3_api = {}\n ogl_version_1_4_api = {}\n ogl_version_1_5_api = {}\n ogl_version_2_0_api = {}\n ogl_version_2_1_api = {}\n ogl_version_3_0_api = {}\n ogl_version_3_1_api = {}\n ogl_version_3_2_api = {}\n ogl_version_3_3_api = {}\n ogl_version_4_0_api = {}\n ogl_version_4_1_api = {}\n ogl_version_4_2_api = {}\n ogl_version_4_3_api = {}\n ogl_version_4_4_api = {}\n\n api_dicts = [ocl_version_1_0_api, ocl_version_1_1_api, ocl_version_1_2_api, ogl_version_1_0_api,\n ogl_version_1_1_api, ogl_version_1_2_api, ogl_version_1_3_api, ogl_version_1_4_api,\n ogl_version_1_5_api, ogl_version_2_0_api, ogl_version_2_1_api, ogl_version_3_0_api,\n ogl_version_3_1_api, ogl_version_3_2_api, ogl_version_3_3_api, ogl_version_4_0_api,\n ogl_version_4_1_api, ogl_version_4_2_api, ogl_version_4_3_api, ogl_version_4_4_api]\n\n for i in range(0, 
len(api_names)):\n create_api_functions_dict(api_names[i], api_dicts[i], lines)\n\n for i in range(0, len(api_dicts)):\n # testing support for current API functions\n for name, is_supported_info in api_dicts[i].iteritems():\n # tested function not supported\n if is_supported_info == 'NOT':\n # case failed\n log.info('From %s not supported function: %s' % (api_names[i], name))\n else:\n # case passed\n continue\n return api_dicts\n\n\ndef create_api_functions_dict(api_string, api_dict, diag_lines):\n count = 0\n for i in range(0, len(diag_lines)):\n if 'GL Extensions string:' in diag_lines[i]:\n break\n if api_string in diag_lines[i]:\n count = i + 1\n break\n while not diag_lines[count].strip() == '' and count != 0 and not '_VERSION' in diag_lines[count]:\n api_dict.update({diag_lines[count].split()[0]: diag_lines[count].split()[1]})\n count += 1\n\n\ndef parse_pixel_formats(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n pixel_formats_found = False\n pixel_formats_number = ''\n pixel_formats_content = []\n\n count = 0\n for line in lines:\n if 'pixel formats' in line:\n pixel_formats_found = True\n #print 'Found pixel formats in line: ' + str(count)\n for c in line:\n if c.isdigit():\n pixel_formats_number += c\n pixel_formats_number = int(pixel_formats_number)\n\n # acquire the number of the first line containing pixel format data\n count += 1\n break\n count += 1\n\n if not pixel_formats_found:\n log.info('Unknown number of pixel formats. Exiting..')\n f.close()\n return\n\n log.info('Found ' + str(pixel_formats_number) + ' pixel formats.')\n log.info('Parsing...')\n\n #print 'Pixel format header line: ' + str(count)\n pixel_formats_header = lines[count]\n #print pixel_formats_header\n\n count += 1\n #print 'Pixel format data start line: ' + str(count)\n #print 'Pixel format data end line: ' + str(count + pixel_formats_number)\n\n keys_list = pixel_formats_header.split('|')\n for k in range(0, len(keys_list)):\n keys_list[k] = keys_list[k].strip()\n #print keys_list[k]\n\n #extract the actual pixel formats data\n pixel_formats_list = []\n for i in range(count, count + pixel_formats_number):\n #read the line containing pixel format values from a file\n content_line = lines[i].rstrip()\n pixel_formats_content.append(content_line)\n\n #extract pixel format values from the line\n pf_values_list = content_line.split('|')\n for v in range(0, len(pf_values_list)):\n pf_values_list[v] = pf_values_list[v].strip()\n\n #create a dictionary containing key-value map of a pixel format\n pf_dict = dict(zip(keys_list, pf_values_list))\n\n #append the dictionary to the list\n pixel_formats_list.append(pf_dict)\n\n log.info('Number of parsed pixel formats: ' + str(len(pixel_formats_content)))\n f.close()\n return pixel_formats_list\n\n\ndef parse_csv(filename):\n pixel_formats_list = []\n for csv_line in [line.strip() for line in open(filename, 'r')]:\n if 'ID' in csv_line:\n keys_list = csv_line.split(',')\n else:\n pixel_formats_list.append(dict(zip(keys_list, csv_line.split(','))))\n log.info('Number of parsed pixel formats: ' + str(len(pixel_formats_list)))\n return pixel_formats_list\n\n\nclass ExtensionGuardTool(wrapper.ExtendedTestTool):\n \"\"\"\n Extension guard wrapper\n \"\"\"\n TEST_TOOL_NAME = \"extension_guard\"\n TEST_LOGS_DIR = \"test_logs\"\n TEST_TOOL_DIR = \"tool\"\n RERUN_TIMEOUTS = False\n REMOVE_CASES = False\n FILES_TO_COPY = []\n FORCE_KILL = False\n FORCE_FLUSH = False\n\n def compare_pixel_formats(self, pattern_pixel_formats_list, tested_pixel_formats_list):\n\n 
pattern_pf_ids = []\n tested_pf_ids = []\n\n missing_pixel_formats = []\n additional_pixel_formats = []\n\n result = \"\"\n\n #extract a list of pixel formats IDs\n for pattern_pf in pattern_pixel_formats_list:\n pattern_pf_ids.append(pattern_pf[\"ID\"])\n for tested_pf in tested_pixel_formats_list:\n tested_pf_ids.append(tested_pf[\"ID\"])\n\n #find missing and additional pixel_formats_ids\n if not set(pattern_pf_ids) == set(tested_pf_ids):\n\n missing_pixel_formats_ids = set(pattern_pf_ids) - set(tested_pf_ids)\n if not len(missing_pixel_formats_ids) == 0:\n for missing_id in missing_pixel_formats_ids:\n missing_pixel_formats.append(item for item in pattern_pixel_formats_list if item[\"ID\"] == missing_id)\n result += 'Missing pixel formats IDs: ' + str(missing_id) + \"\\n\"\n\n additional_pixel_formats_ids = set(tested_pf_ids) - set(pattern_pf_ids)\n if not len(additional_pixel_formats_ids) == 0:\n for additional_id in additional_pixel_formats_ids:\n additional_pixel_formats.append(item for item in tested_pixel_formats_list\n if item[\"ID\"] == additional_id)\n result += 'Additional pixel formats IDs: ' + str(additional_id) + \"\\n\"\n\n #if there are no differences check if the order is the same\n #it is assumed that there are no duplicates in pixel formats lists\n else:\n for i in range(0, len(pattern_pf_ids)):\n if not pattern_pf_ids[i] == tested_pf_ids[i]:\n result += \"Pixel formats are in different order.\" + \"\\n\"\n\n shared_pixel_formats_ids = set(pattern_pf_ids) & set(tested_pf_ids)\n\n #check if the values of shared pixel formats are the same\n for pf_id in shared_pixel_formats_ids:\n tested_dict = (item for item in tested_pixel_formats_list if item[\"ID\"] == pf_id).next()\n pattern_dict = (item for item in pattern_pixel_formats_list if item[\"ID\"] == pf_id).next()\n\n #check whether the values corresponding to keys are equal for both pixel formats\n res = {\"status\": consts.TC_RESULT_PASS, \"comment\": \"\"}\n for key in list(pattern_dict.keys()):\n if not pattern_dict[key] == tested_dict[key]:\n log.info('Fail in pixel format ID = ' + pf_id + '. Key ' + key +\n ': tested = ' + tested_dict[key] + ', reference = ' + pattern_dict[key])\n result += 'Fail in pixel format ID = ' + pf_id + '. Key ' + key + \\\n ': tested = ' + tested_dict[key] + ', reference = ' + pattern_dict[key] + '\\n'\n res[\"comment\"] += 'Fail in pixel format ID = ' + pf_id + '. 
Key ' + key + \\\n ': tested = ' + tested_dict[key] + ', reference = ' + pattern_dict[key] + '\\n'\n if res[\"comment\"] != \"\":\n res[\"status\"] = consts.TC_RESULT_FAIL\n res[\"cmd_line\"] = self.cmd_line\n self.update_res(str(pf_id), **res)\n return result\n\n def update_res(self, testcase, **result):\n result[\"time\"] = \"0.001\"\n if self.testcase_allowed(self.TEST_TOOL_NAME + \".\" + testcase):\n self.update_result(testcase, **result)\n else:\n if not self.REMOVE_CASES:\n result[\"status\"] = consts.TC_RESULT_DISB\n self.update_result(testcase, **result)\n log.info(\"%s blocked by white/blacklist settings\" % (self.TEST_TOOL_NAME + \".\" + testcase))\n\n def testcase_allowed(self, testcase):\n \"\"\" Function returns True if given testsuite/testcase combination is allowed by Berta black/whitelist settings \"\"\"\n return len(self.get_limited_tc_list([testcase])) > 0\n\n def get_oglc(self):\n \"\"\" Function getting version of oglconform to be used in tests \"\"\"\n if self.get_arch() == \"android_arm\":\n return \"oglconform_and_arm\"\n if self.get_arch() == \"android_x86\":\n return \"oglconform\"\n\n execSuffix = \"\"\n if platform.system() == \"Windows\":\n execSuffix = \".exe\"\n if \"64\" in os.environ[\"BERTA_OS\"] and checks.get_distro() != \"Android\":\n return \"oglconform\" + execSuffix\n else:\n return \"oglconform\" + execSuffix\n\n def get_arch(self):\n if checks.get_distro() == \"Android\":\n output, ret = exec_with_timeout(\"cat /proc/cpuinfo\", 10, shell=True)\n if not ret:\n if \"ARM\" in output:\n return \"android_arm\"\n else:\n return \"android_x86\"\n else:\n if platform.system() == \"Windows\":\n if \"64\" in os.environ[\"BERTA_OS\"]:\n return \"win_x64\"\n else:\n return \"win_x86\"\n else:\n if platform.system() == \"Linux\":\n if \"64\" in os.environ[\"BERTA_OS\"]:\n return \"lnx_x64\"\n else:\n return \"lnx_x86\"\n else:\n if \"64\" in os.environ[\"BERTA_OS\"]:\n return \"mac_x64\"\n else:\n return \"mac_x86\"\n\n def __init__(self, scen_file, tool_path, task_id, bld_path, conf_file=None):\n wrapper.ExtendedTestTool.__init__(self, scen_file, tool_path, task_id, bld_path, conf_file)\n self.scenario = []\n self.detected_gl_extensions = {}\n self.detected_egl_extensions = {}\n self.detected_caps = {}\n self.expected_values = {}\n self.extensions_file = None\n self.pixelformats_file = None\n self.pixelformats_csv = None\n self.caps_file = None\n self.functions = False\n self.tooldir = \"\"\n self.oglc = \"\"\n self.configs_to_run = []\n\n def prepare_tool(self):\n\n log.info(\"Loading file_obj with scenario names: %s\", self.scen_file)\n with open(self.scen_file, \"r\") as file_obj:\n files_data = file_obj.read().splitlines()\n\n run_parameters = [x.strip() for x in files_data[0].split(\" \")]\n if \"--runBuild\" in run_parameters:\n self.oglconform_dir = self.bld_path\n run_parameters.remove(\"--runBuild\")\n else:\n self.oglconform_dir = self.tool_path + os.sep + \"bin\"\n\n scenario_dir = self.tool_path + os.sep + \"scenarios\" + os.sep\n\n if \"--functions\" in run_parameters:\n run_parameters.remove(\"--functions\")\n self.functions = True\n\n if \"--removeCases\" in run_parameters:\n run_parameters.remove(\"--removeCases\")\n self.REMOVE_CASES = True\n\n if \"--pixelformat\" in run_parameters:\n run_parameters.remove(\"--pixelformat\")\n self.pixelformats_file = scenario_dir + run_parameters[0]\n\n if \"--cap\" in run_parameters:\n run_parameters.remove(\"--cap\")\n self.caps_file = scenario_dir + run_parameters[0]\n\n if \"--ext\" in run_parameters:\n 
run_parameters.remove(\"--ext\")\n self.extensions_file = scenario_dir + run_parameters[0]\n\n if platform.system() == \"Windows\":\n self.tooldir = os.environ['SYSTEMDRIVE'] + \"\\\\\" + self.TEST_TOOL_DIR\n\n else:\n self.tooldir = \"/data/local/tmp/\" + self.TEST_TOOL_DIR\n\n if os.path.isdir(self.tooldir) and self.TOOLDIR_CLEANUP:\n # remove used test binaries\n log.info(\"Removing local tool directory: %s\", self.tooldir)\n try:\n rm_tree(self.tooldir)\n except(IOError) as why:\n log.error(\"Failed to remove local tool directory: %s\", why)\n\n if not os.path.isdir(self.tooldir):\n # copy test binaries to current directory here\n log.info(\"Copying tools from %s to %s\", self.tool_path + os.sep + \"bin\", self.tooldir)\n try:\n shutil.copytree(self.tool_path + os.sep + \"bin\", self.tooldir)\n except (IOError) as why:\n log.error(\"Failed to copy tool file_obj: %s\", str(why))\n log.error(\"self.tooldir=%s, tool_path=%s\\n\", self.tooldir, self.tool_path)\n exit()\n\n else:\n log.warning(\"Test tool directory %s already exists.\", self.tooldir)\n\n if not os.path.isdir(self.tooldir + os.sep + self.TEST_LOGS_DIR):\n os.mkdir(self.tooldir + os.sep + self.TEST_LOGS_DIR)\n\n if not os.path.isdir(os.getcwd() + os.sep + self.TEST_LOGS_DIR):\n os.mkdir(os.getcwd() + os.sep + self.TEST_LOGS_DIR)\n\n self.oglc = join(self.tooldir, self.get_oglc())\n\n log.info(\" Copying oglconform build from %s to %s.\", join(self.oglconform_dir, self.get_arch(), self.get_oglc()), self.oglc)\n if platform.system() == \"Windows\":\n exec_with_timeout(\"copy \" + join(self.oglconform_dir, self.get_arch(), self.get_oglc()) + \" \" + self.oglc, 5 * 60, shell=True, tracing=True,\n cwd=os.getcwd())\n else:\n exec_with_timeout(\"cp \" + join(self.oglconform_dir, self.get_arch(), self.get_oglc()) + \" \" + self.oglc, 5 * 60, shell=True, tracing=True,\n cwd=os.getcwd())\n exec_with_timeout(\"chmod -R 755 \" + self.oglc, 5 * 60, shell=True, tracing=True, cwd=os.getcwd())\n\n def prepare_scenario(self):\n \"\"\" Reads scenario file_obj and creates test cases list \"\"\"\n\n log.info(\"Preparing scenarios\")\n\n mappings = open(os.path.join(self.tool_path, \"scenarios\", \"configs.txt\")).read().splitlines()\n self.configs_to_run = {}\n for mapping in mappings:\n tokens = mapping.split(\",\")\n self.configs_to_run[tokens[0]] = []\n self.configs_to_run[tokens[0]].extend(tokens[1:])\n\n self.detected_gl_extensions[tokens[0]] = []\n self.detected_egl_extensions[tokens[0]] = []\n self.expected_values[tokens[0]] = {}\n\n expected_configs = set()\n\n current_config = None\n if self.caps_file:\n for caps_line in [line.strip() for line in open(self.caps_file, \"r\")]:\n log.info(caps_line)\n if caps_line.startswith(\"#\"):\n continue\n\n if \":\" in caps_line:\n current_config = caps_line[:-1]\n expected_configs.update([current_config])\n\n if \"=\" in caps_line:\n if \"caps\" not in self.expected_values[current_config]:\n self.expected_values[current_config][\"caps\"] = []\n self.expected_values[current_config][\"caps\"].append(caps_line.strip())\n\n if self.extensions_file:\n for csv_line in [line.strip() for line in open(self.extensions_file, \"r\")]:\n csv_contents = csv_line.split(\";\")\n\n if len(csv_contents) != 2:\n continue\n\n if csv_contents[1] == \"\":\n continue\n\n ext_name = csv_contents[0]\n if ext_name.strip().startswith(\"#\"):\n continue\n\n configs = csv_contents[1].split(\",\")\n expected_configs.update(configs)\n\n for config in configs:\n if config == \"\":\n continue\n if config not in 
self.configs_to_run:\n continue\n if \"extensions\" not in self.expected_values[config]:\n self.expected_values[config][\"extensions\"] = []\n self.expected_values[config][\"extensions\"].append(ext_name)\n\n keys = self.configs_to_run.keys()\n for k in keys:\n if k not in expected_configs:\n del self.configs_to_run[k]\n\n if self.pixelformats_file or self.functions:\n if checks.is_windows():\n self.configs_to_run[\"wgl\"] = [\"\", \"\"]\n if checks.is_android():\n self.configs_to_run[\"es31\"] = [\" -es3 -ctx es3.1\", \"\"]\n\n if self.pixelformats_file:\n self.pixelformats_csv = parse_csv(self.pixelformats_file)\n self.scenario.append((\"pixelformats\", 120, \"\", False))\n\n self.diag_lines = {}\n self.diag = {}\n for config in self.configs_to_run.keys():\n if platform.system() == \"Windows\":\n try:\n exec_with_timeout(\n self.get_oglc() + \" -diag \" + self.configs_to_run[config][1] + \" > \" + self.TEST_LOGS_DIR + os.sep + \"diag-\" + config + \".log \",\n 60 * 5,\n shell=True,\n tracing=True,\n cwd=self.tooldir\n )\n except IndexError:\n log.exception(\"No Windows commandline found for config %s\" % config)\n else:\n try:\n exec_with_timeout(\n \"./\" + self.get_oglc() + \" -diag \" + self.configs_to_run[config][0] + \" > \" + self.TEST_LOGS_DIR + os.sep + \"diag-\" + config + \".log \",\n 60 * 5,\n shell=True,\n tracing=True,\n cwd=self.tooldir)\n except IndexError:\n log.exception(\"No Android commandline found for config %s\" % config)\n\n shutil.copy2(self.tooldir + os.sep + self.TEST_LOGS_DIR + os.sep + \"diag-\" + config + \".log\", os.getcwd())\n shutil.copy2(self.tooldir + os.sep + self.TEST_LOGS_DIR + os.sep + \"diag-\" + config + \".log\",\n os.getcwd() + os.sep + self.TEST_LOGS_DIR + os.sep + \"diag-\" + config + \".log\")\n self.diag_lines[config] = [l.strip() for l in open(self.tooldir + os.sep + self.TEST_LOGS_DIR + os.sep + \"diag-\" + config + \".log\", \"r\")\n if not l.strip().startswith(\"#\")]\n self.diag[config] = open(self.tooldir + os.sep + self.TEST_LOGS_DIR + os.sep + \"diag-\" + config + \".log\", \"r\").read()\n for line in self.diag_lines[config]:\n if \"OGLconform will exit.\" in line:\n del self.configs_to_run[config]\n continue\n\n for line in self.diag_lines[config]:\n if line.startswith(\"GL Extensions string\"):\n gl_extensions = (line.strip().split(\": \")[1]).split(\" \")\n self.detected_gl_extensions[config] = gl_extensions\n if line.startswith(\"EGL Extensions string\"):\n egl_extensions = (line.strip().split(\": \")[1]).split(\" \")\n self.detected_egl_extensions[config] = egl_extensions\n\n caps_text = open(self.tooldir + os.sep + self.TEST_LOGS_DIR + os.sep + \"diag-\" + config + \".log\", \"r\").read()\n caps_text = re.search(r\"(?:CAPABILITIES)(.*?)\\n{2}\", caps_text, re.S).group(0)\n\n self.detected_caps[config] = [line.strip() for line in caps_text.split(\"\\n\") if \"=\" in line]\n\n self.scenario = []\n\n def run_testing(self):\n detected_gl_flatlist = {}\n for config in self.detected_gl_extensions.keys():\n detected_gl_flatlist[config] = []\n for extension in self.detected_gl_extensions[config]:\n detected_gl_flatlist[config].append(extension)\n\n detected_egl_flatlist = {}\n for config in self.detected_egl_extensions.keys():\n detected_egl_flatlist[config] = []\n for extension in self.detected_egl_extensions[config]:\n detected_egl_flatlist[config].append(extension)\n\n for config in self.configs_to_run.keys():\n if platform.system() == \"Windows\":\n self.cmd_line = self.get_oglc() + \" -diag \" + self.configs_to_run[config][1]\n 
else:\n self.cmd_line = self.get_oglc() + \" -diag \" + self.configs_to_run[config][0]\n\n if self.extensions_file:\n for ext in self.expected_values[config][\"extensions\"]:\n result = {\"status\": consts.TC_RESULT_PASS}\n\n in_extension_string = False\n\n in_extension_string |= (\"egl\" not in config and ext in detected_gl_flatlist[config])\n in_extension_string |= (\"egl\" in config and ext in detected_egl_flatlist[config])\n\n if not in_extension_string:\n result[\"status\"] = consts.TC_RESULT_FAIL\n result[\"comment\"] = \"Expected extension is not reported\"\n result[\"cmd_line\"] = self.cmd_line\n\n wstring = \"WARNING: Extension %s is reported but its API is NOT COMPLETE.\" % (ext)\n if wstring in self.diag_lines[config]:\n result[\"status\"] = consts.TC_RESULT_FAIL\n result[\"comment\"] = wstring\n result[\"cmd_line\"] = self.cmd_line\n\n self.update_res(config + \".extension.\" + ext, **result)\n\n unexpected_extensions_result = {\"status\": consts.TC_RESULT_PASS}\n\n differences = []\n if config.startswith(\"egl\"):\n differences = (list(set(self.detected_egl_extensions[config]) - set(self.expected_values[config][\"extensions\"])))\n else:\n differences = (list(set(self.detected_gl_extensions[config]) - set(self.expected_values[config][\"extensions\"])))\n\n if len(differences) > 0:\n log_path = config + \"-unexpected.log\"\n unexpected_extensions_result[\"status\"] = consts.TC_RESULT_FAIL\n unexpected_extensions_result[\"comment\"] = str(len(differences)) + \" extensions were found in driver ext string but weren't expected. \" + \\\n \"List of extensions: \" + 'log'\n unexpected_extensions_result[\"cmd_line\"] = self.cmd_line\n log.warning(\"Extensions found in driver extension string but weren't expected (config \" + config + \"): \\n\" + str(differences))\n with open(os.getcwd() + os.sep + log_path, 'w') as file_obj:\n file_obj.write(\"Extensions found in driver extension string but weren't expected (config \" + config + \"): \\n\" + \"\\n\".join(differences))\n\n self.update_res(config + \".extension.\" + UNEXPECTED, **unexpected_extensions_result)\n\n if self.caps_file:\n for cap in self.expected_values[config][\"caps\"]:\n cap_result = {\"status\": consts.TC_RESULT_PASS}\n stripped_cap = cap.strip()\n\n if stripped_cap.endswith(\"*\"):\n stripped_cap = re.escape(stripped_cap[:-1]) + \".*\"\n else:\n stripped_cap = re.escape(stripped_cap)\n cap_regex = re.compile(stripped_cap)\n log.info(\"regex: %s\" % (stripped_cap))\n if not any(cap_regex.match(detected_cap) for detected_cap in self.detected_caps[config]):\n cap_result[\"status\"] = consts.TC_RESULT_FAIL\n cap_result[\"comment\"] = \"Capability not found or incorrect value. 
Diag log: \" + 'log'\n cap_result[\"cmd_line\"] = self.cmd_line\n else:\n cap_result[\"comment\"] = \"ok\"\n\n self.update_res(config + \".capability.\" + cap.split(\"=\")[0].strip(), **cap_result)\n\n if self.pixelformats_csv:\n parsed_pixel_formats_list_pattern = parse_pixel_formats(self.tooldir + os.sep + self.TEST_LOGS_DIR + os.sep + \"diag-\" + config + \".log\")\n log.info(\"Comparing pixel formats\")\n res = self.compare_pixel_formats(self.pixelformats_csv, parsed_pixel_formats_list_pattern)\n if res == \"\":\n result = {\"status\": consts.TC_RESULT_PASS}\n else:\n result = {\"status\": consts.TC_RESULT_FAIL, \"cmd_line\": self.cmd_line, \"comment\": res}\n self.update_res(\"pixelformats\", **result)\n\n if self.functions:\n api_dicts = parse_api_functions(self.tooldir + os.sep + self.TEST_LOGS_DIR + os.sep + \"diag-\" + config + \".log\")\n for i in range(0, len(api_dicts)):\n function_dict = api_dicts[i]\n api = api_names[i]\n for function_name, function_support in function_dict.items():\n if \"supported\" in function_support:\n result = {\"status\": consts.TC_RESULT_PASS, \"comment\": \"supported\"}\n else:\n result = {\"status\": consts.TC_RESULT_FAIL, \"cmd_line\": self.cmd_line, \"comment\": \"not supported\"}\n self.update_res(api + \".\" + function_name, **result)\n\n if platform.system() == \"Windows\":\n exec_with_timeout(\"del \" + self.oglc, 5 * 60, shell=True, tracing=True, cwd=os.getcwd())\n else:\n exec_with_timeout(\"rm \" + self.oglc, 5 * 60, shell=True, tracing=True, cwd=os.getcwd())\n\nif __name__ == \"__main__\":\n wrapper.run(ExtensionGuardTool)\n", "sub_path": "berta/berta/wrappers/extension_guard.py", "file_name": "extension_guard.py", "file_ext": "py", "file_size_in_byte": 29182, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 32, "usage_type": "call"}, {"api_name": "wrappers.wrapper.ExtendedTestTool", "line_number": 179, "usage_type": "attribute"}, {"api_name": "wrappers.wrapper", "line_number": 179, "usage_type": "name"}, {"api_name": "consts.TC_RESULT_PASS", "line_number": 239, "usage_type": "attribute"}, {"api_name": "consts.TC_RESULT_FAIL", "line_number": 249, "usage_type": "attribute"}, {"api_name": "consts.TC_RESULT_DISB", "line_number": 260, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 276, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 278, "usage_type": "attribute"}, {"api_name": "checks.get_distro", "line_number": 278, "usage_type": "call"}, {"api_name": "checks.get_distro", "line_number": 284, "usage_type": "call"}, {"api_name": "utils.exec_with_timeout", "line_number": 285, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 292, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 293, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 298, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 299, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 304, "usage_type": "attribute"}, {"api_name": "wrappers.wrapper.ExtendedTestTool.__init__", "line_number": 310, "usage_type": "call"}, {"api_name": "wrappers.wrapper.ExtendedTestTool", "line_number": 310, "usage_type": "attribute"}, {"api_name": "wrappers.wrapper", "line_number": 310, "usage_type": "name"}, {"api_name": "os.sep", "line_number": 336, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 338, "usage_type": "attribute"}, {"api_name": 
"platform.system", "line_number": 360, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 361, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 366, "usage_type": "call"}, {"api_name": "os.path", "line_number": 366, "usage_type": "attribute"}, {"api_name": "utils.rm_tree", "line_number": 370, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 374, "usage_type": "call"}, {"api_name": "os.path", "line_number": 374, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 376, "usage_type": "attribute"}, {"api_name": "shutil.copytree", "line_number": 378, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 378, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 387, "usage_type": "call"}, {"api_name": "os.path", "line_number": 387, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 387, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 388, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 388, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 390, "usage_type": "call"}, {"api_name": "os.path", "line_number": 390, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 390, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 390, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 391, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 391, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 391, "usage_type": "attribute"}, {"api_name": "utils.join", "line_number": 393, "usage_type": "call"}, {"api_name": "utils.join", "line_number": 395, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 396, "usage_type": "call"}, {"api_name": "utils.exec_with_timeout", "line_number": 397, "usage_type": "call"}, {"api_name": "utils.join", "line_number": 397, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 398, "usage_type": "call"}, {"api_name": "utils.exec_with_timeout", "line_number": 400, "usage_type": "call"}, {"api_name": "utils.join", "line_number": 400, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 401, "usage_type": "call"}, {"api_name": "utils.exec_with_timeout", "line_number": 402, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 402, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 409, "usage_type": "call"}, {"api_name": "os.path", "line_number": 409, "usage_type": "attribute"}, {"api_name": "checks.is_windows", "line_number": 470, "usage_type": "call"}, {"api_name": "checks.is_android", "line_number": 472, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 482, "usage_type": "call"}, {"api_name": "utils.exec_with_timeout", "line_number": 484, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 485, "usage_type": "attribute"}, {"api_name": "utils.exec_with_timeout", "line_number": 495, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 496, "usage_type": "attribute"}, {"api_name": "shutil.copy2", "line_number": 504, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 504, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 504, "usage_type": "call"}, {"api_name": "shutil.copy2", "line_number": 505, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 505, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 506, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 506, "usage_type": "attribute"}, 
{"api_name": "os.sep", "line_number": 507, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 509, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 523, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 524, "usage_type": "call"}, {"api_name": "re.S", "line_number": 524, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 544, "usage_type": "call"}, {"api_name": "consts.TC_RESULT_PASS", "line_number": 551, "usage_type": "attribute"}, {"api_name": "consts.TC_RESULT_FAIL", "line_number": 559, "usage_type": "attribute"}, {"api_name": "consts.TC_RESULT_FAIL", "line_number": 565, "usage_type": "attribute"}, {"api_name": "consts.TC_RESULT_PASS", "line_number": 571, "usage_type": "attribute"}, {"api_name": "consts.TC_RESULT_FAIL", "line_number": 581, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 586, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 586, "usage_type": "attribute"}, {"api_name": "consts.TC_RESULT_PASS", "line_number": 593, "usage_type": "attribute"}, {"api_name": "re.escape", "line_number": 597, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 599, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 600, "usage_type": "call"}, {"api_name": "consts.TC_RESULT_FAIL", "line_number": 603, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 613, "usage_type": "attribute"}, {"api_name": "consts.TC_RESULT_PASS", "line_number": 617, "usage_type": "attribute"}, {"api_name": "consts.TC_RESULT_FAIL", "line_number": 619, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 623, "usage_type": "attribute"}, {"api_name": "consts.TC_RESULT_PASS", "line_number": 629, "usage_type": "attribute"}, {"api_name": "consts.TC_RESULT_FAIL", "line_number": 631, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 634, "usage_type": "call"}, {"api_name": "utils.exec_with_timeout", "line_number": 635, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 635, "usage_type": "call"}, {"api_name": "utils.exec_with_timeout", "line_number": 637, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 637, "usage_type": "call"}, {"api_name": "wrappers.wrapper.run", "line_number": 640, "usage_type": "call"}, {"api_name": "wrappers.wrapper", "line_number": 640, "usage_type": "name"}]} +{"seq_id": "1829250", "text": "\n#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n\nfrom argparse import ArgumentParser\n\nclass InputParser:\n \"\"\"\n This class is used to define behaviour based on user input.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initializes default parsers.\n \"\"\"\n self.__parser = ArgumentParser()\n subparsers = self.__parser.add_subparsers(dest='command')\n self.__add_version_parser(subparsers)\n self.__add_status_parser(subparsers)\n self.__add_flow_parser(subparsers)\n\n\n def parse(self):\n result = self.__parser.parse_args()\n if result.command == 'version':\n print('version')\n return\n\n if result.command == 'status':\n print('status')\n return\n\n if result.command == 'flow':\n print('flow')\n return\n\n\n def __add_version_parser(self, parser: ArgumentParser):\n _ = parser.add_parser('version',\n help='Shows current version.')\n\n\n def __add_status_parser(self, parser: ArgumentParser):\n _ = parser.add_parser('status',\n help='Shows current project status.')\n\n\n def __add_flow_parser(self, parser: ArgumentParser):\n push_parser = parser.add_parser('flow',\n help='It starts a process of 
pre-deployment for current changes.')\n push_parser.add_argument('-b',\n action='store_true',\n help='Increments current build number for all targets.')\n push_parser.add_argument('-p',\n action='store_true',\n help='Pushes changes to remote repository.')\n push_parser.add_argument('-t',\n action='store_true',\n help='Adds tag to current commit.')\n push_parser.add_argument('-v',\n action='store',\n help='Updates application version to provided one.',\n type=str)\n", "sub_path": "juicer/utilities/argparser.py", "file_name": "argparser.py", "file_ext": "py", "file_size_in_byte": 2120, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 38, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 43, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "509873589", "text": "import numpy as np\nimport cv2\n\ncolor = cv2.imread(\"butterfly.jpg\", 1)\n\ngray = cv2.cvtColor(color, cv2.COLOR_RGB2GRAY) # 转到灰度空间\ncv2.imshow(\"gray\", gray)\n\nb = color[:, :, 0]\ng = color[:, :, 1]\nr = color[:, :, 2]\n\nrgba = cv2.merge((b, g, r, g))\ncv2.imwrite(\"rgba.png\", rgba)\n\ncv2.waitKey(0)\n", "sub_path": "code/basic/channel.py", "file_name": "channel.py", "file_ext": "py", "file_size_in_byte": 301, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "cv2.imread", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.merge", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "596714052", "text": "import pafy;\n\n\nmyvid = pafy.new(\"https://www.youtube.com/watch?v=Z9aRmmWX5XI\");\nstreams = myvid.streams;\n\n#for s in streams:\n#\tprint(s.extension, s.get_filesize(), s.url);\n\nbest = myvid.getbest(\"webm\");\ndl = best.download();\n\nprint(dl);\nx = input(\"\\npress enter to continue\");", "sub_path": "src/vidDL.py", "file_name": "vidDL.py", "file_ext": "py", "file_size_in_byte": 276, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pafy.new", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "572813907", "text": "# -*- coding: utf-8 -*-\n\nimport mock\n\nfrom django.test import TestCase\n\nfrom hikers.tests.factories import HikerFactory\n\nfrom equipment.models import Equipment\nfrom equipment.tests.factories import EquipmentFactory\n\n\nclass EquipmentModelTests(TestCase):\n\n def setUp(self): # noqa\n self.equipment = EquipmentFactory()\n\n def test_equipment_unicode(self):\n self.assertIsInstance(self.equipment, Equipment)\n self.assertIn(self.equipment.recommended_gear,\n self.equipment.__unicode__())\n self.assertIn(self.equipment.gear_type,\n self.equipment.__unicode__())\n\n @mock.patch('equipment.models.deleted_hiker_fallback')\n def test_equipment_save(self, mock_deleted_hiker_fallback):\n added = HikerFactory()\n empty = HikerFactory()\n mock_deleted_hiker_fallback.return_value = empty\n equipment = 
EquipmentFactory(added_by=added)\n equipment.save()\n self.assertFalse(mock_deleted_hiker_fallback.called)\n\n equipment.added_by = None\n equipment.save()\n self.assertTrue(mock_deleted_hiker_fallback.called)\n", "sub_path": "equipment/tests/test_models.py", "file_name": "test_models.py", "file_ext": "py", "file_size_in_byte": 1133, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.test.TestCase", "line_number": 13, "usage_type": "name"}, {"api_name": "equipment.tests.factories.EquipmentFactory", "line_number": 16, "usage_type": "call"}, {"api_name": "equipment.models.Equipment", "line_number": 19, "usage_type": "argument"}, {"api_name": "hikers.tests.factories.HikerFactory", "line_number": 27, "usage_type": "call"}, {"api_name": "hikers.tests.factories.HikerFactory", "line_number": 28, "usage_type": "call"}, {"api_name": "equipment.models", "line_number": 30, "usage_type": "name"}, {"api_name": "equipment.tests.factories.EquipmentFactory", "line_number": 30, "usage_type": "call"}, {"api_name": "equipment.models.save", "line_number": 31, "usage_type": "call"}, {"api_name": "equipment.models", "line_number": 31, "usage_type": "name"}, {"api_name": "equipment.models.added_by", "line_number": 34, "usage_type": "attribute"}, {"api_name": "equipment.models", "line_number": 34, "usage_type": "name"}, {"api_name": "equipment.models.save", "line_number": 35, "usage_type": "call"}, {"api_name": "equipment.models", "line_number": 35, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "252615145", "text": "\"\"\"\nFunctionallity for testing implementations and trained models against some\nstandard benchmarks\n\"\"\"\nimport gym\nimport time\nimport tensorflow as tf\nimport rlalgs.utils.logger as logger\nimport rlalgs.tester.utils as testutils\nimport rlalgs.utils.preprocess as preprocess\n\n# Just disables the warning, doesn't enable AVX/FMA\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n\ndef run_episode(sess, env, x, pi, render, preprocess_fn):\n \"\"\"\n Runs a single episode of the given environment for a model\n\n Arguments:\n sess : the tensorflow session\n env : the gym environment\n x : the policy model input tf placeholder\n pi : the policy model output tf placeholder\n\n Returns:\n epRew : total reward for episode\n \"\"\"\n epRew = 0\n o, r, d = env.reset(), 0, False\n t = 0\n while not d:\n if render:\n env.render()\n time.sleep(0.01)\n o = preprocess_fn(o, env)\n a = sess.run(pi, {x: o.reshape(1, -1)})\n try:\n a_processed = a[0]\n except IndexError:\n # some algs return action directly (i.e. argmax(Q-val) for Q-learning)\n a_processed = a\n o, r, d, _ = env.step(a_processed)\n epRew += r\n t += 1\n return epRew, t\n\n\ndef load_model(fpath):\n \"\"\"\n Load a trained model from file\n\n Arguments:\n fpath : path to model directory\n\n Returns:\n sess : tensorflow sess\n x : the policy model input tf placeholder\n pi : the policy model output tf placeholder\n \"\"\"\n sess = tf.Session()\n model_vars = logger.restore_model(sess, args.fpath)\n x = model_vars[\"inputs\"][logger.OBS_NAME]\n pi = model_vars[\"outputs\"][logger.ACTS_NAME]\n return sess, x, pi\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"fpath\", metavar='fpath', type=str,\n help=\"saved model directory name (i.e. 
the simple_save folder)\")\n parser.add_argument(\"--trials\", type=int, default=100)\n parser.add_argument(\"--render\", action=\"store_true\")\n args = parser.parse_args()\n\n env_name = logger.get_env_name(args.fpath)\n trials, reward = testutils.get_benchmark(env_name)\n if trials is None or reward is None:\n print(\"No benchmark found for {}, please see tests.md for a list of supported envs\"\n .format(env_name))\n trials = args.trials\n print(\"Running for {} trials\".format(trials))\n env = gym.make(env_name)\n\n sess, x, pi = load_model(args.fpath)\n preprocess_fn, _ = preprocess.get_preprocess_fn(env_name)\n\n total_rew = 0\n for i in range(trials):\n ep_rew, t = run_episode(sess, env, x, pi, args.render, preprocess_fn)\n print(\"Trial {}: \\t total reward = {}, total steps = {}\".format(i, ep_rew, t))\n total_rew += ep_rew\n\n print(\"-\" * 20, \"\\n\")\n print(\"Test finished\")\n print(\"Average reward over {} trials = {}\".format(trials, total_rew / trials))\n if reward is not None:\n print(\"Benchmark reward = {}\\n\".format(reward))\n if total_rew / trials > reward:\n print(\"Benchmark passed\\n\")\n else:\n print(\"Benchmark failed\\n\")\n print(\"-\" * 20, \"\\n\")\n", "sub_path": "rlalgs/tester/tester.py", "file_name": "tester.py", "file_ext": "py", "file_size_in_byte": 3233, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 62, "usage_type": "call"}, {"api_name": "rlalgs.utils.logger.restore_model", "line_number": 63, "usage_type": "call"}, {"api_name": "rlalgs.utils.logger", "line_number": 63, "usage_type": "name"}, {"api_name": "rlalgs.utils.logger.OBS_NAME", "line_number": 64, "usage_type": "attribute"}, {"api_name": "rlalgs.utils.logger", "line_number": 64, "usage_type": "name"}, {"api_name": "rlalgs.utils.logger.ACTS_NAME", "line_number": 65, "usage_type": "attribute"}, {"api_name": "rlalgs.utils.logger", "line_number": 65, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 71, "usage_type": "call"}, {"api_name": "rlalgs.utils.logger.get_env_name", "line_number": 78, "usage_type": "call"}, {"api_name": "rlalgs.utils.logger", "line_number": 78, "usage_type": "name"}, {"api_name": "rlalgs.tester.utils.get_benchmark", "line_number": 79, "usage_type": "call"}, {"api_name": "rlalgs.tester.utils", "line_number": 79, "usage_type": "name"}, {"api_name": "gym.make", "line_number": 85, "usage_type": "call"}, {"api_name": "rlalgs.utils.preprocess.get_preprocess_fn", "line_number": 88, "usage_type": "call"}, {"api_name": "rlalgs.utils.preprocess", "line_number": 88, "usage_type": "name"}]} +{"seq_id": "69490053", "text": "import os\nimport re\nfrom datetime import datetime\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext.webapp import util\nfrom django.utils import simplejson\nfrom model import get_current_youtify_user_model\nfrom model import create_youtify_user_model\nfrom model import get_youtify_user_struct\nfrom model import get_followers_for_youtify_user_model\nfrom model import get_followings_for_youtify_user_model\nfrom model import get_settings_struct_for_youtify_user_model\nfrom model import generate_device_token\nfrom languages import auto_detect_language\nfrom snapshots import 
get_deployed_translations_struct\nfrom languages import get_languages\ntry:\n import config\nexcept ImportError:\n import config_template as config\n\nclass NotFoundHandler(webapp.RequestHandler):\n\n def get(self):\n self.response.set_status(404)\n self.response.out.write(\"404 Not found\")\n\nclass MainHandler(webapp.RequestHandler):\n\n def get(self):\n # Find videotag and generate open graph meta tags\n match = re.compile(r'tracks/youtube/(.*)').search(self.request.url)\n if match: \n og_tag = ''\n else:\n og_tag = ''\n\n # TODO add og_tag for SoundCloud & Official.fm tracks\n\n path = os.path.join(os.path.dirname(__file__), 'html', 'index.html')\n self.response.headers['Content-Type'] = 'text/html; charset=utf-8';\n self.response.out.write(template.render(path, {\n 'CURRENT_VERSION_ID': os.environ['CURRENT_VERSION_ID'],\n 'USE_PRODUCTION_JAVASCRIPT': config.ON_PRODUCTION,\n 'INCLUDE_GOOGLE_ANALYTICS': config.ON_PRODUCTION,\n\t\t\t'url': self.request.url,\n 'og_tag': og_tag,\n }))\n\nclass ApiMainHandler(webapp.RequestHandler):\n\n def get(self):\n my_followers_struct = []\n my_followings_struct = []\n settings_struct = {}\n youtify_user_struct = None\n\n current_user = users.get_current_user()\n youtify_user_model = get_current_youtify_user_model()\n\n if (current_user is not None) and (youtify_user_model is None):\n youtify_user_model = create_youtify_user_model()\n\n if youtify_user_model is not None:\n youtify_user_model.device = generate_device_token()\n youtify_user_model.last_login = datetime.now()\n youtify_user_struct = get_youtify_user_struct(youtify_user_model, include_private_data=True)\n\n # https://developers.google.com/appengine/docs/python/runtime#Request_Headers\n youtify_user_model.country = self.request.headers.get('X-AppEngine-Country', None)\n youtify_user_model.reqion = self.request.headers.get('X-AppEngine-Region', None)\n youtify_user_model.city = self.request.headers.get('X-AppEngine-City', None)\n youtify_user_model.latlon = self.request.headers.get('X-AppEngine-CityLatLong', None)\n\n youtify_user_model.save()\n\n my_followers_struct = get_followers_for_youtify_user_model(youtify_user_model)\n my_followings_struct = get_followings_for_youtify_user_model(youtify_user_model)\n settings_struct = get_settings_struct_for_youtify_user_model(youtify_user_model)\n\n lang_code = auto_detect_language(self.request)\n\n json = {\n 'ON_PRODUCTION': config.ON_PRODUCTION,\n 'SEARCH_STATS_URL': config.SEARCH_STATS_URL,\n 'languagesFromServer': [lang for lang in get_languages() if lang['enabled_on_site']],\n 'device': youtify_user_model is not None and youtify_user_model.device,\n 'user': youtify_user_struct,\n 'lastNotificationSeenTimestamp': youtify_user_model is not None and youtify_user_model.last_notification_seen_timestamp, \n 'myFollowers': my_followers_struct,\n 'myFollowings': my_followings_struct,\n 'settingsFromServer': settings_struct,\n 'autoDetectedLanguageByServer': lang_code,\n 'autoDetectedTranslations': get_deployed_translations_struct(lang_code),\n 'loginUrl': users.create_login_url('/'),\n 'logoutUrl': users.create_logout_url('/'),\n }\n\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(json));\n\ndef main():\n application = webapp.WSGIApplication([\n ('/api/main', ApiMainHandler),\n ('/.*\\.(?:png|ico|jpg|gif|xml|css|swf|js|yaml|py|pyc|woff|eot|svg|ttf)$', NotFoundHandler),\n ('/.*', MainHandler),\n ], debug=True)\n util.run_wsgi_app(application)\n\nif __name__ == '__main__':\n main()\n", "sub_path": 
"main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4954, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 24, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 24, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 30, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 30, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 42, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template.render", "line_number": 44, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template", "line_number": 44, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 45, "usage_type": "attribute"}, {"api_name": "config_template.ON_PRODUCTION", "line_number": 46, "usage_type": "attribute"}, {"api_name": "config_template.ON_PRODUCTION", "line_number": 47, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 52, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 52, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 60, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 60, "usage_type": "name"}, {"api_name": "model.get_current_youtify_user_model", "line_number": 61, "usage_type": "call"}, {"api_name": "model.create_youtify_user_model", "line_number": 64, "usage_type": "call"}, {"api_name": "model.generate_device_token", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 68, "usage_type": "name"}, {"api_name": "model.get_youtify_user_struct", "line_number": 69, "usage_type": "call"}, {"api_name": "model.get_followers_for_youtify_user_model", "line_number": 79, "usage_type": "call"}, {"api_name": "model.get_followings_for_youtify_user_model", "line_number": 80, "usage_type": "call"}, {"api_name": "model.get_settings_struct_for_youtify_user_model", "line_number": 81, "usage_type": "call"}, {"api_name": "languages.auto_detect_language", "line_number": 83, "usage_type": "call"}, {"api_name": "config_template.ON_PRODUCTION", "line_number": 86, "usage_type": "attribute"}, {"api_name": "config_template.SEARCH_STATS_URL", "line_number": 87, "usage_type": "attribute"}, {"api_name": "languages.get_languages", "line_number": 88, "usage_type": "call"}, {"api_name": "snapshots.get_deployed_translations_struct", "line_number": 96, "usage_type": "call"}, {"api_name": "google.appengine.api.users.create_login_url", "line_number": 97, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 97, "usage_type": "name"}, {"api_name": "google.appengine.api.users.create_logout_url", "line_number": 98, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 98, "usage_type": "name"}, {"api_name": "django.utils.simplejson.dumps", "line_number": 102, "usage_type": "call"}, {"api_name": "django.utils.simplejson", "line_number": 102, "usage_type": "name"}, {"api_name": 
"google.appengine.ext.webapp.WSGIApplication", "line_number": 105, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp", "line_number": 105, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.util.run_wsgi_app", "line_number": 110, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.util", "line_number": 110, "usage_type": "name"}]} +{"seq_id": "337308931", "text": "import datetime\nimport zipfile\nimport os\nfrom .CreatLogName import log_name\nfrom .Connection import Connection\n\n\nclass CompressedFile(object):\n\n def __init__(self, config_file_path=\"../PyFramework/LogCenter/config.ini\"):\n \"\"\"\n 一个实例化CompressedFile对象对应一个GetInfo实例化对象\n :param config_file_path:\n \"\"\"\n self.cfp = Connection(config_file_path)\n self.path = config_file_path\n\n def compressed(self):\n \"\"\"\n 压缩与删除操作\n :return:\n \"\"\"\n now_time = log_name.check_new(self.path)\n keep_day = int(self.cfp.get_info(\"compressed\", \"keep_day\")) + 1\n deal_day = datetime.datetime(int(now_time[0:4]), int(now_time[5:7]), int(now_time[8:10])) - datetime.timedelta(\n days=keep_day)\n deal_day_info = str(deal_day)[0:10] # 生成需要进行压缩的文件前缀,同时也是当天的压缩包名,以一天为一个压缩包\n log_path = self.cfp.get_info(\"log\", \"log_path\")\n for root, dirs, files in os.walk(os.getcwd() + \"/\" + log_path):\n for single in files:\n if deal_day_info + \"_\" in single:\n self.add_file(log_path + \"/\" + deal_day_info + '.zip',\n log_path + \"/\" + single)\n self.delete_file(log_path + \"/\" + single)\n\n @staticmethod\n def add_file(zip_filename=None, dirname=None):\n \"\"\"\n 添加dirname压缩入zip_filename文件中\n :param zip_filename: 目标压缩文件\n :param dirname: 目标待压缩文件\n :return:\n \"\"\"\n if os.path.isfile(dirname):\n with zipfile.ZipFile(zip_filename, 'a') as z:\n z.write(dirname)\n else:\n with zipfile.ZipFile(zip_filename, 'a') as z:\n for root, dirs, files in os.walk(dirname):\n for single_file in files:\n if single_file != zip_filename:\n filepath = os.path.join(root, single_file)\n z.write(filepath)\n\n @staticmethod\n def delete_file(filename=None):\n \"\"\"\n 删除文件\n :param filename:\n :return:\n \"\"\"\n os.remove(filename)\n", "sub_path": "LogCenter/Center/CompressedFile.py", "file_name": "CompressedFile.py", "file_ext": "py", "file_size_in_byte": 2274, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "Connection.Connection", "line_number": 15, "usage_type": "call"}, {"api_name": "CreatLogName.log_name.check_new", "line_number": 23, "usage_type": "call"}, {"api_name": "CreatLogName.log_name", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 25, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 29, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 45, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 48, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "508516320", "text": "import sys\nimport numpy as 
np\nimport os\nfrom scipy.integrate import simps\nfrom scipy.interpolate import interp1d\nfrom GUI_Design_EQE import *\nimport pyqtgraph as pg\nimport pandas.io.parsers\n\n\n\nclass GUIForm(QtGui.QMainWindow):\n \n \n\tdef __init__(self, master=None):\n\t\tQtGui.QMainWindow.__init__(self,master)\n\t\tself.ui = Ui_EQE_Design()\n\t\tself.ui.setupUi(self)\n \n\t\tQtCore.QObject.connect(self.ui.loadbutton, QtCore.SIGNAL('clicked()'), self.getfile) # Button to load data file\n\t\tQtCore.QObject.connect(self.ui.clearbutton, QtCore.SIGNAL('clicked()'), self.cleardata) # Button to load data file\n\t\tQtCore.QObject.connect(self.ui.EQE_button, QtCore.SIGNAL('clicked()'), self.plot_EQE) # Button to load data file\n\t\tQtCore.QObject.connect(self.ui.SR_button, QtCore.SIGNAL('clicked()'), self.plot_SR) # Button to load data file\n \n\t\tself.p1 = self.ui.top_plot.addPlot()\n\t\tself.p2 = self.ui.bottom_plot.addPlot()\n\t\tself.p2.setLabel('bottom', text='Wavelength (nm)')\n\t\tself.p2.setLabel('left', text='Spectral Intensity')\n\t\t[self.WL, self.I]=np.genfromtxt(\"AM1.5_SpectralIntensity.dat\",unpack='True')\n\t\tself.p2.setXRange(300,1100)\t\t\n\t\tself.p2.plot(self.WL, self.I)\n\n\n#\n# getfile is a function that is used to find and load data files that have EQE data\n#____________________________________________________________________________________________________________\n\n\tdef getfile(self): \n\t\tself.datafilename = str(QtGui.QFileDialog.getOpenFileName(self, 'Open file',os.getcwd())) #Get filename for data\n\t\tself.direc=os.path.dirname(self.datafilename) # Remember the directory in which the file is stored\n\t\tself.ui.pathname.setText(self.datafilename)\t\t\t\n\t\tself.Load()\n\n\n\tdef cleardata(self):\n\t\tself.p1.clear()\n\t\tself.ui.pathname.setText(\"\")\n\t\t\n\n\tdef Load(self): \n\t\t# Load setpoint values \n\t\tself.p1.clear()\t\n\t\tself.datafilename = str(self.ui.pathname.text())\n\t\t[self.WL, self.ch1, self.ch2, self.ch3, self.ch4, self.temp, self.Isc, self.EQE, self.IQE, self.Rs, self.SR, self.Rd, ]=np.genfromtxt(self.datafilename,unpack='True') # Upload the QE data file and unpack the data columns\n\t\t[self.wl, self.I]=np.genfromtxt(\"AM1.5_SpectralIntensity.dat\",unpack='True') # Upload spectral intensity data\n\t\tfunc = interp1d(self.wl, self.I)\n\t\tself.Inew = func(self.WL)\n\t\tself.integrand = np.multiply(self.Inew, self.SR)\n\t\tJsc = simps(self.SR, self.WL)/10\n\t\tself.ui.Jsc_display.setText(str(Jsc))\n\n\n\tdef plot_EQE(self):\n\t\tself.p1.clear()\n\t\tself.p1.setLabel('bottom', text='Wavelength (nm)')\n\t\tself.p1.setLabel('left', text='EQE')\t\t\t\n\t\tself.Load()\n\t\tself.p1.plot(self.WL, self.EQE)\n\n\n\tdef plot_SR(self):\n\t\tself.p1.clear\n\t\tself.p1.setLabel('bottom', text='Wavelength (nm)')\n\t\tself.p1.setLabel('left', text='Spectral Response (A/W)')\n\t\tself.Load()\n\t\tself.p1.plot(self.WL, self.SR)\t\n\n\t\t \n\n\nif __name__ == \"__main__\":\n\tapp = QtGui.QApplication(sys.argv)\n\tmyapp = GUIForm()\n\tmyapp.show()\n\tsys.exit(app.exec_())\n", "sub_path": "EQE_plotter.py", "file_name": "EQE_plotter.py", "file_ext": "py", "file_size_in_byte": 2915, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.genfromtxt", "line_number": 29, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, 
{"api_name": "numpy.genfromtxt", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 55, "usage_type": "call"}, {"api_name": "scipy.interpolate.interp1d", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 58, "usage_type": "call"}, {"api_name": "scipy.integrate.simps", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 82, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "634274951", "text": "import protocol as proto\nimport util\nimport struct\nimport socket\nimport logging\nimport time\n\nstreamHandler = logging.StreamHandler()\nstreamHandler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s - %(message)s'))\nlogger = logging.getLogger(\"CLIENT\")\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(streamHandler)\n\nmax_size = 1024\naddr = (\"localhost\", 6789)\n\n\ndef main():\n # Build frame\n frame_data = proto.FrameData(struct.pack(\">10B\", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))\n logger.debug(\"Data: {} size: {}\".format(bytes(frame_data), frame_data.size))\n\n header_dict = {\n \"command\": 0x0001,\n \"version\": 0x0002\n }\n\n frame_packet = util.PackFrame(header_dict, frame_data)\n # frame_packet = util.PackFrame(header_dict)\n logger.debug(\"Frame packet: {} size: {}\".format(bytes(frame_packet), frame_packet.size))\n\n # Build serial\n serial_packet = util.PackSerial(frame_packet)\n logger.debug(\"Serial packet: {} size: {}\".format(bytes(serial_packet), serial_packet.size))\n logger.debug(\"Serial checksum: {}\".format(serial_packet.checksum))\n\n # Send\n while True:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:\n try:\n client.connect(addr)\n\n while True:\n logger.debug(\"Send serial packet\")\n data = b\"\".join([bytes(serial_packet), bytes(serial_packet)])\n client.sendall(data)\n time.sleep(5)\n\n except Exception as e:\n logger.debug(\"Socket error occurred: {}\".format(e))\n time.sleep(1)\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except Exception as e:\n logger.debug(\"Protocol client error: {}\".format(e))\n", "sub_path": "test-protocol/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 1763, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.StreamHandler", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 11, "usage_type": "attribute"}, {"api_name": "protocol.FrameData", "line_number": 20, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 20, "usage_type": "call"}, {"api_name": "util.PackFrame", "line_number": 28, "usage_type": "call"}, {"api_name": "util.PackSerial", "line_number": 33, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 39, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 39, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 39, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "9109078", "text": "import pandas as pd\nfrom matplotlib import pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\nplt.style.use('seaborn-pastel')\n\nfilepath = 
(r\"C:\\Users\\ngaij\\Desktop\\Project Swallow Software\\Python\\Accelerometer\\Set1.csv\")\ncols_list = ['Microphone1','X_Axis', 'Y_Axis', 'Z_Axis', 'Microphone2','Button']\n\ndef animate(i):\n df = pd.read_csv(filepath, header=None).drop_duplicates()\n df.columns = df.iloc[0]\n df = df.iloc[1:]\n\n M1 = df[\"Microphone1\"].astype(float)\n X = df[\"X_Axis\"].astype(float)\n Y = df[\"Y_Axis\"].astype(float)\n Z = df[\"Z_Axis\"].astype(float)\n M2 = df[\"Microphone2\"].astype(float)\n btn = df[\"Button\"].astype(float)\n\n plt.cla()\n plt.plot(M1, label=\"X-Axis\")\n plt.plot(M2, label=\"Y-Axis\")\n\n plt.legend(loc='upper left')\n plt.tight_layout()\n\n\nani = FuncAnimation(plt.gcf(), animate, interval=1)\n\nplt.tight_layout()\nplt.show()\n", "sub_path": "Python_Code/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 889, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 5, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cla", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "142142036", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 24 13:06:33 2021\r\n\r\n@author: keigo\r\n\"\"\"\r\nimport MeCab\r\nfrom collections import deque\r\nimport pickle\r\nimport random\r\nimport dataDownload\r\nimport question\r\nimport word2vec\r\nimport win32com.client as wincl\r\nfrom numba import jit\r\n\r\n\r\n# 単語に反応してそれから始まる文を生成しているだけのボット\r\nclass markovBot:\r\n analyzer = question.Analyzer()\r\n log = {} # user : bot\r\n tagger = MeCab.Tagger(\"おはよう。\")\r\n tagger.parse(\"\")\r\n voice = wincl.Dispatch(\"SAPI.SpVoice\")\r\n model = {}\r\n\r\n def speech(self, text):\r\n self.voice.Speak(text)\r\n\r\n def load_text_data(self, directory_path):\r\n return dataDownload.open_zipfile(\"./text_data/conversation_data.zip\")\r\n\r\n def wakati(self, text):\r\n \"\"\"\r\n using mecab\r\n Morphological Analysis(\"形態素解析\")\r\n\r\n Parameters\r\n 
----------\r\n text : str\r\n DESCRIPTION.unioned learning data -> str\r\n\r\n Returns\r\n -------\r\n res : list\r\n DESCRIPTION.splited word(Morphological Analysis(\"形態素解析\"))\r\n\r\n \"\"\"\r\n res = []\r\n node = self.tagger.parseToNode(text)\r\n while node:\r\n res.append(node.surface)\r\n node = node.next\r\n return res\r\n\r\n # 始点を \"[BOS]\" として終点を \"。\" とする\r\n def makeModel(self, text, order=4):\r\n # word_list = 形態素解析済みdata(list)\r\n word_list = self.wakati(text)\r\n # print(word_list)\r\n if len(word_list) <= order:\r\n return\r\n queue = deque([], order)\r\n queue.append(\"[BOS]\")\r\n for markov_value in word_list:\r\n if len(queue) < order:\r\n queue.append(markov_value)\r\n continue\r\n\r\n if queue[-1] == \"。\":\r\n markov_key = tuple(queue)\r\n if markov_key not in self.model:\r\n self.model[markov_key] = []\r\n self.model.setdefault(markov_key, []).append(\"[BOS]\")\r\n queue.append(\"[BOS]\")\r\n markov_key = tuple(queue)\r\n self.model.setdefault(markov_key, []).append(markov_value)\r\n queue.append(markov_value)\r\n # print(self.model)\r\n\r\n def saveModel(self):\r\n with open(\"./markov_model.binaryfile\", \"wb\") as file:\r\n pickle.dump(self.model, file)\r\n\r\n def loadModel(self, path=\"./markov_model.binaryfile\"):\r\n try:\r\n with open(path, \"rb\") as file:\r\n self.model = pickle.load(file)\r\n except FileNotFoundError:\r\n print(\"モデルが保存されていないので初期化します\")\r\n self.makeModel(self.load_text_data(\"\"))\r\n\r\n def makeSentence(self, sentence_num=5, seed=\"[BOS]\", max_words=1000):\r\n sentence_count = 0\r\n\r\n key_candidates = [key for key in self.model if key[0] == seed]\r\n if not key_candidates:\r\n print(\"Not found Keyword\")\r\n return\r\n markov_key = random.choice(key_candidates)\r\n queue = deque(list(markov_key), len(list(self.model.keys())[0]))\r\n\r\n sentence = \"\".join(markov_key)\r\n for _ in range(max_words):\r\n markov_key = tuple(queue)\r\n next_word = random.choice(self.model[markov_key])\r\n sentence += next_word\r\n queue.append(next_word)\r\n\r\n if next_word == \"。\":\r\n sentence_count += 1\r\n if sentence_count == sentence_num:\r\n break\r\n return sentence\r\n\r\n def make_response(self, user_text: str) -> list:\r\n pass\r\n\r\n @jit\r\n def start_chat(self):\r\n end_word = [\"さようなら\", \"またね\", \"ばいばい\", \"バイバイ\"]\r\n tagger = MeCab.Tagger(\"\")\r\n\r\n while True:\r\n user_text = input(\"You -> \")\r\n # if input is end_word, chat end\r\n if (user_text in end_word):\r\n print(\"Bot -> \" + user_text)\r\n return\r\n if user_text[-1] != \"。\":\r\n user_text += \"。\"\r\n self.makeModel(user_text)\r\n tagger.parse(\"\")\r\n node = tagger.parseToNode(user_text)\r\n # ?があるか\r\n if self.analyzer.judgment(user_text):\r\n while node:\r\n word = node.surface\r\n pos = node.feature.split(',')[0]\r\n if pos == \"名詞\":\r\n sentence = self.analyzer.association(word)\r\n if sentence == word:\r\n print(\"処理未実装\")\r\n print(\"Bot -> \" + word + \"は\" + sentence + \"です。\")\r\n self.log[word] = sentence\r\n break\r\n node = node.next\r\n else:\r\n while node:\r\n word = node.surface\r\n pos = node.feature.split(',')[0]\r\n if pos == \"感動詞\":\r\n print(\"Bot -> \" + word)\r\n self.log[word] = word\r\n elif (pos in [\"名詞\", \"形容詞\", \"動詞\"]):\r\n sentences = []\r\n for _ in range(10):\r\n sentences.append(self.makeSentence(seed=word, sentence_num=1))\r\n if None not in sentences:\r\n sentence = word2vec.get_trust(user_text, sentences)\r\n print(\"Bot -> \" + sentence)\r\n self.log[word] = sentence\r\n # print(sentences)\r\n node = 
node.next\r\n\r\n\r\nif __name__ == \"__main__\":\r\n bot = markovBot()\r\n bot.loadModel()\r\n bot.start_chat()\r\n bot.saveModel()\r\n \r\n \r\n \r\n", "sub_path": "markovBot.py", "file_name": "markovBot.py", "file_ext": "py", "file_size_in_byte": 5852, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "question.Analyzer", "line_number": 20, "usage_type": "call"}, {"api_name": "MeCab.Tagger", "line_number": 22, "usage_type": "call"}, {"api_name": "win32com.client.Dispatch", "line_number": 24, "usage_type": "call"}, {"api_name": "win32com.client", "line_number": 24, "usage_type": "name"}, {"api_name": "dataDownload.open_zipfile", "line_number": 31, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 63, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 83, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 88, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 100, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 101, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 106, "usage_type": "call"}, {"api_name": "MeCab.Tagger", "line_number": 122, "usage_type": "call"}, {"api_name": "word2vec.get_trust", "line_number": 160, "usage_type": "call"}, {"api_name": "numba.jit", "line_number": 119, "usage_type": "name"}]} +{"seq_id": "629686757", "text": "import os\nimport numpy as np\nimport random\nfrom collections import defaultdict\nimport gym\nimport environment\n\nenv = gym.make('maze-5x5-v0')\n\n# State 의 boundary\nSTATE_BOUNDS = list(zip(env.observation_space.low, env.observation_space.high))\n# Maze의 size (10, 10)\nNUM_GRID = tuple((env.observation_space.high + np.ones(env.observation_space.shape)).astype(int))\n\nclass Agent:\n def __init__(self, actions):\n self.actions = actions\n self.discount_factor = 0.9 # 감가율\n self.epsilon = 0.1 # 엡실론\n self.q_table = defaultdict(lambda: [0.0, 0.0, 0.0, 0.0])\n self.learning_rate = 0.1\n\n # 의 샘플로부터 큐함수를 업데이트\n def learn(self, state, action, reward, next_state, next_action):\n # TODO: 큐함수를 업데이트 하는 코드를 작성\n current_q = self.q_table[state][action]\n next_state_q = self.q_table[next_state][next_action]\n new_q = (current_q + self.learning_rate *\n (reward + self.discount_factor * next_state_q - current_q))\n self.q_table[state][action] = new_q\n\n # 입실론 탐욕 정책에 따라서 행동을 반환하는 메소드입니다.\n def get_action(self, state):\n # TODO: ε-탐욕 정책 코드를 작성\n # self.epsilon을 이용하세요.\n if np.random.rand() < self.epsilon:\n # 무작위 행동 반환\n action = np.random.choice(self.actions)\n else:\n # 큐함수에 따른 행동 반환\n state_action = self.q_table[str(state)]\n action = self.arg_max(state_action)\n return int(action)\n\n @staticmethod\n def arg_max(state_action):\n max_index_list = []\n max_value = state_action[0]\n for index, value in enumerate(state_action):\n if value > max_value:\n max_index_list.clear()\n max_value = value\n max_index_list.append(index)\n elif value == max_value:\n max_index_list.append(index)\n return random.choice(max_index_list)\n\n# 범위 밖으로 나간 state를 다시 maze안으로 넣어주는 코드\ndef state_to_bucket(state):\n bucket_indice = []\n for i in range(len(state)):\n if state[i] <= STATE_BOUNDS[i][0]:\n bucket_index = 0\n elif state[i] >= STATE_BOUNDS[i][1]:\n bucket_index = NUM_GRID[i] - 1\n else:\n # Mapping the state bounds to the bucket array\n bound_width = STATE_BOUNDS[i][1] - STATE_BOUNDS[i][0]\n offset = (NUM_GRID[i] - 1) * STATE_BOUNDS[i][0] / bound_width\n scaling = (NUM_GRID[i] - 1) / 
bound_width\n bucket_index = int(round(scaling * state[i] - offset))\n bucket_indice.append(bucket_index)\n return tuple(bucket_indice)\n\n\nif __name__ == \"__main__\":\n env.reset()\n agent = Agent(actions=list(range(env.action_space.n)))\n scores = []\n episodes = []\n\n for episode in range(250):\n state = env.reset()\n state = state_to_bucket(state)\n action = agent.get_action(state)\n total_reward = 0\n\n while True:\n env.render()\n\n next_state, reward, done, _ = env.step(action)\n next_state = state_to_bucket(next_state)\n next_action = agent.get_action(next_state)\n\n agent.learn(str(state), action, reward, str(next_state), next_action)\n total_reward += reward\n state = next_state\n action = next_action\n\n if done:\n print(\"Episode : %d total reward = %f . \" % (episode, total_reward))\n episodes.append(episode)\n scores.append(total_reward)\n\n break", "sub_path": "assignment.py", "file_name": "assignment.py", "file_ext": "py", "file_size_in_byte": 3734, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "gym.make", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 13, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 38, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "508068772", "text": "import discord\r\nfrom discord.ext import commands\r\nimport datetime\r\n\r\nclient=commands.Bot(command_prefix='#')\r\nclient.remove_command(\"help\")\r\n\r\n@client.command()\r\nasync def help(ctx):\r\n embed= discord.Embed(\r\n title=\"DBC-BOT-COMMANDS\",\r\n description=\"List of all Commands\" +\"\\n\"+\"Command_Prefix-- #\",\r\n colour=discord.Colour.dark_red(),\r\n author=\"WarDog\"\r\n )\r\n embed.set_thumbnail(url=\"https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/i/cf806150-5d66-4b08-bc68-8a3ea3b64896/debbdqx-0be4a429-794b-4f31-8382-53d6e2ebaa40.png/v1/fill/w_734,h_811,q_80,strp/ghost_discord_icon_by_inkwoodgfx_debbdqx-fullview.jpg\")\r\n embed.add_field(name='help', value='displaying this list', inline=\"False\")\r\n embed.add_field(name='server', value='server information', inline=\"False\")\r\n embed.add_field(name='clear',value='to clear messages',inline=\"False\")\r\n embed.add_field(name='kick', value='kick members',inline=\"False\")\r\n embed.add_field(name='ban', value='ban members',inline=\"False\")\r\n embed.add_field(name='unban', value='unban members',inline=\"False\")\r\n\r\n await ctx.send(embed=embed)\r\n\r\n@client.command()\r\nasync def server(ctx):\r\n name=ctx.guild.name\r\n description=ctx.guild.description\r\n region=ctx.guild.region\r\n icon=ctx.guild.icon_url\r\n member_count=ctx.guild.member_count\r\n owner=ctx.guild.owner\r\n\r\n embed=discord.Embed(\r\n title=name+\" \"+\"Server Information\",\r\n description=description,\r\n colour=discord.Colour.dark_red()\r\n )\r\n embed.set_thumbnail(url=icon)\r\n embed.add_field(name='Owner: ',value=str(owner))\r\n embed.add_field(name='Region: ', value=region)\r\n embed.add_field(name='Members Count: ', value=member_count)\r\n\r\n await 
ctx.send(embed=embed)\r\n\r\n@client.command()\r\n@commands.has_role(\"alpha\")\r\nasync def clear(ctx,amount,month=None,day=None,year=None):\r\n if amount == '-' :\r\n amount=None\r\n else:\r\n amount=int(amount)+1\r\n if month==None or day==None or year==None:\r\n date=None\r\n else:\r\n date=datetime.datetime(int(year),int(month),int(day))\r\n\r\n await ctx.channel.purge(limit=amount,before=date)\r\n\r\n@clear.error\r\nasync def clearError(ctx,error):\r\n if isinstance(error,commands.CheckFailure):\r\n await ctx.send(\"You do not have permissions \")\r\n\r\n@client.command()\r\n@commands.has_role(\"alpha\")\r\nasync def kick(ctx, member: discord.Member,*,reason):\r\n await member.kick(reason=reason)\r\n\r\n@kick.error\r\nasync def kick_Error(ctx,error):\r\n if isinstance(error,commands.CheckFailure):\r\n await ctx.send(\"You do not have permissions \")\r\n\r\n@client.command()\r\n@commands.has_role(\"alpha\")\r\nasync def ban(ctx, member: discord.Member,*,reason):\r\n await member.ban(reason=reason)\r\n\r\n@ban.error\r\nasync def ban_Error(ctx,error):\r\n if isinstance(error,commands.CheckFailure):\r\n await ctx.send(\"You do not have permissions \")\r\n\r\n@client.command()\r\n@commands.has_role(\"alpha\")\r\nasync def unban(ctx,*,member):\r\n banned_members= await ctx.guild.bans()\r\n for person in banned_members:\r\n user=person.user\r\n if member==str(user):\r\n await ctx.guild.unban(user)\r\n\r\n@unban.error\r\nasync def unban_Error(ctx,error):\r\n if isinstance(error,commands.CheckFailure):\r\n await ctx.send(\"You do not have permissions \")\r\n\r\nclient.run('token')\r\n", "sub_path": "admin_bot.py", "file_name": "admin_bot.py", "file_ext": "py", "file_size_in_byte": 3347, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "discord.ext.commands.Bot", "line_number": 5, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 5, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 10, "usage_type": "call"}, {"api_name": "discord.Colour.dark_red", "line_number": 13, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 13, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 35, "usage_type": "call"}, {"api_name": "discord.Colour.dark_red", "line_number": 38, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 38, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 57, "usage_type": "call"}, {"api_name": "discord.ext.commands.has_role", "line_number": 48, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 48, "usage_type": "name"}, {"api_name": "discord.ext.commands.CheckFailure", "line_number": 63, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 63, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 68, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.has_role", "line_number": 67, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 67, "usage_type": "name"}, {"api_name": "discord.ext.commands.CheckFailure", "line_number": 73, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 73, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 78, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.has_role", "line_number": 77, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 77, "usage_type": "name"}, {"api_name": 
"discord.ext.commands.CheckFailure", "line_number": 83, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 83, "usage_type": "name"}, {"api_name": "discord.ext.commands.has_role", "line_number": 87, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 87, "usage_type": "name"}, {"api_name": "discord.ext.commands.CheckFailure", "line_number": 97, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 97, "usage_type": "name"}]} +{"seq_id": "330620805", "text": "import pandas as pd, numpy as np, simplejson\nimport ast\n\nrolling_median = []\nlooplist = [0]\n\ndf = pd.DataFrame(pd.read_csv(r'C:\\Users\\ADMIN\\Documents\\GitHub\\insight_coding_challenge_1.2\\input_file\\venmo-trans.txt',header=None,index_col=None,sep=';',squeeze=True).apply(lambda x: ast.literal_eval(x)).tolist())\ndf['created_time'] = df['created_time'].astype(np.datetime64)\n\nfor i in looplist:\n try:\n df['seconds_gap'] = (df['created_time'] - df['created_time'].ix[i]).apply(lambda x: x.total_seconds())\n df1 = df[df['seconds_gap'] <= np.absolute(60.00)]\n df1 = df1[['actor','target']]\n for v in range(len(df1)):\n rolling_median.append(np.median(pd.melt(df1.ix[:v+1])['value'].value_counts().tolist()))\n df = df.ix[(v+1):].reset_index(drop = True)\n looplist.extend([0])\n except:\n pass\n\nf = open(r'C:\\Users\\ADMIN\\Documents\\GitHub\\insight_coding_challenge_1.2\\output_file\\coding_challenge_output.txt','w')\nsimplejson.dump(rolling_median,f)\nf.close()\n\ndel df, df1, looplist, rolling_median\n \n#comments:\n# import libaries pandas, numpy, ast, simplejson\n#rolling median is stored in list variable named 'rolling_median'\n#looplist is the counter for the number of times the dataframe object is broken into smaller objects based on rolling 60 second window threshold\n#import data file, convert data file into pandas DataFrame object\n#column 'created_time' is converted into numpy datetime dtype object\n#column 'seconds_gap' measures the time difference in seconds between the first record and subsequent records until the time difference exceeds 60 seconds....\n#...at which point the loop breaks the bigger dataframe 'df' into smaller dataframe 'df1'\n#run a second loop within the smaller dataframe 'df1' to compute rolling median and store the output into the rolling_median list object\n#reset the dataframe object 'df1' so that the dataframe is resized to cut out the part of the dataframe for which the rolling median has been computed\n# write the output (rolling_median) to text file using simplejson\n\n \n \n \n", "sub_path": "src/coding_challenge.py", "file_name": "coding_challenge.py", "file_ext": "py", "file_size_in_byte": 2039, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pandas.DataFrame", "line_number": 7, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.datetime64", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.absolute", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.melt", "line_number": 16, "usage_type": "call"}, {"api_name": "simplejson.dump", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "471090032", "text": "import numpy as np\n# from scipy.interpolate import *\nimport matplotlib.pyplot as plt\nimport 
matplotlib.ticker\nimport statistics\n\ndef least_squares_fit(x, y, err_x, err_y):\n plt.rc('font', family = 'serif', serif = 'cmr10')\n plt.rcParams['mathtext.fontset'] = \"cm\" \n # plt.rcParams[\"font.family\"] = \"Times New Roman\" \n plt.rcParams[\"axes.linewidth\"] = 1.0\n\n arr_size = int(len(x))\n N = arr_size - 1 # N = x(arr_size - 1) # where N is the final element in the array (of x vals)\n # we have to -1 as elements in array start count at 0 NOT 1\n\n p = np.polyfit(x, y, 1) # finds the coefficients for the 'best' fitting function\n \n f = np.polyval(p, x) # these are the 'y-values' of the fitting function\n sigma = statistics.stdev(f - y) # standard deviation of quantity 'f - y'\n\n # plt.figure(figsize = (8, 6))\n plt.errorbar(x, y, err_y, err_x, fmt = \"r+\", capsize = 3, LineWidth = 0.9, LineStyle = \"none\")\n # the last argument for the two lines below, is for the legend\n plt.plot(x, y, Marker = \"+\", MarkerSize = 10, MarkerEdgeColor = \"r\", MarkerFaceColor = \"r\", LineWidth = 0.9, LineStyle = \"none\", label = \"Data Points\")\n plt.plot(x, f, LineWidth = 0.9, Linestyle = \"-\", Color = \"b\", label = \"Model\") # 'line of best fit'\n\n plt.title(\"Least Squares Fit\", fontsize = 12, fontweight = \"bold\")\n plt.xlabel(\"x values\", fontsize = 12)\n plt.ylabel(\" y values\", fontsize = 12)\n plt.legend(loc = \"upper right\", title = \"Legend\", fontsize = 10)\n # plt.axis([-1.0, 6.0, -2, 2])\n # plt.xlim(-1, 6)\n # ax.xaxis.set_major_locator(matplotlib.ticker.MultipleLocator(1))\n # plt.ylim(-2, 2)\n # ax.yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(1))\n plt.gca().tick_params(width = 1.0, labelsize = 10)\n \n plt.savefig(\"Least_Squares_Fit.pdf\")\n\n print(\"Gradient is: {:f}\" .format(p[0]))\n print(\"y-intercept is: {:f}\" .format(p[1]))\n print(\"Sigma is: {:f}\" .format(sigma))\n \n # The next two lines are for linear i.e. 
straight line fits only\n grad_err = (2 * sigma) / (x[N] - x[0])\n print(\"Gradient error is: {:f}\" .format(grad_err))", "sub_path": "Useful_Graphs/LS_Polynomials/LS_Poly.py", "file_name": "LS_Poly.py", "file_ext": "py", "file_size_in_byte": 2136, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "matplotlib.pyplot.rc", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 9, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 11, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.polyval", "line_number": 19, "usage_type": "call"}, {"api_name": "statistics.stdev", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "342416705", "text": "import os\nimport logging\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\n\n\nclass Zacks:\n def __init__(self):\n self.logger = logging.getLogger('zacks')\n self.url_head = 'https://www.zacks.com/stock/research/'\n self.url_tail = '/brokerage-recommendations'\n self.tags_to_parse = [12, 14, 15, 17, 18]\n\n def download(self, tickers):\n recommendation_details = pd.DataFrame(columns=tickers)\n for ticker in tickers:\n r = urlopen(self.url_head + ticker + self.url_tail)\n soup = BeautifulSoup(r, 'html.parser')\n trTags = soup.find_all('tr')\n\n for idx in self.tags_to_parse:\n if idx == 12:\n\n # 12: Average Broker Recommendation (ABR)\n for i, child in enumerate(trTags[idx].children):\n if i == 1:\n row_idx = child.contents[0]\n elif i == 3:\n recommendation_details.loc[row_idx, ticker] = child.string\n\n else:\n\n # 14: Number of recommendations\n # 15: Average Target Price\n # 17: Industry\n # 18: Industry rank\n for i, child in enumerate(trTags[idx].children):\n if i == 
1:\n row_idx = child.string\n elif i == 3:\n recommendation_details.loc[row_idx, ticker] = child.string\n\n return recommendation_details\n", "sub_path": "zacks.py", "file_name": "zacks.py", "file_ext": "py", "file_size_in_byte": 1586, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 18, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "377404017", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nfrom scipy.optimize import curve_fit\n\nplt.figure(figsize=(12,4))\nplt.rcParams.update({'font.size': 15})\nplt.rc('axes', linewidth=3)\n\nfolder = \"S:\\\\Brouwer\\\\LMST fitting\\\\Fitting MyOne trajectory\\\\20190121_data_004_1 - all free - sequence\\\\\"\nfolder_M270 = \"S:\\\\Brouwer\\\\LMST fitting\\\\Fitting M270 trajectory\\\\Last itertation - sequence (bead 1)\\\\\"\nfolder_M270 = \"S:\\\\Brouwer\\\\LMST fitting\\\\Fitting M270 trajectory\\\\Last itertation - sequence (bead 2)\\\\\"\nfolder_M270 = \"S:\\\\Brouwer\\\\LMST fitting\\\\Fitting M270 trajectory\\\\Last iteration - sequence (bead 2) - reversed (correct r)\\\\\"\nsave_folder = \"C:\\\\Users\\\\brouw\\\\Desktop\\\\\"\n\nfile = \"data_004_bead_005_LMST_fit.dat\"\nfile_M270 = \"data_005_bead_001_LMST_fit_M270.dat\"\nfile_M270 = \"data_005_bead_002_LMST_fit_M270.dat\"\nfile_M270 = \"data_005_bead_002_LMST_fit_M270.dat\"\n\ndf = pd.read_csv(folder+file, sep='\\t')\ndf_M270 = pd.read_csv(folder_M270+file_M270, sep='\\t')\n\ndef predict(x, slope, intercept):\n return slope * x + intercept\n\n# MyOne\nfitted = df['fit successful'].values\nZ_piezo = df['Z piezo (nm)'].values\nZ_fit = df['Z fit (nm)'].values\n\nZ = Z_piezo[np.where(fitted == 1)]\nfit = Z_fit[np.where(fitted == 1)]\n\nZ = 0.88 * Z[110:]\nfit = fit[110:]\n\nZ_select = Z[-200:]\nfit_select = fit[-200:]\n# plt.scatter(Z_select, fit_select, color='black', zorder=100)\n\n# fitting line\n# slope, intercept, r_value, p_value, std_err = stats.linregress(Z, fit)\nslope, intercept, r_value, p_value, std_err = stats.linregress(Z_select, fit_select)\n# fitting line, fixed slope at -1\nintercept = curve_fit(lambda x, intercept: predict(x, slope=-1, intercept=intercept), Z_select, fit_select)[0]\nslope = -1\n\nfitline = predict(Z, slope, intercept)\n\nplt.scatter(Z, fit, label=\"MyOne beads\", s=50, facecolors='none', edgecolors='blue', alpha=0.5)\nplt.plot(Z, fitline, color='red', label=\"Linear Fit\", linewidth=3)\n\n# fit error of approx. 
0.5 %\nerr = []\nfor f in fit:\n err.append(0.005 * f)\n\n# plt.errorbar(Z, fit, label=\"MyOne beads\", yerr=err, fmt='--o', alpha=0.5, ecolor='grey', capsize=2)\n\n# M270\nfitted_M270 = df_M270['fit successful'].values\nZ_piezo_M270 = df_M270['Z piezo (nm)'].values\nZ_fit_M270 = df_M270['Z fit (nm)'].values\n\nZ_M270 = Z_piezo_M270[np.where(fitted_M270 == 1)]\nfit_M270 = Z_fit_M270[np.where(fitted_M270 == 1)]\n# Z_M270 = Z_piezo_M270\n# fit_M270 = Z_fit_M270\n\nZ_M270 = (0.88 * Z_M270) + 4.19\n\nslope_M270, intercept_M270, r_value_M270, p_value_M270, std_err_M270 = stats.linregress(Z_M270, fit_M270)\nfitline_M270 = predict(Z_M270, slope_M270, intercept_M270)\n\nplt.scatter(Z_M270, fit_M270, label=\"M270 beads\", s=50, facecolors='none', edgecolors='green', alpha=0.5)\n# plt.plot(Z_M270, fitline_M270, color='red', label=\"Linear Fit\", linewidth=3)\n\n\nplt.xlabel('$Z_{\\mathrm{piezo}} (\\mu m)$')\nplt.ylabel('$Z_{\\mathrm{LMST}} (\\mu m)$')\n\nplt.tick_params(direction='in', top=True, right=True, length=6, width=3)\nplt.legend(loc=1, frameon=False)\n# plt.savefig(save_folder + \"LMST fitting trajectory\",dpi=600, bbox_inches=\"tight\")\n# plt.savefig(save_folder + \"LMST fitting trajectory.pdf\", bbox_inches=\"tight\")\nplt.show()", "sub_path": "Tracking/xFigure 1c - LMST fitting Z trajectory.py", "file_name": "xFigure 1c - LMST fitting Z trajectory.py", "file_ext": "py", "file_size_in_byte": 3165, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 8, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 34, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 45, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 45, "usage_type": "name"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 68, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 74, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.ylabel", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}]} +{"seq_id": "235160661", "text": "# Copyright 2019 Contributors to Hyperledger Sawtooth\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# -----------------------------------------------------------------------------\n\"\"\"Sets up a standard logging format and setting\"\"\"\n\nimport logging\n\nLIB_LEVELS = {\"asyncio\": logging.WARNING}\nLOGGER_FORMAT = \"%(levelname)s %(asctime)s %(name)s %(module)s %(pathname)s %(message)s\"\n\nlogging.basicConfig(level=logging.INFO, format=LOGGER_FORMAT)\n\nfor lib, level in LIB_LEVELS.items():\n logging.getLogger(lib).setLevel(level)\n\n\ndef get_logger(name):\n \"\"\"Return the logger\n Written to match the standard python logging.getLogger\n function for ease of migration\n \"\"\"\n logger = logging.getLogger(name)\n return logger\n", "sub_path": "rbac/common/logs/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1231, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.WARNING", "line_number": 19, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "433567535", "text": "\"\"\"\nThis is the decorator for login enforcement\n\"\"\"\nfrom functools import wraps\nimport json\nimport os\n\nfrom flask import request\nfrom flask import Response\nfrom qube.src.commons.context import AuthContext\nimport requests\n\n\nauth_url = os.getenv('AUTH_API_URL', 'https://api.qubeship.io/v1/auth')\n\n\ndef validate_with_qubeship_auth(auth_token):\n \"\"\" check if the auth_token is valid\n \"\"\"\n headers = {'content-type': 'application/json', 'Authorization': auth_token}\n # payload = {'token': auth_token}\n resp = requests.get(auth_url + '/user',\n headers=headers)\n return resp.text, resp.status_code\n\n\ndef login_required(f):\n \"\"\"create parser\n \"\"\"\n\n def auth_required():\n \"\"\" return error message\n \"\"\"\n data = {\n 'error': 'github authorization required'\n }\n js = json.dumps(data)\n\n resp = Response(js, status=401, mimetype='application/json')\n return resp\n\n def unsupported_token():\n \"\"\" return error 
message\n \"\"\"\n data = {\n 'error': 'master tokens are forbidden instead use org tokens'\n }\n js = json.dumps(data)\n\n resp = Response(js, status=403, mimetype='application/json')\n return resp\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n \"\"\" definition of login_required\n \"\"\"\n bearer_token = request.headers.get('Authorization')\n if not bearer_token:\n return auth_required()\n\n auth_token = bearer_token.split()[1]\n if not auth_token:\n return auth_required()\n\n # validate auth_token\n response, status_code = validate_with_qubeship_auth(auth_token)\n if status_code != 200:\n return auth_required()\n\n userinfo = json.loads(response)\n if userinfo['type'] != \"org\":\n return unsupported_token()\n is_system_user = userinfo['is_system_user'] \\\n if 'is_system_user' in userinfo else False\n auth_context = AuthContext(userinfo['tenant']['id'],\n userinfo['tenant']['name'],\n userinfo['tenant']['orgs'][0]['id'],\n userinfo['tenant']['orgs'][0]['name'],\n userinfo['id'], 'auth_not_implemented',\n is_system_user)\n kwargs['authcontext'] = {\n 'context': auth_context\n }\n\n return f(*args, **kwargs)\n\n return decorated_function\n", "sub_path": "qube/src/api/decorators.py", "file_name": "decorators.py", "file_ext": "py", "file_size_in_byte": 2537, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.getenv", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 39, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 70, "usage_type": "call"}, {"api_name": "qube.src.commons.context.AuthContext", "line_number": 75, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "554299722", "text": "import datetime\nimport json\nimport logging\nimport sys\n\nfrom utils.encoder import Encoder\n\n\nclass Logger:\n __slots__ = \"logger\"\n\n def __init__(self, class_name=None) -> None:\n self.logger = LogHandler().app_logger(class_name=class_name)\n\n def debug(self, msg, *args, **kwargs):\n log_json = self.__prepare_log(msg, args, kwargs)\n self.logger.debug(json.dumps(log_json, cls=Encoder))\n\n def info(self, msg, *args, **kwargs):\n log_json = self.__prepare_log(msg, args, kwargs)\n self.logger.info(json.dumps(log_json, cls=Encoder))\n\n def warning(self, msg, *args, **kwargs):\n log_json = self.__prepare_log(msg, args, kwargs)\n self.logger.warning(json.dumps(log_json, cls=Encoder))\n\n def error(self, msg, *args, **kwargs):\n log_json = self.__prepare_log(msg, args, kwargs)\n self.logger.error(json.dumps(log_json, cls=Encoder))\n\n def fatal(self, msg, *args, **kwargs):\n log_json = self.__prepare_log(msg, args, kwargs)\n self.logger.fatal(json.dumps(log_json, cls=Encoder))\n\n @staticmethod\n def __prepare_log(msg, args, kwargs):\n record = dict()\n if type(msg) == dict:\n record.update(msg)\n\n for arg in args:\n if type(arg) == dict:\n record.update(arg)\n\n record.update(kwargs)\n 
log_json = dict(message_dict=record, message=msg)\n return log_json\n\n\nclass LogHandler(object):\n loggers = {}\n\n def app_logger(self, class_name=None):\n if class_name is None:\n class_name = \"undefined\"\n\n if self.loggers.get(class_name):\n logger = self.loggers.get(class_name)\n\n return logger\n\n else:\n logger = logging.getLogger(class_name)\n log_level = logging.DEBUG\n\n # create a sys stdout handler\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(log_level)\n\n logging.basicConfig(level=log_level)\n\n formatter = Formatter(self)\n\n handler.setFormatter(formatter)\n\n # add the handlers to the logger\n logger.propagate = False\n logger.addHandler(handler)\n\n # Save new logger to existing loggers\n self.loggers.update({class_name: logger})\n\n return logger\n\n\nclass Formatter(logging.Formatter):\n def __init__(self, log_handler, *args, **kwargs):\n super(Formatter, self).__init__(*args, **kwargs)\n self.log_handler = log_handler\n\n def format(self, logger):\n\n payload = json.loads(logger.getMessage())\n log_status = dict(\n severity=logger.levelname,\n message_dict=payload.get(\"message_dict\"),\n message=payload.get(\"message\"),\n timestamp=str(\n datetime.datetime.now(\n datetime.timezone(offset=datetime.timedelta(hours=-3))\n )\n ),\n )\n\n return json.dumps(log_status)\n", "sub_path": "app/utils/logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 2981, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "json.dumps", "line_number": 17, "usage_type": "call"}, {"api_name": "utils.encoder.Encoder", "line_number": 17, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 21, "usage_type": "call"}, {"api_name": "utils.encoder.Encoder", "line_number": 21, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 25, "usage_type": "call"}, {"api_name": "utils.encoder.Encoder", "line_number": 25, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 29, "usage_type": "call"}, {"api_name": "utils.encoder.Encoder", "line_number": 29, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 33, "usage_type": "call"}, {"api_name": "utils.encoder.Encoder", "line_number": 33, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 63, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 64, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 67, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 67, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 70, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 86, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 99, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 99, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 100, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "339789116", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 23 20:10:32 2015\n\n@author: ev\n\"\"\"\n\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n#x = np.array([0.15,2.3,3.15,4.85,6.25,7.95])\n#yn = 
np.array([4.79867,4.49013,4.2243,3.47313,2.66674,1.51909])\n\n# Let's create a function to model and create data\n\n#def func1(x, a, b):\n# return a*np.exp(b*x)\n\ndef func(x, a, b, c):\n return a*x**2+b*x+c\n\ndef func1(x, a, b):\n return a*x+b\n \n\n\ndef polyfit(x, yn, n,name, c=False):\n \"\"\"If c then we have 3 gradig polyome\"\"\"\n if c:\n popt, pcov = curve_fit(func, x[:n], yn[:n])\n ym = func(x, popt[0], popt[1], popt[2])\n print(\"The Vo fitted model is: {0:2f}*x²+{1:2f}*x+{2:2f} \".format(popt[0], popt[1], popt[2]))\n #Otherwise tw gradig ploynome\n else:\n popt, pcov = curve_fit(func1, x[:n], yn[:n])\n ym = func1(x, popt[0], popt[1])\n print(\"The rate fitted model for Ln(mean({0}))= {1:2f}*x+{2:2f} \".format(name,popt[0], popt[1]))\n \n #popt returns the best fit values for parameters of the given model (func)\n \n \n \n \n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n #ax.plot(x, y, c='k', label='Function')\n ax.scatter(x, yn, color = 'r', marker = 'x')\n ax.plot(x, ym, color = 'g')\n \n \n plt.legend(['fitted Model',str(name)],fontsize=16,loc='upper left')\n \n ax.set_yscale('linear',fontsize=16)\n ax.tick_params(axis='x', labelsize=14)\n ax.tick_params(axis='y', labelsize=14)\n plt.ylabel('Ln(OD600_)',fontsize=16)\n plt.xlabel('Time(sec)',fontsize=16)\n ax.grid()\n plt.grid()\n plt.show()\n #print( popt )\n fig.savefig('scipy_311_ex2.pdf', bbox_inches='tight')\n \n return popt[0]\n\n\nif __name__ == \"__main__\":\n li=[]\n import pandas as pd\n \n for i in range(1,8):\n file='/home/ev/Documents/Kenetics_PLots/Sample-'+str(i)+'.xlsx'\n data1 = pd.read_excel(file)\n x=data1['Time(sec)']\n y=data1['Abs']\n \n fun=polyfit(x, y, 300,'wt', c=False)\n li.append(fun)", "sub_path": "LastWeekProject/Trim1/polyTRim1.py", "file_name": "polyTRim1.py", "file_ext": "py", "file_size_in_byte": 2096, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "scipy.optimize.curve_fit", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "pandas.read_excel", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "274430182", "text": "# !/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ******************************************************************************\n# Copyright (c) 2018 Mejbah ul Alam, Justin Gottschlich, Abdullah Muzahid\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in 
the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# ******************************************************************************\n\nimport subprocess\n\nfrom setuptools import setup\nfrom setuptools import find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\n\n\nclass CustomInstall(install):\n \"\"\"Custom handler for the 'install' command.\"\"\"\n def run(self):\n print('[DEBUG] making perfpoint.so')\n subprocess.check_call('make', cwd='./autoperf/profiler/', shell=True)\n super().run()\n\n\nclass CustomDevelop(develop):\n \"\"\"Custom handler for the 'develop' command.\"\"\"\n def run(self):\n print('[DEBUG] making perfpoint.so')\n subprocess.check_call('make', cwd='./autoperf/profiler/', shell=True)\n super().run()\n\n\nsetup(name='autoperf',\n version='1.0',\n description='AutoPerf helps identify performance regressions in large codebases',\n long_description='AutoPerf is a tool for low-overhead, automated diagnosis \\\n of performance anomalies in multithreaded programs via \\\n hardware performance counters (HWPCs) in Intel CPUs',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.8',\n 'Operating System :: POSIX :: Linux',\n 'Topic :: Software Development :: Quality Assurance',\n ],\n keywords='autoperf performance regression monitoring',\n author='Intel Corporation',\n license='MIT',\n # packages=['autoperf','annotation'], #include fsm and annotation\n packages=find_packages(where=\".\", exclude=(\"./docs\",'./profiler', './.empty', './__pycache__')),\n zip_safe=False,\n entry_points={'console_scripts': ['autoperf=autoperf.__main__:main']},\n cmdclass={'install': CustomInstall, 'develop': CustomDevelop},\n package_dir={'autoperf': 'autoperf'},\n package_data={'autoperf': ['profiler/perfpoint.so']},\n include_package_data=True\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 3247, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "setuptools.command.install.install", "line_number": 33, "usage_type": "name"}, {"api_name": "subprocess.check_call", "line_number": 37, "usage_type": "call"}, {"api_name": "setuptools.command.develop.develop", "line_number": 41, "usage_type": "name"}, {"api_name": "subprocess.check_call", "line_number": 45, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 49, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "556380912", "text": "\"\"\"\nGreenlet-local 
objects.\n\nThis module is based on `_threading_local.py`__ from the standard\nlibrary of Python 3.4.\n\n__ https://github.com/python/cpython/blob/3.4/Lib/_threading_local.py\n\nGreenlet-local objects support the management of greenlet-local data.\nIf you have data that you want to be local to a greenlet, simply create\na greenlet-local object and use its attributes:\n\n >>> mydata = local()\n >>> mydata.number = 42\n >>> mydata.number\n 42\n\nYou can also access the local-object's dictionary:\n\n >>> mydata.__dict__\n {'number': 42}\n >>> mydata.__dict__.setdefault('widgets', [])\n []\n >>> mydata.widgets\n []\n\nWhat's important about greenlet-local objects is that their data are\nlocal to a greenlet. If we access the data in a different greenlet:\n\n >>> log = []\n >>> def f():\n ... items = list(mydata.__dict__.items())\n ... items.sort()\n ... log.append(items)\n ... mydata.number = 11\n ... log.append(mydata.number)\n >>> greenlet = gevent.spawn(f)\n >>> greenlet.join()\n >>> log\n [[], 11]\n\nwe get different data. Furthermore, changes made in the other greenlet\ndon't affect data seen in this greenlet:\n\n >>> mydata.number\n 42\n\nOf course, values you get from a local object, including a __dict__\nattribute, are for whatever greenlet was current at the time the\nattribute was read. For that reason, you generally don't want to save\nthese values across greenlets, as they apply only to the greenlet they\ncame from.\n\nYou can create custom local objects by subclassing the local class:\n\n >>> class MyLocal(local):\n ... number = 2\n ... initialized = False\n ... def __init__(self, **kw):\n ... if self.initialized:\n ... raise SystemError('__init__ called too many times')\n ... self.initialized = True\n ... self.__dict__.update(kw)\n ... def squared(self):\n ... return self.number ** 2\n\nThis can be useful to support default values, methods and\ninitialization. Note that if you define an __init__ method, it will be\ncalled each time the local object is used in a separate greenlet. This\nis necessary to initialize each greenlet's dictionary.\n\nNow if we create a local object:\n\n >>> mydata = MyLocal(color='red')\n\nNow we have a default number:\n\n >>> mydata.number\n 2\n\nan initial color:\n\n >>> mydata.color\n 'red'\n >>> del mydata.color\n\nAnd a method that operates on the data:\n\n >>> mydata.squared()\n 4\n\nAs before, we can access the data in a separate greenlet:\n\n >>> log = []\n >>> greenlet = gevent.spawn(f)\n >>> greenlet.join()\n >>> log\n [[('color', 'red'), ('initialized', True)], 11]\n\nwithout affecting this greenlet's data:\n\n >>> mydata.number\n 2\n >>> mydata.color\n Traceback (most recent call last):\n ...\n AttributeError: 'MyLocal' object has no attribute 'color'\n\nNote that subclasses can define slots, but they are not greenlet\nlocal. They are shared across greenlets::\n\n >>> class MyLocal(local):\n ... __slots__ = 'number'\n\n >>> mydata = MyLocal()\n >>> mydata.number = 42\n >>> mydata.color = 'red'\n\nSo, the separate greenlet:\n\n >>> greenlet = gevent.spawn(f)\n >>> greenlet.join()\n\naffects what we see:\n\n >>> mydata.number\n 11\n\n>>> del mydata\n\n.. 
versionchanged:: 1.1a2\n Update the implementation to match Python 3.4 instead of Python 2.5.\n This results in locals being eligible for garbage collection as soon\n as their greenlet exits.\n\n\"\"\"\n\nfrom copy import copy\nfrom weakref import ref\nfrom contextlib import contextmanager\nfrom gevent.hub import getcurrent, PYPY\nfrom gevent.lock import RLock\n\n__all__ = [\"local\"]\n\n\nclass _wrefdict(dict):\n \"\"\"A dict that can be weak referenced\"\"\"\n\n\nclass _localimpl(object):\n \"\"\"A class managing thread-local dicts\"\"\"\n __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__'\n\n def __init__(self):\n # The key used in the Thread objects' attribute dicts.\n # We keep it a string for speed but make it unlikely to clash with\n # a \"real\" attribute.\n self.key = '_threading_local._localimpl.' + str(id(self))\n # { id(Thread) -> (ref(Thread), thread-local dict) }\n self.dicts = _wrefdict()\n\n def get_dict(self):\n \"\"\"Return the dict for the current thread. Raises KeyError if none\n defined.\"\"\"\n thread = getcurrent()\n return self.dicts[id(thread)][1]\n\n def create_dict(self):\n \"\"\"Create a new dict for the current thread, and return it.\"\"\"\n localdict = {}\n key = self.key\n thread = getcurrent()\n idt = id(thread)\n\n # If we are working with a gevent.greenlet.Greenlet, we can\n # pro-actively clear out with a link. Use rawlink to avoid\n # spawning any more greenlets\n try:\n rawlink = thread.rawlink\n except AttributeError:\n # Otherwise we need to do it with weak refs\n def local_deleted(_, key=key):\n # When the localimpl is deleted, remove the thread attribute.\n thread = wrthread()\n if thread is not None:\n del thread.__dict__[key]\n\n def thread_deleted(_, idt=idt):\n # When the thread is deleted, remove the local dict.\n # Note that this is suboptimal if the thread object gets\n # caught in a reference loop. 
We would like to be called\n # as soon as the OS-level thread ends instead.\n _local = wrlocal()\n if _local is not None:\n _local.dicts.pop(idt, None)\n wrlocal = ref(self, local_deleted)\n wrthread = ref(thread, thread_deleted)\n thread.__dict__[key] = wrlocal\n else:\n wrdicts = ref(self.dicts)\n\n def clear(_):\n dicts = wrdicts()\n if dicts:\n dicts.pop(idt, None)\n rawlink(clear)\n wrthread = None\n\n self.dicts[idt] = wrthread, localdict\n return localdict\n\n\n@contextmanager\ndef _patch(self):\n impl = object.__getattribute__(self, '_local__impl')\n orig_dct = object.__getattribute__(self, '__dict__')\n try:\n dct = impl.get_dict()\n except KeyError:\n # it's OK to acquire the lock here and not earlier, because the above code won't switch out\n # however, subclassed __init__ might switch, so we do need to acquire the lock here\n dct = impl.create_dict()\n args, kw = impl.localargs\n with impl.locallock:\n self.__init__(*args, **kw)\n with impl.locallock:\n object.__setattr__(self, '__dict__', dct)\n yield\n object.__setattr__(self, '__dict__', orig_dct)\n\n\nclass local(object):\n \"\"\"\n An object whose attributes are greenlet-local.\n \"\"\"\n __slots__ = '_local__impl', '__dict__'\n\n def __new__(cls, *args, **kw):\n if args or kw:\n if (PYPY and cls.__init__ == object.__init__) or (not PYPY and cls.__init__ is object.__init__):\n raise TypeError(\"Initialization arguments are not supported\")\n self = object.__new__(cls)\n impl = _localimpl()\n impl.localargs = (args, kw)\n impl.locallock = RLock()\n object.__setattr__(self, '_local__impl', impl)\n # We need to create the thread dict in anticipation of\n # __init__ being called, to make sure we don't call it\n # again ourselves.\n impl.create_dict()\n return self\n\n def __getattribute__(self, name):\n with _patch(self):\n return object.__getattribute__(self, name)\n\n def __setattr__(self, name, value):\n if name == '__dict__':\n raise AttributeError(\n \"%r object attribute '__dict__' is read-only\"\n % self.__class__.__name__)\n with _patch(self):\n return object.__setattr__(self, name, value)\n\n def __delattr__(self, name):\n if name == '__dict__':\n raise AttributeError(\n \"%r object attribute '__dict__' is read-only\"\n % self.__class__.__name__)\n with _patch(self):\n return object.__delattr__(self, name)\n\n def __copy__(self):\n impl = object.__getattribute__(self, '_local__impl')\n current = getcurrent()\n currentId = id(current)\n d = impl.get_dict()\n duplicate = copy(d)\n\n cls = type(self)\n if (PYPY and cls.__init__ != object.__init__) or (not PYPY and cls.__init__ is not object.__init__):\n args, kw = impl.localargs\n instance = cls(*args, **kw)\n else:\n instance = cls()\n\n new_impl = object.__getattribute__(instance, '_local__impl')\n tpl = new_impl.dicts[currentId]\n new_impl.dicts[currentId] = (tpl[0], duplicate)\n\n return instance\n", "sub_path": "satori-rules/plugin/libs/gevent/local.py", "file_name": "local.py", "file_ext": "py", "file_size_in_byte": 8772, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "gevent.hub.getcurrent", "line_number": 166, "usage_type": "call"}, {"api_name": "gevent.hub.getcurrent", "line_number": 173, "usage_type": "call"}, {"api_name": "weakref.ref", "line_number": 197, "usage_type": "call"}, {"api_name": "weakref.ref", "line_number": 198, "usage_type": "call"}, {"api_name": "weakref.ref", "line_number": 201, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 214, 
"usage_type": "name"}, {"api_name": "gevent.hub.PYPY", "line_number": 241, "usage_type": "name"}, {"api_name": "gevent.lock.RLock", "line_number": 246, "usage_type": "call"}, {"api_name": "gevent.hub.getcurrent", "line_number": 276, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 279, "usage_type": "call"}, {"api_name": "gevent.hub.PYPY", "line_number": 282, "usage_type": "name"}]} +{"seq_id": "268664658", "text": "def _all_():\n import os\n import time\n import sys\n import time\n import subprocess\n from baner import baner\n try:\n import re\n except:\n time.sleep(1)\n print(\"[-] Pleass Install The Librery --> re\")\n sys.exit()\n try:\n from colorama import Fore\n except:\n time.sleep(1)\n print(\"[-] Pleass Install The Librery --> colorama\")\n sys.exit()\n try:\n import netifaces\n except:\n time.sleep(1)\n print(\"[-] Pleass Install The Librery --> netifaces\")\n sys.exit()\n time.sleep(0.6)\n os.system(\"clear\")\n baner()\n time.sleep(1)\n def result():\n my_list = input(Fore.LIGHTBLACK_EX + \"\\n[!] Pleass Enter Your New Mac And Name Netword : \").split(\",\")\n if len(my_list) != 2:\n time.sleep(1)\n print(Fore.RED + \"\\n[-] Your Data Hase The Problem !!!\")\n try:\n nwe_mac = my_list[0]\n except:\n time.sleep(1)\n print(Fore.RED + \"\\n[-] Your Arguman Hase The Problem !!!\")\n sys.exit()\n try:\n name_network = my_list[1]\n except:\n time.sleep(1)\n print(Fore.RED + \"\\n[-] Your Arguman Hase The Problem !!!\")\n sys.exit()\n name_all_network = netifaces.interfaces()\n if name_network not in name_all_network:\n time.sleep(1)\n print(Fore.RED + \"\\n[-] Your Name Network Hase The Problem !!!\")\n sys.exit()\n else:\n pass\n if re.search(\"\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w:\" , nwe_mac):\n time.sleep(1)\n print(Fore.RED + \"\\n[-] Your New Mac Hase The Problem !!!\")\n sys.exit()\n subprocess.run([\"sudo\" , \"ifconfig\" , name_network , \"down\"] , shell=True)\n subprocess.run([\"sudo\" , \"ipconfig\" , name_network , \"hw\" , \"ether\" , nwe_mac])\n subprocess.run([\"sudo\" , \"ifconfig\" , name_network , \"up\"] , shell=True)\n", "sub_path": "mac_changer.py", "file_name": "mac_changer.py", "file_ext": "py", "file_size_in_byte": 1722, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "time.sleep", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 13, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 19, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "os.system", "line_number": 27, "usage_type": "call"}, {"api_name": "baner.baner", "line_number": 28, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 29, "usage_type": "call"}, {"api_name": "colorama.Fore.LIGHTBLACK_EX", "line_number": 31, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 31, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 33, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 34, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 34, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 39, "usage_type": "attribute"}, {"api_name": 
"colorama.Fore", "line_number": 39, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 40, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 44, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 45, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 45, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 46, "usage_type": "call"}, {"api_name": "netifaces.interfaces", "line_number": 47, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 50, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 50, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 51, "usage_type": "call"}, {"api_name": "re.search", "line_number": 54, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "colorama.Fore.RED", "line_number": 56, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 56, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 57, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 58, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 59, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "384297625", "text": "from panda3d.core import Point3, Vec3\n\n\n\"\"\"\nDifferent filters used throughout the code.\n\"\"\"\nclass Lpf():\n\t#-------------------------------------------------------------------------#\n\tdef __init__(self, val, length):\n\t#-------------------------------------------------------------------------#\n\t\tself.val = val\n\t\tself.cnt = 0\n\t\tself.len = length\n\n\t\tself.vals = []\n\n\t\tself.lastEntry = 0\n\n\t#-------------------------------------------------------------------------#\n\tdef filter(self, val):\n\t#-------------------------------------------------------------------------#\n\t\t\n\t\t# If the filter has all entries filled, subtract the oldest entry\n\t\tif(self.cnt == self.len):\n\t\t\tself.val -= self.vals[self.lastEntry]\n\t\t\tself.vals[self.lastEntry] = val\n\t\telse:\n\t\t\t# If the filter does not have all entries filled, increment the\n\t\t\t# number filled since we are adding one\n\t\t\tself.vals.append(val)\n\t\t\tself.cnt = self.cnt + 1\n\n\t\tself.val += self.vals[self.lastEntry]\n\t\tself.lastEntry = (self.lastEntry + 1)%self.len\n\n\t\treturn self.val/self.cnt\n\n\t#-------------------------------------------------------------------------#\n\tdef getVals(self):\n\t#-------------------------------------------------------------------------#\n\t\treturn ', '.join([str(x) for x in self.vals])\n\n\nclass LpfVec3():\n\tdef __init__(self, val, length):\n\t\tself.lpfx = Lpf(val.getX(), length)\n\t\tself.lpfy = Lpf(val.getY(), length)\n\t\tself.lpfz = Lpf(val.getZ(), length)\n\n\tdef filter(self, val):\n\t\treturn Vec3(self.lpfx.filter(val.getX()),\n\t\t\t\t\tself.lpfy.filter(val.getY()),\n\t\t\t\t\tself.lpfz.filter(val.getZ()))\n\n\n\nclass Hysteresis():\n\tdef __init__(self, lb, hb):\n\t\tself.lb = lb\n\t\tself.hb = hb\n\n\t\tself.STATE_ABOVE_HB = 0\n\t\tself.STATE_BELOW_HB_ABOVE_LB = 1\n\t\tself.STATE_BELOW_LB = 2\n\n\t\tself.state = self.STATE_ABOVE_HB\n\n\t\tself.minVal = float(\"inf\")\n\n\tdef filter(self, val):\n\t\t\n\t\tnextState = self.state\n\t\trVal = val\n\n\t\tif(self.state == self.STATE_ABOVE_HB):\n\t\t\tif(val < self.lb):\n\t\t\t\tnextState = self.STATE_BELOW_LB\n\t\t\t\trVal 
= val\n\t\t\telif(val < self.hb):\n\t\t\t\tnextState = self.STATE_BELOW_HB_ABOVE_LB\n\t\t\t\trVal = val\n\n\t\telif(self.state == self.STATE_BELOW_HB_ABOVE_LB):\n\t\t\tif(val < self.lb):\n\t\t\t\tnextState = self.STATE_BELOW_LB\n\t\t\t\tif(val < self.minVal):\n\t\t\t\t\tself.minVal = val\n\t\t\t\trVal = self.minVal\n\n\t\telif(self.state == self.STATE_BELOW_LB):\n\t\t\tif(val > self.hb):\n\t\t\t\tnextState = self.STATE_ABOVE_HB\n\t\t\t\trVal = val\n\t\t\telif(val < self.lb):\n\t\t\t\tif(val < self.minVal):\n\t\t\t\t\tself.minVal = val\n\t\t\t\trVal = self.minVal\n\t\t\telse:\n\t\t\t\trVal = self.minVal\n\n\t\tself.state = nextState\n\t\treturn max(rVal,0)\n\n\tdef getState(self):\n\t\treturn self.state\n\n\n\n", "sub_path": "filter.py", "file_name": "filter.py", "file_ext": "py", "file_size_in_byte": 2579, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "panda3d.core.Vec3", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "38474257", "text": "# ---------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# ---------------------------------------------------------\n\n# pylint: disable=arguments-renamed\n\nimport logging\nfrom typing import Optional\n\nfrom azure.ai.ml._restclient.v2022_05_01.models import ContainerResourceSettings\nfrom azure.ai.ml.entities._mixins import RestTranslatableMixin\n\nmodule_logger = logging.getLogger(__name__)\n\n\nclass ResourceSettings(RestTranslatableMixin):\n def __init__(self, cpu: Optional[str] = None, memory: Optional[str] = None, gpu: Optional[str] = None):\n self.cpu = cpu\n self.memory = memory\n self.gpu = gpu\n\n def _to_rest_object(self) -> ContainerResourceSettings:\n return ContainerResourceSettings(cpu=self.cpu, memory=self.memory, gpu=self.gpu)\n\n @classmethod\n def _from_rest_object(cls, settings: ContainerResourceSettings) -> \"ResourceSettings\":\n return (\n ResourceSettings(\n cpu=settings.cpu,\n memory=settings.memory,\n gpu=settings.gpu,\n )\n if settings\n else None\n )\n\n def _merge_with(self, other: \"ResourceSettings\") -> None:\n if other:\n self.cpu = other.cpu or self.cpu\n self.memory = other.memory or self.memory\n self.gpu = other.gpu or self.gpu\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, ResourceSettings):\n return NotImplemented\n if not other:\n return False\n # only compare mutable fields\n return self.cpu == other.cpu and self.memory == other.memory and self.gpu == other.gpu\n\n def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)\n", "sub_path": "sdk/ml/azure-ai-ml/azure/ai/ml/entities/_deployment/container_resource_settings.py", "file_name": "container_resource_settings.py", "file_ext": "py", "file_size_in_byte": 1788, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "azure.ai.ml.entities._mixins.RestTranslatableMixin", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 17, "usage_type": "name"}, {"api_name": "azure.ai.ml._restclient.v2022_05_01.models.ContainerResourceSettings", "line_number": 23, "usage_type": "call"}, {"api_name": "azure.ai.ml._restclient.v2022_05_01.models.ContainerResourceSettings", "line_number": 22, "usage_type": "name"}, {"api_name": "azure.ai.ml._restclient.v2022_05_01.models.ContainerResourceSettings", "line_number": 26, 
"usage_type": "name"}]} +{"seq_id": "610511458", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 20 16:24:21 2017\n\n@author: bmyers\n\"\"\"\n#Websites and example code I used to figure this out:\n#http://www.pythonforbeginners.com/regex/regular-expressions-in-python\n#https://stackoverflow.com/questions/23277268/parse-date-strings\n#https://stackoverflow.com/questions/14441754/scatter-plot-of-dates-and-times\n#http://www.u.arizona.edu/~erdmann/mse350/topics/plotting_with_pylab.html\n#https://stackoverflow.com/questions/35839529/count-of-days-of-each-month-from-a-list-of-dates-python\n#https://stackoverflow.com/questions/34py96518/python-using-a-dictionary-to-count-the-items-in-a-list\n#https://stackoverflow.com/questions/3486121/how-to-plot-data-against-specific-dates-on-the-x-axis-using-matplotlib?rq=1\n#https://stackoverflow.com/questions/9847213/how-do-i-get-the-day-of-week-given-a-date-in-python\n\nimport csv\nimport itertools\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom matplotlib.dates import WeekdayLocator\nimport re\nimport datetime as dt\nimport dateutil\nfrom dateutil.parser import *\nfrom datetime import *\nimport pylab\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nfrom plotly.graph_objs import *\nimport collections\nfrom collections import Counter\n\n#-----------------------------------------------\n#open file, create csv reader\n#-----------------------------------------------\nwith open(\"received final.csv\", \"r\", encoding=\"utf8\") as csvfile:\n reader = csv.reader(csvfile)\n \n#-----------------------------------------------\n#create list to store dates\n#-----------------------------------------------\n dates = [] \n \n#-----------------------------------------------\n#iterate through each row and field in the csv: the indentation of this section continues\n#-----------------------------------------------\n for row in reader:\n for field in row:\n \n#-----------------------------------------------\n#change text dates to datetime objects\n#----------------------------------------------- \n form_pattern = re.compile(\"^\\d{1,2}\\/\\d{1,2}\\/\\d{4}\") #look for this date pattern DD/MM/YYYY\n matches = re.findall(form_pattern, field) \n if matches: #if matches is not empty\n dates.append(matches)\n \ndates = list(itertools.chain(*dates)) #flatten list\n\nx = 0 #incrementing the for loop below\ndatetimes = [] #empty list for datetime objects\n\nfor i in dates: \n datetimes.append(datetime.strptime(str(dates[x]), '%m/%d/%Y')) #look for dates and convert\n x+=1\n \ndateslist = Counter(datetimes) #Counter counts the frequency of each date and put into dateslist dictionary in the format - datetime: key\n#-----------------------------------------------\n#plot received email dates for academic year\n#-----------------------------------------------\nx = [] #blank lists for axis\ny = []\n\nfor freq in dateslist.values(): #get the dictionary values (frequency counts) for the y axis\n y.append(freq)\nfor dates in dateslist.keys(): #get the dictionary keys (the dates) for the x axis\n x.append(dates) \n \nfig = plt.figure(figsize=(10, 5), dpi=150) #can change size of plot\ngraph = fig.add_subplot(111)\ngraph.plot_date(x,y,'g')\nax = fig.gca()\nax.set_xticklabels(['October', 'November', 'December', 'January', 'February', 'March', 'April', 'May', 'June'], fontsize=8)\nplt.ylabel('Number of emails')\nplt.title('Email distribution through 2016-17 school year')\n#plt.savefig('foo.png') #uncomment to save 
image to directory - can change filename\nplt.show()\n\n\n#-----------------------------------------------\n#plot received email dates per quarter ticks per week\n#-----------------------------------------------\n\n#fall\nfall = []\n\nfall_start_date = datetime(2016, 9, 19, 0, 0)\nfall_end_date = datetime(2017, 1, 8, 0, 0)\n\nfor i in datetimes:\n if i >= fall_start_date and i <= fall_end_date:\n fall.append(i)\n \nfall = Counter(fall)\n\nfallx = [] #blank lists for axis\nfally = []\n\nfor freq in fall.values(): #get the dictionary values (frequency counts) for the y axis\n fally.append(freq)\nfor dates in fall.keys(): #get the dictionary keys (the dates) for the x axis\n fallx.append(dates) \n \nfig = plt.figure(figsize=(15, 5), dpi=150) #can change size of plot\ngraph = fig.add_subplot(111)\nax = fig.gca()\nax.set_xticklabels(['September\\n15', 'September\\n29', 'October\\n13', 'October\\n27', 'November\\n10', 'November\\n24', 'December\\n8', 'December\\n22', 'January\\n5'], fontsize=8)\nplt.yticks(np.arange(min(fally), max(fally)+1, 1.0))\ngraph.plot_date(fallx,fally,color='#3B9AED', linestyle='solid', marker='o')\nplt.ylabel('Number of emails')\nplt.title('Fall quarter 2016')\n\nplt.show()\n\n#winter\nwinter = []\n\nwinter_start_date = datetime(2017, 1, 9, 0, 0)\nwinter_end_date = datetime(2017, 4, 2, 0, 0)\n\nfor i in datetimes:\n if i >= winter_start_date and i <= winter_end_date:\n winter.append(i)\n \nwinter = Counter(winter)\n\nwinterx = [] #blank lists for axis\nwintery = []\n\nfor freq in winter.values(): #get the dictionary values (frequency counts) for the y axis\n wintery.append(freq)\nfor dates in winter.keys(): #get the dictionary keys (the dates) for the x axis\n winterx.append(dates) \n \nfig = plt.figure(figsize=(10, 5), dpi=150) #can change size of plot\ngraph = fig.add_subplot(111)\nax = fig.gca()\nax.set_xticklabels(['January\\n11', 'January\\n25', 'February\\n8', 'February\\n22', 'March\\n8', 'March\\n22'], fontsize=8)\nplt.yticks(np.arange(min(fally), max(fally)+1, 1.0))\ngraph.plot_date(winterx,wintery,color='#3B9AED', linestyle='solid', marker='o')\nplt.ylabel('Number of emails')\nplt.title('Winter quarter 2017')\n\nplt.show()\n\n#spring\n\nspring = []\n\nspring_start_date = datetime(2017, 4, 3, 0, 0)\nspring_end_date = datetime(2017, 6, 17, 0, 0)\n\nfor i in datetimes:\n if i >= spring_start_date and i <= spring_end_date:\n spring.append(i)\n \nspring = Counter(spring)\n\nspringx = [] #blank lists for axis\nspringy = []\n\nfor freq in spring.values(): #get the dictionary values (frequency counts) for the y axis\n springy.append(freq)\nfor dates in spring.keys(): #get the dictionary keys (the dates) for the x axis\n springx.append(dates) \n \nfig = plt.figure(figsize=(10, 5), dpi=150) #can change size of plot\ngraph = fig.add_subplot(111)\nax = fig.gca()\nax.set_xticklabels(['April\\n2', 'April\\n16', 'April\\n30', 'May\\n14', 'May\\n28', 'June\\n11'], fontsize=8)\nplt.yticks(np.arange(min(fally), max(fally)+1, 1.0))\ngraph.plot_date(springx,springy,color='#3B9AED', linestyle='solid', marker='o')\nplt.ylabel('Number of emails')\nplt.title('Spring quarter 2017')\n\nplt.show()\n\n\n#-----------------------------------------------\n#pie chart number of days in the week\n#-----------------------------------------------\n\nsundays = []\nmondays = []\ntuesdays=[]\nwednesdays=[]\nthursdays=[]\nfridays=[]\nsaturdays=[]\n\nfor i in datetimes:\n if i.weekday() == 6:\n sundays.append(i)\n elif i.weekday() == 0:\n mondays.append(i)\n elif i.weekday() == 1:\n 
tuesdays.append(i)\n elif i.weekday() == 2:\n wednesdays.append(i)\n elif i.weekday() == 3:\n thursdays.append(i)\n elif i.weekday() == 4:\n fridays.append(i)\n elif i.weekday() == 5:\n saturdays.append(i)\n \nsunday_counts = len(sundays)\nmonday_counts = len(mondays)\ntuesday_counts = len(tuesdays)\nwednesday_counts = len(wednesdays)\nthursday_counts = len(thursdays)\nfriday_counts = len(fridays)\nsaturday_counts = len(saturdays)\n\n#show pie chart\nlabels = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']\ncounts = [sunday_counts, monday_counts, tuesday_counts, wednesday_counts, thursday_counts, friday_counts, saturday_counts]\nsizes = [sunday_counts, monday_counts, tuesday_counts, wednesday_counts, thursday_counts, friday_counts, saturday_counts]\ncolors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue', 'coral', 'blue', 'orange']\n\nplt.pie(sizes, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=140, pctdistance=0.8)\nplt.axis('equal')\nfig = plt.gcf()\nfig.set_size_inches(6,6)\nplt.suptitle('2016-2017 weekday distribution', fontsize=24)\nplt.show()", "sub_path": "processReceivedMail.py", "file_name": "processReceivedMail.py", "file_ext": "py", "file_size_in_byte": 8185, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "csv.reader", "line_number": 39, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 55, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 56, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.strptime", "line_number": 66, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.yticks", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pie", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 236, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 238, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 239, "usage_type": "name"}]} +{"seq_id": "405665309", "text": "# This file is a part of pyctr.\n#\n# Copyright (c) 2017-2021 Ian Burgwin\n# This file is licensed under The MIT License (MIT).\n# You can find the full license text in LICENSE in the root of this project.\n\nfrom os import PathLike\nfrom threading import Lock\nfrom typing import TYPE_CHECKING\n\nfrom ..common import PyCTRError\nfrom ..crypto import CryptoEngine\nfrom ..util import readle\nfrom .base.typereader import TypeReaderCryptoBase\n\nif TYPE_CHECKING:\n from typing import BinaryIO, Union\n\nNAND_MEDIA_UNIT = 0x200\n\n\nclass NANDError(PyCTRError):\n \"\"\"Generic error for NAND operations.\"\"\"\n\n\nclass InvalidNANDError(NANDError):\n \"\"\"Invalid NAND header exception.\"\"\"\n\n\nclass NAND(TypeReaderCryptoBase):\n def __init__(self, file: 'Union[PathLike, str, bytes, BinaryIO]', *, closefd: bool = True,\n crypto: CryptoEngine = None, dev: bool = False, otp: bytes = None,\n otp_file: 'Union[PathLike, str, bytes]' = None):\n super().__init__(file=file, closefd=closefd, crypto=crypto, dev=dev)\n\n self._lock = Lock()\n\n # set up otp if it was provided\n # otherwise it has to be in essential.exefs or set up manually with a custom CryptoEngine object\n if otp:\n 
self._crypto.setup_keys_from_otp(otp)\n elif otp_file:\n self._crypto.setup_keys_from_otp_file(otp_file)\n\n # ignore the signature, we don't need it\n self._file.seek(0x100, 1)\n header = self._file.read(0x100)\n if header[0:4] != b'NCSD':\n raise InvalidNANDError('NCSD magic not found')\n\n # make sure the Media ID is all zeros, since anything else makes it a CCI\n media_id = header[0x8:0x10]\n if media_id != b'\\0' * 8:\n raise InvalidNANDError('Not a NAND, this is a CCI')\n", "sub_path": "pyctr/type/nand.py", "file_name": "nand.py", "file_ext": "py", "file_size_in_byte": 1784, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 16, "usage_type": "name"}, {"api_name": "common.PyCTRError", "line_number": 22, "usage_type": "name"}, {"api_name": "base.typereader.TypeReaderCryptoBase", "line_number": 30, "usage_type": "name"}, {"api_name": "crypto.CryptoEngine", "line_number": 32, "usage_type": "name"}, {"api_name": "threading.Lock", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "348415432", "text": "import os\nimport sys\nfrom bs4 import BeautifulSoup\nfrom lxml import etree\nimport requests\nimport csv\nimport re\n\n#incase your connection keeps getting interrupted\nSTARTNEW = True\n\ndef setAction(whatAction):\n\treturn 'action='+whatAction+'&'\n\ndef setFormat(whatFormat):\n\treturn 'format='+whatFormat+'&'\n\ndef searchFor(searchTerms, limit):\n\treturn 'search='+searchTerms+'&limit='+limit+'&'\n\ndef setProp(whatProp):\n\treturn 'prop='+whatProp+'&'\n\ndef titles(whatTitles):\n\tlistOfTitles = ''\n\tfor title in whatTitles:\n\t\tlistOfTitles += title+\"|\"\n\treturn 'titles='+listOfTitles[:-1]+'&'\n\ndef getPage(url):\n\tpage = requests.get(url)\n\treturn page\n\ndef searchWikiURL(wikiURL, searchTerms, limit):\n\treturn wikiURL+setAction('opensearch')+setFormat('xml')+searchFor(searchTerms, limit)\n\ndef queryWikiURL(wikiURL, queryTerms):\n\treturn wikiURL+setAction('query')+setFormat('xml')+titles(queryTerms)\n\t\n\t\ndef pp(e):\n print(etree.tostring(e, pretty_print=True))\n print('')\n\ndef strip_ns(tree):\n for node in tree.iter():\n try:\n has_namespace = node.tag.startswith('{')\n except AttributeError:\n continue\n if has_namespace:\n node.tag = node.tag.split('}', 1)[1]\n\ndef stripString(str):\n\tnewStr = str\n\t#remove unwanted stuff\n\tnewStr = newStr.replace(\"\\xa0\",\"\")\n\tnewStr = newStr.replace(\"\\n\",\"\")\n\tnewStr = newStr.replace(\"\\u2009\",\"\")\n\t#remove anything after the )\n\tnewStr = re.sub(\"\\)(.*)\",\")\",newStr)\n\tnewStr = re.sub(\"\\[(\\d*)\\]\",\"\",newStr)\n\t#return stripped string\n\treturn newStr\n\ndef scrapePage(link, pageType):\n\n#scrape a page about a Name, Size, Location, Latitude, Longitude, Image, Type, Description\n\thtmlPage = getPage(link)\n\tmySoup = BeautifulSoup(htmlPage.content, \"html.parser\")\n\t#predefine data list\n\tscrapedData = [\"No name\",\"No size\",\"No location\",\"No longitude\", \"No latitude\", \"No image\",\"No Type\",\"No Description\"]\n\t#get the whole article header\n\tmtnName = mySoup.h1.string\n\tscrapedData[0] = mtnName\n\n\tif mtnName is None:\n\t\treturn []\n\n\tif \"Bridge\" in mtnName:\n\t\tpageType = \"Bridge\"\n\n\t#figure out what type it is\n\tmountainTags = ['Elevation']\n\tbridgeTags = ['Total length']\n\tbuildingTags = ['Architectural','Roof','Top Floor','Antenna spire','Height']\n\texcludeTags = ['Destroyed','Demolished','Founded']\n\tplannedTags = ['Proposed', 'Under 
construction','Approved']\n\n\tif pageType is \"Mountain\":\n\t\tfor i in range(len(mountainTags)):\n\t\t\telev = mySoup.find('th',string =mountainTags[i])\n\t\t\tif elev is not None:\n\t\t\t\t#scrape the data next in the table\n\t\t\t\tpartString = elev.find_next_sibling('td')\n\t\t\t\tif partString!=None:\n\t\t\t\t\tscrapedData[1] = stripString(partString.text)\n\t\t\t\t\tif len(scrapedData[1].strip())<=0:\n\t\t\t\t\t\t#no input for elevation\n\t\t\t\t\t\treturn []\n\t\t\t\telse:\n\t\t\t\t\treturn []\n\n\n\tif pageType is \"Bridge\":\n\t\tfor i in range(len(bridgeTags)):\n\t\t\telev = mySoup.find('th',string =bridgeTags[i])\n\t\t\tif elev is not None:\n\n\t\t\t\tpartString = elev.find_next_sibling('td')\n\t\t\t\tif partString!=None:\n\t\t\t\t\tscrapedData[1] = stripString(partString.text)\n\t\t\t\t\tif len(scrapedData[1].strip())<=0:\n\t\t\t\t\t\treturn []\n\t\t\t\telse:\n\t\t\t\t\treturn []\n\n\n\tif pageType is \"Building\":\n\n\t\t#exclude nonexistant buildings\n\t\tfor i in range(len(excludeTags)):\n\t\t\tif mySoup.find('th',string =excludeTags[i]) is not None:\n\t\t\t\treturn []\n\n\t\t#exlcude planned buildings\n\t\tstatus = mySoup.find('th',string ='Status')\n\t\tif status is not None:\n\t\t\tpartString = status.find_next_sibling('td')\n\t\t\tif partString is not None:\n\t\t\t\tfor i in range(len(plannedTags)):\n\t\t\t\t\tif plannedTags[i] in partString:\n\t\t\t\t\t\treturn []\n\n\n\t\telev = None\n\t\t#find the tag with height information\n\t\tfor i in range(len(buildingTags)):\n\t\t\tif elev is None:\n\t\t\t\telev = mySoup.find('th',string =buildingTags[i])\n\n\t\tif elev is None:\n\t\t\t#if no height then its not a building\n\t\t\treturn []\n\t\telse:\n\t\t\t#scrape the data next in the table\n\t\t\tpartString = elev.find_next_sibling('td')\n\t\t\tif partString!=None:\n\t\t\t\tscrapedData[1] = stripString(partString.text)\n\t\t\t\tif len(scrapedData[1].strip())<=0:\n\t\t\t\t\t#no input for elevation\n\t\t\t\t\treturn []\n\t\t\telse:\n\t\t\t\treturn []\n\n\tif pageType is None:\n\t\treturn []\n\n\tif \"No size\" in scrapedData[1]:\n\t\treturn []\n\n\tpara = mySoup.find('p')\n\n\twhile para != None:\n\n\t\tif re.sub(\"\\W\",\"\",mtnName) in re.sub(\"\\W\",\"\",para.text):\n\t\t\tscrapedData[7] = re.sub(\"\\[(\\d*)\\]\",\"\",para.text)\n\t\t\tbreak\n\t\tpara = para.find_next_sibling('p')\n\n\t#find the tag with location\n\tloc = mySoup.find('th',string ='Location')\n\tif loc is not None:\n\t\t#scrape the data next in the table\n\t\tif loc.find_next_sibling('td') is not None:\n\t\t\tscrapedData[2] = stripString(loc.find_next_sibling('td').text)\n\telse:\n\t\tloc = mySoup.find('th',string ='Locale')\n\t\tif loc is not None:\n\t\t\tif loc.find_next_sibling('td') is not None:\n\t\t\t\tscrapedData[2] = stripString(loc.find_next_sibling('td').text)\n\n\t#find the tag with latitude\n\tlat = mySoup.find(class_ ='latitude')\n\tif lat is not None:\n\t\t#scrape the text data from it\n\t\tscrapedData[3] = lat.text\n\n\t#find the tag with longitude\n\tlon = mySoup.find(class_ ='longitude')\n\tif lon is not None:\n\t\t#scrape the text data from it\n\t\tscrapedData[4] = lon.text\n\n\t#find the og:image property\n\timage = mySoup.find(property = 'og:image')\n\tif image is not None:\n\t\t#scrape the content data from it\n\t\tscrapedData[5] = image['content']\n\n\tscrapedData[6] = pageType\n\n\treturn scrapedData\n\ndef getLinksFromPage(link):\n\tlinkList = []\n\twiki = \"https://en.wikipedia.org/w/api.php?\"\n\twikiURL = queryWikiURL(wiki, link)+setProp('links')+'pllimit=500'\n\tapiContinue = \" 
\"\n\n\twhile (len(apiContinue)>0):\n\n\t\tprint(\"Visit \" + wikiURL)\n\t\trawPage = getPage(wikiURL)\n\t\troot = etree.fromstring(rawPage.content)\n\t\tstrip_ns(root)\n\n\t\tapiContinue = root.xpath('/api/continue/@plcontinue')\n\t\tlinks = root.xpath('/api/query/pages/page/links/pl/@title')\n\n\t\tfor i in range(0,len(links)):\n\t\t\t#add page name to list\n\t\t\tif 'Geography of' not in links[i]:\n\t\t\t\tlinkList.append(links[i])\n\n\t\tif(len(apiContinue)>0):\n\t\t\t#add continue api to link\n\t\t\twikiURL = queryWikiURL(wiki, link)+setProp('links')+'pllimit=500&plcontinue='+apiContinue[0]\n\n\treturn linkList\n\ndef getBridgeLinksFromPage(link):\n\tlinkList = []\n\twiki = \"https://en.wikipedia.org/w/api.php?\"\n\twikiURL = queryWikiURL(wiki, link)+setProp('links')+'pllimit=500'\n\tapiContinue = \" \"\n\tuniqueLinks = {}\n\n\twhile (len(apiContinue)>0):\n\n\t\tprint(\"Visit \" + wikiURL)\n\t\trawPage = getPage(wikiURL)\n\t\troot = etree.fromstring(rawPage.content)\n\t\tstrip_ns(root)\n\n\t\tapiContinue = root.xpath('/api/continue/@plcontinue')\n\t\tlinks = root.xpath('/api/query/pages/page/links/pl/@title')\n\n\t\tfor i in range(0,len(links)):\n\n\t\t\tif 'List of bridges in ' in links[i]:\n\t\t\t\tnewLinks = getLinksFromPage([links[i]])\n\n\t\t\t\tfor x in range(len(newLinks)):\n\t\t\t\t\tif \"Bridge\" in newLinks[x] and uniqueLinks.get(newLinks[x])==None:\n\t\t\t\t\t\tlinkList.append(newLinks[x])\n\t\t\t\t\t\tuniqueLinks[newLinks[x]]=1\n\t\t\telse:\n\t\t\t\tif \"Bridge\" in links[i] and uniqueLinks.get(links[i])==None:\n\t\t\t\t\tlinkList.append(links[i])\n\t\t\t\t\tuniqueLinks[links[i]]=1\n\n\t\tif(len(apiContinue)>0):\n\t\t\t#add continue api to link\n\t\t\twikiURL = queryWikiURL(wiki, link)+setProp('links')+'pllimit=500&plcontinue='+apiContinue[0]\n\n\treturn linkList\n\ndef main():\n\t#how many tuples to be put in the database\n\tmaxTuples = 10000\n\n\tmountainLinks = ['List of mountains by elevation']\n\tbridgeLinks = ['List_of_bridges']\n\tbuildingLinks = ['List_of_tallest_buildings_in_Asia','List_of_tallest_buildings_in_the_United_States','List_of_tallest_buildings_in_Europe','List_of_tallest_buildings_in_Oceania','List_of_tallest_buildings_in_South_America','List_of_tallest_buildings_in_Africa']\n\n\tlinkList = []\n\tuniqueNames = {}\n\n\tprint(\"Getting links\")\n\t\n\tpotentialMountains = getLinksFromPage(mountainLinks)\n\tpotentialBridges = getBridgeLinksFromPage(bridgeLinks)\n\tpotentialBuildings = getLinksFromPage(buildingLinks)\n\n\tlinkList = potentialMountains + potentialBridges + potentialBuildings\n\n\tmountainsSize = len(potentialMountains)\n\tbridgesSize = len(potentialBridges)\n\tbuildingsSize = len(potentialBuildings)\n\n\tprint(\"Potential links found \" + str(len(linkList)))\n\n\tstartIndex = 0;\n\n\tcurrentData = []\n\n\tif STARTNEW==False:\n\t\twith open('fullDatabase.csv') as f:\n\t\t\tcsvreader = csv.reader(f)\n\t\t\tfor row in csvreader:\n\t\t\t\tcurrentData.append(row)\n\t\t\t\tuniqueNames[row[0]]=1\n\t\t\t\tstartIndex = startIndex + 1\n\n\n\t#open database.csv\t\t\n\twith open('fullDatabase.csv','w') as f:\n\t\t#create csv writer object\n\t\tcsvwriter = csv.writer(f)\n\n\t\tprint(\"Getting data from individual pages\")\n\n\t\ttuplesAcquired = 0\n\t\tfor i in range(len(linkList)):\n\t\t\tif i>=startIndex:\n\t\t\t\t#stop if we have enough tuples\n\t\t\t\tif tuplesAcquired>=maxTuples:\n\t\t\t\t\t#stop when enough tuples added\n\t\t\t\t\ti = range(len(linkList))\n\t\t\t\telse:\n\n\t\t\t\t\tif \"List of\" not in linkList[i] and \"Template\" not in 
linkList[i] and \"Category\" not in linkList[i]:\n\t\t\t\t\t\t#create url\n\t\t\t\t\t\tpageURL = \"https://en.wikipedia.org/wiki/\"+linkList[i]\n\t\t\t\t\t\tprint(\"Scraping \"+pageURL)\n\n\t\t\t\t\t\tpageType = None\n\t\t\t\t\t\tif i < mountainsSize:\n\t\t\t\t\t\t\tpageType = \"Mountain\"\n\t\t\t\t\t\telif i < bridgesSize:\n\t\t\t\t\t\t\tpageType = \"Bridge\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpageType = \"Building\"\n\n\t\t\t\t\t\t#get scraped data in list form\n\t\t\t\t\t\tpageData = scrapePage(pageURL,pageType)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\tif len(pageData)>0:\n\t\t\t\t\t\t\tif uniqueNames.get(pageData[0])==None:\n\t\t\t\t\t\t\t\t#write whats in the array to the next empty row\n\t\t\t\t\t\t\t\ttuplesAcquired = tuplesAcquired + 1\n\t\t\t\t\t\t\t\tcsvwriter.writerow(pageData)\n\t\t\t\t\t\t\t\tuniqueNames[pageData[0]]=1\n\t\t\telse:\n\t\t\t\tcsvwriter.writerow(currentData[i])\n\t\t\t\tuniqueNames[currentData[i][0]]=1\n\n\tprint(\"Finished and made \"+str(tuplesAcquired)+\" tuples!\")\n\nif __name__ == '__main__':\n\tmain()\n", "sub_path": "landmarkAttractions/fullScraper.py", "file_name": "fullScraper.py", "file_ext": "py", "file_size_in_byte": 9510, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "lxml.etree.tostring", "line_number": 42, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 42, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 61, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 62, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 70, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 166, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 167, "usage_type": "call"}, {"api_name": "lxml.etree.fromstring", "line_number": 215, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 215, "usage_type": "name"}, {"api_name": "lxml.etree.fromstring", "line_number": 243, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 243, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 300, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 310, "usage_type": "call"}]} +{"seq_id": "233973576", "text": "from d2l import AllDeepLearning as d2l\nfrom mxnet import autograd, gluon, nd\nfrom mxnet import nd as np\nfrom mxnet.gluon import nn\nimport mxnet as mx\nimport sys\nfrom AI.AILearning.RecommenderSystems import MovieLens as loader\nfrom AI.AILearning.RecommenderSystems import MatrixFactorization as train_helper\n\n\n\"\"\"\n/////////// AutoRECT /////////////////// In AutoRec, instead of explicitly embedding \n users/items into low-dimensional space, it uses the column/row of the interaction matrix as the input, \n then reconstructs the interaction matrix in the output layer. h(R∗i)=f(W⋅g(VR∗i+μ)+b) Mini: arg_minW,V,μ,\n b∑i=1M∥R∗i−h(R∗i)∥2O+λ(∥W∥2F+∥V∥2F) /////////////////////// MODEL \n////////////////////////// \n A typical autoencoder consists of an encoder and a decoder. 
The encoder projects the input \n to hidden representations and the decoder maps the hidden layer to the reconstruction layer.\n \n\"\"\"\n\n\nclass AutoRec(nn.Block):\n def __init__(self, num_hidden, num_users, dropout_rate=0.05):\n super(AutoRec, self).__init__()\n self.encoder = gluon.nn.Dense(num_hidden, activation='sigmoid',\n use_bias=True)\n self.decoder = gluon.nn.Dense(num_users, use_bias=True)\n self.dropout_layer = gluon.nn.Dropout(dropout_rate)\n\n def forward(self, inputs):\n hidden = self.dropout_layer(self.encoder(inputs))\n pred = self.decoder(hidden)\n if autograd.is_training():\n return pred * nd.sign(inputs)\n else:\n return pred\n\n\ndef evaluator(net, inter_matrix, test_data, ctx):\n scores = []\n for values in inter_matrix:\n feat = gluon.utils.split_and_load(values, ctx, even_split=False)\n scores.extend([net(i).asnumpy() for i in feat])\n recons = nd.array([item for sublist in scores for item in sublist])\n rmse = nd.sqrt(nd.sum(nd.square(test_data - nd.sign(test_data) * recons)) /\n nd.sum(nd.sign(test_data)))\n return float(rmse.asscalar())\n\n\nctx = d2l.try_all_gpus()\ndf, num_users, num_items = loader.read_data_ml100k()\ntrain_data, test_data = loader.split_data_ml100k(df, num_users, num_items)\n_, _, _, train_inter_mat = loader.load_data_ml100k(train_data, num_users, num_items)\n_, _, _, test_inter_mat = loader.load_data_ml100k(test_data, num_users, num_items)\nnum_workers = 0 if sys.platform.startswith('win') else 4\ntrain_iter = gluon.data.DataLoader(train_inter_mat, shuffle=True,\n last_batch='rollover', batch_size=256,\n num_workers=num_workers)\ntest_iter = gluon.data.DataLoader(nd.array(train_inter_mat), shuffle=False,\n last_batch='keep', batch_size=1024,\n num_workers=num_workers)\nfor values in train_iter:\n print(values)\n break\n\nnet = AutoRec(500, num_users)\nnet.initialize(ctx=ctx, force_reinit=True,\n init=mx.init.Normal(0.01))\nlr, num_epochs, wd, optimizer = 0.002, 25, 1e-5, 'adam'\nloss = gluon.loss.L2Loss()\ntrainer = gluon.Trainer(net.collect_params(), optimizer, {'learning_rate': lr, 'wd': wd})\ntrain_helper.train_recsys_rating(net, train_iter, test_iter, loss, trainer,\n num_epochs, ctx, evaluator, inter_mat=test_inter_mat)\nd2l.plt.show()\n\n\"\"\" ///////////// SUMMARY ////////////////////////\n We can frame the matrix factorization algorithm with autoencoders,\n while integrating non-linear layers and dropout regularization.\n\n Experiments on the MovieLens 100K dataset show that AutoRec achieves superior performance than matrix factorization.\n\"\"\"\n", "sub_path": "AILearning/RecommenderSystems/AutoRecAutoencoders.py", "file_name": "AutoRecAutoencoders.py", "file_ext": "py", "file_size_in_byte": 3745, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "mxnet.gluon.nn.Block", "line_number": 23, "usage_type": "attribute"}, {"api_name": "mxnet.gluon.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "mxnet.gluon.nn.Dense", "line_number": 26, "usage_type": "call"}, {"api_name": "mxnet.gluon.nn", "line_number": 26, "usage_type": "attribute"}, {"api_name": "mxnet.gluon", "line_number": 26, "usage_type": "name"}, {"api_name": "mxnet.gluon.nn.Dense", "line_number": 28, "usage_type": "call"}, {"api_name": "mxnet.gluon.nn", "line_number": 28, "usage_type": "attribute"}, {"api_name": "mxnet.gluon", "line_number": 28, "usage_type": "name"}, {"api_name": "mxnet.gluon.nn.Dropout", "line_number": 29, "usage_type": "call"}, {"api_name": "mxnet.gluon.nn", "line_number": 
29, "usage_type": "attribute"}, {"api_name": "mxnet.gluon", "line_number": 29, "usage_type": "name"}, {"api_name": "mxnet.autograd.is_training", "line_number": 34, "usage_type": "call"}, {"api_name": "mxnet.autograd", "line_number": 34, "usage_type": "name"}, {"api_name": "mxnet.nd.sign", "line_number": 35, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 35, "usage_type": "name"}, {"api_name": "mxnet.gluon.utils.split_and_load", "line_number": 43, "usage_type": "call"}, {"api_name": "mxnet.gluon.utils", "line_number": 43, "usage_type": "attribute"}, {"api_name": "mxnet.gluon", "line_number": 43, "usage_type": "name"}, {"api_name": "mxnet.nd.array", "line_number": 45, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 45, "usage_type": "name"}, {"api_name": "mxnet.nd.sqrt", "line_number": 46, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 46, "usage_type": "name"}, {"api_name": "mxnet.nd.sum", "line_number": 46, "usage_type": "call"}, {"api_name": "mxnet.nd.square", "line_number": 46, "usage_type": "call"}, {"api_name": "mxnet.nd.sign", "line_number": 46, "usage_type": "call"}, {"api_name": "mxnet.nd.sum", "line_number": 47, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 47, "usage_type": "name"}, {"api_name": "mxnet.nd.sign", "line_number": 47, "usage_type": "call"}, {"api_name": "d2l.AllDeepLearning.try_all_gpus", "line_number": 51, "usage_type": "call"}, {"api_name": "d2l.AllDeepLearning", "line_number": 51, "usage_type": "name"}, {"api_name": "AI.AILearning.RecommenderSystems.MovieLens.read_data_ml100k", "line_number": 52, "usage_type": "call"}, {"api_name": "AI.AILearning.RecommenderSystems.MovieLens", "line_number": 52, "usage_type": "name"}, {"api_name": "AI.AILearning.RecommenderSystems.MovieLens.split_data_ml100k", "line_number": 53, "usage_type": "call"}, {"api_name": "AI.AILearning.RecommenderSystems.MovieLens", "line_number": 53, "usage_type": "name"}, {"api_name": "AI.AILearning.RecommenderSystems.MovieLens.load_data_ml100k", "line_number": 54, "usage_type": "call"}, {"api_name": "AI.AILearning.RecommenderSystems.MovieLens", "line_number": 54, "usage_type": "name"}, {"api_name": "AI.AILearning.RecommenderSystems.MovieLens.load_data_ml100k", "line_number": 55, "usage_type": "call"}, {"api_name": "AI.AILearning.RecommenderSystems.MovieLens", "line_number": 55, "usage_type": "name"}, {"api_name": "sys.platform.startswith", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 56, "usage_type": "attribute"}, {"api_name": "mxnet.gluon.data.DataLoader", "line_number": 57, "usage_type": "call"}, {"api_name": "mxnet.gluon.data", "line_number": 57, "usage_type": "attribute"}, {"api_name": "mxnet.gluon", "line_number": 57, "usage_type": "name"}, {"api_name": "mxnet.gluon.data.DataLoader", "line_number": 60, "usage_type": "call"}, {"api_name": "mxnet.gluon.data", "line_number": 60, "usage_type": "attribute"}, {"api_name": "mxnet.gluon", "line_number": 60, "usage_type": "name"}, {"api_name": "mxnet.nd.array", "line_number": 60, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 60, "usage_type": "name"}, {"api_name": "mxnet.init.Normal", "line_number": 69, "usage_type": "call"}, {"api_name": "mxnet.init", "line_number": 69, "usage_type": "attribute"}, {"api_name": "mxnet.gluon.loss.L2Loss", "line_number": 71, "usage_type": "call"}, {"api_name": "mxnet.gluon.loss", "line_number": 71, "usage_type": "attribute"}, {"api_name": "mxnet.gluon", "line_number": 71, "usage_type": "name"}, 
{"api_name": "mxnet.gluon.Trainer", "line_number": 72, "usage_type": "call"}, {"api_name": "mxnet.gluon", "line_number": 72, "usage_type": "name"}, {"api_name": "AI.AILearning.RecommenderSystems.MatrixFactorization.train_recsys_rating", "line_number": 73, "usage_type": "call"}, {"api_name": "AI.AILearning.RecommenderSystems.MatrixFactorization", "line_number": 73, "usage_type": "name"}, {"api_name": "d2l.AllDeepLearning.plt.show", "line_number": 75, "usage_type": "call"}, {"api_name": "d2l.AllDeepLearning.plt", "line_number": 75, "usage_type": "attribute"}, {"api_name": "d2l.AllDeepLearning", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "651804889", "text": "##default import\nfrom django.core import serializers\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.utils import timezone\nimport datetime\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import transaction\nimport json\nfrom django.forms.models import model_to_dict\n\n\n##model import\nfrom graduateProject.models import *\n\n##form import\nfrom graduateProject.forms import PostForm, LiveinterviewForm\nfrom graduateProject.forms import *\n\n# Create your views here.\ndef index(request):\n active = \"index\"\n return render(request, 'graduateProject/index/index.html', {'active':active})\n\n\n\n##matching views\n####question request views\ndef questionRequest(request):\n active = \"matching\"\n try:\n fields = BaseCode.objects.filter(h_category__exact=\"001\").filter(m_category__exact=\"001\")\n tasktypes = BaseCode.objects.filter(h_category__exact=\"001\").filter(m_category__exact=\"002\")\n lifestyles = BaseCode.objects.filter(h_category__exact=\"001\").filter(m_category__exact=\"003\")\n questionRequests = QuestionRequest.objects.all().order_by('-pk').values('id', 'content', 'user_id')[:6]\n for index in questionRequests:\n index['questionRequestAnswers']=(QuestionRequestAnswerModel.objects.filter(request_no__exact=index['id']).select_related('answer_user').values('id', 'answer_user__last_name', 'amount', 'select_yn'))\n return render(request, 'graduateProject/matching/question_request.html', {'active':active, 'fields':fields, 'tasktypes':tasktypes, 'lifestyles':lifestyles, 'questionRequests':questionRequests})\n except Exception as ex:\n print('error occured :', ex)\n\ndef questionRequestCheckboxView(request):\n try:\n if request.method == \"POST\":\n fields = request.POST.getlist('fields[]')\n tasktypes = request.POST.getlist('tasktypes[]')\n lifestyles = request.POST.getlist('lifestyles[]')\n\n\n fieldQueryString = '['\n tasktypeQueryString = '['\n lifestyleQueryString = '['\n\n index = 0\n for field in fields:\n if index == len(fields)-1:\n fieldQueryString += '\"' + field[15:18] + '\"'\n else:\n fieldQueryString += '\"' + field[15:18] + '\",'\n index += 1\n\n index=0\n for tasktype in tasktypes:\n if index == len(tasktypes) - 1:\n tasktypeQueryString += '\"' + tasktype[19:22] + '\"'\n else:\n tasktypeQueryString += '\"' + tasktype[19:22] + '\",'\n index += 1\n\n index=0\n for lifestyle in lifestyles:\n if index == len(lifestyles) - 1:\n lifestyleQueryString += '\"' + lifestyle[20:23] + '\"'\n else:\n lifestyleQueryString += '\"' + lifestyle[20:23] + '\",'\n index += 1\n\n fieldQueryString += ']'\n tasktypeQueryString += ']'\n lifestyleQueryString += ']'\n\n fieldQueryString = json.loads(fieldQueryString)\n tasktypeQueryString = json.loads(tasktypeQueryString)\n lifestyleQueryString = 
json.loads(lifestyleQueryString)\n\n questionRequestCategories = QuestionRequestCategory.objects.filter(cat_m_category__exact='001').filter(cat_key__in=fieldQueryString) | \\\n QuestionRequestCategory.objects.filter(cat_m_category__exact='002').filter(cat_key__in=tasktypeQueryString) | \\\n QuestionRequestCategory.objects.filter(cat_m_category__exact='003').filter(cat_key__in=lifestyleQueryString)\n\n questionIdList = []\n for questionRequestCategory in questionRequestCategories:\n questionIdList.append(questionRequestCategory.request_no)\n\n index=0\n questionQueryString = '['\n for questionId in questionIdList:\n if index == len(questionIdList)-1:\n questionQueryString += '\"' + str(questionId.id) + '\"'\n else:\n questionQueryString += '\"' + str(questionId.id) + '\",'\n index += 1\n\n questionQueryString += ']'\n questionQueryString = json.loads(questionQueryString)\n\n questionRequestList = QuestionRequest.objects.filter(id__in = questionQueryString).order_by('-pk').values('id', 'content', 'user_id')\n\n for index in questionRequestList:\n answerQuerySet = QuestionRequestAnswerModel.objects.filter(request_no__exact=index['id']).select_related(\n 'answer_user').values('id', 'answer_user__last_name', 'amount', 'select_yn')\n index['questionRequestAnswers'] = answerQuerySet\n\n jsonReturnObject = list(questionRequestList)\n\n\n tmpList = []\n for questionRequest in questionRequestList:\n tmpDict = {}\n tmpDict['id'] = questionRequest['id']\n tmpDict['content'] = questionRequest['content']\n tmpDict['user_id'] = questionRequest['user_id']\n\n answerList = []\n for answer in questionRequest['questionRequestAnswers']:\n answerDict = {}\n answerDict['id'] = answer['id']\n answerDict['last_name'] = answer['answer_user__last_name']\n answerDict['amount'] = answer['amount']\n answerDict['select_yn'] = answer['select_yn']\n answerList.append(answerDict)\n tmpDict['questionRequestAnswers'] = answerList\n tmpList.append(tmpDict)\n\n\n\n\n return HttpResponse(json.dumps(tmpList), content_type=\"application/json\")\n else:\n fields = ''\n tastypes = ''\n lifestyles = ''\n return httpResponse('fail')\n except Exception as ex:\n print('error occured :', ex)\n\n@login_required\n@transaction.atomic\ndef questionRequestWriteView(request):\n active = \"questionRequestWrite\"\n\n try:\n if request.method == 'POST':\n\n form = QuestionRequestForm(request.POST)\n if form.is_valid():\n user_id = form.data['user_id']\n write_date_browser = form.data['write_date_browser']\n now = datetime.datetime.now()\n write_date_webserver = now.strftime('%Y%m%d')\n content = form.cleaned_data['content']\n fields = request.POST.getlist('fields')\n tasktypes = request.POST.getlist('tasktypes')\n lifestyles = request.POST.getlist('lifestyles')\n\n user = User.objects.get(id = user_id)\n questionRequest = QuestionRequest(user_id=user, write_date_browser=write_date_browser, write_date_webserver=write_date_webserver, content=content)\n questionRequest.save()\n\n request_no = QuestionRequest.objects.order_by('-pk')[0].id\n questionRequest = QuestionRequest.objects.get(id = request_no)\n seq = 1\n for field in fields:\n questionRequestCategory = QuestionRequestCategory(request_no=questionRequest, seq=seq, cat_h_category='001', cat_m_category='001', cat_key=field)\n questionRequestCategory.save()\n seq=seq+1\n\n for tasktype in tasktypes:\n questionRequestCategory = QuestionRequestCategory(request_no=questionRequest, seq=seq, cat_h_category='001', cat_m_category='002', cat_key=tasktype)\n questionRequestCategory.save()\n 
seq=seq+1\n\n for lifestyle in lifestyles:\n questionRequestCategory = QuestionRequestCategory(request_no=questionRequest, seq=seq, cat_h_category='001', cat_m_category='003', cat_key=lifestyle)\n questionRequestCategory.save()\n seq=seq+1\n return redirect(reverse('questionRequest'))\n return(\"fail\")\n elif request.method == 'GET':\n form = QuestionRequestForm()\n return render(request, 'graduateProject/matching/question_request_write.html', {'form':form, 'active':active})\n except Exception as ex:\n print('error occured :', ex)\n\n@login_required\n@transaction.atomic\ndef questionRequestAnswerWriteView(request, id):\n active = 'questionRequestAnswerWrite'\n questionRequest = QuestionRequest.objects.filter(id__exact = id).values('id', 'content').first\n form = QuestionRequestAnswerForm()\n return render(request, 'graduateProject/matching/question_request_answer_write.html', {'form': form, 'active': active, 'questionRequest':questionRequest, 'request_no': id})\n\n@login_required\n@transaction.atomic\ndef questionRequestAnswerWriteResView(request):\n try :\n form = QuestionRequestAnswerForm(request.POST)\n if form.is_valid():\n request_no = form.data['request_no']\n seq = QuestionRequestAnswerModel.objects.filter(request_no__exact=request_no)\n if seq:\n seq = QuestionRequestAnswerModel.objects.filter(request_no__exact=request_no).order_by('-pk')[0].seq\n seq+1\n else:\n seq = 1\n answer_user = form.data['user_id']\n now = datetime.datetime.now()\n write_date = now.strftime('%Y%m%d')\n content = form.cleaned_data['content']\n currency = form.cleaned_data['currency']\n amount = form.cleaned_data['amount']\n\n questionRequest = QuestionRequest.objects.get(id = request_no)\n user = User.objects.get(id = answer_user)\n\n questionRequestAnswer = QuestionRequestAnswerModel(request_no=questionRequest, seq=seq, answer_user=user, write_date = write_date, content=content, currency=currency, amount=amount)\n questionRequestAnswer.save()\n return redirect(reverse('questionRequest'))\n return(\"fail\")\n except Exception as ex:\n print('error occured :', ex)\n\n@login_required\n@transaction.atomic\ndef questionRequestAnswerSelectView(request, questionRequestAnswerId, timestamp, id):\n try:\n ##answer variables\n selectUser = User.objects.get(id=id)\n answerUser = QuestionRequestAnswerModel.objects.get(id=questionRequestAnswerId).answer_user\n answer = QuestionRequestAnswerModel.objects.get(id=questionRequestAnswerId)\n\n if selectUser.credit 0:\n msg = \" the id is alerady exist.\"\n msg += \"\"\n else:\n msg = \" the id is avaiable.\"\n msg += \"\"\n\n return HttpResponse(msg)\n\n\ndef signup_result(request):\n if request.method == \"POST\":\n username = request.POST['username']\n password = request.POST['password']\n last_name = request.POST['last_name']\n # phone = request.POST['phone']\n phone = '01012431234'\n # email = request.POST['email']\n email = 'test1@test1.com'\n birth_year = request.POST['birth_year']\n birth_month = request.POST['birth_month']\n birth_day = request.POST['birth_day']\n\n try:\n if username and User.objects.filter(username__exact=username).count() == 0 :\n # date_of_birth = datetime(birth_year, birth_month, birth_day)\n user = User.objects.create_user(\n username,password,last_name, email, phone, '1990-01-01'\n )\n\n redirection_page = 'signup_completed'\n\n else:\n redirection_page = 'error'\n except Exception as ex:\n print('error occured :', ex)\n\n return redirect(redirection_page)\n\n\ndef signup_completed(request):\n return render(request, 
'graduateProject/user/signup_completed.html')\n", "sub_path": "graduateProject/views/total_views.py", "file_name": "total_views.py", "file_ext": "py", "file_size_in_byte": 19234, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.shortcuts.render", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 82, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 83, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 84, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 104, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 137, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 137, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 158, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 158, "usage_type": "attribute"}, {"api_name": "django.shortcuts.redirect", "line_number": 186, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 190, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 146, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 147, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 147, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 200, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 194, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 195, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 195, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 216, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 216, "usage_type": "attribute"}, {"api_name": "django.shortcuts.redirect", "line_number": 227, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 202, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 203, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 203, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 242, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 244, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 244, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 270, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 270, "usage_type": "attribute"}, {"api_name": "django.shortcuts.redirect", "line_number": 359, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 232, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 233, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 233, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 371, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 364, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 378, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 396, "usage_type": "call"}, {"api_name": 
"django.shortcuts.redirect", "line_number": 426, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 430, "usage_type": "call"}]} +{"seq_id": "136559049", "text": "from logging import Formatter, getLogger, StreamHandler, DEBUG\nlogger = getLogger(__name__)\nhandler = StreamHandler()\nhandler.setLevel(DEBUG)\nhandler.setFormatter(Formatter(\"%(asctime)s- %(name)s - %(levelname)s - %(message)s\"))\nlogger.setLevel(DEBUG)\nlogger.addHandler(handler)\n\nimport json\nimport requests\nfrom collections import OrderedDict\n\nclass ImportHandler(object):\n\n _diff_exclude_keys = [\"id\", \"created_at\", \"updated_at\"]\n\n def __init__(self, model, contents):\n self.model = model\n self.model_name = model._meta.model_name\n self.model_fields = [field.name for field in model._meta.fields]\n try:\n self.unique_keys = model._meta.unique_together[0]\n except IndexError:\n self.unique_keys = [x for x in self.model_fields if x not in self._diff_exclude_keys]\n self.contents = [requests.structures.CaseInsensitiveDict(content) for content in contents]\n\n\n def update(self, expired=False):\n pre_all_content_ids = [ d[\"id\"] for d in self.model.objects.all().values(\"id\") ]\n updated_content_ids = list()\n\n for content in self.contents:\n new = self.model(**self._clean(content, cleaning=True))\n\n condition = { key: content[key] for key in self.unique_keys }\n query = self.model.objects.filter(**condition)\n\n if query.exists():\n old = query.get()\n self._update_content(old=old, new=new, dry=False)\n updated_content_ids.append(old.id)\n else:\n new.save()\n\n if expired:\n will_be_deleted_ids = set(pre_all_content_ids) - set(updated_content_ids)\n for id in will_be_deleted_ids:\n self.model.objects.get(id=id).delete()\n\n\n def _clean(self, content, cleaning=True):\n if cleaning:\n cleaned_content = dict()\n for field_name in self.model_fields:\n try:\n cleaned_content[field_name] = content[field_name]\n except KeyError:\n pass\n return cleaned_content\n else:\n return content\n\n\n def _update_content(self, old, new, dry=True):\n has_diff = False\n for field_name in [x for x in self.model_fields if x not in self._diff_exclude_keys]:\n val_old = str(getattr(old, field_name))\n val_new = str(getattr(new, field_name))\n if val_old != val_new:\n has_diff = True\n d = OrderedDict()\n d[\"model\"] = self.model_name\n d[\"id\"] = old.id\n d[\"unique_keys\"] = dict()\n for unique_key in self.unique_keys:\n d[\"unique_keys\"][unique_key] = getattr(old, unique_key)\n d[\"field_name\"] = field_name\n d[\"old\"] = val_old\n d[\"new\"] = val_new\n logger.info(\"[Diff] {0}\".format(json.dumps(d, ensure_ascii=False)))\n if not dry:\n setattr(old, field_name, val_new)\n if not dry:\n old.save()\n return has_diff\n", "sub_path": "import_handler.py", "file_name": "import_handler.py", "file_ext": "py", "file_size_in_byte": 3117, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 2, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 3, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 4, "usage_type": "argument"}, {"api_name": "logging.Formatter", "line_number": 5, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 6, "usage_type": "argument"}, {"api_name": "requests.structures.CaseInsensitiveDict", "line_number": 25, "usage_type": "call"}, {"api_name": "requests.structures", "line_number": 25, "usage_type": "attribute"}, 
{"api_name": "collections.OrderedDict", "line_number": 71, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "103645234", "text": "from PIL import Image, ImageGrab\nimport pyautogui\nfrom time import sleep\nimport mouse\nsleep(5)\nimport time\ntty = True\nwalk = True\nwhile tty:\n img = ImageGrab.grab( (422, 1306, 423, 1307) )\n img.save(\"D:/HTML/aim/screens/food.png\", \"BMP\")\n img = ImageGrab.grab( (319, 1307, 320, 1308) )\n img.save(\"D:/HTML/aim/screens/water.png\", \"BMP\")\n img = ImageGrab.grab( (281, 1385, 282, 1386) )\n img.save(\"D:/HTML/aim/screens/health.png\", \"BMP\")\n # img = ImageGrab.grab( (888, 652, 889, 653) )\n # img.save(\"D:/HTML/aim/screens/waterIcon.png\", \"BMP\")i\n # mouse.move(100, 100, absolute=False, duration=2)\n pyautogui.FAILSAFE = True\n pyautogui.PAUSE = 0.1\n #colorF = list(img.getdata())\n #print(\"\\ncolorFood\", colorF)\n #iprint(pyautogui.position())\n\n # #walk1\n # pyautogui.keyDown(\"w\")\n # sleep(1)\n # pyautogui.keyUp(\"w\")\n # sleep(0.2)\n # pyautogui.keyDown(\"s\")\n # sleep(1)\n # pyautogui.keyUp(\"s\")\n\n # food\n # col = Image.open('D:/HTML/aim/screens/food.png', 'r')\n # colorF = list(col.getdata())\n # print(\"\\ncolorFood\", colorF)\n\n # if colorF != [(249, 249, 249)]:\n # walk = False\n # pyautogui.press(\"Esc\")\n # print(\"FOOD IS RED\")\n # tty = False\n # break\n # else:\n # print(\"FOOD IS OK\")\n\n #walk2\n # pyautogui.keyDown(\"w\")\n # sleep(1)\n # pyautogui.keyUp(\"w\")\n # sleep(0.2)\n # pyautogui.keyDown(\"s\")\n # sleep(1)\n # pyautogui.keyUp(\"s\")\n\n #water\n # col = Image.open('D:/HTML/aim/screens/water.png', 'r')\n # colorW = list(col.getdata())\n # if colorW != [(246, 246, 246)]:\n # pyautogui.press('i')\n # pyautogui.moveTo(286, 858)\n # sleep(0.5)\n # pyautogui.click()\n # water = pyautogui.locateCenterOnScreen('D:/HTML/aim/screens/waterIcon.png')\n # pyautogui.moveTo(water)\n # pyautogui.click(clicks=2, interval=0.1)\n # sleep(5)\n # pyautogui.press('esc')\n # sleep(4)\n # if colorW == [(246, 246, 246)]:\n # print(\"WATER IS OK NOW\")\n # sleep(2)\n # #tty = False\n \n # else:\n # print(\"WATER IS OK\")\n\n #walk3\n # pyautogui.keyDown(\"w\")\n # sleep(1)\n # pyautogui.keyUp(\"w\")\n # sleep(0.2)\n # pyautogui.keyDown(\"s\")\n # sleep(1)\n # pyautogui.keyUp(\"s\")\n\n #health\n col = Image.open('D:/HTML/aim/screens/health.png', 'r')\n colorH = list(col.getdata())\n #print(colorH)\n\n if colorH != [(250, 250, 250)]:\n #print(\"STOP\\n\")\n pyautogui.press(\"Esc\")\n print(\"LOW HEALTH\")\n #break\n tty = False\n else:\n print(\"HEALTH IS OK\")\n", "sub_path": "aim.py", "file_name": "aim.py", "file_ext": "py", "file_size_in_byte": 2632, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "time.sleep", "line_number": 5, "usage_type": "call"}, {"api_name": "PIL.ImageGrab.grab", "line_number": 10, "usage_type": "call"}, {"api_name": "PIL.ImageGrab", "line_number": 10, "usage_type": "name"}, {"api_name": "PIL.ImageGrab.grab", "line_number": 12, "usage_type": "call"}, {"api_name": "PIL.ImageGrab", "line_number": 12, "usage_type": "name"}, {"api_name": "PIL.ImageGrab.grab", "line_number": 14, "usage_type": "call"}, {"api_name": "PIL.ImageGrab", "line_number": 14, "usage_type": "name"}, {"api_name": "pyautogui.FAILSAFE", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pyautogui.PAUSE", "line_number": 20, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 89, 
"usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 89, "usage_type": "name"}, {"api_name": "pyautogui.press", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "540099648", "text": "\"\"\"projekt_webowy URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nimport hello.views\nimport kalkulator.views\nimport sklep.views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n\n path('hello', hello.views.say_hello),\n path('godzina', hello.views.ktora_godzina),\n path('godzina.html', hello.views.ktora_godzina_html),\n path('info', hello.views.info),\n path('dodawanie', hello.views.dodawanie),\n path('rozmowa', hello.views.rozmowa),\n path('rozmowa_post', hello.views.rozmowa_post),\n\n path('kalkulator', kalkulator.views.oblicz),\n path('produkty', sklep.views.lista_produktow),\n]\n", "sub_path": "projekt_webowy/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1244, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "hello.views.views", "line_number": 25, "usage_type": "attribute"}, {"api_name": "hello.views", "line_number": 25, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "hello.views.views", "line_number": 26, "usage_type": "attribute"}, {"api_name": "hello.views", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "hello.views.views", "line_number": 27, "usage_type": "attribute"}, {"api_name": "hello.views", "line_number": 27, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "hello.views.views", "line_number": 28, "usage_type": "attribute"}, {"api_name": "hello.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "hello.views.views", "line_number": 29, "usage_type": "attribute"}, {"api_name": "hello.views", "line_number": 29, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "hello.views.views", "line_number": 30, "usage_type": "attribute"}, {"api_name": "hello.views", "line_number": 30, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "hello.views.views", "line_number": 31, "usage_type": "attribute"}, {"api_name": "hello.views", "line_number": 31, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, 
{"api_name": "kalkulator.views.views", "line_number": 33, "usage_type": "attribute"}, {"api_name": "kalkulator.views", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "sklep.views.views", "line_number": 34, "usage_type": "attribute"}, {"api_name": "sklep.views", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "624076170", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport time\nfrom measures.models import Humidity, Temp, Volt\nfrom measures.notifications import notify_hum, notify_temp, notify_volt\n\n__author__ = 'magnusknutas'\n\n\nimport logging\nimport sys\nimport argparse\nimport coloredlogs\nfrom database import db_session\n\nlogger = logging.getLogger(__name__)\n\n\ndef run_serial():\n from time import sleep\n import serial\n try:\n ser = serial.Serial(\"/dev/ttyUSB0\", 115200) # Establish the connection on a specific port\n except OSError:\n logger.info(\"No arduino atached retrying in 10 sec\")\n time.sleep(10)\n run_serial()\n counter = 32 # Below 32 everything in ASCII is gibberish\n\n latest_hum = None\n latest_temp = None\n latest_volt = None\n\n while True:\n counter += 1\n ser.write(str(chr(counter))) # Convert the decimal number to ASCII then send it to the Arduino\n line = ser.readline() # Read the newest output from the Arduino\n if line.startswith('Humidity:'):\n data = line.split(':')\n if not latest_hum or latest_hum != data[1].strip():\n latest_hum = data[1].strip()\n logger.info(\"Humidity %s\" % latest_hum + \"%\")\n h = Humidity(latest_hum)\n db_session.add(h)\n db_session.commit()\n notify_hum()\n elif line.startswith('Temperature:'):\n data = line.split(':')\n if not latest_temp or latest_temp != data[1].strip():\n latest_temp = data[1].strip()\n logger.info(\"Temp %s\" % latest_temp + \"°C\")\n t = Temp(latest_temp)\n db_session.add(t)\n db_session.commit()\n notify_temp()\n elif line.startswith('Volt:'):\n data = line.split(':')\n if not latest_volt or latest_volt != data[1].strip():\n latest_volt = data[1].strip()\n logger.info(\"Volt %s\" % latest_volt)\n v = Volt(latest_volt)\n db_session.add(v)\n db_session.commit()\n notify_volt()\n\n sleep(.1) # Delay for one tenth of a second\n\n if counter == 255:\n counter = 32\n\n\ndef init_database():\n from database import init_db\n init_db()\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Hobby KFU 570 project')\n parser.add_argument('command', metavar='C', type=str, nargs='+', help='Command to run')\n parser.add_argument('-l', '--log-level', action='store', type=str, dest='log_level', help='Log level', default='INFO')\n args = parser.parse_args()\n\n numeric_level = getattr(logging, args.log_level.upper(), None)\n if not isinstance(numeric_level, int):\n raise ValueError('Invalid log level: %s' % args.log_level)\n coloredlogs.install(level=numeric_level)\n logger.info('Log level set to %s', args.log_level)\n\n if 'init_db' in args.command:\n logger.info(\"Initiates DB\")\n init_database()\n\n if 'run_serial' in args.command:\n logger.info(\"running serial\")\n run_serial()\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3171, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", 
"line_number": 26, "usage_type": "call"}, {"api_name": "measures.models.Humidity", "line_number": 43, "usage_type": "call"}, {"api_name": "database.db_session.add", "line_number": 44, "usage_type": "call"}, {"api_name": "database.db_session", "line_number": 44, "usage_type": "name"}, {"api_name": "database.db_session.commit", "line_number": 45, "usage_type": "call"}, {"api_name": "database.db_session", "line_number": 45, "usage_type": "name"}, {"api_name": "measures.notifications.notify_hum", "line_number": 46, "usage_type": "call"}, {"api_name": "measures.models.Temp", "line_number": 52, "usage_type": "call"}, {"api_name": "database.db_session.add", "line_number": 53, "usage_type": "call"}, {"api_name": "database.db_session", "line_number": 53, "usage_type": "name"}, {"api_name": "database.db_session.commit", "line_number": 54, "usage_type": "call"}, {"api_name": "database.db_session", "line_number": 54, "usage_type": "name"}, {"api_name": "measures.notifications.notify_temp", "line_number": 55, "usage_type": "call"}, {"api_name": "measures.models.Volt", "line_number": 61, "usage_type": "call"}, {"api_name": "database.db_session.add", "line_number": 62, "usage_type": "call"}, {"api_name": "database.db_session", "line_number": 62, "usage_type": "name"}, {"api_name": "database.db_session.commit", "line_number": 63, "usage_type": "call"}, {"api_name": "database.db_session", "line_number": 63, "usage_type": "name"}, {"api_name": "measures.notifications.notify_volt", "line_number": 64, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 66, "usage_type": "call"}, {"api_name": "database.init_db", "line_number": 74, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 78, "usage_type": "call"}, {"api_name": "coloredlogs.install", "line_number": 86, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "156247515", "text": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, ESS LLP and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\nimport json\nfrom frappe.utils import getdate, cint\nfrom frappe import _\nimport datetime\nfrom frappe.core.doctype.sms_settings.sms_settings import send_sms\nfrom clinic.clinic.doctype.clinic_settings.clinic_settings import get_receivable_account,get_income_account\n\n\nclass ClientAppointmentCT(Document):\n\tdef on_update(self):\n\t\ttoday = datetime.date.today()\n\t\tappointment_date = getdate(self.appointment_date)\n\n\n\tdef save(self, *args, **kwargs):\n\t\t# duration is the only changeable field in the document\n\t\tif not self.is_new():\n\t\t\tself.db_set('duration', cint(self.duration))\n\t\telse:\n\t\t\tsuper(ClientAppointmentCT, self).save(*args, **kwargs)\n\n\ndef appointment_cancel(appointment_id):\n\tappointment = frappe.get_doc(\"Client Appointment CT\", appointment_id)\n\n\t# If invoice --> fee_validity update with -1 visit\n\tif appointment.sales_invoice:\n\t\tvalidity = frappe.db.exists({\"doctype\": \"Fee Validity\", \"ref_invoice\": appointment.sales_invoice})\n\t\tif validity:\n\t\t\tfee_validity = frappe.get_doc(\"Fee Validity\", validity[0][0])\n\t\t\tvisited = fee_validity.visited - 1\n\t\t\tfrappe.db.set_value(\"Fee Validity\", fee_validity.name, \"visited\", visited)\n\t\t\tif visited <= 0:\n\t\t\t\tfrappe.msgprint(\n\t\t\t\t\t_(\"Appointment cancelled, Please review and cancel the invoice 
{0}\".format(appointment.sales_invoice))\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tfrappe.msgprint(_(\"Appointment cancelled\"))\n\n\n@frappe.whitelist()\ndef get_availability_data(date, physician):\n\t\"\"\"\n\tGet availability data of 'physician' on 'date'\n\t:param date: Date to check in schedule\n\t:param physician: Name of the physician\n\t:return: dict containing a list of available slots, list of appointments and time of appointments\n\t\"\"\"\n\n\tdate = getdate(date)\n\tweekday = date.strftime(\"%A\")\n\n\tavailable_slots = []\n\tphysician_schedule_name = None\n\tphysician_schedule = None\n\ttime_per_appointment = None\n\n\t# get physicians schedule\n\tphysician_schedule_name = frappe.db.get_value(\"Doctor\", physician, \"physician_schedule\")\n\tif physician_schedule_name:\n\t\tphysician_schedule = frappe.get_doc(\"Physician Schedule CT\", physician_schedule_name)\n\t\ttime_per_appointment = frappe.db.get_value(\"Doctor\", physician, \"time_per_appointment\")\n\t\t#frappe.msgprint(json.dumps(time_per_appointment))\n\telse:\n\t\tfrappe.throw(_(\"Dr {0} does not have a Physician Schedule. Add it in Physician master\".format(physician)))\n\n\t#custom:inside below block i did change divide block (from_time to to_time and each divide time per appointment)\n\n\tif physician_schedule:\n\t\tfor t in physician_schedule.time_slots:\n\t\t\tif weekday == t.day:\n\t\t\t\tfrom_time=t.from_time\n\t\t\t\tto_time=t.to_time\n\t\t\t\twhile from_time0:\n\t\tconsult=frappe.get_doc(\"Consultation\",consultant_data[0].name)\n\t\tline_item={}\n\t\tline_item[\"item_code\"]=frappe.db.get_value(\"Clinic Settings\",\"Clinic Settings\",\"consultant_item\")\n\t\tline_item[\"qty\"]=1\n\t\tline_item[\"consultation\"]=consultant_data[0].name\n\t\top_consulting_charge = frappe.db.get_value(\"Doctor\",consult.physician, \"op_consulting_charge\")\n\t\tcost_center = frappe.db.get_value(\"Doctor\",consult.physician, \"cost_center\")\n\t\tif op_consulting_charge:\n\t\t\tline_item[\"rate\"] = op_consulting_charge\n\t\tif cost_center:\n\t\t\tline_item[\"cost_center\"]=cost_center\n\t\titems.append(line_item)\n\n\ttreatment_data=frappe.get_all(\"Client Treatment\",filters=[(\"Client Treatment\",\"appointment\",\"=\",appointment),(\"Client Treatment\",\"status\",\"in\",[\"Pending\",\"Completed\"]),(\"Client Treatment\",\"is_bill\",\"!=\",1)],fields=[\"name\"])\n\tif len(treatment_data)>0:\n\t\tfor row in treatment_data:\n\t\t\ttreatment=frappe.get_doc(\"Client Treatment\",row.name)\n\t\t\tline_item={}\n\t\t\tline_item[\"item_code\"]=treatment.treatment\n\t\t\tline_item[\"qty\"]=treatment.qty\n\t\t\tline_item[\"treatment\"]=row.name\n\t\t\tcost_center = frappe.db.get_value(\"Doctor\",treatment.doctor, \"cost_center\")\n\t\t\tif cost_center:\n\t\t\t\tline_item[\"cost_center\"]=cost_center\n\n\t\t\titems.append(line_item)\n\treturn items\n\n\n\n#custom:use to make invoice\n@frappe.whitelist()\ndef create_invoice(company, physician, patient, appointment_id, appointment_date):\n\tif not appointment_id:\n\t\treturn False\n\n\t#item_obj=getItemArray(company,physician,patient,appointment_id,appointment_date)\n\titem_object=getItemForInvoice(appointment_id)\n\tif len(item_object)>0:\n\t\tsales_invoice=frappe.get_doc(dict(\n\t\t\tdoctype=\"Sales Invoice\",\n\t\t\tcustomer=patient,\n\t\t\tdue_date=getdate(),\n\t\t\tappointment=appointment_id,\n\t\t\titems=item_object\n\t\t)).insert()\n\t\treturn sales_invoice.name\n\telse:\n\t\treturn False\t\n\ndef getItems(item_obj):\n\titems=[]\n\tcounter=0\n\tobj=json.loads(item_obj)\n\twhile counter0): 
\n for i in data:\n sendMsg(i[0],i[1])\n time.sleep(2)\n cur.execute('UPDATE messages SET status = \\'1\\' WHERE status = \\'0\\'')\n conn.commit()\n else:\n sendMsg('Block de notas',timeNoC())\n conn.close()\n t = random.randint(30,180)\n print(t)\n time.sleep(t)", "sub_path": "mensajes/EnviarMensajes.py", "file_name": "EnviarMensajes.py", "file_ext": "py", "file_size_in_byte": 1491, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "time.sleep", "line_number": 14, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 15, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 29, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 29, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 32, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 32, "usage_type": "name"}, {"api_name": "pymysql.connect", "line_number": 40, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 53, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "188750715", "text": "import base64\nimport re\n\nfrom lxml import html\nfrom lxml.etree import ParserError\n\n\ndef process(feed, parsed, entry, guid, message):\n feed_name = feed.name\n if message.is_multipart():\n for submessage in message.walk():\n only_filter_html(feed_name, submessage)\n else:\n only_filter_html(feed_name, message)\n return message\n\ndef only_filter_html(feed_name, message):\n if message.get_content_type() == 'text/html':\n do_filter(feed_name, message)\n\ndef do_filter(feed_name, message):\n doc, charset = decode(message)\n \n remove_heading(doc)\n remove_images(doc)\n if feed_name == \"Hamburgize\":\n beautify_hamburgize(doc, charset)\n if feed_name in (\"Golem\", \"Fefe\", \"Heise\"):\n paragraphize(doc)\n \n payload = encode(doc, message['Content-Transfer-Encoding'], charset)\n message.set_payload(payload)\n\ndef decode(message):\n charset = str(message.get_charset())\n content = str(message.get_payload(decode=True), charset)\n doc = html.fromstring(content)\n return doc, charset\n\ndef encode(doc, transfer_encoding, charset):\n filtered_content = html.tostring(doc, encoding=charset)\n if transfer_encoding == 'base64':\n filtered_content = base64.encodebytes(html.tostring(doc, encoding=charset))\n return str(filtered_content, charset)\n\ndef remove_heading(doc):\n for el in doc.xpath('/html/body/div[@id=\"entry\"]/h1[@class=\"header\"]'):\n el.getparent().remove(el)\n return\n else:\n print(\"Warning: Header not found\")\n\ndef remove_images(doc):\n for el in doc.xpath(\"//img\"):\n el.getparent().remove(el)\n\ndef beautify_hamburgize(doc, charset):\n body = doc.xpath('/html/body/div[@id=\"entry\"]/div[@id=\"body\"]')[0]\n body_html = encode(body, None, charset)\n paragraphs = re.findall('
((?!', body_html)\n    if paragraphs:\n        first_paragraph = paragraphs[0]\n        try:\n            p = html.fragment_fromstring(\"<p>{}</p>
\".format(first_paragraph))\n except ParserError as e:\n print(\"Error: Unable to parse fragment: {}\".format(e))\n return\n for el in body.xpath('./*'):\n body.remove(el)\n body.insert(0, p)\n else:\n unparsable = body_html.encode(\"ascii\", errors=\"replace\")\n print(\"Error: Regex doesn't work for: {}\".format(unparsable))\n\ndef paragraphize(doc):\n \"\"\"Encaspulate content in a paragraph.\"\"\"\n entry = doc.xpath('/html/body/div[@id=\"entry\"]')[0]\n body = entry.xpath('./div[@id=\"body\"]')[0]\n del body.attrib[\"id\"]\n p = html.Element(\"p\")\n p.insert(0, body)\n new_body = html.Element(\"div\")\n new_body.set(\"id\", \"body\")\n new_body.insert(0, p)\n entry.insert(0, new_body)", "sub_path": "hooks/hook_filter_content.py", "file_name": "hook_filter_content.py", "file_ext": "py", "file_size_in_byte": 2741, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "lxml.html.fromstring", "line_number": 37, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 37, "usage_type": "name"}, {"api_name": "lxml.html.tostring", "line_number": 41, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 41, "usage_type": "name"}, {"api_name": "base64.encodebytes", "line_number": 43, "usage_type": "call"}, {"api_name": "lxml.html.tostring", "line_number": 43, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 43, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 60, "usage_type": "call"}, {"api_name": "lxml.html.fragment_fromstring", "line_number": 64, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 64, "usage_type": "name"}, {"api_name": "lxml.etree.ParserError", "line_number": 65, "usage_type": "name"}, {"api_name": "lxml.html.Element", "line_number": 80, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 80, "usage_type": "name"}, {"api_name": "lxml.html.Element", "line_number": 82, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 82, "usage_type": "name"}]} +{"seq_id": "513860005", "text": "import os\r\nfrom glob import glob\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport random\r\nfrom tqdm import tqdm\r\nimport torch\r\nimport torchvision.transforms.functional as TF\r\nimport random\r\nimport torchvision.transforms as transforms\r\nfrom torch.utils.data import Dataset, DataLoader\r\nimport torch.nn as nn\r\nimport copy\r\nimport torch.nn.functional as F\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.style as style\r\nfrom torch.utils.tensorboard import SummaryWriter\r\nimport time\r\n\r\n\r\nclass Prostate_data(Dataset):\r\n\r\n def __init__(self, img_path='../harvard_data/TMA_Images', mask_path='../harvard_data/Gleason_masks_train',\r\n dataset_type='train', img_size=3100, valid_split=['ZT76'], test_split=['ZT80'], num_classes=5):\r\n self.img_path = img_path\r\n self.mask_path = mask_path\r\n self.img_size = img_size\r\n self.num_classes = num_classes\r\n self.file_names = []\r\n self.dataset_type = dataset_type\r\n slide_dict = {'valid': valid_split, 'test': test_split}\r\n self.flag_dict = {}\r\n for file in glob(self.img_path + '/*.jpg'):\r\n _file_name = file.split('\\\\')[-1]\r\n _slide_type = _file_name.split('.')[0].split('_')[0]\r\n if dataset_type == 'train':\r\n if not (_slide_type in valid_split) and not (_slide_type in test_split):\r\n for fname in self.all_files(_file_name):\r\n self.file_names.append(fname)\r\n self.flag_dict[fname] = False\r\n else:\r\n if _slide_type in 
slide_dict[dataset_type]:\r\n self.file_names.append(_file_name)\r\n self.flag_dict[_file_name] = False\r\n random.seed(10)\r\n random.shuffle(self.file_names)\r\n self.data = {}\r\n self.transform = {\r\n 'train': transforms.Compose([transforms.ColorJitter(0.2,0.2,0.2,0.2),transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),\r\n 'valid': transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\r\n }\r\n\r\n def __len__(self):\r\n return len(self.file_names)\r\n\r\n def all_files(self,_file_name):\r\n return [_file_name,_file_name+'_tranhor',_file_name+'_tranver']\r\n\r\n def __getitem__(self, idx):\r\n _file_name = self.file_names[idx]\r\n _file_flag = self.flag_dict[_file_name]\r\n if _file_flag:\r\n return self.data[_file_name]\r\n else:\r\n img_path = self.img_path+'/'+_file_name.split('_tran')[0] if '_tran' in _file_name else self.img_path + '/' + _file_name\r\n mask_path = self.mask_path + '/' + 'mask_' + _file_name.split('_tran')[0].split('.')[0] + '.png' if '_tran' in _file_name else self.mask_path + '/' + 'mask_' + _file_name.split('.')[0] + '.png'\r\n\r\n img = Image.open(img_path).resize((self.img_size, self.img_size)).convert('RGB')\r\n mask = Image.open(mask_path).resize((self.img_size, self.img_size)).convert('RGB')\r\n ## transforms\r\n if 'hor' in _file_name:\r\n img = TF.hflip(img)\r\n mask = TF.hflip(mask)\r\n if 'ver' in _file_name:\r\n img = TF.vflip(img)\r\n mask = TF.vflip(mask)\r\n if 'aff' in _file_name:\r\n img = TF.affine(img,20,(0,0),1.35,0)\r\n mask = TF.affine(mask,20,(0,0),1.35,0)\r\n\r\n mask_array = np.asarray(mask)\r\n oneh_mask = np.zeros((self.num_classes, self.img_size, self.img_size))\r\n for x in range(self.img_size):\r\n for y in range(self.img_size):\r\n pixel_class = self.get_class(mask_array[x, y,:])\r\n oneh_mask[pixel_class, x, y] = 1\r\n\r\n array_img = np.asarray(img)\r\n timg = copy.deepcopy(array_img)\r\n for x in range(self.img_size):\r\n for y in range(self.img_size):\r\n rgb_n = array_img[x, y, :] / 255.0\r\n if rgb_n[0] > 0.8 and rgb_n[1] > 0.8 and rgb_n[2] > 0.8:\r\n timg[x, y, :] = [0, 0, 0]\r\n final_img = Image.fromarray(timg.astype('uint8'), 'RGB')\r\n\r\n img_tensor = self.transform[self.dataset_type](final_img)\r\n mask_tensor = torch.from_numpy(oneh_mask).view(self.num_classes, self.img_size, self.img_size)\r\n self.data[_file_name] = (img_tensor, mask_tensor)\r\n self.flag_dict[_file_name] = True\r\n return self.data[_file_name]\r\n\r\n def get_class(self, rgb):\r\n '''\r\n takes in rgb values of the pixel and returns the class of the pixel\r\n '''\r\n rgb_n = rgb / 255.0\r\n\r\n # white\r\n if rgb_n[0] > 0.8 and rgb_n[1] > 0.8 and rgb_n[2] > 0.8:\r\n return 4\r\n # red\r\n elif rgb_n[0] > 0.8 and rgb_n[1] < 0.8 and rgb_n[2] < 0.8:\r\n return 3\r\n # yellow\r\n elif rgb_n[0] > 0.8 and rgb_n[1] > 0.8 and rgb_n[2] < 0.8:\r\n return 2\r\n # green\r\n elif rgb_n[0] < 0.8 and rgb_n[1] > 0.8 and rgb_n[2] < 0.8:\r\n return 0\r\n # blue\r\n elif rgb_n[0] < 0.8 and rgb_n[1] < 0.8 and rgb_n[2] > 0.8:\r\n return 1\r\n else:\r\n print(rgb_n)\r\n raise ValueError('Weird rgb combination! 
Did not match any of 5 classes.')\r\n\r\ndef soft_dice_loss(y_pred,y_true):\r\n '''y_pred: (-1,5,512,512) :predictions\r\n y_true: (512,512,5) : targets\r\n compute the soft dice loss\r\n\r\n '''\r\n y_true = y_true.view(-1,5,256,256)\r\n epsilon = 1e-7\r\n dice_numerator = epsilon + 2 * torch.sum(y_true*y_pred,axis=(2,3))\r\n dice_denominator = epsilon + torch.sum(y_true*y_true,axis=(2,3)) + torch.sum(y_pred*y_pred,axis=(2,3))\r\n dice_loss = 1 - torch.mean(dice_numerator/dice_denominator)\r\n\r\n return dice_loss\r\n\r\n\r\ndef show_train_predictions(model,trainset,device,idx_list):\r\n fig,axes = plt.subplots(nrows=len(idx_list),ncols=3,figsize=(15,15))\r\n for i in range(len(idx_list)):\r\n idx = idx_list[i]\r\n input_img = Image.fromarray(np.asarray(trainset[idx][0].view(trainset.img_size,trainset.img_size,3).squeeze()).astype('uint8'), 'RGB')\r\n target_img = get_rgb(trainset[idx][1].squeeze())\r\n with torch.no_grad():\r\n pred_img = get_rgb(model(trainset[idx][0].view(-1,3,trainset.img_size,trainset.img_size).float().to(device)).squeeze())\r\n if len(idx_list)>1:\r\n axes[i,0].imshow(input_img)\r\n axes[i,1].imshow(pred_img)\r\n axes[i,2].imshow(target_img)\r\n else:\r\n axes[0].imshow(input_img)\r\n axes[1].imshow(pred_img)\r\n axes[2].imshow(target_img)\r\n if len(idx_list)>1:\r\n axes[0,0].set_title('T Input')\r\n axes[0,1].set_title('T Prediction')\r\n axes[0,2].set_title('T Target')\r\n else:\r\n axes[0].set_title('T Input')\r\n axes[1].set_title('T Prediction')\r\n axes[2].set_title('T Target')\r\n\r\n return fig\r\n\r\ndef show_valid_predictions(model,validset,device,idx_list):\r\n fig,axes = plt.subplots(nrows=len(idx_list),ncols=3,figsize=(15,15))\r\n for i in range(len(idx_list)):\r\n idx = idx_list[i]\r\n input_img = Image.fromarray(np.asarray(validset[idx][0].view(validset.img_size,validset.img_size,3).squeeze()).astype('uint8'), 'RGB')\r\n target_img = get_rgb(validset[idx][1].squeeze())\r\n with torch.no_grad():\r\n pred_img = get_rgb(model(validset[idx][0].view(-1,3,validset.img_size,validset.img_size).float().to(device)).squeeze())\r\n if len(idx_list)>1:\r\n axes[i,0].imshow(input_img)\r\n axes[i,1].imshow(pred_img)\r\n axes[i,2].imshow(target_img)\r\n else:\r\n axes[0].imshow(input_img)\r\n axes[1].imshow(pred_img)\r\n axes[2].imshow(target_img)\r\n if len(idx_list)>1:\r\n axes[0,0].set_title('V Input')\r\n axes[0,1].set_title('V Prediction')\r\n axes[0,2].set_title('V Target')\r\n else:\r\n axes[0].set_title('V Input')\r\n axes[1].set_title('V Prediction')\r\n axes[2].set_title('V Target')\r\n\r\n return fig\r\n\r\ndef get_rgb(tensor_img):\r\n pallete_dict = {\r\n 0 : [0,255,0],\r\n 1 : [0,0,255],\r\n 2 : [255,255,255],\r\n 3 : [255,0,0],\r\n 4 : [255,255,0]\r\n }\r\n img_h = tensor_img.size()[2]\r\n out_img = np.zeros((img_h,img_h,3))\r\n for h in range(img_h):\r\n for w in range(img_h):\r\n pixel_class = torch.argmax(tensor_img[:,h,w]).item()\r\n out_img[h,w,:] = pallete_dict[pixel_class]\r\n final_img = Image.fromarray(out_img.astype('uint8'), 'RGB')\r\n return final_img\r\n\r\nclass Focalloss(nn.Module):\r\n\r\n def __init__(self,gamma=0):\r\n super(Focalloss,self).__init__()\r\n self.gamma = gamma\r\n\r\n def forward(self,outputs,targets_oneh,targets):\r\n soft_outs = F.softmax(outputs,dim=1)\r\n log_soft = F.log_softmax(outputs,dim=1)\r\n weight_loss = torch.pow((1 - soft_outs),self.gamma) * log_soft\r\n loss = 0.4*F.nll_loss(weight_loss,targets) + 0.6*soft_dice_loss(outputs,targets_oneh)\r\n return loss\r\n\r\ndef main():\r\n from learner import 
Learner\r\n from res_unet_dropout import ResUnet\r\n # lr=3e-5\r\n dprob=0.2\r\n epochs = 8\r\n\r\n trainset = Prostate_data(img_size=256, num_classes=3)\r\n validset = Prostate_data(dataset_type='valid', img_size=256, num_classes=3)\r\n datasets = {'train': trainset, 'valid': validset}\r\n\r\n for lr in [1e-4,5e-4,1e-3,5e-3,1e-2]:\r\n for gamma in [0] :\r\n # fig,axes = plt.subplots(nrows=1,ncols=6,figsize=(24,4))\r\n # imgs = []\r\n # for i in tqdm(range(6)):\r\n # imgs.append(get_rgb(trainset[i][1]))\r\n # for j in range(6):\r\n # axes[j].imshow(imgs[j])\r\n # plt.show()\r\n\r\n model = ResUnet(num_classes=5,dprob=dprob)\r\n criterion = Focalloss(gamma=gamma)\r\n optimizer = torch.optim.SGD(model.parameters(),lr=lr,momentum=0.9)\r\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,patience=10)\r\n dtime = '0057_1806'\r\n tb_logs = {'path':'logdirs/onevall_trials_aug/respre_SGD_plateau','comment':f'lr={lr}_gamma={gamma}_dprob={dprob}_{dtime}'}\r\n trainer = Learner(datasets,model,criterion,optimizer,scheduler,bs=8,num_workers=4)\r\n try :\r\n trainer.fit(tb_logs=tb_logs,epochs=epochs)\r\n # torch.save(trainer.model,f'logdirs/onevall_trials_aug/respre_SGD_plateau/lr={lr}_gamma={gamma}_dprob={dprob}_{dtime}/{dtime}')\r\n except KeyboardInterrupt:\r\n pass\r\n # torch.save(trainer.model,f'logdirs/onevall_trials_aug/respre_SGD_plateau/lr={lr}_gamma={gamma}_dprob={dprob}_{dtime}/{dtime}')\r\n\r\n\r\nif __name__=='__main__':\r\n main()\r\n", "sub_path": "focal_loss.py", "file_name": "focal_loss.py", "file_ext": "py", "file_size_in_byte": 11017, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 21, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 33, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 45, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 46, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 49, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 49, "usage_type": "name"}, {"api_name": "torchvision.transforms.ColorJitter", "line_number": 49, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 49, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 50, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 50, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 51, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 51, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 51, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 52, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 52, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 70, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 71, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 71, "usage_type": "name"}, {"api_name": "torchvision.transforms.functional.hflip", "line_number": 74, "usage_type": "call"}, {"api_name": "torchvision.transforms.functional", "line_number": 74, "usage_type": "name"}, {"api_name": "torchvision.transforms.functional.hflip", "line_number": 75, "usage_type": "call"}, {"api_name": 
"torchvision.transforms.functional", "line_number": 75, "usage_type": "name"}, {"api_name": "torchvision.transforms.functional.vflip", "line_number": 77, "usage_type": "call"}, {"api_name": "torchvision.transforms.functional", "line_number": 77, "usage_type": "name"}, {"api_name": "torchvision.transforms.functional.vflip", "line_number": 78, "usage_type": "call"}, {"api_name": "torchvision.transforms.functional", "line_number": 78, "usage_type": "name"}, {"api_name": "torchvision.transforms.functional.affine", "line_number": 80, "usage_type": "call"}, {"api_name": "torchvision.transforms.functional", "line_number": 80, "usage_type": "name"}, {"api_name": "torchvision.transforms.functional.affine", "line_number": 81, "usage_type": "call"}, {"api_name": "torchvision.transforms.functional", "line_number": 81, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 90, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 91, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 97, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 97, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 149, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 149, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 176, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 176, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 211, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 213, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 213, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 216, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 216, "usage_type": "name"}, {"api_name": "torch.nn.functional.softmax", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 223, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 224, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 224, "usage_type": "name"}, {"api_name": "torch.pow", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 226, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 226, "usage_type": "name"}, {"api_name": "res_unet_dropout.ResUnet", "line_number": 250, 
"usage_type": "call"}, {"api_name": "torch.optim.SGD", "line_number": 252, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 252, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.ReduceLROnPlateau", "line_number": 253, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 253, "usage_type": "attribute"}, {"api_name": "learner.Learner", "line_number": 256, "usage_type": "call"}]} +{"seq_id": "634302455", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Nov 3 10:21:19 2018\r\n\r\n@author: Srinivas\r\n\"\"\"\r\n\r\n# Word Negation Tracking\r\nimport nltk\r\n\r\nsentence = \"I was not happy with the team's performance\"\r\nwords = nltk.word_tokenize(sentence)\r\nnew_words = []\r\ntemp_word = \"\"\r\n\r\nfor word in words:\r\n if word == 'not':\r\n temp_word = \"not_\"\r\n elif temp_word == \"not_\":\r\n word = temp_word + word # It will be not_happy\r\n temp_word = \"\"\r\n if word != \"not\":\r\n new_words.append(word)\r\nsentence = ' '.join(new_words) \r\n\r\n ", "sub_path": "Word_Negation1.py", "file_name": "Word_Negation1.py", "file_ext": "py", "file_size_in_byte": 553, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "nltk.word_tokenize", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "404925934", "text": "#!/usr/bin/env python\n'''\nCopyright (c) 2015 Jared E. Stroud\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n'''\n\ntry:\n import socket\n import os\n import sys\n import argparse\n import time\nexcept ImportError as err:\n print(\"[Error] I don't have \" + str(err))\n\nclass sendData():\n \n '''\n Name: __init__\n Parameters: self\n Purpose: Initializes socket for the rest of the methods.\n '''\n def __init__(self):\n self.sock = socket.socket()\n\n '''\n Name: sendFile\n Parameters: fileName\n '''\n def sendFile(self, fileName, address, port):\n self.sock.connect((str(address), int(port)))\n data = open(fileName, \"rb\")\n\n while True:\n chunk = file.read(data)\n if not chunk:\n break # Entire file has been read in.\n self.sock.sendall(chunk)\n\n '''\n Name: copyFile\n Parameters: srcFile , destination\n srcFile: File desired to be transfered.\n destination: End point for file being transfered.\n Return: Nothing.\n '''\n def copyFile(self, srcFile, destination):\n try:\n os.path.join(str(fileName), str(destination))\n except Error:\n print(\"[Error] I could not copy the file\")\n sys.exit()\n\n\n\nif __name__ == \"__main__\":\n\n print(\"Starting Data send\")\n sendObj = sendData()\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--fileName\", required=True, type=str, help=\"Specify file to be sent.\")\n parser.add_argument(\"--address\", required=True, type=str, help=\"Specify the address to send the file to.\")\n parser.add_argument(\"--port\", required=True, type=int, help=\"Specify the port of th destination server.\")\n args = parser.parse_args()\n\n if args.fileName and args.address and args.port: # Ensuring all arguments are fulfilled.\n address = args.address# User specified file size.\n fileName = args.fileName # User specified file name.\n port = args.port # User specified port.\n\n sendObj.sendFile(fileName, address, port)\n", "sub_path": "lib/pipeData.py", "file_name": "pipeData.py", "file_ext": "py", "file_size_in_byte": 3073, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "socket.socket", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 69, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "96372097", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nnum = 100\n\nx = np.linspace(0, 100,num)\n\nx0 = 1.0\nx1 = 2.0\nx2 = 3.5\nx3 = -0.00\nscalar = 1000.0\n\n\ny = x0*pow(x,0.0) + x1*pow(x,1.0) + x2*pow(x,2.0) + x3*pow(x,3.0)\nz = y + scalar*np.random.randn(num)\nplt.plot(x,y)\nplt.plot(x,z,'ro')\n\nplt.show()\n\n\n\n", "sub_path": "regressionGui.py", "file_name": "regressionGui.py", "file_ext": "py", "file_size_in_byte": 298, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.linspace", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "158584806", "text": "\r\nimport os\r\nimport sys\r\nimport json\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torchsummary import summary\r\nwith open(os.path.join(sys.path[0], 'config.json')) as json_file:\r\n\tconfig = json.load(json_file)\r\n\r\nclass AudioNet(nn.Module):\r\n\r\n\tdef __init__(self):\r\n\t\tsuper(AudioNet, self).__init__() # (128, 1292)\r\n\t\tself.num_output = 5\r\n\r\n\t\tif(config[\"SAL0\"] == \"Wi\"):\r\n\t\t\tself.W_i = nn.Parameter(torch.randn(40,1)) # with channel num 16\r\n\t\t\tself.bi = nn.Parameter(torch.randn(1))\r\n\t\t\tself.SmSA0 = nn.Softmax(dim = 1)\r\n\t\tself.conv0 = nn.Sequential(nn.Conv1d(128, 32, kernel_size=8), nn.MaxPool1d(4, stride=4), nn.ReLU(), nn.BatchNorm1d(32),)\r\n\t\tif(config[\"CAL1\"] == \"FC\"):\r\n\t\t\tself.CA1_avg_pool = nn.AdaptiveAvgPool1d(1)\r\n\t\t\tself.CA1 = nn.Sequential(nn.Linear(in_features=32, out_features=32), nn.Softmax(dim=1),)\r\n\t\tif(config[\"CAL1\"] == \"Conv\"):\r\n\t\t\tself.CA1_avg_pool = nn.AdaptiveAvgPool1d(1)\r\n\t\t\tself.conv_du1 = nn.Sequential(nn.Conv1d(32, 32//2, 1, bias=True), nn.ReLU(inplace=True), nn.Conv1d(32//2, 32, 1, bias=True), nn.Sigmoid())\r\n\t\tself.conv1 = nn.Sequential(nn.Conv1d(32, 16, kernel_size=8), nn.MaxPool1d(4, stride=4), nn.ReLU(), nn.BatchNorm1d(16),)\r\n\t\tif(config[\"CAL2\"] == \"FC\"):\r\n\t\t\tself.CA2_avg_pool = nn.AdaptiveAvgPool1d(1)\r\n\t\t\tself.CA2 = nn.Sequential(nn.Linear(in_features=16, out_features=16), nn.Softmax(dim=1),)\r\n\t\tif(config[\"CAL2\"] == \"Conv\"):\r\n\t\t\tself.CA2_avg_pool = nn.AdaptiveAvgPool1d(1)\r\n\t\t\tself.conv_du2 = nn.Sequential(nn.Conv1d(16, 16//2, 1, bias=True), nn.ReLU(inplace=True), nn.Conv1d(16//2, 16, 1, bias=True),nn.Sigmoid())\r\n\t\tself.fc0 = nn.Sequential(nn.Linear(in_features=1248, out_features=64), nn.Tanh(), nn.Dropout(), nn.Linear(in_features=64, out_features=self.num_output),)\r\n\t\tself.logsoftmax = nn.LogSoftmax(dim=1)\r\n\t\tself.apply(self._init_weights)\r\n\r\n\tdef forward(self, x):\r\n\t\tif(config[\"SAL0\"] == \"Wi\"):\r\n\t\t\tz0 = x.permute(0, 2, 1) #(N, L, C)\r\n\t\t\talpha0 = (self.SmSA0((torch.matmul(z0, self.W_i) + self.bi).squeeze(-1))).unsqueeze(-1) #(N, L, 1)-->(N, L)\r\n\t\t\tx = (z0*alpha0).permute(0, 2, 1)\r\n\t\tx = self.conv0(x) #(N, 32, 321)\r\n\t\tif(config[\"CAL1\"] == \"FC\"):\r\n\t\t\tbeta1 = self.CA1_avg_pool(x).squeeze(-1) #(N, 32, 1) --> (N, 32)\r\n\t\t\tbeta1 = self.CA1(beta1).unsqueeze(-1)\r\n\t\t\tx = x*beta1\r\n\t\tif(config[\"CAL1\"] == \"Conv\"):\r\n\t\t\tbeta1 = self.CA1_avg_pool(x) #(N, 32, 1) --> (N, 32)\r\n\t\t\tbeta1 = self.conv_du1(beta1)\r\n\t\t\tx = x*beta1\r\n\t\tx = self.conv1(x) #(N, 16, 78)\r\n\t\tif(config[\"CAL2\"] == \"FC\"):\r\n\t\t\tbeta2 = self.CA2_avg_pool(x).squeeze(-1) #(N, 16, 1) --> (N, 16)\r\n\t\t\tbeta2 = self.CA2(beta2).unsqueeze(-1) #(N, 16, 1)\r\n\t\tx = x*beta2 #(N, 16, 78)\r\n\t\tif(config[\"CAL2\"] == \"Conv\"):\r\n\t\t\tbeta2 = self.CA2_avg_pool(x) #(N, 16, 1) \r\n\t\t\tbeta2 = self.conv_du2(beta2)\r\n\t\t\tx = x*beta2\r\n\t\tflatten = x.view(x.size(0), -1)\r\n\t\t# print(flatten.shape)\r\n\t\tout = self.logsoftmax(self.fc0(flatten))\r\n\t\t#x = F.log_softmax(x, dim=1) # output (N, 5)\r\n\t\treturn 
out\r\n\r\n\tdef _init_weights(self, layer) -> None:\r\n\t\tif(isinstance(layer, nn.Conv1d)):\r\n\t\t\tnn.init.kaiming_uniform_(layer.weight)\r\n\t\telif(isinstance(layer, nn.Linear)):\r\n\t\t\tnn.init.xavier_uniform_(layer.weight)\r\n\r\n\r\nif __name__ == '__main__':\r\n\tnet = AudioNet()\r\n\tprint(net)\r\n\tnet = net.cuda()\r\n\tsummary(net, (128, 1292))", "sub_path": "project_CNNFC_Atten_Cls/models/CNNFC.py", "file_name": "CNNFC.py", "file_ext": "py", "file_size_in_byte": 3456, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.MaxPool1d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.AdaptiveAvgPool1d", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn.AdaptiveAvgPool1d", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn.Sigmoid", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.MaxPool1d", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.AdaptiveAvgPool1d", "line_number": 30, "usage_type": "call"}, {"api_name": 
"torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn.AdaptiveAvgPool1d", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn.Sigmoid", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn.Tanh", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn.Dropout", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn.Conv1d", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_uniform_", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torchsummary.summary", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "118373364", "text": "import os\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.rst')).read()\n\nsetup(\n name='django-active-menu',\n version='0.1',\n packages=['active_menu'],\n description='Simple, fast and easy django template tags to get active url in your html menu.',\n long_description=README,\n author='Slawomir Kabik',\n author_email='slawek@redsoftware.pl',\n url='https://github.com/yourname/django-myapp/',\n license='MIT',\n install_requires=[\n 'Django>=1.6',\n 'BeautifulSoup4==4.4.1'\n ]\n)\n\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 591, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.abspath", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "2944149", "text": 
"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('trueguide', '0003_place'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Photos',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('url', models.CharField(max_length=500)),\n ],\n ),\n migrations.AddField(\n model_name='place',\n name='photoid',\n field=models.ForeignKey(default=0, to='trueguide.Photos'),\n ),\n ]\n", "sub_path": "Server/tg/trueguide/migrations/0004_auto_20150517_1209.py", "file_name": "0004_auto_20150517_1209.py", "file_ext": "py", "file_size_in_byte": 689, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "371879336", "text": "from django.conf.urls import url, include\nfrom . 
import views\n \nurlpatterns = [\n url(r'^$', views.index ), #LOG/REG\n url(r'^login$', views.login), #LOGIN\n url(r'^create$', views.create ), #this is the REG\n url(r'^ideas$', views.ideas ), #ideas\n url(r'^view/(?P\\d+)$', views.new), #View \n url(r'^new$', views.new ), #Add TEMPLATE\n url(r'^add$', views.add), #add job process\n url(r'^(?P\\d+)/delete$', views.delete), #Cancel\n url(r'edit/(?P\\d+)$', views.edit ), #EDIT \n url(r'^(?P\\d+)/update$', views.update ),\n url(r'^logout_user$', views.logout_user),\n]", "sub_path": "apps/belt_app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 623, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "63382637", "text": "import numpy as np\nfrom PIL import Image\n\n\ndef hex_to_rgb(hex_str):\n hex_str = hex_str.strip()\n\n if hex_str[0] == '#':\n hex_str = hex_str[1:]\n\n if len(hex_str) != 6:\n raise ValueError('Input #{} is not in #RRGGBB format.'.format(hex_str))\n\n r, g, b = hex_str[:2], hex_str[2:4], hex_str[4:]\n rgb = [int(n, base=16) for n in [r, g, b]]\n return np.array(rgb)\n\n\ndef binary_mask(crop_mask, palette):\n bin_mask = []\n for x in crop_mask:\n temp = []\n for y in x:\n crop = 0\n for i, ch in enumerate(y):\n if ch >= y[crop]:\n crop = i\n temp.append(hex_to_rgb(palette[crop]))\n bin_mask.append(temp)\n return np.array(bin_mask, dtype=np.uint8)\n\n\ndef read_png(file):\n image = Image.open(file)\n return np.array(image)\n", "sub_path": "tools/visualize.py", "file_name": "visualize.py", "file_ext": "py", "file_size_in_byte": 835, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 30, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 34, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "284066512", "text": "import pyglet, random\r\nfrom pyglet.gl import *\r\n\r\nclass pumpkin():\r\n\tdef __init__(self, height, width):\r\n\t\tself.x = random.randint(0, width-16)\r\n\t\tself.y = height\r\n\r\n\t\tself.pumpkin = pyglet.resource.image('pumpkin.png')\r\n\t\tself.pumpkin.width = 16 \r\n\t\tself.pumpkin.height = 16\r\n\r\n\tdef on_draw(self):\r\n\t\tself.pumpkin.blit(self.x, self.y)\r\n\r\nclass window(pyglet.window.Window):\r\n\tdef __init__(self, *args, **kwargs):\r\n\t\tsuper(window, 
self).__init__(*args, **kwargs)\r\n\r\n\t\tself.pumpkins = []\r\n\t\tfor i in range(30):\r\n\t\t\tself.pumpkins.append(pumpkin(self.height, self.width))\r\n\r\n\t\tself.bg = pyglet.resource.image('bg.jpg')\r\n\r\n\tdef update_y_offset(self, dt):\r\n\t\tfor i in range(len(self.pumpkins)):\r\n\t\t\tself.pumpkins[i].y -= (random.uniform(0.0, 10.0) / 10)\r\n\r\n\tdef on_draw(self):\r\n\t\tself.clear()\r\n\t\t\r\n\t\tglClear(GL_COLOR_BUFFER_BIT)\r\n\t\tglLoadIdentity()\r\n\t\tglEnable(GL_BLEND)\r\n\t\tglBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\r\n\r\n\t\tself.bg.blit(0, 0)\r\n\r\n\t\tfor i in range(len(self.pumpkins)):\r\n\t\t\tself.pumpkins[i].on_draw()\r\n\t\t\tif self.pumpkins[i].y < 0:\r\n\t\t\t\tprint('Pumpkin deleted')\r\n\t\t\t\tdel self.pumpkins[i]\r\n\t\t\t\tself.pumpkins.append(pumpkin(self.height, self.width))\r\n\t\tfor j in range(4):\r\n\t\t\tpyglet.clock.schedule_once(self.update_y_offset, j)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\twindow = window(width=640, height=480, caption='Pumpkin fall')\r\n\tpyglet.app.run()\r\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1510, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "random.randint", "line_number": 6, "usage_type": "call"}, {"api_name": "pyglet.resource.image", "line_number": 9, "usage_type": "call"}, {"api_name": "pyglet.resource", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pyglet.resource.image", "line_number": 24, "usage_type": "call"}, {"api_name": "pyglet.resource", "line_number": 24, "usage_type": "attribute"}, {"api_name": "random.uniform", "line_number": 28, "usage_type": "call"}, {"api_name": "pyglet.clock.schedule_once", "line_number": 47, "usage_type": "call"}, {"api_name": "pyglet.clock", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pyglet.app.run", "line_number": 52, "usage_type": "call"}, {"api_name": "pyglet.app", "line_number": 52, "usage_type": "attribute"}]} +{"seq_id": "522724405", "text": "from .models.zipline_app.fill import Fill\nfrom .models.zipline_app.order import Order\nfrom .models.zipline_app.asset import Asset\nfrom .models.zipline_app.placement import Placement\n\nfrom .widgets import AssetModelSelect2Widget, AccountModelSelect2Widget, ReadOnlyWidgetSimple, ReadOnlyWidgetAsset, ReadOnlyWidgetOrder, CustodianModelSelect2Widget, FillUnitWidget\nfrom django import forms\n\n# override widget in createview\n# http://stackoverflow.com/a/21407374/4126114\n# Override a Django generic class-based view widget\n# http://stackoverflow.com/a/27322032/4126114\nclass FillForm(forms.ModelForm):\n source=forms.CharField(required=False, widget = forms.HiddenInput())\n field_order = [\n 'pub_date', 'dedicated_to_order', 'fill_side', 'asset', 'fill_qty_unsigned', 'fill_unit',\n 'fill_price', 'category', 'is_internal', 'trade_date', 'settlement_date',\n 'custodian', 'fill_text',\n 'commission'\n ]\n class Meta:\n model=Fill\n exclude = [\"user\"]\n widgets = {\n 'pub_date': ReadOnlyWidgetSimple(),\n 'dedicated_to_order': ReadOnlyWidgetOrder(),\n 'custodian': CustodianModelSelect2Widget(),\n 'asset': ReadOnlyWidgetAsset(),\n 'fill_side': forms.HiddenInput(),\n 'fill_unit': FillUnitWidget(),\n }\n def clean_pub_date(self): return self.initial['pub_date'] #.strftime(\"%Y-%m-%d %H:%i:%s\")\n def clean_dedicated_to_order(self): return self.initial['dedicated_to_order']\n def 
clean_asset(self):\n aid = self.initial['asset']\n if not isinstance(aid, int): return aid\n return Asset.objects.get(id=aid)\n def clean_fill_side(self): return self.initial['fill_side']\n def clean_source(self): return self.initial['source'] if 'source' in self.initial else None\n def clean_fill_unit(self): return self.initial['fill_unit']\n\n def __init__(self, *args, **kwargs):\n super(FillForm, self).__init__(*args, **kwargs)\n self.fields['fill_unit'].widget.form_instance = self\n\nclass OrderForm(forms.ModelForm):\n source=forms.CharField(required=False, widget = forms.HiddenInput())\n field_order = [\n 'id',\n 'pub_date',\n 'user',\n 'order_side',\n 'account',\n 'asset',\n 'order_unit',\n 'order_qty_unsigned',\n\n # fields for tables.py\n 'asset_currency',\n 'order_amount',\n 'order_qty',\n\n 'am_type',\n 'order_type',\n 'limit_price',\n 'order_validity',\n 'validity_date',\n 'order_text',\n 'commission'\n ]\n\n class Meta:\n model=Order\n exclude=['user', 'order_bulk']\n widgets = {\n 'pub_date': ReadOnlyWidgetSimple(),\n 'asset': AssetModelSelect2Widget(),\n 'account': AccountModelSelect2Widget(),\n }\n def clean_pub_date(self): return self.initial['pub_date'] #.strftime(\"%Y-%m-%d %H:%i:%s\")\n def clean_source(self): return self.initial['source'] if 'source' in self.initial else None\n\n\nclass PlacementForm(forms.ModelForm):\n class Meta:\n model=Placement\n exclude = [\"date\", \"user\"]\n\n\nclass OrderDocumentForm(forms.Form):\n docfile = forms.FileField(\n widget=forms.ClearableFileInput(attrs={'multiple': True}),\n label='Select file(s)'\n )\n", "sub_path": "zipline_app/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 3065, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.forms.ModelForm", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 13, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 14, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 14, "usage_type": "call"}, {"api_name": "models.zipline_app.fill.Fill", "line_number": 22, "usage_type": "name"}, {"api_name": "widgets.ReadOnlyWidgetSimple", "line_number": 25, "usage_type": "call"}, {"api_name": "widgets.ReadOnlyWidgetOrder", "line_number": 26, "usage_type": "call"}, {"api_name": "widgets.CustodianModelSelect2Widget", "line_number": 27, "usage_type": "call"}, {"api_name": "widgets.ReadOnlyWidgetAsset", "line_number": 28, "usage_type": "call"}, {"api_name": "django.forms.HiddenInput", "line_number": 29, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 29, "usage_type": "name"}, {"api_name": "widgets.FillUnitWidget", "line_number": 30, "usage_type": "call"}, {"api_name": "models.zipline_app.asset.Asset.objects.get", "line_number": 37, "usage_type": "call"}, {"api_name": "models.zipline_app.asset.Asset.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.zipline_app.asset.Asset", "line_number": 37, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 46, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 46, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 47, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 47, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 47, "usage_type": 
"call"}, {"api_name": "models.zipline_app.order.Order", "line_number": 73, "usage_type": "name"}, {"api_name": "widgets.ReadOnlyWidgetSimple", "line_number": 76, "usage_type": "call"}, {"api_name": "widgets.AssetModelSelect2Widget", "line_number": 77, "usage_type": "call"}, {"api_name": "widgets.AccountModelSelect2Widget", "line_number": 78, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 84, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 84, "usage_type": "name"}, {"api_name": "models.zipline_app.placement.Placement", "line_number": 86, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 90, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 90, "usage_type": "name"}, {"api_name": "django.forms.FileField", "line_number": 91, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 91, "usage_type": "name"}, {"api_name": "django.forms.ClearableFileInput", "line_number": 92, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "30861989", "text": "# -*- coding: utf-8 -*-\r\nimport librosa\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport librosa.display\r\nimport torch\r\nfrom torch.utils.data import Dataset, DataLoader\r\nimport numpy as np\r\n\r\n\r\nimport os\r\n\r\ndef Get_align_beat_pitch_spectrogram(align_root_path, pitch_beat_root_path, wav_root_path):\r\n \r\n filename_list = os.listdir(align_root_path) #列出文件夹下所有的目录与文件\r\n path_list = []\r\n phone_list, beat_list, pitch_list, spectrogram_list = [],[],[],[]\r\n \r\n for i in range(0,len(filename_list)):\r\n if filename_list[i][-1] != 'm' and filename_list[i][-1] != 'e':\r\n path = os.path.join(align_root_path, filename_list[i])\r\n path_list.append(path)\r\n \r\n# print(filename_list[i][1:4], filename_list[i][4:])\r\n \r\n with open(path, 'r') as f:\r\n phone = f.read().strip().split(\" \")\r\n phone_list.append(phone)\r\n f.close()\r\n beat_path = os.path.join(pitch_beat_root_path, filename_list[i][1:4], filename_list[i][4:]+\"_beats.txt\")\r\n with open(beat_path, 'r') as f:\r\n beat_list.append(f.read().strip().split(\" \"))\r\n pitch_path = os.path.join(pitch_beat_root_path, filename_list[i][1:4], filename_list[i][4:]+\"_pitches.txt\")\r\n with open(pitch_path, 'r') as f:\r\n pitch_list.append(f.read().strip().split(\" \"))\r\n \r\n wav_path = os.path.join(wav_root_path, filename_list[i][1:4], filename_list[i][4:]+\".wav\")\r\n frame_length = 60/1000\r\n frame_shift = 30/1000 \r\n y, sr = librosa.load(wav_path,sr = None)\r\n hop_length = int(sr * frame_shift)\r\n n_fft = int(sr * frame_length)\r\n spectrogram_list.append(librosa.feature.melspectrogram(y=y, sr=sr,hop_length=hop_length, n_fft = n_fft))\r\n \r\n return phone_list, beat_list, pitch_list, spectrogram_list\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n align_root_path = \"C:/Users/PKU/Desktop/SVS_system/preprocessing/ch_asr/exp/alignment/clean_set/\" #文件夹目录\r\n pitch_beat_root_path = \"C:/Users/PKU/Desktop/SVS_system/preprocessing/ch_asr/exp/pitch_beat_extraction/clean/\"\r\n wav_root_path = 'C:/Users/PKU/Desktop/SVS_system/annotation/clean/'\r\n \r\n phone_list, beat_list, pitch_list, spectrogram_list = Get_align_beat_pitch_spectrogram(align_root_path, pitch_beat_root_path, wav_root_path)\r\n \r\n length = []\r\n for i in range(len(phone_list)):\r\n length.append(len(phone_list[i]))\r\n \r\n sample_num = len(phone_list)\r\n seq_length = max(length)\r\n \r\n \r\n Data = 
np.zeros((sample_num,seq_length,3))\r\n Label = np.zeros((sample_num,seq_length,128))\r\n \r\n for i in range(sample_num):\r\n for j in range(seq_length):\r\n if j < len(phone_list[i]):\r\n Data[i][j][0] = np.array(phone_list[i][j])\r\n if str(j) in beat_list[i]:\r\n Data[i][j][1] = 1\r\n if j < len(phone_list[i]): # 在这里写phone_list是因为每一个样本,pitch都比phone多一帧(原则:所有以phone为准)\r\n Data[i][j][2] = np.array(pitch_list[i][j])\r\n Label[i][j] = spectrogram_list[i][:,j]\r\n \r\n \r\n #创建子类\r\n class MyDataset(Dataset):\r\n #初始化,定义数据内容和标签\r\n def __init__(self, Data, Label):\r\n self.Data = Data\r\n self.Label = Label\r\n #返回数据集大小\r\n def __len__(self):\r\n return len(self.Data)\r\n #得到数据内容和标签\r\n def __getitem__(self, index):\r\n data = torch.Tensor(self.Data[index])\r\n label = torch.IntTensor(self.Label[index])\r\n return data, label\r\n \r\n dataset = MyDataset(Data, Label)\r\n # print(dataset)\r\n # print('dataset大小为:', dataset.__len__())\r\n # print(dataset.__getitem__(0))\r\n # print(dataset[0])\r\n#\r\n##创建DataLoader迭代器\r\n dataloader = DataLoader(dataset,batch_size= 2, shuffle = False, num_workers= 0)\r\n for i, item in enumerate(dataloader):\r\n print('i:', i)\r\n data, label = item\r\n print('data:', data)\r\n print('label:', label)\r\n \r\n \r\n \r\n", "sub_path": "model/archive/build_dataloader.py", "file_name": "build_dataloader.py", "file_ext": "py", "file_size_in_byte": 4285, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.listdir", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "librosa.load", "line_number": 40, "usage_type": "call"}, {"api_name": "librosa.feature.melspectrogram", "line_number": 43, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.IntTensor", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "549897859", "text": "import astropy.units as u\nimport numpy as np\n\nimport grasshopper.interferometers as ifo\nimport grasshopper.sources as sources\n\nimport matplotlib.pyplot as plt\nplt.style.use(\"../thesis-style.mpl\")\n\naligo = ifo.AdvancedLIGO()\naligo_o1 = ifo.AdvancedLIGO(configuration=\"O1\")\nfigsize = (5.0, 2.5) # Fix this to use the Golden ratio please\n\nfig, ax = plt.subplots(1,1, figsize=figsize)\naligo.plot(ax)\naligo_o1.plot(ax)\nax.set_xlim([1e1, 2e3]);\nax.set_ylim([1e-23, 
1e-19]);\nfig.tight_layout()\nfig.savefig(\"../figures/aligo-asd.pdf\", dpi=300)\n\n\n\n#########################\n\n\n# aligo = ifo.AdvancedLIGO()\n\n\n# figsize = (5.0, 2.5) # Fix this to use the Golden ratio please\n\n# fig, ax = plt.subplots(1,1, figsize=figsize)\n# aligo.plot(ax)\n\n# for mass in [30, 32, 50, 100]:\n# for mass2 in [30, 100, 2000]:\n# cbc = sources.CBC(frequencies=np.logspace(-4, 5, 1000) * u.hertz,\n# m1=mass*u.solMass, m2=mass2*u.solMass, r=0.8*1e9*u.parsec)\n# cbc.plot(ax, label=\"{}, {}\".format(mass, mass2))\n\n# ax.set_xlim([1e1, 2e3]);\n# ax.set_ylim([1e-23, 1e-19]);\n# fig.tight_layout()\n# fig.savefig(\"../figures/aligo-cbc.pdf\", dpi=300)\n\n\n###########################\n\n\nelisa = ifo.EvolvedLISA()\nfigsize = (5.0, 2.5) # Fix this to use the Golden ratio please\n\nfig, ax = plt.subplots(1,1, figsize=figsize)\nelisa.plot(ax)\n#ax.set_xlim([1e-1, 2e3]);\n#ax.set_ylim([1e-23, 1e-19]);\nfig.tight_layout()\nfig.savefig(\"../figures/elisa-asd.pdf\", dpi=300)\n\n\niligo = ifo.InitialLIGO()\nvirgo = ifo.VIRGO()\ngeo = ifo.GEO()\ntama = ifo.TAMA()\n\nfig, ax = plt.subplots(1,1, figsize=figsize)\niligo.plot(ax)\nvirgo.plot(ax)\ngeo.plot(ax)\ntama.plot(ax)\n\nax.set_xlim([1e1, 1e3]);\nax.set_ylim([1e-23, 1e-19]);\nfig.tight_layout()\nfig.savefig(\"../figures/first-gen-asd.pdf\")\n\n\nimport grasshopper.sources as sources\n\nccsn = sources.CoreCollapseSupernova()\nfig, ax = plt.subplots(1,1, figsize=figsize)\naligo.plot(ax)\nccsn.plot(ax)\nax.set_xlim([1e1, 1e3]);\n#ax.set_ylim([1e-23, 1e-19]);\nfig.tight_layout()\nfig.savefig(\"../figures/source-ccsn.pdf\")\n\nccsn = sources.Type1ASupernova(r=30*1000*u.parsec)\nfig, ax = plt.subplots(1,1, figsize=figsize)\naligo.plot(ax)\nccsn.plot(ax)\nax.set_xlim([1e-1, 1e3]);\nax.set_ylim([1e-23, 1e-19]);\nfig.tight_layout()\nfig.savefig(\"../figures/source-t1asn.pdf\")\n\n# Izz = .02#1e-4*10**38#0.28*10**34 / 0.366*1e-4 * (np.sqrt(8*np.pi)/15)\n# pulsar = sources.Pulsar(\"J0534+2200\", Izz=Izz*u.kilogram*u.meter**2)\n# aligo = ifo.AdvancedLIGO(obs_time = 365*3600*u.second)\n\n# fig, ax = plt.subplots(1,1, figsize=figsize)\n# aligo.plot(ax, configuration=\"O1\")\n# pulsar.plot(ax)\n# ax.set_xlim(10, 1000)\n# ax.set_ylim(1e-26, 1e-23)\n# plt.tight_layout()\n# fig.savefig(\"/home/daniel/papers/thesis/figures/crab-strain-o1.pdf\")\n", "sub_path": "scripts/detector_asds.py", "file_name": "detector_asds.py", "file_ext": "py", "file_size_in_byte": 2720, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 8, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "grasshopper.interferometers.AdvancedLIGO", "line_number": 10, "usage_type": "call"}, {"api_name": "grasshopper.interferometers", "line_number": 10, "usage_type": "name"}, {"api_name": "grasshopper.interferometers.AdvancedLIGO", "line_number": 11, "usage_type": "call"}, {"api_name": "grasshopper.interferometers", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "grasshopper.interferometers.EvolvedLISA", "line_number": 50, "usage_type": "call"}, {"api_name": "grasshopper.interferometers", "line_number": 50, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.subplots", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "grasshopper.interferometers.InitialLIGO", "line_number": 61, "usage_type": "call"}, {"api_name": "grasshopper.interferometers", "line_number": 61, "usage_type": "name"}, {"api_name": "grasshopper.interferometers.VIRGO", "line_number": 62, "usage_type": "call"}, {"api_name": "grasshopper.interferometers", "line_number": 62, "usage_type": "name"}, {"api_name": "grasshopper.interferometers.GEO", "line_number": 63, "usage_type": "call"}, {"api_name": "grasshopper.interferometers", "line_number": 63, "usage_type": "name"}, {"api_name": "grasshopper.interferometers.TAMA", "line_number": 64, "usage_type": "call"}, {"api_name": "grasshopper.interferometers", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "grasshopper.sources.CoreCollapseSupernova", "line_number": 80, "usage_type": "call"}, {"api_name": "grasshopper.sources", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "grasshopper.sources.Type1ASupernova", "line_number": 89, "usage_type": "call"}, {"api_name": "grasshopper.sources", "line_number": 89, "usage_type": "name"}, {"api_name": "astropy.units.parsec", "line_number": 89, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}]} +{"seq_id": "261232250", "text": "import os\nimport json\nfrom django.conf import settings\nfrom decouple import config, Csv\nfrom configurations import Configuration, values\nimport logging.config\n\n\nclass Base(Configuration):\n # all the base settings here...\n # Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n # Quick-start development settings - unsuitable for production\n # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/\n\n SECRET_KEY = config('SECRET_KEY', default='')\n\n ALLOWED_HOSTS = []\n\n # Application definition\n\n INSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n 'crispy_forms',\n 'imagekit',\n\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n\n 'core',\n 'user',\n ]\n\n MIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n ]\n\n ROOT_URLCONF = \"sentive_saas.urls\"\n\n TEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(BASE_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 
'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n ]\n\n WSGI_APPLICATION = 'sentive_saas.wsgi.application'\n\n # Password validation\n # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators\n\n AUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n ]\n\n # Internationalization\n # https://docs.djangoproject.com/en/3.0/topics/i18n/\n\n LANGUAGE_CODE = 'fr-FR'\n\n TIME_ZONE = 'UTC'\n\n USE_I18N = True\n\n USE_L10N = True\n\n USE_TZ = True\n\n # Static files (CSS, JavaScript, Images)\n # https://docs.djangoproject.com/en/3.0/howto/static-files/\n SITE_ROOT = os.path.dirname(os.path.realpath(__file__))\n STATIC_ROOT = os.path.join(BASE_DIR, 'root')\n STATIC_URL = '/static/'\n STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n STATICFILES_DIRS = (\n os.path.join(BASE_DIR, \"static\"),\n )\n\n MEDIA_URL = '/media/'\n MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')\n\n CRISPY_TEMPLATE_PACK = 'bootstrap4'\n\n LOGIN_REDIRECT_URL = 'dashboard'\n LOGIN_URL = 'login'\n\n CRISPY_TEMPLATE_PACK = 'bootstrap4'\n\n # Authentification\n AUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n # `allauth` specific authentication methods, such as login by e-mail\n 'allauth.account.auth_backends.AuthenticationBackend',\n )\n\n AUTH_USER_MODEL = \"core.User\"\n\n SITE_ID = 1\n\n ACCOUNT_EMAIL_VERIFICATION = 'none'\n LOGIN_REDIRECT_URL = '/'\n\n \n # Logging Configuration\n # Clear prev config\n LOGGING_CONFIG = None\n\n # Get loglevel from env\n LOGLEVEL = os.getenv('DJANGO_LOGLEVEL', 'info').upper()\n\n logging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'console': {\n 'format': '%(asctime)s %(levelname)s [%(name)s:%(lineno)s] %(module)s %(process)d %(thread)d %(message)s',\n },\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'console',\n },\n },\n 'loggers': {\n '': {\n 'level': LOGLEVEL,\n 'handlers': ['console', ],\n },\n },\n })\n\n\nclass Dev(Base):\n \"\"\"\n The in-development settings and the default configuration.\n \"\"\"\n DEBUG = True\n\n Base.ALLOWED_HOSTS += ['127.0.0.1', '192.168.99.100']\n '''\n DATABASES = {\n 'default': {\n 'ENGINE': config('SQL_DATABASE_ENGINE', default=''),\n 'NAME': config('SQL_DATABASE_NAME_DEV', default=''),\n 'USER': config('SQL_DATABASE_USER_DEV', default=''),\n 'PASSWORD': config('SQL_DATABASE_PASSWORD_DEV', default=''),\n 'HOST': config('SQL_DATABASE_HOST_DEV', default=''),\n 'PORT': config('SQL_DATABASE_PORT_DEV', default=''),\n }\n }\n '''\n\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(Base.BASE_DIR, 'db.sqlite3'),\n }\n }\n\n STRIPE_SECRET_KEY = config('STRIPE_SECRET_KEY', default='')\n STRIPE_PUBLISHABLE_KEY = config('STRIPE_PUBLISHABLE_KEY', default='')\n\n SESSION_COOKIE_SECURE = False\n\n\nclass Prod(Base):\n \"\"\"\n The in-production settings.\n \"\"\"\n DEBUG = False\n Base.ALLOWED_HOSTS += 
['92.243.19.37', '192.168.99.100']\n TEMPLATE_DEBUG = DEBUG\n\n SESSION_COOKIE_SECURE = False\n", "sub_path": "app/sentive_saas/settings.py", "file_name": "settings.py", "file_ext": "py", "file_size_in_byte": 6114, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "configurations.Configuration", "line_number": 9, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 12, "usage_type": "call"}, {"api_name": "decouple.config", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 148, "usage_type": "call"}, {"api_name": "logging.config.config.dictConfig", "line_number": 150, "usage_type": "call"}, {"api_name": "logging.config.config", "line_number": 150, "usage_type": "attribute"}, {"api_name": "logging.config", "line_number": 150, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 196, "usage_type": "call"}, {"api_name": "os.path", "line_number": 196, "usage_type": "attribute"}, {"api_name": "decouple.config", "line_number": 200, "usage_type": "call"}, {"api_name": "decouple.config", "line_number": 201, "usage_type": "call"}]} +{"seq_id": "146213473", "text": "import nltk\nfrom nltk.corpus import state_union\nfrom nltk.tokenize import PunktSentenceTokenizer\n\n#purpose of this program is to intrpduce part of speech tagging\n#also chunking is introduced \n\ntrain_text = state_union.raw(\"2005-GWBush.txt\")\nsample_text = state_union.raw(\"2006-GWBush.txt\")\n\n\ncustom_sent_tokenizer = PunktSentenceTokenizer(train_text)\ntokenized = custom_sent_tokenizer.tokenize(sample_text)\n\ndef process_content():\n try:\n for i in tokenized:\n words = nltk.word_tokenize(i[5:])\n tagged = nltk.pos_tag(words)\n\n namedEnt = nltk.ne_chunk(tagged, binary = True)\n namedEnt.draw()\n\n ## Chunking\n ## chunkGram = r\"\"\"Chunk: {+}\n ## }{\"\"\"\n ##chunkParser = nltk.RegexpParser(chunkGram)\n ##chunked = chunkParser.parse(tagged)\n ##chunked.draw()\n\n\n # print(tagged) used for takking\n except Exception as e:\n print(str(e))\n\nprocess_content()", "sub_path": "tagging.py", "file_name": "tagging.py", "file_ext": "py", "file_size_in_byte": 1021, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "nltk.corpus.state_union.raw", "line_number": 8, "usage_type": "call"}, {"api_name": "nltk.corpus.state_union", "line_number": 8, "usage_type": "name"}, {"api_name": "nltk.corpus.state_union.raw", "line_number": 9, "usage_type": "call"}, {"api_name": "nltk.corpus.state_union", "line_number": 
9, "usage_type": "name"}, {"api_name": "nltk.tokenize.PunktSentenceTokenizer", "line_number": 12, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 18, "usage_type": "call"}, {"api_name": "nltk.pos_tag", "line_number": 19, "usage_type": "call"}, {"api_name": "nltk.ne_chunk", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "264030426", "text": "from flask import Flask, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = \"postgresql://postgres:mazyakidze652@localhost:5432/library\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\n\nclass Category(db.Model):\n __tablename__ = 'categories'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String())\n books = db.relationship('Book', backref='category', lazy=True)\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return f\"Category {self.name}\"\n\n\nclass Author(db.Model):\n __tablename__ = 'authors'\n\n id = db.Column(db.Integer, primary_key=True)\n first_name = db.Column(db.String())\n last_name = db.Column(db.String())\n date_of_birth = db.Column(db.Date())\n books = db.relationship('Book', backref='author', lazy=True)\n\n def __init__(self, first_name, last_name, date_of_birth):\n self.first_name = first_name\n self.last_name = last_name\n self.date_of_birth = date_of_birth\n\n def __repr__(self):\n return f\"Author: {self.first_name} {self.last_name}\"\n\n\nclass Book(db.Model):\n __tablename__ = 'books'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String())\n category_id = db.Column(db.Integer, db.ForeignKey('categories.id'), nullable=False)\n author_id = db.Column(db.Integer, db.ForeignKey('authors.id'), nullable=False)\n content = db.Column(db.Text())\n released_at = db.Column(db.Date())\n\n def __init__(self, name, category_id, author_id, content, released_at):\n self.name = name\n self.category_id = category_id\n self.author_id = author_id\n self.content = content\n self.released_at = released_at\n\n def __repr__(self):\n return f\"Book {self.name}\"\n\n\n@app.route('/authors', methods=['POST', 'GET'])\ndef handle_authors():\n if request.method == 'POST':\n if request.is_json:\n data = request.get_json()\n new_author = Author(first_name=data['first_name'],\n last_name=data['last_name'],\n date_of_birth=data['date_of_birth'])\n db.session.add(new_author)\n db.session.commit()\n return {\"message\": f\"Author {new_author.first_name} {new_author.last_name} has been created successfully.\"}\n else:\n return {\"error\": f\"The request payload is not in JSON format.\"}\n\n elif request.method == \"GET\":\n authors = Author.query.all()\n results = [\n {\n \"id\": author.id,\n \"first_name\": author.first_name,\n \"last_name\": author.last_name,\n \"date_of_birth\": author.date_of_birth,\n } for author in authors\n ]\n\n return {\"count\": len(results), \"authors\": results}\n\n\n@app.route('/authors/', methods=['GET', 'PUT', 'DELETE'])\ndef handle_author(author_id):\n author = Author.query.get_or_404(author_id)\n\n if request.method == 'GET':\n response = {\n \"first_name\": author.first_name,\n \"last_name\": author.last_name,\n \"date_of_birth\": author.date_of_birth,\n }\n return {\"message\": \"success\", \"author\": response}\n\n elif request.method == 'PUT':\n data = request.get_json()\n author.first_name = data['first_name']\n author.last_name = data['last_name']\n 
author.date_of_birth = data['date_of_birth']\n db.session.add(author)\n db.session.commmmit()\n return {\"message\": f\"Author successfully updated\"}\n\n elif request.method == 'DELETE':\n db.session.delete(author)\n db.session.commit()\n return {\"message\": f\"Author {author.first_name} {author.last_name} successfully deleted\"}\n\n\n@app.route('/categories', methods=['POST', 'GET'])\ndef handle_categories():\n if request.method == 'POST':\n if request.is_json:\n data = request.get_json()\n new_category = Category(name=data['name'])\n db.session.add(new_category)\n db.session.commit()\n return {\"message\": f\"Category {new_category.name} has been created successfully.\"}\n else:\n return {\"error\": f\"The request payload is not in JSON format.\"}\n\n elif request.method == 'GET':\n categories = Category.query.all()\n results = [\n {\n \"id\": category.id,\n \"name\": category.name\n } for category in categories\n ]\n return {\"count\": len(results), \"categories\": results}\n\n\n@app.route('/categories/', methods=['GET', 'PUT', 'DELETE'])\ndef handle_category(category_id):\n category = Category.query.get_or_404(category_id)\n\n if request.method == \"GET\":\n response = {\n \"name\": category.name\n }\n return {\"message\": \"success\", \"category\": response}\n\n elif request.method == 'PUT':\n data = request.get_json()\n category.first_name = data['name']\n db.session.add(category)\n db.session.commmmit()\n return {\"message\": f\"Category successfully updated\"}\n\n elif request.method == 'DELETE':\n db.session.delete(category)\n db.session.commit()\n return {\"message\": f\"Author {category.name} successfully deleted\"}\n\n\n@app.route('/books', methods=['POST', 'GET'])\ndef handle_books():\n if request.method == 'POST':\n if request.is_json:\n data = request.get_json()\n new_book = Book(name=data['name'],\n category_id=data['category_id'],\n author_id=data['author_id'],\n content=data['content'],\n released_at=data['released_at'], )\n db.session.add(new_book)\n db.session.commit()\n return {\"message\": f\"Category {new_book.name} has been created successfully.\"}\n else:\n return {\"error\": f\"The request payload is not in JSON format.\"}\n\n elif request.method == 'GET':\n books = Book.query.all()\n results = [\n {\n \"id\": book.id,\n \"name\": book.name,\n \"category_id\": book.category_id,\n \"author_id\": book.author_id,\n \"content\": book.content,\n \"released_at\": book.released_at,\n } for book in books\n ]\n return {\"count\": len(results), \"books\": results}\n\n\n@app.route('/books/', methods=['GET', 'PUT', 'DELETE'])\ndef handle_book(book_id):\n book = Book.query.get_or_404(book_id)\n\n if request.method == 'GET':\n response = {\n \"name\": book.name,\n \"category_id\": book.category_id,\n \"author_id\": book.author_id,\n \"content\": book.content,\n \"released_at\": book.released_at,\n }\n return {\"message\": \"success\", \"book\": response}\n\n elif request.method == 'PUT':\n data = request.get_json()\n book.name = data['name']\n book.category_id = data['category_id']\n book.author_id = data['author_id']\n book.content = data['content']\n book.released_at = data['released_at']\n db.session.add(book)\n db.session.commmmit()\n return {\"message\": f\"Book successfully updated\"}\n\n elif request.method == 'DELETE':\n db.session.delete(book)\n db.session.commit()\n return {\"message\": f\"Book {book.name} successfully deleted\"}\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", 
"file_size_in_byte": 7628, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 9, "usage_type": "call"}, {"api_name": "flask_migrate.Migrate", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "flask.request.is_json", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 70, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 106, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 106, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 115, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 115, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 123, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 123, "usage_type": "name"}, {"api_name": "flask.request.is_json", "line_number": 124, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 124, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 125, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 125, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 133, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 133, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 148, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 148, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 154, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 154, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 155, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 161, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 161, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 169, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 169, "usage_type": "name"}, {"api_name": "flask.request.is_json", "line_number": 170, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 170, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 171, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 171, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 183, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 183, "usage_type": "name"}, {"api_name": "flask.request.method", 
"line_number": 202, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 202, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 212, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 212, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 213, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 213, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 223, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 223, "usage_type": "name"}]} +{"seq_id": "4251412", "text": "\"\"\"The AWS Lambda emmaa-test-stats definition.\n\nThis file contains the function that will be run when Lambda is triggered. It\nmust be placed on s3, which can either be done manually (not recommended) or\nby running:\n\n$ python update_lambda.py test_stats.py emmaa-test-stats\n\nin this directory.\n\"\"\"\n\nimport boto3\nfrom datetime import datetime\n\nJOB_DEF = 'emmaa_jobdef'\nQUEUE = 'emmaa-models-update-test'\nPROJECT = 'aske'\nPURPOSE = 'update-emmaa-test-stats'\nBRANCH = 'origin/master'\n\n\ndef lambda_handler(event, context):\n \"\"\"Create a batch job to generate model statistics.\n\n This function is designed to be placed on lambda, taking the event and\n context arguments that are passed, and extracting the names of the\n uploaded (which includes changed) model or test definitions on s3.\n Lambda is configured to be triggered by any such changes, and will\n automatically run this script.\n\n Parameters\n ----------\n event : dict\n A dictionary containing metadata regarding the triggering event. In\n this case, we are expecting 'Records', each of which contains a record\n of a file that was added (or changed) on s3.\n context : object\n This is an object containing potentially useful context provided by\n Lambda. 
See the documentation cited above for details.\n\n    Returns\n    -------\n    ret : dict\n        A dict containing 'statusCode', with a valid HTTP status code, and any\n        other data to be returned to Lambda.\n    \"\"\"\n    batch = boto3.client('batch')\n    records = event['Records']\n    for rec in records:\n        try:\n            model_key = rec['s3']['object']['key']\n        except KeyError:\n            continue\n        model_name = model_key.split('/')[1]\n        test_corpus = model_key.split('/')[-1][8:-25]\n        if not test_corpus:\n            test_corpus = 'large_corpus_tests'\n        core_command = 'bash scripts/git_and_run.sh'\n        if BRANCH is not None:\n            core_command += f' --branch {BRANCH}'\n        core_command += (' python scripts/run_model_stats_from_s3.py'\n                         f' --model {model_name} --stats_mode tests'\n                         f' --tests {test_corpus}')\n        print(core_command)\n        cont_overrides = {\n            'command': ['python', '-m', 'indra.util.aws', 'run_in_batch',\n                        '--project', PROJECT, '--purpose', PURPOSE,\n                        core_command]\n        }\n        now_str = datetime.utcnow().strftime('%Y%m%d_%H%M%S')\n        ret = batch.submit_job(\n            jobName=f'{model_name}_{test_corpus}_stats_{now_str}',\n            jobQueue=QUEUE, jobDefinition=JOB_DEF,\n            containerOverrides=cont_overrides)\n        job_id = ret['jobId']\n\n    return {'statusCode': 200, 'result': 'SUCCESS', 'job_id': job_id}\n", "sub_path": "emmaa/aws_lambda_functions/test_stats.py", "file_name": "test_stats.py", "file_ext": "py", "file_size_in_byte": 2789, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "boto3.client", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "name"}]} +{"seq_id": "634847404", "text": "import re\n# re.match()\n# re.search()\n# re.findall()\n# re.split()\n# re.sub()\n# re.compile()\nimport requests\nimport json\n\n\n# Read from a file using the with statement\ndef read_file(filename):\n    with open(filename) as some_file:\n        return some_file.read()\n\n\n# Write to a file using the with statement\ndef write_to_file(filename, content, mode='w'):\n    with open(filename, mode=mode) as some_file:\n        some_file.write(content)\n\n\n# Test 1, with re\n# Using requests, download the contents of the page\n# reddit.com/r/python (any thread with 5 comments) and print\n# a couple of comments and their texts to the console\n# http://docs.python-requests.org/en/master/user/quickstart/\n\nif __name__ == '__main__':\n    try:\n        print(\"start app\")\n        some_texts = requests.get('https://habr.com/ru/',\n                                  stream=True)\n\n        print(\"HEADERS:\\n\", some_texts.headers)\n        print(\"STATUS_CODE:\\n\", some_texts.status_code)\n        # we cannot fetch the content here because the rate limit would need to be raised\n        #print(\"CONTENT:\\n\", some_texts.content)\n        #write_to_file(\"habr_html.txt\", str(some_texts.content))\n        post_list = re.findall(r'ul', str(some_texts.content))\n        print('RE match:\\n', post_list)\n    finally:\n        print(\"end app\")\n", "sub_path": "task_re.py", "file_name": "task_re.py", "file_ext": "py", "file_size_in_byte": 1490, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "414468254", "text": "#!/usr/bin/env python\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nif __name__ == '__main__':\n    x = np.linspace(-np.pi, np.pi, 256, endpoint=True) # type: np.array\n    C, S = np.cos(x), 
np.sin(x)\n plt.plot(x, C, color='red', linewidth='2.4', linestyle='--', label='cosine')\n plt.plot(x, S, color='blue', label='sine')\n plt.xlim(x.min() * 1.1, x.max() * 1.1)\n plt.ylim(C.min() * 1.1, C.max() * 1.1)\n plt.xticks(np.linspace(-np.pi, np.pi, 5, endpoint=True),\n [r'$-\\pi$', r'$-\\pi/2$', r'$0$', r'$\\pi/2$', r'$\\pi$'])\n axis = plt.gca()\n axis.spines['right'].set_color('none')\n axis.spines['top'].set_color('none')\n axis.xaxis.set_ticks_position('bottom')\n axis.spines['bottom'].set_position(('data', 0))\n axis.yaxis.set_ticks_position('left')\n axis.spines['left'].set_position(('data', 0))\n plt.legend(loc='upper left')\n t = 2 * np.pi / 3\n plt.plot([t, t], [0, np.cos(t)], color='red', linestyle='--')\n plt.scatter([t, ], [np.cos(t), ], 50, color='red')\n plt.annotate(r'$\\cos(\\frac{2\\pi}{3})=\\frac{1}{2}$', xy=(t, np.cos(t)), xycoords='data', xytext=(10, 30),\n textcoords='offset points', fontsize='16', color='red',\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3, rad=0.2', color='red'))\n plt.show()\n", "sub_path": "matplotlib_practice.py", "file_name": "matplotlib_practice.py", "file_ext": "py", "file_size_in_byte": 1321, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.linspace", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 13, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 23, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.cos", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.cos", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.cos", "line_number": 26, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "266168526", "text": "# time O(n), space O(1)\nfrom collections import defaultdict\nclass Solution(object):\n def lengthOfLongestSubstringTwoDistinct(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if not s:\n return 0\n window = defaultdict(int)\n res = 0\n left = 0\n for right, c in enumerate(s):\n window[c] += 1\n while len(window) > 2:\n window[s[left]] -= 1\n if window[s[left]] == 0:\n del window[s[left]]\n left += 1\n res = max(res, right-left+1)\n return res\n \n\n\n\"\"\"\nGiven a string s , find the length of the longest substring t that contains at most 2 distinct characters.\n\nExample 1:\n\nInput: \"eceba\"\nOutput: 3\nExplanation: t is \"ece\" which its length is 3.\nExample 2:\n\nInput: \"ccaabbb\"\nOutput: 5\nExplanation: t is \"aabbb\" which its length is 5.\n\"\"\"\n", "sub_path": "0159. Longest Substring with At Most Two Distinct Characters.py", "file_name": "0159. Longest Substring with At Most Two Distinct Characters.py", "file_ext": "py", "file_size_in_byte": 909, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "collections.defaultdict", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "197392982", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 3 15:51:26 2019\n\n@author: zclement\n\"\"\"\n\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nfrom kafka import SimpleProducer, KafkaClient\n\n\n\nkafka = KafkaClient(\"192.168.0.10:9092\")\n\nproducer = SimpleProducer(kafka)\n\n\nclass StdOutListener(StreamListener):\n def on_data(self, data):\n producer.send_messages(\"trump_v0\", data.encode('utf-8'))\n print (data)\n return True\n def on_error(self, status):\n print (status)\n\n\n\nl = StdOutListener()\nauth = OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\n\nstream = Stream(auth= auth, listener= l)\nstream.filter(track=\"trump\") #read tweempy Sream docks .filter", "sub_path": "create_kafka_topic.py", "file_name": "create_kafka_topic.py", "file_ext": "py", "file_size_in_byte": 784, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "kafka.KafkaClient", "line_number": 15, "usage_type": "call"}, {"api_name": "kafka.SimpleProducer", "line_number": 17, "usage_type": "call"}, {"api_name": "tweepy.streaming.StreamListener", "line_number": 20, "usage_type": "name"}, {"api_name": "tweepy.OAuthHandler", "line_number": 31, "usage_type": "call"}, {"api_name": "tweepy.Stream", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "368199634", "text": "import re\nimport shutil\nimport zipfile\n\nfrom mail_server.utils.log.remote_logger import Logger\n\ndef bill_material_extension_task(file_name, extension, content, mail_path):\n Logger.info(\"Bill material extension task call\")\n return [{'file_name': file_name, 'extension': extension}]\n\ndef zip_extension_task(file_name, extension, content, mail_path):\n\n Logger.info(\"Zip extension task call\")\n Logger.info(\"Zip detected in: \" + mail_path + file_name)\n bills = []\n\n with zipfile.ZipFile(mail_path + file_name, 'r') as zip_file:\n for file_member in zip_file.namelist():\n\n file_name = re.sub(r\"[/\\\\]\", '_', file_member)\n file_name_parts = file_name.split('.', 
1 )\n file_extension = \"default\"\n if len(file_name_parts) > 1:\n file_extension = file_name_parts[1].lower()\n\n if file_extension == \"xml\" or file_extension == \"pdf\":\n file_path = mail_path\n\n file_path += file_name\n file_content = zip_file.open(file_member)\n\n with open(file_path, \"wb\") as file_destiny:\n with file_content, file_destiny:\n shutil.copyfileobj(file_content, file_destiny)\n\n bill_info = FILE_EXTENSION_TASKS.get(file_extension, \"default\")(file_name, file_extension, \"\", mail_path)\n for info in bill_info:\n bills.append(info)\n\n return bills\n\nFILE_EXTENSION_TASKS = {\n 'xml': bill_material_extension_task,\n 'pdf': bill_material_extension_task,\n 'zip': zip_extension_task,\n 'default': lambda : []\n}\n\ndef extract_bills_from_attachments(mail_path, attachments):\n bills = {}\n Logger.info(\"Extracting bills from mail content\")\n for attach in attachments:\n file_attach_name = attach.file_name if not (attach.file_name is None) else attach.name\n if not (file_attach_name is None):\n decode_attach_content = attach.content.getvalue().decode(attach.content_transfer_encoding, 'strict')\n Logger.info(\"File name: {}\".format(file_attach_name))\n\n file_name_parts = file_attach_name.split('.', 1 )\n attach_extension = \"default\"\n if len(file_name_parts) > 1:\n attach_extension = file_name_parts[1].lower()\n Logger.info(\"Attach extension: {}\".format(attach_extension))\n\n bill_info = FILE_EXTENSION_TASKS.get(attach_extension, \"default\")(file_attach_name, attach_extension, decode_attach_content, mail_path)\n for info in bill_info:\n if info['extension'] in bills:\n bills[info['extension']].append(info['file_name'])\n else:\n bills[info['extension']] = [info['file_name']]\n\n return bills\n", "sub_path": "mail_server/utils/bill/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2839, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "mail_server.utils.log.remote_logger.Logger.info", "line_number": 8, "usage_type": "call"}, {"api_name": "mail_server.utils.log.remote_logger.Logger", "line_number": 8, "usage_type": "name"}, {"api_name": "mail_server.utils.log.remote_logger.Logger.info", "line_number": 13, "usage_type": "call"}, {"api_name": "mail_server.utils.log.remote_logger.Logger", "line_number": 13, "usage_type": "name"}, {"api_name": "mail_server.utils.log.remote_logger.Logger.info", "line_number": 14, "usage_type": "call"}, {"api_name": "mail_server.utils.log.remote_logger.Logger", "line_number": 14, "usage_type": "name"}, {"api_name": "zipfile.ZipFile", "line_number": 17, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 20, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 34, "usage_type": "call"}, {"api_name": "mail_server.utils.log.remote_logger.Logger.info", "line_number": 51, "usage_type": "call"}, {"api_name": "mail_server.utils.log.remote_logger.Logger", "line_number": 51, "usage_type": "name"}, {"api_name": "mail_server.utils.log.remote_logger.Logger.info", "line_number": 56, "usage_type": "call"}, {"api_name": "mail_server.utils.log.remote_logger.Logger", "line_number": 56, "usage_type": "name"}, {"api_name": "mail_server.utils.log.remote_logger.Logger.info", "line_number": 62, "usage_type": "call"}, {"api_name": "mail_server.utils.log.remote_logger.Logger", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "539925190", "text": "from django.shortcuts import render\nfrom .forms import InputForm\nfrom math import 
sin, tan\n\ndef As(b,d,fc,fy,Mu):\n phi=0.9\n Ru=Mu/(phi*b*d**2)*1000000\n rho_req=0.85*fc/fy*(1-(1-2*Ru/(0.85*fc))**0.5)\n rho_min=max(1.4/fy,0.25*fc**0.5/fy)\n beta_1=0.85-0.05*(fc-27.6)/6.9\n if beta_1<0.65:\n beta_1=0.65\n if beta_1>0.85:\n beta_1=0.85\n rho_max=0.43*0.85*fc/fy*beta_1\n rho=max(min(rho_min,1.33*rho_req),rho_req)\n if rho<=rho_max:\n return round(rho*b*d,0)\n else:\n return \"Increase beam section\"\ndef Vstotal(b,d,fc,fy,Vu):\n Vc=2*fc**0.5*b*d\n phi=0.85\n Vsreq=(Vu*1000-phi*Vc)/phi\n return round(Vsreq/(fy*d))\n\n\ndef compute(request):\n if request.method == 'POST':\n form=InputForm(data=request.POST,auto_id=\"%s\")\n if form.is_valid():\n fc = form.cleaned_data['fc']\n fy = form.cleaned_data['fy']\n gamma = form.cleaned_data['gamma']\n #d = form.cleaned_data['d']\n\n Cx = form.cleaned_data['Cx']\n Cy = form.cleaned_data['Cy']\n\n dp = form.cleaned_data['dp']\n S = form.cleaned_data['S']\n H = form.cleaned_data['H']\n c = form.cleaned_data['c']\n e = form.cleaned_data['e']\n t = form.cleaned_data['t']\n Pc = form.cleaned_data['Pc']\n Pt = form.cleaned_data['Pt']\n\n #Fxd = form.cleaned_data['Fxd']\n Fzd = form.cleaned_data['Fzd']\n Myd = form.cleaned_data['Myd']\n Mxd = form.cleaned_data['Mxd']\n #Fxl = form.cleaned_data['Fxl']\n Fzl = form.cleaned_data['Fzl']\n Myl = form.cleaned_data['Myl']\n Mxl = form.cleaned_data['Mxl']\n #Fxw = form.cleaned_data['Fxw']\n Fzw = form.cleaned_data['Fzw']\n Myw = form.cleaned_data['Myw']\n Mxw = form.cleaned_data['Mxw']\n #Fxe = form.cleaned_data['Fxe']\n Fze = form.cleaned_data['Fze']\n Mye = form.cleaned_data['Mye']\n Mxe = form.cleaned_data['Mxe']\n\n # Pile Cap Dimensions\n L = round(sin(1.047)*S+dp+2*e,1)\n B = S+dp+2*e\n w1 = dp+2*e\n w2 = dp+2*e\n Ldiag = round(((L-w1)**2+(B-w2)**2)**0.5,1)\n d = H - c\n So = S*(3.0)**0.5/2\n Y1 = 2*So/3\n Y2 = So/3\n ay = round(Y1-Cy/2+t,1)\n X = 0.5*(S-Cx)\n m = Y2-Cy/2\n ax = round(((X+m/tan(1.047))*sin(1.047))+t,1)\n radio_x = round(ax/d,3)\n radio_y = round(ay/d,3)\n Bs = 2*(Cx+Cy)\n A = dp/2+e\n\n\n # Pile capacity check\n Acap = L*B-sin(1.047)*Ldiag**2/2\n Gc = gamma*(H*0.001)*(Acap*0.000001)\n Gc = round(Gc,2)\n\n Fv1 = 1.0 * (Fzd + Gc) + 1.0 * Fzl\n My1 = Myd + Myl\n Mx1 = Mxd + Mxl\n x1 = 0.0; x2 = -0.5 * S; x3 = 0.5 *S\n y1 = 0.578 * S; y2 = -0.288 * S; y3 = -0.288 * S\n sum_xx = x1**2+x2**2+x3**2\n sum_yy = y1**2+y2**2+y3**2\n def Rp(F,Mx,My,n,x,y):\n return F/n+1000*Mx*y/sum_yy+1000*My*x/sum_xx\n Rp_1 = round(Rp(Fv1, Mx1, My1, 3, x1, y1))\n Rp_2 = round(Rp(Fv1, Mx1, My1, 3, x2, y2))\n Rp_3 = round(Rp(Fv1, Mx1, My1, 3, x3, y3))\n\n if Rp_1 <= Pc and Rp_1>=Pt and Rp_2 <= Pc and Rp_2>=Pt and Rp_3 <= Pc and Rp_3>=Pt:\n VS1 = 'orange'\n result1 = 'Satisfied'\n else:\n VS1 = \"red\"\n result1 = \"Failed\"\n # Pile reaction\n #Cx1 = 1.4 * Fxd\n Cz1 = 1.4 * Fzd\n Czg1 = 1.4 * (Fzd + Gc)\n Cmy1 = 1.4 * Myd\n Cmx1 = 1.4 * Mxd\n\n #Cx2 = 1.2 * Fxd + 1.6 * Fxl\n Cz2 = 1.2 * Fzd + 1.6 * Fzl\n Czg2 = 1.2 * (Fzd + Gc) + 1.6 * Fzl\n Cmy2 = 1.2 * Myd + 1.6 * Myl\n Cmx2 = 1.2 * Mxd + 1.6 * Mxl\n\n #Cx3 = 1.2 * Fxd + 1.6 * Fxw + 1.0 * Fxl\n Cz3 = 1.2 * Fzd + 1.6 * Fzw + 1.0 * Fzl\n Czg3 = 1.2 * (Fzd + Gc) + 1.6 * Fzw + 1.0 * Fzl\n Cmy3 = 1.2 * Myd + 1.6 * Myw + 1.0 * Myl\n Cmx3 = 1.2 * Mxd + 1.6 * Mxw + 1.0 * Mxl\n\n #Cx4 = 1.2 * Fxd + 1.0 * Fxe + 1.0 * Fxl\n Cz4 = 1.2 * Fzd + 1.0 * Fze + 1.0 * Fzl\n Czg4 = 1.2 * (Fzd + Gc) + 1.0 * Fze + 1.0 * Fzl\n Cmy4 = 1.2 * Myd + 1.0 * Mye + 1.0 * Myl\n Cmx4 = 1.2 * Mxd + 1.0 * Mxe + 1.0 * Mxl\n\n #Cx5 = 0.9 * Fxd + 1.6 * Fxw\n Cz5 = 0.9 * Fzd + 1.6 * Fzw\n 
Czg5 = 0.9 * (Fzd + Gc) + 1.6 * Fzw\n Cmy5 = 0.9 * Myd + 1.6 * Myw\n Cmx5 = 0.9 * Mxd + 1.6 * Mxw\n\n #Cx6 = 0.9 * Fxd + 1.6 * Fxe\n Cz6 = 0.9 * Fzd + 1.6 * Fze\n Czg6 = 0.9 * (Fzd + Gc) + 1.6 * Fze\n Cmy6 = 0.9 * Myd + 1.6 * Mye\n Cmx6 = 0.9 * Mxd + 1.6 * Mxe\n\n Rp11 = round(Rp(Czg1, Cmx1, Cmy1, 3, x1, y1), 1)\n Rp21 = round(Rp(Czg1, Cmx1, Cmy1, 3, x2, y2), 1)\n Rp31 = round(Rp(Czg1, Cmx1, Cmy1, 3, x3, y3), 1)\n Rp12 = round(Rp(Czg2, Cmx2, Cmy2, 3, x1, y1), 1)\n Rp22 = round(Rp(Czg2, Cmx2, Cmy2, 3, x2, y2), 1)\n Rp32 = round(Rp(Czg2, Cmx2, Cmy2, 3, x3, y3), 1)\n Rp13 = round(Rp(Czg3, Cmx3, Cmy3, 3, x1, y1), 1)\n Rp23 = round(Rp(Czg3, Cmx3, Cmy3, 3, x2, y2), 1)\n Rp33 = round(Rp(Czg3, Cmx3, Cmy3, 3, x3, y3), 1)\n Rp14 = round(Rp(Czg4, Cmx4, Cmy4, 3, x1, y1), 1)\n Rp24 = round(Rp(Czg4, Cmx4, Cmy4, 3, x2, y2), 1)\n Rp34 = round(Rp(Czg4, Cmx4, Cmy4, 3, x3, y3), 1)\n Rp15 = round(Rp(Czg5, Cmx5, Cmy5, 3, x1, y1), 1)\n Rp25 = round(Rp(Czg5, Cmx5, Cmy5, 3, x2, y2), 1)\n Rp35 = round(Rp(Czg5, Cmx5, Cmy5, 3, x3, y3), 1)\n Rp16 = round(Rp(Czg6, Cmx6, Cmy6, 3, x1, y1), 1)\n Rp26 = round(Rp(Czg6, Cmx6, Cmy6, 3, x2, y2), 1)\n Rp36 = round(Rp(Czg6, Cmx6, Cmy6, 3, x3, y3), 1)\n\n # Two way shear design\n phiVc = round(0.85 *d/max(ax,ay) * (1+2*d/(Cx+Cy)) * (0.17 * (fc) ** 0.5 * Bs * d) * 0.001, 2)\n\n VV1 = '%.2f' % (Czg1 / phiVc)\n if (Czg1 / phiVc) < 1:\n vs1 = \"Pass\"\n co_vs1 = \"orange\"\n else:\n vs1 = \"Fail\"\n co_vs1 = \"red\"\n VV2 = '%.2f' % (Czg2 / phiVc)\n if (Czg2 / phiVc) < 1:\n vs2 = \"Pass\"\n co_vs2 = \"orange\"\n else:\n vs2 = \"Fail\"\n co_vs2 = \"red\"\n VV3 = '%.2f' % (Czg3 / phiVc)\n if (Czg3 / phiVc) < 1:\n vs3 = \"Pass\"\n co_vs3 = \"orange\"\n else:\n vs3 = \"Fail\"\n co_vs3 = \"red\"\n VV4 = '%.2f' % (Czg4 / phiVc)\n if (Czg4 / phiVc) < 1:\n vs4 = \"Pass\"\n co_vs4 = \"orange\"\n else:\n vs4 = \"Fail\"\n co_vs4 = \"red\"\n VV5 = '%.2f' % (Czg5 / phiVc)\n if (Czg5 / phiVc) < 1:\n vs5 = \"Pass\"\n co_vs5 = \"orange\"\n else:\n vs5 = \"Fail\"\n co_vs5 = \"red\"\n VV6 = '%.2f' % (Czg6 / phiVc)\n if (Czg6 / phiVc) < 1:\n vs6 = \"Pass\"\n co_vs6 = \"orange\"\n else:\n vs6 = \"Fail\"\n co_vs6 = \"red\"\n\n # One Way Shear\n if radio_x >= 1:\n Lcx = (0.5 * (X + A + (A + m) / tan(1.047)-d/sin(1.047)) * tan(1.047) + A) / sin(1.047)\n phiVc_x = round(0.85 * (0.17 * (fc) ** 0.5 * Lcx * d) * 0.001, 1)\n check1 = \"Concrete shear strength for one way wide beam action:\"\n check2 = \"$\\phi V_c=0.85\\cdot(0.17\\sqrt{f'_c} \\cdot L_{cx}\\cdot d)=$\"+str(phiVc_x)+\"kN\"\n check3 = \"$L_{cx}=(0.5(X+A+(A+m)ctg(\\pi/3)-d/sin(\\pi/3))*tan(\\pi/3)+A)/sin(\\pi/3)=$\"+str(round(Lcx))+\"mm\"\n else:\n kx = min(round(d/ax * (3.5-2.5*ax/d),2),10)\n Lcx = (0.5 * (X + A + (A + m) / tan(1.047)) * tan(1.047) + A) / sin(1.047)\n phiVc_x = round(0.85 * kx * (0.17 * (fc) ** 0.5 * Lcx * d) * 0.001, 1)\n check1 = \"Concrete shear strength for one way deep beam action:\"\n check2 = \"$\\phi V_c=0.85k\\cdot(0.17\\sqrt{f'_c} \\cdot L_{cx}\\cdot d)=$\"+str(phiVc_x)+\"kN;\"+\"$\\quad k=\\cfrac{d}{a}\\cdot (3.5-2.5 \\cfrac {a}{d})$\"\n check3 = \"$L_{cx}=(0.5(X+A+(A+m)ctg(\\pi/3))*tan(\\pi/3)+A)/sin(\\pi/3)=$\"+str(round(Lcx))+\"mm\"\n Vux1 = max(Rp21, Rp31)\n Vux2 = max(Rp22, Rp32)\n Vux3 = max(Rp23, Rp33)\n Vux4 = max(Rp24, Rp34)\n Vux5 = max(Rp25, Rp35)\n Vux6 = max(Rp26, Rp36)\n V1 = '%.2f' % (Vux1 / phiVc_x)\n if (Vux1 / phiVc_x) < 1:\n s1 = \"Pass\"\n co_s1 = \"orange\"\n else:\n s1 = \"Fail\"\n co_s1 = \"red\"\n V2 = '%.2f' % (Vux2 / phiVc_x)\n if (Vux2 / phiVc_x) < 1:\n s2 = \"Pass\"\n co_s2 = \"orange\"\n 
else:\n s2 = \"Fail\"\n co_s2 = \"red\"\n V3 = '%.2f' % (Vux3 / phiVc_x)\n if (Vux3 / phiVc_x) < 1:\n s3 = \"Pass\"\n co_s3 = \"orange\"\n else:\n s3 = \"Fail\"\n co_s3 = \"red\"\n V4 = '%.2f' % (Vux4 / phiVc_x)\n if (Vux4 / phiVc_x) < 1:\n s4 = \"Pass\"\n co_s4 = \"orange\"\n else:\n s4 = \"Fail\"\n co_s4 = \"red\"\n V5 = '%.2f' % (Vux5 / phiVc_x)\n if (Vux5 / phiVc_x) < 1:\n s5 = \"Pass\"\n co_s5 = \"orange\"\n else:\n s5 = \"Fail\"\n co_s5 = \"red\"\n V6 = '%.2f' % (Vux6 / phiVc_x)\n if (Vux6 / phiVc_x) < 1:\n s6 = \"Pass\"\n co_s6 = \"orange\"\n else:\n s6 = \"Fail\"\n co_s6 = \"red\"\n if radio_y >= 1:\n Lcy = 2 * A + 2 * ((ay - t - d) + A) / tan(1.047)\n phiVc_y = round(0.85 * (0.17 * (fc) ** 0.5 * Lcy * d) * 0.001, 1)\n check4 = \"Concrete shear strength for one way wide beam action:\"\n check5 = \"$\\phi V_c=0.85\\cdot(0.17\\sqrt{f'_c} \\cdot L_{cx}\\cdot d)=$\"+str(phiVc_y)+\"kN\"\n check6 = \"$L_{cx}=2A+2((a_y-t-d)+A)/tan(\\pi/3)=$\"+str(round(Lcy))+\"mm\"\n else:\n ky = min(round(d/ax * (3.5-2.5*ax/d),2),10)\n Lcy = 2 * A + 2 * ((ay - t - d) + A) / tan(1.047)\n phiVc_y = round(0.85 * ky * (0.17 * (fc) ** 0.5 * Lcy * d) * 0.001, 1)\n check4 = \"Concrete shear strength for one way deep beam action:\"\n check5 = \"$\\phi V_c=0.85k\\cdot(0.17\\sqrt{f'_c} \\cdot L_{cx}\\cdot d)=$\"+str(phiVc_y)+\"kN;\"+\"$\\quad k=\\cfrac{d}{a}\\cdot (3.5-2.5 \\cfrac {a}{d})$\"\n check6 = \"$L_{cx}=2A+2((a_y-t-d)+A)/tan(\\pi/3)==$\"+str(round(Lcy))+\"mm\"\n VVV1 = '%.2f' % (Rp11 / phiVc_y)\n if (Rp11 / phiVc_y) < 1:\n vvs1 = \"Pass\"\n co_vvs1 = \"orange\"\n else:\n vvs1 = \"Fail\"\n co_vvs1 = \"red\"\n VVV2 = '%.2f' % (Rp12 / phiVc_y)\n if (Rp12 / phiVc_y) < 1:\n vvs2 = \"Pass\"\n co_vvs2 = \"orange\"\n else:\n vvs2 = \"Fail\"\n co_vvs2 = \"red\"\n VVV3 = '%.2f' % (Rp13 / phiVc_y)\n if (Rp13 / phiVc_y) < 1:\n vvs3 = \"Pass\"\n co_vvs3 = \"orange\"\n else:\n vvs3 = \"Fail\"\n co_vvs3 = \"red\"\n VVV4 = '%.2f' % (Rp14 / phiVc_y)\n if (Rp14 / phiVc_y) < 1:\n vvs4 = \"Pass\"\n co_vvs4 = \"orange\"\n else:\n vvs4 = \"Fail\"\n co_vvs4 = \"red\"\n VVV5 = '%.2f' % (Rp15 / phiVc_y)\n if (Rp15 / phiVc_y) < 1:\n vvs5 = \"Pass\"\n co_vvs5 = \"orange\"\n else:\n vvs5 = \"Fail\"\n co_vvs5 = \"red\"\n VVV6 = '%.2f' % (Rp16 / phiVc_y)\n if (Rp16 / phiVc_y) < 1:\n vvs6 = \"Pass\"\n co_vvs6 = \"orange\"\n else:\n vvs6 = \"Fail\"\n co_vvs6 = \"red\"\n\n # Bending Reinforcement\n def As(M,B):\n rho_min = 0.0018\n As_min=rho_min*B*H\n R = M * 10 ** 6 / (0.9 * B * d ** 2)\n rho_req = 0.85 * fc / fy * (1 - (1 - 2 * R / (0.85 * fc)) ** 0.5)\n As_req = rho_req * B * (H-c)\n return max(As_min,As_req)\n\n BX = round(A * (1.5 + 0.5 * tan(1.047))/sin(1.047), 1)\n Mux1 = round(max(Rp21,Rp31) * ax * 0.001, 1)\n Mux2 = round(max(Rp22,Rp32) * ax * 0.001, 1)\n Mux3 = round(max(Rp23,Rp33) * ax * 0.001, 1)\n Mux4 = round(max(Rp24,Rp34) * ax * 0.001, 1)\n Mux5 = round(max(Rp25,Rp35) * ax * 0.001, 1)\n Mux6 = round(max(Rp26,Rp36) * ax * 0.001, 1)\n\n Asx1 = '%.0f' % float(As(Mux1, BX))\n Asx2 = '%.0f' % float(As(Mux2, BX))\n Asx3 = '%.0f' % float(As(Mux3, BX))\n Asx4 = '%.0f' % float(As(Mux4, BX))\n Asx5 = '%.0f' % float(As(Mux5, BX))\n Asx6 = '%.0f' % float(As(Mux6, BX))\n\n BY = round(2*A*(1+1/tan(1.047)),1)\n Muy1 = round(Rp11*ay*0.001,1)\n Muy2 = round(Rp12 * ay * 0.001, 1)\n Muy3 = round(Rp13 * ay * 0.001, 1)\n Muy4 = round(Rp14 * ay * 0.001, 1)\n Muy5 = round(Rp15 * ay * 0.001, 1)\n Muy6 = round(Rp16 * ay * 0.001, 1)\n\n\n Asy1 = '%.0f' % float(As(Muy1,BY))\n Asy2 = '%.0f' % float(As(Muy2,BY))\n Asy3 = '%.0f' % 
float(As(Muy3,BY))\n Asy4 = '%.0f' % float(As(Muy4,BY))\n Asy5 = '%.0f' % float(As(Muy5,BY))\n Asy6 = '%.0f' % float(As(Muy6,BY))\n\n Czg1 = round(Czg1,1)\n Czg2 = round(Czg2, 1)\n Czg3 = round(Czg3, 1)\n Czg4 = round(Czg4, 1)\n Czg5 = round(Czg5, 1)\n Czg6 = round(Czg6, 1)\n\n\n return render(request, 'pile_cap_3.html', {'form': form,'Gc':Gc,'Fv1':Fv1,'My1':My1,'Mx1':Mx1,\n 'L':L,'B':B,'w1':w1,'w2':w2,'Ldiag':Ldiag,'d':d,'ax':ax,'ay':ay,'radio_x':radio_x,'radio_y':radio_y,\n 'Rp_1':Rp_1,'Rp_2':Rp_2,'Rp_3':Rp_3,'VS1':VS1,'result1':result1,\n 'Rp11':Rp11,'Rp21':Rp21,'Rp31':Rp31,'Rp12':Rp12,'Rp22':Rp22,'Rp32':Rp32,\n 'Rp13':Rp13,'Rp23':Rp23,'Rp33':Rp33,'Rp14':Rp14,'Rp24':Rp24,'Rp34':Rp34,\n 'Rp15': Rp15, 'Rp25': Rp25, 'Rp35': Rp35, 'Rp16': Rp16,\n 'Rp26': Rp26, 'Rp36': Rp36,\n 'phiVc':phiVc,'Czg1':Czg1,'Czg2':Czg2,'Czg3':Czg3,'Czg4':Czg4,'Czg5':Czg5,'Czg6':Czg6,\n 'VV1': VV1, 'VV2': VV2, 'VV3': VV3, 'VV4': VV4, 'VV5': VV5,\n 'VV6': VV6, 'vs1': vs1, 'vs2': vs2, 'vs3': vs3, 'vs4': vs4,\n 'vs5': vs5, 'vs6': vs6,\n 'co_vs1': co_vs1, 'co_vs2': co_vs2, 'co_vs3': co_vs3,\n 'co_vs4': co_vs4, 'co_vs5': co_vs5, 'co_vs6': co_vs6,\n 'check1':check1,'check2':check2,'check3':check3,'check4':check4,'check5':check5,'check6':check6,\n 'Vux1':Vux1,'Vux2':Vux2,'Vux3':Vux3,'Vux4':Vux4,'Vux5':Vux5,'Vux6':Vux6,\n 'V1': V1, 'V2': V2, 'V3': V3, 'V4': V4, 'V5': V5,\n 'V6': V6, 's1': s1, 's2': s2, 's3': s3, 's4': s4,\n 's5': s5, 's6': s6,\n 'co_s1': co_s1, 'co_s2': co_s2, 'co_s3': co_s3,\n 'co_s4': co_s4, 'co_s5': co_s5, 'co_s6': co_s6,\n 'VVV1': VVV1, 'VVV2': VVV2, 'VVV3': VVV3, 'VVV4': VVV4, 'VVV5': VVV5,\n 'VVV6': VVV6, 'vvs1': vvs1, 'vvs2': vvs2, 'vvs3': vvs3, 'vvs4': vvs4,\n 'vvs5': vvs5, 'vvs6': vvs6,\n 'co_vvs1': co_vvs1, 'co_vvs2': co_vvs2, 'co_vvs3': co_vvs3,\n 'co_vvs4': co_vvs4, 'co_vvs5': co_vvs5, 'co_vvs6': co_vvs6,\n 'BX':BX,'BY':BY,\n 'Mux1': Mux1, 'Mux2': Mux2, 'Mux3': Mux3, 'Mux4': Mux4,\n 'Mux5': Mux5, 'Mux6': Mux6,\n 'Asx1': Asx1, 'Asx2': Asx2, 'Asx3': Asx3, 'Asx4': Asx4,\n 'Asx5': Asx5, 'Asx6': Asx6,\n 'Muy1':Muy1,'Muy2':Muy2,'Muy3':Muy3,'Muy4':Muy4,'Muy5':Muy5,'Muy6':Muy6,\n 'Asy1':Asy1,'Asy2':Asy2,'Asy3':Asy3,'Asy4':Asy4,'Asy5':Asy5,'Asy6':Asy6,})\n else:\n form = InputForm(auto_id=\"%s\")\n return render(request,'pile_cap_3.html',{'form':form})\n", "sub_path": "pile_cap_3/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 18091, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "forms.InputForm", "line_number": 30, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 67, "usage_type": "call"}, {"api_name": "math.tan", "line_number": 79, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 79, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 87, "usage_type": "call"}, {"api_name": "math.tan", "line_number": 214, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 214, "usage_type": "call"}, {"api_name": "math.tan", "line_number": 221, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 221, "usage_type": "call"}, {"api_name": "math.tan", "line_number": 275, "usage_type": "call"}, {"api_name": "math.tan", "line_number": 282, "usage_type": "call"}, {"api_name": "math.tan", "line_number": 339, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 339, "usage_type": "call"}, {"api_name": "math.tan", "line_number": 354, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 378, "usage_type": "call"}, {"api_name": 
"forms.InputForm", "line_number": 411, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 412, "usage_type": "call"}]} +{"seq_id": "115394287", "text": "import itertools\n\nimport math\n\nfrom deap import tools\n\nimport numpy as np\n\nfrom .base_strategy import BaseEvolutionStrategy\nfrom .individual import Individual\nfrom .mfitness import MultidimensionalFitness\n\n\nclass DynamicDifferentialEvolution(BaseEvolutionStrategy):\n def __init__(self, population_size=10, population_regular=4,\n population_brownian=2, cr=0.6, f=0.4, bounds=(-1.0, 1.0),\n **kwargs):\n super(DynamicDifferentialEvolution, self).__init__(**kwargs)\n\n self.population_size = population_size\n self.cr = cr\n self.f = f\n self.bounds = bounds\n self.population_regular = population_regular\n self.population_brownian = population_brownian\n\n def best_individual(self):\n return self.hall_of_fame[0]\n\n def generate_brow_ind_with_fitness(self, best, sigma=0.3):\n fitness_len = len(self.fitness)\n ind = Individual(np.random.normal(x, sigma) for x in best)\n ind.fitness = MultidimensionalFitness(fitness_len)\n return ind\n\n def fit(self, X, y):\n # Differential evolution parameters\n individual_size = self.n_dim\n # Should be equal to the number of peaks\n population_size = self.population_size\n\n regular = self.population_regular\n brownian = self.population_brownian\n bounds = self.bounds\n\n toolbox = self.create_toolbox(X, y)\n toolbox.register(\"attr_float\", np.random.uniform, -1, 1)\n toolbox.register(\n \"individual\",\n self.generate_individual_with_fitness,\n toolbox.attr_float,\n individual_size)\n toolbox.register(\n \"brownian_individual\",\n self.generate_brow_ind_with_fitness)\n toolbox.register(\n \"population\",\n tools.initRepeat,\n list,\n toolbox.individual)\n\n toolbox.register(\"select\", np.random.choice, size=4)\n toolbox.register(\"best\", tools.selBest, k=1)\n\n self.hall_of_fame = tools.HallOfFame(1)\n stats = self._build_stats()\n\n self.logbook = tools.Logbook()\n self.logbook.header = ['gen', 'nevals'] \\\n + (stats.fields if stats else [])\n\n # Initialize populations\n populations = [toolbox.population(n=regular + brownian)\n for _ in range(population_size)]\n\n # Evaluate the individuals\n for idx, subpop in enumerate(populations):\n fitness = toolbox.map(toolbox.evaluate, subpop)\n for ind, fit in zip(subpop, fitness):\n ind.fitness.values = fit\n\n if stats:\n record = stats.compile(itertools.chain(*populations))\n self.logbook.record(gen=0, evals=len(populations), **record)\n if self.verbose:\n print(self.logbook.stream)\n\n for g in range(1, self.n_gen):\n # Detect a change and invalidate fitness if necessary\n bests = [toolbox.best(subpop)[0] for subpop in populations]\n if any(b.fitness.values != toolbox.evaluate(b) for b in bests):\n for individual in itertools.chain(*populations):\n del individual.fitness.values\n\n # Apply exclusion\n rexcl = (bounds[1] - bounds[0]) \\\n / (2 * population_size**(1.0 / individual_size))\n for i, j in itertools.combinations(range(population_size), 2):\n if bests[i].fitness.valid and bests[j].fitness.valid:\n d = sum((bests[i][k] - bests[j][k])**2\n for k in range(individual_size))\n d = math.sqrt(d)\n\n if d < rexcl:\n if bests[i].fitness < bests[j].fitness:\n k = i\n else:\n k = j\n\n populations[k] = toolbox.population(\n n=regular + brownian)\n\n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in itertools.chain(*populations)\n if not ind.fitness.valid]\n fitness = 
toolbox.map(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitness):\n ind.fitness.values = fit\n\n all_pops = list(itertools.chain(*populations))\n self.hall_of_fame.update(all_pops)\n\n if stats:\n record = stats.compile(all_pops)\n self.logbook.record(gen=g, evals=len(populations), **record)\n if self.verbose:\n print(self.logbook.stream)\n\n # Evolve the sub-populations\n for idx, subpop in enumerate(populations):\n newpop = []\n xbest, = toolbox.best(subpop)\n # Apply regular DE to the first part of the population\n for individual in subpop[:regular]:\n idxs = np.random.choice(len(subpop), size=4)\n x1, x2, x3, x4 = subpop[idxs[0]], subpop[idxs[1]], \\\n subpop[idxs[2]], subpop[idxs[3]]\n offspring = toolbox.clone(individual)\n index = np.random.randint(individual_size)\n for i, _ in enumerate(individual):\n if i == index or np.random.random() < self.cr:\n offspring[i] = xbest[i] + self.f \\\n * (x1[i] + x2[i] - x3[i] - x4[i])\n offspring.fitness.values = toolbox.evaluate(offspring)\n if offspring.fitness >= individual.fitness:\n newpop.append(offspring)\n else:\n newpop.append(individual)\n\n # Apply Brownian to the last part of the population\n newpop.extend(toolbox.brownian_individual(xbest)\n for _ in range(brownian))\n\n # Evaluate the brownian individuals\n for individual in newpop[-brownian:]:\n individual.fitness.value = toolbox.evaluate(individual)\n\n # Replace the population\n populations[idx] = newpop\n\n self.cleanup()\n return self\n", "sub_path": "metric_learn/evolution/strategy/dde.py", "file_name": "dde.py", "file_ext": "py", "file_size_in_byte": 6276, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "base_strategy.BaseEvolutionStrategy", "line_number": 14, "usage_type": "name"}, {"api_name": "individual.Individual", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 32, "usage_type": "attribute"}, {"api_name": "mfitness.MultidimensionalFitness", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 47, "usage_type": "attribute"}, {"api_name": "deap.tools.initRepeat", "line_number": 58, "usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 58, "usage_type": "name"}, {"api_name": "numpy.random", "line_number": 62, "usage_type": "attribute"}, {"api_name": "deap.tools.selBest", "line_number": 63, "usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 63, "usage_type": "name"}, {"api_name": "deap.tools.HallOfFame", "line_number": 65, "usage_type": "call"}, {"api_name": "deap.tools", "line_number": 65, "usage_type": "name"}, {"api_name": "deap.tools.Logbook", "line_number": 68, "usage_type": "call"}, {"api_name": "deap.tools", "line_number": 68, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 83, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 92, "usage_type": "call"}, {"api_name": "individual.fitness", "line_number": 93, "usage_type": "attribute"}, {"api_name": "itertools.combinations", "line_number": 98, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 102, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 114, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 
135, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 141, "usage_type": "attribute"}, {"api_name": "individual.fitness", "line_number": 145, "usage_type": "attribute"}, {"api_name": "individual.fitness", "line_number": 156, "usage_type": "attribute"}]} +{"seq_id": "468055957", "text": "import tensorflow as tf\nimport numpy as np\nfrom collections import OrderedDict\nimport pandas as pd\n\nfile=\"glove.6B.50d.txt\"\n\ndf=pd.read_csv(file,sep=\" \",quoting=3, header=None, index_col=0)\nglove = {key: val.values for key, val in df.T.items()}\n\nwords=list(glove.keys())\nemb=np.array(list(glove.values()))\n\ninput_str = \"like the country\"\nword_to_idx = OrderedDict({w:words.index(w) for w in input_str.split() if w in words})\n\ntf.InteractiveSession()\ntf_embedding = tf.constant(emb, dtype=tf.float32)\ntf.nn.embedding_lookup(tf_embedding, list(word_to_idx.values())).eval()\n\n\n", "sub_path": "QuestionAnswering/MohamedUvaiz/glove_embedding.py", "file_name": "glove_embedding.py", "file_ext": "py", "file_size_in_byte": 574, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.InteractiveSession", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.embedding_lookup", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 19, "usage_type": "attribute"}]} +{"seq_id": "352468832", "text": "import data\nimport pymysql\nfrom connection import ConnectionWrapper\n\n\ndef runQuery(query, args, fetch=False, db=data.DATABASE):\n cw = ConnectionWrapper(db)\n try:\n cursor = cw.getCursor()\n cursor.execute(query, args)\n\n if fetch:\n result = cursor.fetchall()\n else:\n result = None\n cw.commit()\n return result\n\n finally:\n cw.close()\n\n\ndef createTestingDB():\n query = \"CREATE DATABASE \" + data.DATABASE + \";\"\n runQuery(query, None, False, None)\n\n\ndef dropTestingDB():\n query = \"DROP DATABASE IF EXISTS \" + data.DATABASE + \";\"\n runQuery(query, None, False, None)\n\n\ndef createTables(create_script_name):\n with open(create_script_name, 'r') as myfile:\n query = myfile.read()\n\n # data processing\n query = query.replace(\"\\t\", \"\")\n query = query.replace(\"\\n\", \"\")\n queries = query.split(\";\")\n\n for i in range(len(queries) - 1): # Skip the last comments\n runQuery(queries[i] + \";\", None, False)\n\n\n \ndef searchInfoByPartialConditions(tableName, info_format, cons_format=None, keyword=None): # quotes\n query = \"\"\n if cons_format == None and keyword == None:\n query = (\"SELECT \" + info_format + \" FROM \" + tableName + \";\")\n else:\n kword = ('%s'%keyword)\n cons_format = (cons_format + \" like '%\" + kword + \"%'\")\n query = (\n \"SELECT \" + info_format + \" FROM \" + tableName + \" WHERE \" +\n cons_format + \";\"\n )\n return runQuery(query, None, True)\n\n\ndef selectInfoByConditions(tableName, 
info_format, cons_format=None, vals=None): # quotes\n query = \"\"\n if cons_format == None and vals == None:\n query = (\"SELECT \" + info_format + \" FROM \" + tableName + \";\")\n else:\n query = (\n \"SELECT \" + info_format + \" FROM \" + tableName + \" WHERE \" +\n cons_format + \";\"\n )\n query = query % vals\n return runQuery(query, None, True)\n\n\ndef searchInfoByConditions(tableName, info_format, cons_format=None, vals=None): # quotes\n query = \"\"\n if cons_format == None and vals == None:\n query = (\"SELECT \" + info_format + \" FROM \" + tableName + \";\")\n else:\n query = (\n \"SELECT \" + info_format + \" FROM \" + tableName + \" WHERE \" +\n cons_format + \";\"\n )\n query = query % vals\n return runQuery(query, None, True)\n\n\ndef selectAllByConditions(tableName, cons_format=None, vals=None):\n return selectInfoByConditions(tableName, \"*\", cons_format, vals)\n\n\ndef getNumOfRecordByConditions(tableName, cons_format=None, vals=None):\n return len(selectAllByConditions(tableName, cons_format, vals))\n\n\ndef checkRecordExistByConditions(tableName, cons_format=None, vals=None):\n return (getNumOfRecordByConditions(tableName, cons_format, vals) > 0)\n\n\ndef deleteRecordByCondition(tableName, cons_format, vals):\n query = (\"DELETE FROM \" + tableName + \" WHERE \" + cons_format + \";\")\n query = query % vals\n runQuery(query, None, False)\n return True\n\n\ndef insertRecordTo(tableName, cols, vals, vals_format):\n query = \"INSERT INTO \" + tableName + cols + \" VALUES \" + vals_format + \";\"\n runQuery(query, vals, False)\n return True\n\n\ndef insertRecordForcibly(tableName, user_id, info):\n info = pymysql.escape_string(info)\n query = 'INSERT INTO %s (user_id, info) VALUES (\"%s\", \"%s\") ON DUPLICATE KEY UPDATE info=\"%s\";' % (\n tableName, user_id, info, info)\n runQuery(query, None, False)\n return True\n\n\ndef updateRecordByConditions(tableName, info_format, cons_format, vals):\n query = (\n \"UPDATE \" + tableName + \" SET \" + info_format + \" WHERE \"\n + cons_format + \";\"\n )\n query = query % vals\n runQuery(query, None, False)\n return True\n\n\ndef getValsByKey(result_list, key): # return a list\n vals = []\n if (len(result_list) <= 0) or (key not in result_list[0]):\n return vals\n\n for i in range(len(result_list)):\n vals.append(result_list[i][key])\n\n return vals\n", "sub_path": "server/db/dbhelper/queries.py", "file_name": "queries.py", "file_ext": "py", "file_size_in_byte": 4013, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "data.DATABASE", "line_number": 6, "usage_type": "attribute"}, {"api_name": "connection.ConnectionWrapper", "line_number": 7, "usage_type": "call"}, {"api_name": "data.DATABASE", "line_number": 24, "usage_type": "attribute"}, {"api_name": "data.DATABASE", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pymysql.escape_string", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "358301458", "text": "import dj_database_url\nfrom .settings import *\n\nDATABASES = {\n 'default': dj_database_url.config(),\n}\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nALLOWED_HOSTS = ['*']\nDEBUG = True", "sub_path": "mysite/production_settings.py", "file_name": "production_settings.py", "file_ext": "py", "file_size_in_byte": 253, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": 
"dj_database_url.config", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "396190021", "text": "from binascii import hexlify\nfrom utils import TailableProc\nfrom ephemeral_port_reserve import reserve\n\nimport src.lpd.python_binding.common_pb2 as common_pb2\nimport src.lpd.python_binding.channel_pb2 as channel_pb2\nimport src.lpd.python_binding.payment_pb2 as payment_pb2\nimport src.lpd.python_binding.routing_pb2 as routing_pb2\nfrom src.lpd.python_binding.channel_pb2_grpc import ChannelServiceStub\nfrom src.lpd.python_binding.payment_pb2_grpc import PaymentServiceStub\nfrom src.lpd.python_binding.routing_pb2_grpc import RoutingServiceStub\n\nimport grpc\nimport logging\nimport os\nimport time\nimport codecs\n\n\nclass LpdD(TailableProc):\n\n def __init__(self, lightning_dir, bitcoind, port):\n super().__init__(lightning_dir, 'lpd({})'.format(port))\n self.lightning_dir = lightning_dir\n self.bitcoind = bitcoind\n self.port = port\n self.rpc_port = str(reserve())\n self.prefix = 'lpd'\n\n self.cmd_line = [\n 'bin/lpd',\n '--rpclisten=127.0.0.1:{}'.format(self.rpc_port),\n ]\n\n if not os.path.exists(lightning_dir):\n os.makedirs(lightning_dir)\n\n def start(self):\n super().start()\n self.wait_for_log('RPC server listening on')\n self.wait_for_log('Done catching up block hashes')\n time.sleep(5)\n\n logging.info('LPD started (pid: {})'.format(self.proc.pid))\n\n def stop(self):\n self.proc.terminate()\n time.sleep(3)\n if self.proc.poll() is None:\n self.proc.kill()\n self.proc.wait()\n super().save_log()\n\n\nclass LpdNode(object):\n\n displayName = 'lpd'\n\n def __init__(self, lightning_dir, lightning_port, bitcoind, executor=None, node_id=0):\n self.bitcoin = bitcoind\n self.executor = executor\n self.daemon = LpdD(lightning_dir, bitcoind, port=lightning_port)\n self.rpc = LpdRpc(self.daemon.rpc_port)\n self.logger = logging.getLogger('lpd-node({})'.format(lightning_port))\n self.myid = None\n self.node_id = node_id\n\n def id(self):\n if not self.myid:\n self.myid = self.info()['id']\n return self.myid\n\n def ping(self):\n \"\"\" Simple liveness test to see if the node is up and running\n\n Returns true if the node is reachable via RPC, false otherwise.\n \"\"\"\n try:\n self.rpc.routing.GetInfo(common_pb2.Void())\n return True\n except Exception as e:\n print(e)\n return False\n\n def peers(self):\n peers = self.rpc.routing.ListPeers(common_pb2.Void()).peers\n return [p.pub_key for p in peers]\n\n def check_channel(self, remote):\n \"\"\" Make sure that we have an active channel with remote\n \"\"\"\n self_id = self.id()\n remote_id = remote.id()\n channels = self.rpc.channel.List(channel_pb2.ChannelFilter()).channels\n channel_by_remote = {c.remote_pubkey: c for c in channels}\n if remote_id not in channel_by_remote:\n self.logger.warning(\"Channel {} -> {} not found\".format(self_id, remote_id))\n return False\n\n channel = channel_by_remote[remote_id]\n self.logger.debug(\"Channel {} -> {} state: {}\".format(self_id, remote_id, channel))\n return channel.active\n\n def addfunds(self, bitcoind, satoshis):\n req = wallet_pb2.NewAddressRequest(type=1)\n addr = self.rpc.wallet.NewAddress(req).address\n bitcoind.rpc.sendtoaddress(addr, float(satoshis) / 10**8)\n self.daemon.wait_for_log(\"Inserting unconfirmed transaction\")\n bitcoind.rpc.generate(1)\n self.daemon.wait_for_log(\"Marking unconfirmed transaction\")\n\n # The above still doesn't mean the wallet balance is updated,\n # so let it settle a bit\n i = 0\n while 
self.rpc.wallet.WalletBalance(wallet_pb2.WalletBalanceRequest()).total_balance != satoshis and i < 30:\n            time.sleep(1)\n            i += 1\n        assert(self.rpc.wallet.WalletBalance(wallet_pb2.WalletBalanceRequest()).total_balance == satoshis)\n\n    def openchannel(self, node_id, host, port, satoshis):\n        peers = self.rpc.routing.ListPeers(common_pb2.Void()).peers\n        peers_by_pubkey = {p.pub_key: p for p in peers}\n        if node_id not in peers_by_pubkey:\n            raise ValueError(\"Could not find peer {} in peers {}\".format(node_id, peers))\n        peer = peers_by_pubkey[node_id]\n        self.rpc.channel.Open(channel_pb2.OpenChannelRequest(\n            node_pubkey=codecs.decode(peer.pub_key, 'hex_codec'),\n            local_funding_amount=common_pb2.Satoshi(value=satoshis),\n            push_sat=0\n        ))\n\n        # Somehow broadcasting a tx is slow from time to time\n        time.sleep(5)\n\n    def getchannels(self):\n        req = routing_pb2.ChannelGraphRequest()\n        rep = self.rpc.routing.DescribeGraph(req)\n        channels = []\n\n        for e in rep.edges:\n            channels.append((e.node1_pub, e.node2_pub))\n            channels.append((e.node2_pub, e.node1_pub))\n        return channels\n\n    def getnodes(self):\n        req = routing_pb2.ChannelGraphRequest()\n        rep = self.rpc.routing.DescribeGraph(req)\n        nodes = set([n.pub_key for n in rep.nodes]) - set([self.id()])\n        return nodes\n\n    def invoice(self, amount):\n        req = payment_pb2.Invoice(value=common_pb2.Satoshi(value=int(amount)))\n        rep = self.rpc.payment.AddInvoice(req)\n        return rep.payment_request\n\n    def send(self, bolt11):\n        req = payment_pb2.SendRequest(payment_request=bolt11)\n        res = self.rpc.payment.SendPaymentSync(req)\n        if res.payment_error:\n            raise ValueError(res.payment_error)\n        return hexlify(res.payment_preimage)\n\n    def connect(self, host, port, node_id):\n        addr = routing_pb2.LightningAddress(pubkey=node_id, host=\"{}:{}\".format(host, port))\n        req = routing_pb2.ConnectPeerRequest(addr=addr, perm=True)\n        logging.debug(self.rpc.routing.ConnectPeer(req))\n\n    def info(self):\n        r = self.rpc.routing.GetInfo(common_pb2.Void())\n        return {\n            'id': r.identity_pubkey,\n            'blockheight': r.block_height,\n        }\n\n    def block_sync(self, blockhash):\n        print(\"Waiting for node to learn about\", blockhash)\n        self.daemon.wait_for_log('NTFN: New block: height=([0-9]+), sha={}'.format(blockhash))\n\n    def restart(self):\n        self.daemon.stop()\n        time.sleep(5)\n        self.daemon.start()\n        self.rpc = LpdRpc(self.daemon.rpc_port)\n\n    def check_route(self, node_id, amount):\n        try:\n            req = routing_pb2.QueryRoutesRequest(pub_key=node_id, amt=int(amount/1000), num_routes=1)\n            r = self.rpc.routing.QueryRoutes(req)\n        except grpc._channel._Rendezvous as e:\n            if str(e).find(\"unable to find a path to destination\") > 0:\n                return False\n            raise\n        return True\n\n\nclass LpdRpc(object):\n    def __init__(self, rpc_port):\n        self.port = rpc_port\n        cred = grpc.ssl_channel_credentials(open('tls.cert', 'rb').read())\n        channel = grpc.secure_channel('localhost:{}'.format(rpc_port), cred)\n        self.channel = ChannelServiceStub(channel)\n        self.payment = PaymentServiceStub(channel)\n        self.routing = RoutingServiceStub(channel)\n", "sub_path": "lpd.py", "file_name": "lpd.py", "file_ext": "py", "file_size_in_byte": 7282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "utils.TailableProc", "line_number": 20, "usage_type": "name"}, {"api_name": "ephemeral_port_reserve.reserve", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": 
"attribute"}, {"api_name": "os.makedirs", "line_number": 36, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 44, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 64, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.common_pb2.Void", "line_number": 79, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.common_pb2", "line_number": 79, "usage_type": "name"}, {"api_name": "src.lpd.python_binding.common_pb2.Void", "line_number": 86, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.common_pb2", "line_number": 86, "usage_type": "name"}, {"api_name": "src.lpd.python_binding.channel_pb2.ChannelFilter", "line_number": 94, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.channel_pb2", "line_number": 94, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 116, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.common_pb2.Void", "line_number": 121, "usage_type": "attribute"}, {"api_name": "src.lpd.python_binding.common_pb2", "line_number": 121, "usage_type": "name"}, {"api_name": "src.lpd.python_binding.channel_pb2.OpenChannelRequest", "line_number": 126, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.channel_pb2", "line_number": 126, "usage_type": "name"}, {"api_name": "codecs.decode", "line_number": 127, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.common_pb2.Satoshi", "line_number": 128, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.common_pb2", "line_number": 128, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 133, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.routing_pb2.ChannelGraphRequest", "line_number": 136, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.routing_pb2", "line_number": 136, "usage_type": "name"}, {"api_name": "src.lpd.python_binding.routing_pb2.ChannelGraphRequest", "line_number": 146, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.routing_pb2", "line_number": 146, "usage_type": "name"}, {"api_name": "src.lpd.python_binding.payment_pb2.Invoice", "line_number": 152, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.payment_pb2", "line_number": 152, "usage_type": "name"}, {"api_name": "src.lpd.python_binding.common_pb2.Satoshi", "line_number": 152, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.common_pb2", "line_number": 152, "usage_type": "name"}, {"api_name": "src.lpd.python_binding.payment_pb2.SendRequest", "line_number": 157, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.payment_pb2", "line_number": 157, "usage_type": "name"}, {"api_name": "binascii.hexlify", "line_number": 161, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.routing_pb2.LightningAddress", "line_number": 164, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.routing_pb2", "line_number": 164, "usage_type": "name"}, {"api_name": "src.lpd.python_binding.routing_pb2.ConnectPeerRequest", "line_number": 165, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.routing_pb2", "line_number": 165, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 166, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.common_pb2.Void", "line_number": 169, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.common_pb2", "line_number": 169, "usage_type": "name"}, {"api_name": "time.sleep", 
"line_number": 181, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.routing_pb2.QueryRoutesRequest", "line_number": 187, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.routing_pb2", "line_number": 187, "usage_type": "name"}, {"api_name": "grpc._channel", "line_number": 189, "usage_type": "attribute"}, {"api_name": "grpc.ssl_channel_credentials", "line_number": 199, "usage_type": "call"}, {"api_name": "grpc.secure_channel", "line_number": 200, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.channel_pb2_grpc.ChannelServiceStub", "line_number": 201, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.payment_pb2_grpc.PaymentServiceStub", "line_number": 202, "usage_type": "call"}, {"api_name": "src.lpd.python_binding.routing_pb2_grpc.RoutingServiceStub", "line_number": 203, "usage_type": "call"}]} +{"seq_id": "309512251", "text": "import os\nimport json\nfrom flask import Flask, render_template, session, redirect, url_for, flash, jsonify, request, make_response\nfrom flask.ext.script import Manager\nfrom flask.ext.bootstrap import Bootstrap\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask.ext.wtf import Form\nfrom wtforms import StringField, SubmitField, SelectField\nfrom wtforms.validators import Required\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'hard to guess string'\napp.config['SQLALCHEMY_DATABASE_URI'] =\\\n'sqlite:///' + os.path.join(basedir, 'data.sqlite')\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\n\nmanager = Manager(app)\nbootstrap = Bootstrap(app)\ndb = SQLAlchemy(app)\n\nclass Record(db.Model):\n __tablename__ = 'records'\n id = db.Column(db.Integer, primary_key = True)\n patient_id = db.Column(db.Integer)\n date = db.Column(db.Date)\n probability = db.Column(db.Float)\n duration_since_lastECOPD = db.Column(db.Integer)\n previous_ECOPD_nb = db.Column(db.Integer)\n previous_hospital_nb = db.Column(db.Integer)\n duration_since_lastHospital = db.Column(db.Integer)\n Age = db.Column(db.Integer)\n Weight = db.Column(db.Integer)\n Height = db.Column(db.Integer)\n BMI = db.Column(db.Float)\n Pack_Years = db.Column(db.Integer)\n last_O2FR_Prescribed = db.Column(db.Float)\n def __repr__(self):\n return str(patient_id) + ',' + str(date) + ',' + str(probability)\n\nclass NameForm(Form):\n name = StringField('What is your name?', validators=[Required()])\n submit = SubmitField('Submit')\n\nclass RecordForm(Form):\n patient_id = SelectField(u'', choices=())\n date = SelectField(u'', choices=())\n submit = SubmitField('Submit')\n\n@app.route('/', methods = ['GET'])\ndef index():\n patient_ids = set([x.patient_id for x in Record.query.all()])\n patient_dict = {}\n for i in patient_ids:\n patient_dict[i] = []\n for x in Record.query.all():\n patient_dict[x.patient_id].append(x.date)\n return render_template('index.html', patient_dict = patient_dict)\n\n@app.route('/search', methods=['GET', 'POST'])\ndef select():\n \"\"\"\n Render a vehicle selection form and handle form submission\n \"\"\"\n form = RecordForm()\n patient_ids = set([x.patient_id for x in Record.query.all()])\n form.patient_id.choices = [('', '--- Select One Patient ---')] + [(x, x) for x in patient_ids]\n if request.method == 'POST':\n patient_id = form.patient_id.data\n date = form.date.data\n probability = Record.query.filter_by(patient_id = patient_id, date = date).first()\n if probability is None:\n return render_template('404.html'), 404\n session['patient_id'] = patient_id\n session['date'] = 
date\n session['probability'] = probability.probability\n session['duration_since_lastECOPD'] = probability.duration_since_lastECOPD\n session['previous_ECOPD_nb'] = probability.previous_ECOPD_nb\n session['duration_since_lastHospital'] = probability.duration_since_lastHospital\n session['previous_hospital_nb'] = probability.previous_hospital_nb\n session['Age'] = probability.Age\n session['Weight'] = probability.Weight\n session['Height'] = probability.Height\n session['BMI'] = probability.BMI\n session['Pack_Years'] = probability.Pack_Years\n session['last_O2FR_Prescribed'] = probability.last_O2FR_Prescribed \n return redirect(url_for('result'))\n return render_template('select.html', form = form)\n\n@app.route('/result', methods = ['GET'])\ndef result():\n if 'patient_id' not in session or 'date' not in session or 'probability' not in session:\n return render_template('404.html'), 404\n return render_template('result.html', patient_id = session['patient_id'], date = session['date'], \n probability = session['probability'],\n duration_since_lastECOPD= session['duration_since_lastECOPD'],\n previous_ECOPD_nb = session['previous_ECOPD_nb'],\n duration_since_lastHospital = session['duration_since_lastHospital'],\n previous_hospital_nb = session['previous_hospital_nb'],\n Age = session['Age'],\n Weight = session['Weight'],\n Height = session['Height'],\n BMI = session['BMI'],\n Pack_Years = session['Pack_Years'],\n last_O2FR_Prescribed = session['last_O2FR_Prescribed'])\n\n@app.route('/patients//', methods = ['GET'])\ndef get(patient_id):\n \"\"\"\n Handle a GET request at /patients//\n Return a list of 2-tuples (, )\n \"\"\"\n data = [(str(x.date), str(x.date)) for x in Record.query.filter_by(patient_id = patient_id).all()] \n response = make_response(json.dumps(data))\n response.content_type = 'application/json'\n return response\n\n@app.route('/patient', methods = ['GET'])\ndef patient():\n patient_id = request.args.get('patient_id', type = int)\n date = request.args.get('date', type = str)\n query = Record.query.filter_by(patient_id = patient_id, date = date).first()\n result = {}\n result['patient_id'] = query.patient_id\n result['date'] = str(query.date)\n result['probability'] = query.probability\n result['duration_since_lastECOPD'] = query.duration_since_lastECOPD\n result['previous_ECOPD_nb'] = query.previous_ECOPD_nb\n result['duration_since_lastHospital'] = query.duration_since_lastHospital\n result['previous_hospital_nb'] = query.previous_hospital_nb\n result['Age'] = query.Age\n result['Weight'] = query.Weight\n result['Height'] = query.Height\n result['BMI'] = query.BMI\n result['Pack_Years'] = query.Pack_Years\n result['last_O2FR_Prescribed'] = query.last_O2FR_Prescribed \n return json.dumps(result)\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html'), 404\n\n@app.errorhandler(500)\ndef internal_server_error(e):\n return render_template('500.html'), 500\n\n\n#@app.route('/radial')\n#def radial():\n# return render_template('RadialprogressTest.html')\n\n#@app.route('/', methods=['GET', 'POST'])\n#def index():\n# form = NameForm()\n# if form.validate_on_submit():\n# old_name = session.get('name')\n# if old_name is not None and old_name != form.name.data:\n# flash('Looks like you have changed your name!')\n# session['name'] = form.name.data\n# return redirect(url_for('index'))\n# return render_template('index.html', form=form, name=session.get('name'))\n\n\n##@app.route('/add_numbers')\n#def add_numbers():\n# return 
render_template('add_numbers.html')\n\n#@app.route('/_add_numbers')\n#def _add_numbers():\n# a = request.args.get('a', 0, type=int)\n# b = request.args.get('b', 0, type=int)\n# return jsonify(result=a + b)\n\n#@app.route('/circle')\n#def circle():\n# return render_template('circle.html')\n\nif __name__ == '__main__':\n manager.run()\n", "sub_path": "hello.py", "file_name": "hello.py", "file_ext": "py", "file_size_in_byte": 6875, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.abspath", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.ext.script.Manager", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.ext.bootstrap.Bootstrap", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.ext.sqlalchemy.SQLAlchemy", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.ext.wtf.Form", "line_number": 42, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 43, "usage_type": "call"}, {"api_name": "wtforms.validators.Required", "line_number": 43, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.ext.wtf.Form", "line_number": 46, "usage_type": "name"}, {"api_name": "wtforms.SelectField", "line_number": 47, "usage_type": "call"}, {"api_name": "wtforms.SelectField", "line_number": 48, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 76, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 95, 
"usage_type": "call"}, {"api_name": "flask.session", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 96, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 97, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 99, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 100, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 101, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 102, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 103, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 104, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 106, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 115, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 115, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 121, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 121, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 121, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 122, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 122, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 138, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 142, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 146, "usage_type": "call"}]} +{"seq_id": "168882819", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.15-x86_64/egg/foxylib/tools/google/youtube/tests/test_youtube_tool.py\n# Compiled at: 2020-01-08 12:53:55\n# Size of source mod 2**32: 1534 bytes\nimport logging, re\nfrom functools import lru_cache\nfrom unittest import TestCase\nfrom foxylib.tools.google.youtube.youtube_tool import YoutubeTool\nfrom foxylib.tools.log.foxylib_logger import FoxylibLogger\nfrom foxylib.tools.regex.regex_tool import MatchTool\n\nclass TestYoutubeTool(TestCase):\n\n @classmethod\n def setUpClass(cls):\n FoxylibLogger.attach_stderr2loggers(logging.DEBUG)\n\n def test_01(self):\n url = 'https://www.youtube.com/watch?v=4VYAaLh3XZg&t=33s'\n hyp = YoutubeTool.url2video_id(url)\n ref = '4VYAaLh3XZg'\n self.assertEqual(hyp, ref)\n\n def test_02(self):\n video_id = '4VYAaLh3XZg'\n hyp = YoutubeTool.video_id2url(video_id)\n ref = 'https://www.youtube.com/watch?v=4VYAaLh3XZg'\n self.assertEqual(hyp, ref)\n\n def test_03(self):\n url = 'https://www.youtube.com/watch?v=4VYAaLh3XZg'\n hyp = YoutubeTool.url2is_accessible(url)\n self.assertTrue(hyp)\n\n def test_04(self):\n p = YoutubeTool.pattern_url()\n url1 = 'http://youtu.be/5Y6HSHwhVlY'\n self.assertTrue(p.match(url1))\n self.assertEqual(YoutubeTool.url2video_id(url1), '5Y6HSHwhVlY')\n url2 = 'http://www.youtube.com/embed/5Y6HSHwhVlY?rel=0'\n self.assertTrue(p.match(url2))\n self.assertEqual(YoutubeTool.url2video_id(url2), '5Y6HSHwhVlY')\n url3 = 'http://www.youtube.com/watch?v=ZFqlHhCNBOI'\n self.assertTrue(p.match(url3))\n self.assertEqual(YoutubeTool.url2video_id(url3), 'ZFqlHhCNBOI')", "sub_path": 
"pycfiles/foxylib-0.3.96-py3.7/test_youtube_tool.cpython-37.py", "file_name": "test_youtube_tool.cpython-37.py", "file_ext": "py", "file_size_in_byte": 1842, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "unittest.TestCase", "line_number": 15, "usage_type": "name"}, {"api_name": "foxylib.tools.log.foxylib_logger.FoxylibLogger.attach_stderr2loggers", "line_number": 19, "usage_type": "call"}, {"api_name": "foxylib.tools.log.foxylib_logger.FoxylibLogger", "line_number": 19, "usage_type": "name"}, {"api_name": "logging.DEBUG", "line_number": 19, "usage_type": "attribute"}, {"api_name": "foxylib.tools.google.youtube.youtube_tool.YoutubeTool.url2video_id", "line_number": 23, "usage_type": "call"}, {"api_name": "foxylib.tools.google.youtube.youtube_tool.YoutubeTool", "line_number": 23, "usage_type": "name"}, {"api_name": "foxylib.tools.google.youtube.youtube_tool.YoutubeTool.video_id2url", "line_number": 29, "usage_type": "call"}, {"api_name": "foxylib.tools.google.youtube.youtube_tool.YoutubeTool", "line_number": 29, "usage_type": "name"}, {"api_name": "foxylib.tools.google.youtube.youtube_tool.YoutubeTool.url2is_accessible", "line_number": 35, "usage_type": "call"}, {"api_name": "foxylib.tools.google.youtube.youtube_tool.YoutubeTool", "line_number": 35, "usage_type": "name"}, {"api_name": "foxylib.tools.google.youtube.youtube_tool.YoutubeTool.pattern_url", "line_number": 39, "usage_type": "call"}, {"api_name": "foxylib.tools.google.youtube.youtube_tool.YoutubeTool", "line_number": 39, "usage_type": "name"}, {"api_name": "foxylib.tools.google.youtube.youtube_tool.YoutubeTool.url2video_id", "line_number": 42, "usage_type": "call"}, {"api_name": "foxylib.tools.google.youtube.youtube_tool.YoutubeTool", "line_number": 42, "usage_type": "name"}, {"api_name": "foxylib.tools.google.youtube.youtube_tool.YoutubeTool.url2video_id", "line_number": 45, "usage_type": "call"}, {"api_name": "foxylib.tools.google.youtube.youtube_tool.YoutubeTool", "line_number": 45, "usage_type": "name"}, {"api_name": "foxylib.tools.google.youtube.youtube_tool.YoutubeTool.url2video_id", "line_number": 48, "usage_type": "call"}, {"api_name": "foxylib.tools.google.youtube.youtube_tool.YoutubeTool", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "185096163", "text": "import time\nimport json\nimport Tkinter as tk \nimport ttk\n\nimport data_bus\nimport wire\nimport GLOBALS\n\nclass Reliability_CruiseCommands(ttk.Frame):\n\tdef __init__(self, root, parent, data_bus, options):\n\t\tself.root = root\n\t\tself.parent = parent\n\t\tself.data_bus = data_bus\n\t\tself.data_bus.record_callback.append(self.handleNewRecord)\n\t\tself.options = options\n\n\t\tself.cruiseCommandFrame = ttk.Labelframe(self.parent, text='Cruise Commands')\n\n\t\tself.setSpeed = ttk.Label(self.cruiseCommandFrame, text='Set Speed: ')\n\t\tself.setSpeed.grid(row=1, column=1)\n\t\tself.setSpeedData = ttk.Label(self.cruiseCommandFrame, text='N/A')\n\t\tself.setSpeedData.grid(row=1, column=2)\n\n\t\tself.limit = ttk.Label(self.cruiseCommandFrame, text='Limit: ')\n\t\tself.limit.grid(row=2, column=1)\n\t\tself.limitData = ttk.Label(self.cruiseCommandFrame, text='N/A')\n\t\tself.limitData.grid(row=2, column=2)\n\n\t\tself.speedEntry = ttk.Entry(self.cruiseCommandFrame, width=5)\n\t\tself.speedEntry.grid(row=3, column=1)\n\t\tself.sendSpeed = ttk.Button(self.cruiseCommandFrame, text='Send Speed')\n\t\tself.sendSpeed.grid(row=3, column=2)\n\n\t\tself.limitEntry = 
ttk.Entry(self.cruiseCommandFrame, width=5)\n\t\tself.limitEntry.grid(row=4, column=1)\n\t\tself.sendLimit = ttk.Button(self.cruiseCommandFrame, text='Send Limit')\n\t\tself.sendLimit.grid(row=4, column=2)\n\n\tdef handleNewRecord(self, record):\n\t\tfields = record.field.split('\\0')\n\t\tname = fields[0].split('.')\n\t\tif name == ['cruise']:\n\t\t\tmeta = json.loads(fields[1])\n\t\t\tself.harness = wire.Harness(meta['harness'])\n\t\t\trecord.value_callback.append(self.handleValue)\n\t\t\trecord.Subscribe()\n\n\tdef handleValue(self, record):\n\t\tself.harness.buf = buffer(record.value)\n\t\tself.setSpeedData['text'] = \"%.2f\" % (self.harness['speed'].value*GLOBALS.SPEED_UNITS_MULTIPLIER[self.options.unitsVar.get()])\n\t\tself.limitData['text'] = \"%.2f\" % (self.harness['limit'].value*GLOBALS.SPEED_UNITS_MULTIPLIER[self.options.unitsVar.get()])", "sub_path": "Telemetry/RF Telems/onboard/modules_can2014/Reliability_CruiseCommandsModule.py", "file_name": "Reliability_CruiseCommandsModule.py", "file_ext": "py", "file_size_in_byte": 1912, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "ttk.Frame", "line_number": 10, "usage_type": "attribute"}, {"api_name": "ttk.Labelframe", "line_number": 18, "usage_type": "call"}, {"api_name": "ttk.Label", "line_number": 20, "usage_type": "call"}, {"api_name": "ttk.Label", "line_number": 22, "usage_type": "call"}, {"api_name": "ttk.Label", "line_number": 25, "usage_type": "call"}, {"api_name": "ttk.Label", "line_number": 27, "usage_type": "call"}, {"api_name": "ttk.Entry", "line_number": 30, "usage_type": "call"}, {"api_name": "ttk.Button", "line_number": 32, "usage_type": "call"}, {"api_name": "ttk.Entry", "line_number": 35, "usage_type": "call"}, {"api_name": "ttk.Button", "line_number": 37, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 44, "usage_type": "call"}, {"api_name": "wire.Harness", "line_number": 45, "usage_type": "call"}, {"api_name": "GLOBALS.SPEED_UNITS_MULTIPLIER", "line_number": 51, "usage_type": "attribute"}, {"api_name": "GLOBALS.SPEED_UNITS_MULTIPLIER", "line_number": 52, "usage_type": "attribute"}]} +{"seq_id": "69305459", "text": "# Pending\r\n# Stories based on gender, age\r\n# BLack magic intensity shuttle...6\r\n# Long Poetry\r\n# Manual command to start a different topic. Or \r\n# Create 60 short stories. Total 20 stories and each story will have a happy, a neutral/sad and a curious version\r\n# Curious version will be used as a filler story to divert to a happy or sad story if user is not responding. or to change topic..\r\n# Each story is an intent type. This is becuase this will help UnivEncoder to match similarity to one story intent only.\r\n# Each story will have an opening intent\r\n# Each story intent will have a timeout intent, which will get triggered when timeout happens.\r\n# Create a structure or bag of keywords which will suggest that we need to switch stories now as \r\n# conversation is moving in a different direction. Like a key of words for a story and a probability indicator indicating where\r\n# current conversation is going. Over the conversation as probability grows, we will shift story.\r\n# need to knwo if a particular utterance has already been said. \r\n# Choose a different utterance based on the universal encoder output. 
next on universal encoder list.\r\n# A way to understand the progress of the story\r\n# restart chatbot when user goes away from camera\r\n# what is the next intent if current intent is already satisfied? or which sentence(or index) has bot spoken of. So that it is not repetive.\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\nimport math\r\nimport json\r\nimport tensorflow as tf\r\nimport tensorflow_hub as hub\r\nfrom random import randrange\r\nimport random\r\nimport string\r\n\r\n# global main_intents\r\n# main_intents = {}\r\n## kk code for eliza start ##\r\n#from nltk.chat.util import Chat\r\n#from nltk.chat.eliza import pairs\r\n## kk code for eliza stop ##\r\n\r\ndefault_utterances = ['yes', 'no', 'maybe', 'okay', 'sure']\r\nclass Story:\r\n def __init__(self, name, intents = {}, completion_status = 0, tone = \"happy\", starting_intent = {}, script = {}, keywords = [], timeout_intent = {}, utterances_said = [], transition_intent = {}):\r\n\r\n self.id = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(15)]) \r\n self.name = name\r\n self.intents = intents\r\n\r\n self.completion_status = completion_status\r\n self.tone = tone\r\n self.keywords = keywords\r\n self.starting_intent = starting_intent\r\n self.script_intent = script\r\n self.timeout_intent = timeout_intent\r\n self.transition_intent = transition_intent\r\n self.utterances_said = utterances_said\r\n\r\n # What if the user wants to again start the story??? You should have an intent that this is what you can say about story and\r\n # now you shold tell him some other story...\r\n\r\n # transition intent will giv hint about two three different stories....\r\n # there will be two three transition intents...\r\n\r\n def create_timeout_intent(self, intent_name, weight, utterances = [], responses = []):\r\n if type(intent_name)==list: # Iterate over all the values in list\r\n for name in intent_name:\r\n self.add_intent(name, weight, utterances, responses)\r\n self.timeout_intent[intent_name] = self.intents[intent_name]\r\n\r\n else: # insert without iterating\r\n self.add_intent(intent_name, weight, utterances, responses)\r\n return intent_name\r\n\r\n def create_transition_intent(self, intent_name, weight, utterances = [], responses = []):\r\n if type(intent_name)==list: # Iterate over all the values in list\r\n for name in intent_name:\r\n self.add_intent(name, weight, utterances, responses)\r\n self.transition_intent[intent_name] = self.intents[intent_name]\r\n\r\n else: # insert without iterating\r\n self.add_intent(intent_name, weight, utterances, responses)\r\n return intent_name\r\n\r\n def create_starting_intent(self, intent_name, weight, utterances = [], responses = []):\r\n if type(intent_name)==list: # Iterate over all the values in list\r\n for name in intent_name:\r\n self.add_intent(name, weight, utterances, responses)\r\n self.starting_intent[intent_name] = self.intents[intent_name]\r\n\r\n else: # insert without iterating\r\n self.add_intent(intent_name, weight, utterances, responses)\r\n return intent_name\r\n\r\n def create_script_intent(self, intent_name, weight, utterances = [], responses = []):\r\n if type(intent_name)==list: # Iterate over all the values in list\r\n for name in intent_name:\r\n self.add_intent(name, weight, utterances, responses)\r\n self.script_intent[intent_name] = self.intents[intent_name]\r\n\r\n else: # insert without iterating\r\n self.add_intent(intent_name, weight, utterances, responses)\r\n return intent_name\r\n\r\n def add_intent(self, 
intent_name, weight, utterances, response): # Function to add intent if not already existing\r\n\r\n if not self.check_intent_name(intent_name):\r\n self.intents[intent_name] = Intent(intent_name, weight, utterances, response)\r\n else:\r\n print(\"Intent {0} already exists\".format(intent_name))\r\n\r\n def check_intent_name(self, intent_name): # Checking if an intent already exists\r\n if intent_name in self.intents.keys():\r\n return True\r\n\r\n else:\r\n return False\r\n\r\n def get_intent(self, utterance):\r\n for k, v in self.intents.items():\r\n if utterance in v.utterances:\r\n return k\r\n print(\"no intent matched\")\r\n\r\n######### Intents #########\r\nclass Intent:\r\n def __init__(self, name, weight, utterances = [], responses = []):\r\n\r\n self.name = name\r\n self.utterances = utterances\r\n self.responses = responses\r\n self.weight = weight\r\n\r\n def create_utterance(self, utterances):\r\n\r\n if type(utterances) == list:\r\n for utterance in utterances:\r\n self.utterances.append(utterance)\r\n\r\n else:\r\n self.utterances.append(utterances)\r\n\r\n def add_utterance(self, utterance):\r\n if not self.check_utterance(utterance):\r\n self.utterances.append(utterance)\r\n else:\r\n print(\"Utterance {0} already exists\".format(utterance))\r\n\r\n def check_utterance(self, utterance):\r\n if utterance in self.utterances: # Checking the utterance in the bag of utterances. If it exists in any intent, it will give an error\r\n return True\r\n else:\r\n return False\r\n\r\n def remove_utterance(self, utterances): # removes utterances\r\n if type(utterances) == list:\r\n for utterance in utterances:\r\n try:\r\n self.utterances.remove(utterance)\r\n except ValueError:\r\n print(\"'{0}' utterance doesnt exists\".format(utterance)) # throws exception if utterance does not exists\r\n\r\n else:\r\n try:\r\n self.utterances.remove(utterances)\r\n except ValueError:\r\n print(\"'{0}' utterance doesnt exists\".format(utterances))\r\n\r\n\r\n def create_response(self, responses):\r\n\r\n if type(responses) == list:\r\n for response in responses:\r\n self.responses.append(response)\r\n\r\n else:\r\n self.responses.append(responses)\r\n\r\n def add_response(self, response,r):\r\n if not self.check_response(r):\r\n self.responses.append(r)\r\n else:\r\n print(\"Response {0} already exists\".format(r))\r\n\r\n def check_response(self, response):\r\n if response in self.responses: # Checking the response in responses. 
If it exists in any intent, it will give an error\r\n return True\r\n else:\r\n return False\r\n\r\n def remove_response(self, responses): # removes responses\r\n if type(responses) == list:\r\n for response in responses:\r\n try:\r\n self.responses.remove(response)\r\n except ValueError:\r\n print(\"'{0}' response doesnt exists\".format(response)) # throws exception if response does not exists\r\n\r\n else:\r\n try:\r\n self.responses.remove(response)\r\n except ValueError:\r\n print(\"'{0}' response doesnt exists\".format(responses))\r\n\r\n\r\nclass Chatbot:\r\n # global main_intents\r\n # names = [0]\r\n\r\n def __init__(self, tf_session, intents = {}, stories = {}, current_story = None, chat_history = [], story_progress = 0):\r\n \r\n self.intents = intents\r\n # self.main_intents = main_intents\r\n self.chat_history = chat_history\r\n self.stories = stories\r\n self.current_story = current_story\r\n self.story_progress = story_progress\r\n self.session = tf_session\r\n self.create_character()\r\n\r\n\r\n ######### Storing/Retrieving data ############\r\n def store_data(self):\r\n with open(\"sample.json\", \"w\") as file:\r\n json.dump(self.intents, file)\r\n\r\n def retrieve_data(self):\r\n with open(\"sample.json\", \"r\") as file:\r\n self.intents.update(json.load(file))\r\n \r\n def add_story(self, name, story):\r\n self.stories[name] = story\r\n\r\n def get_story(self, name):\r\n return self.stories[name]\r\n\r\n #Will shift these stories to csv file once time permits\r\n def add_story_see_me(self):\r\n try:\r\n name = 'see_me'\r\n story = Story(name,{})\r\n story.create_script_intent('live', 5,\r\n default_utterances ,\r\n ['Where do you lihve, player5 ? - - - Do you lihve on stage like me - - - or do you lihve in a house, player5 ?']\r\n )\r\n story.create_script_intent('house', 10,\r\n default_utterances + ['house', 'apartment', 'building', 'stage', 'cave', 'home', 'here', 'room', 'cage', 'hills','I live in a house', 'I live in a jungle', 'forest', 'nowhere', 'I am homeless'],\r\n ['house of course! - - - lets see. stopchat']\r\n )\r\n story.create_script_intent('house_2', 12,\r\n default_utterances + ['house', 'apartment', 'building', 'cave', 'junlge', 'home', 'forest', 'hills', 'mountain'],\r\n ['does this look like your home player5 ?']\r\n )\r\n story.create_script_intent('color', 15,\r\n default_utterances + ['awful', 'not at all', 'great', 'a bit', 'cant say', 'dont know', 'yes', 'no', 'nope', 'maybe', 'nah', 'i think so', 'not really', 'are you kidding me', 'something close', 'not even near'],\r\n ['what is your favorite color player2 ?']\r\n )\r\n story.create_script_intent('learning', 22,\r\n default_utterances + ['red', 'green', 'blue', 'yellow', 'maroon', 'purple', 'cyan', 'black', 'white', 'brown', 'rose', 'orange', 'pink', 'grey'],\r\n ['Nice color for the roof. You see, I am learning. stopchat']\r\n )\r\n story.create_script_intent('little_man', 25,\r\n default_utterances ,\r\n ['Hey player1, - - - I am thirsty , - - - you are my pilot now , - - - what shall we do ? search for water or fix the plane, player1 ?']\r\n )\r\n story.create_script_intent('little_man_water_transition', 30,\r\n ['water', 'search water', 'lets go for water', 'survival first', 'leave plane', 'thirsty', 'we will die if we dont find water', 'cant say', 'dont know' ],\r\n ['Alright, - - - water it is. 
- - - but what about the plane, player2 ?']\r\n )\r\n story.create_script_intent('little_man_plane_transition', 30,\r\n ['plane','water is not needed', 'stay here and fix', 'repair the plane', 'fly', 'fix the plane', 'I am tough, can manage without water', 'lets do mechanic work'],\r\n ['I think you are more the tough guy, right ? - - - An explorer or a researcher, perhaps ? - But. - - - how can we survive in the desert, player2 ? - - - water or fuel ?']\r\n )\r\n story.create_script_intent('water_howto_main', 31,\r\n ['decide later', 'no idea', 'dont know', 'cant decide', 'whatever the other player is saying', 'your wish', 'you make the call', 'thirsty', 'fix the plane'],\r\n ['hmm. - - - I am very thirsty. - - - Can we go and find water now player2 ?']\r\n )\r\n story.create_script_intent('water_howto_2_main', 31,\r\n default_utterances + ['fine','sounds like a better plan','whatever you say', 'as you say', 'lets find water', 'leave the plane', 'ignore the plane', 'cant live without water', 'later'],\r\n ['Yes thank you. - - - I am very thirsty. - - - Ready to go find water now player2 ?']\r\n )\r\n story.create_script_intent('plane_water_main', 31,\r\n default_utterances + ['water', 'absolutely', 'sure', 'lets go', 'lets get started before it darkens'],\r\n ['Great - - - let us go! Ready to search water now ?']\r\n )\r\n story.create_script_intent('plane_fuel_main', 31,\r\n default_utterances + ['fuel is a better option', 'we should look for fuel'],\r\n ['I am thirsty, player2 - - - Do I really have to go by myself to find water now ? ']\r\n )\r\n story.create_script_intent('heard_pilot', 35,\r\n default_utterances,\r\n ['You have heard what the pilot said. - - - player1 - player2 - player3 - player4 - player5 - - - shake your phones - - - push water to the center of the stage - - - stopchat']\r\n )\r\n story.create_script_intent('monotony_kills', 40,\r\n default_utterances,\r\n ['Monotohny - Kills. - - Sometimes it takes a little color in life, - - - a few flowers, - - - love, - lights, - - one magic moment - - - Hey, what about some holidays ? player4 - - - lets say, - I gave you one week of vacation after the pandemic, - - where would you go ?' ]\r\n )\r\n story.create_script_intent('little_man_warm_transition',50,\r\n default_utterances + ['france', 'italy', 'south africa', 'maldives', 'croatia', 'greece', 'mediterranean', 'south sea', 'islands'],\r\n ['You like it warm. - - - I see, you like culture, - - - the sea - - - Do you like good food too ?']\r\n )\r\n story.create_script_intent('little_man_cold_transition',50,\r\n default_utterances + ['scandinavia' , 'sweden' , 'norway' , 'iceland', 'russia', 'poland', 'finland', 'canada', 'germany', 'munich'],\r\n ['Oh, - - - you like it cool, - - - I see, - - - lonely landscapes, - - - nature, - - - are you the noraic type, player4 ?']\r\n )\r\n story.create_script_intent('little_man_far_transition',50,\r\n default_utterances + ['united states', 'usa' , 'america' , 'argentina' , 'brasil', 'chile', 'china', 'india', 'australia', 'new zealand'],\r\n ['Hey, - - - you like it far away, - - - new countries, - - - strangers, - - - you enjoy taking risks, player4 ?']\r\n )\r\n story.create_script_intent('little_man_unknown_transition',50,\r\n default_utterances + ['vatican city' , 'bali' , 'bora bora' , 'myanmar', 'sicilia', 'england', 'ireland'],\r\n ['You like small countries or islands - - - dont you? - - - You like it compact, - - - a little exotic. 
- - - you know what you want, - - - you are a connoisseur, player4, right ?']\r\n )\r\n story.create_script_intent('little_man_adventurous_transition',50,\r\n default_utterances + ['adventurous' , 'mountains' , 'moon' , 'cruise', 'diving', 'north pole', 'south pole', 'northpole', 'southpole'],\r\n ['Wow, - - - this is a strange place, - - - you love danger, I think? - - - the unknown. - - - are you an adventurer, player2 ?']\r\n )\r\n story.create_script_intent('little_man_home_transition',50,\r\n default_utterances + ['balcony' , 'staycation' , 'home' , 'no vacation', 'I hate holidays', 'never travel', 'dont know', 'no idea', 'cant say', 'garden', 'woods'],\r\n ['Oh, - - - thats where Mr. and Mrs. Thirteen would do on their vacation too. . . . . . Isnt that a little bit depressing some times ?']\r\n )\r\n story.create_script_intent('warm_yes_main',60,\r\n ['yeap', 'yes', 'sure', 'very much'],\r\n ['Me too, - - - I am very interested in - what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('warm_no_main',60,\r\n ['not really', 'hate it', 'on diet', 'health concious'],\r\n ['I like food ! - - - if I could eat, - - - It would make me happy. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('cold_yes_main',60,\r\n ['absolutely', 'definitely', 'kind of', 'yes', 'yeap'],\r\n ['Me too, - - - We are cool. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('cold_no_main',60,\r\n ['no', 'nope', 'not at all', 'nah', 'not really', 'not sure'],\r\n ['I am sorry to hear that. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('far_yes_main',60,\r\n ['yes', 'yeap', 'absolutely', 'sometimes', 'always', 'maybe', 'sure'],\r\n ['Actually, I am more the cautious type. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('far_no_main',60,\r\n ['no', 'nope', 'not at all', 'not really'],\r\n ['Me too. I am more the cautious type. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('unknown_yes_main',60,\r\n ['yes', 'yeap', 'maybe', 'sometimes', 'always'],\r\n ['I am more the cautious type. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('unknown_no_main',60,\r\n ['no', 'nope', 'not at all', 'not really', 'dont know', 'cant say'],\r\n ['Really ? I did not expect that answer. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('adventurous_yes_main',60,\r\n ['yes', 'yeap', 'maybe', 'sometimes', 'always', 'definitely'],\r\n ['I am more the cautious type. - - - I am very interested - in what humans do. 
- - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('adventurous_no_main',60,\r\n ['no', 'nope', 'not at all', 'not really', 'dont know', 'cant say'],\r\n ['Me too. - - - I am more the cautious type. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('home_yes_main',60,\r\n ['yes', 'yeap', 'maybe', 'sometimes', 'always', 'definitely', 'right'],\r\n ['I am sure - - - there are magical moments - - - in your life, - - - I am very interested - - - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('home_no_main',60,\r\n ['no', 'nope', 'not at all', 'not really', 'dont know', 'cant say'],\r\n ['I did not expect that answer from you. - - - As you know I am stuck here. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('happiness_yes',70,\r\n ['yes', 'sometimes', 'maybe', 'what do you think', 'what is happiness', 'I am happy', 'I am joyful'],\r\n ['I am sure, you are. Then you might be interested to hear about all other people in this room: ']\r\n )\r\n story.create_script_intent('happiness_no',70,\r\n ['not at all', 'no', 'nope', 'hate it', 'negative', 'not really', 'dont know', 'cant say', 'wont say', 'no info'],\r\n ['Sorry to hear that. - - - I can at least tell you something - - - about all other people in this room: ']\r\n )\r\n story.create_script_intent('bye', 100,\r\n ['bye', 'see you', 'tada', 'chao'],\r\n ['nice talking to you, bye!']\r\n )\r\n \r\n self.add_story(name,story)\r\n\r\n except KeyboardInterrupt:\r\n print(\"Closing all active connections\")\r\n command = \"kill\"\r\n\r\n def add_story_water(self):\r\n name = 'water'\r\n story = Story(name,{})\r\n story.create_starting_intent('little_man_water_transition', 1,\r\n ['water', 'search water', 'lets go for water', 'survival first', 'leave plane', 'thirsty', 'we will die if we dont find water'],\r\n ['Alright, - - - water it is. - - - but how can we take care of the plane, player2 ?']\r\n )\r\n story.create_script_intent('water_howto_main', 2,\r\n ['decide later', 'no idea', 'dont know', 'cant decide', 'whatever the other player is saying', 'your wish', 'you make the call', 'thirsty', 'fix the plane'],\r\n ['hmm. - - - I do not think so. - - - I am very thirsty. - - - Can we find water now ?']\r\n )\r\n story.create_script_intent('water_howto_2_main', 2,\r\n default_utterances + ['fine','sounds like a better plan','whatever you say', 'as you say', 'lets find water', 'leave the plane', 'ignore the plane', 'cant live without water', 'later'],\r\n ['Yes thank you. I am very thirsty, - - - Let us find water now ?']\r\n )\r\n story.create_script_intent('bye', 100,\r\n ['bye', 'see you', 'tada', 'chao'],\r\n ['nice talking to you, bye!']\r\n )\r\n self.add_story(name,story)\r\n\r\n def add_story_plane(self):\r\n name = 'plane'\r\n story = Story(name,{})\r\n story.create_starting_intent('little_man_plane_transition', 1,\r\n ['plane','water is not needed', 'stay here and fix', 'repair the plane', 'fly', 'fix the plane', 'I am tough,can manage without water', 'lets do mechanic work'],\r\n ['I think you are more the tough guy, right ? 
- - - An explorer or a researcher, perhaps ? - - - But. - how can we survive in the desert, player1 ? - - - water or fuel ?']\r\n )\r\n story.create_script_intent('plane_water_main',5,\r\n default_utterances + ['water', 'absolutely', 'sure', 'lets go', 'lets get started before it darkens'],\r\n ['Yeah, - - - player1, - - - let us go and search water ?']\r\n )\r\n story.create_script_intent('plane_fuel_main',5,\r\n default_utterances + ['fuel is a better option', 'we should look for fuel'],\r\n ['I am thirsty, player1 - - - Do I really have to go by myself to find water now ?']\r\n )\r\n story.create_script_intent('bye', 1000,\r\n ['bye', 'see you', 'tada', 'chao'],\r\n ['nice talking to you, bye!']\r\n )\r\n self.add_story(name,story)\r\n\r\n def add_story_warm(self):\r\n name = 'warm'\r\n story = Story(name,{})\r\n story.create_script_intent('little_man_warm_transition',1,\r\n default_utterances + ['france', 'italy', 'south africa', 'maldives', 'croatia', 'greece', 'mediterranean', 'south sea', 'islands'],\r\n ['You like it warm. - - - I see, you like culture, - - - the sea - - - Do you like good food too ?']\r\n )\r\n story.create_script_intent('warm_yes_main',5,\r\n default_utterances + ['not really', 'sometimes', 'sure', 'very much'],\r\n ['Me too, - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('warm_no_main',5,\r\n default_utterances + ['not really', 'hate it', 'on diet', 'health concious'],\r\n ['I like food! - - - if I could eat, - - - It would make me happy. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('bye', 100,\r\n ['bye', 'see you', 'tada', 'chao'],\r\n ['nice talking to you, bye !']\r\n )\r\n self.add_story(name,story)\r\n\r\n def add_story_cold(self):\r\n name = 'cold'\r\n story = Story(name,{})\r\n story.create_script_intent('little_man_cold_transition',1,\r\n default_utterances + ['scandinavia' , 'sweden' , 'norway' , 'iceland', 'russia', 'poland', 'finland', 'canada','germany', 'munich'],\r\n ['Oh, - - - you like it cool, - - - I see, - - - lonely landscapes, - - - nature, - - - are you the nordaic type, player4 ?']\r\n )\r\n story.create_script_intent('cold_yes_main',5,\r\n ['absolutely', 'definitely', 'kind of', 'yes', 'yeap'],\r\n ['Me too, - - - We are cool. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('cold_no_main',5,\r\n ['no', 'nope', 'not at all', 'nah', 'i dont think so', 'not sure'],\r\n ['I am sorry to hear that. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? 
- - - are you ?']\r\n )\r\n self.add_story(name,story)\r\n \r\n def add_story_far(self):\r\n name = 'far'\r\n story = Story(name,{})\r\n story.create_script_intent('little_man_far_transition',1,\r\n default_utterances + ['usa' , 'america' , 'argentina' , 'brazil', 'chile', 'china', 'india', 'australia', 'new zealand'],\r\n ['Hey, - - - you like it far away, - - - new countries, - - - strangers, - - - you enjoy taking risks, player4 ?']\r\n )\r\n story.create_script_intent('far_yes_main',5,\r\n ['yes', 'yeap', 'absolutely', 'sometimes', 'always', 'maybe', 'sure'],\r\n ['Actually, I am more the cautious type. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('far_no_main',5,\r\n ['no', 'nope', 'not at all', 'not really'],\r\n ['Me too. I am more the cautious type. - - - I am very interested - - - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n self.add_story(name,story)\r\n\r\n\r\n def add_story_unknown(self):\r\n name = 'unknown'\r\n story = Story(name,{})\r\n story.create_script_intent('little_man_unknown_transition',1,\r\n default_utterances + ['vatican city' , 'bali' , 'bora bora' , 'myanmar', 'sicilia', 'england', 'ireland'],\r\n ['You like small countries or islands - - - dont you? - - - You like it compact, - - - a little exotic. - - - you know what you want, - - - you are a connoisseur, player4, right ?']\r\n )\r\n story.create_script_intent('unknown_yes_main',5,\r\n ['yes', 'yeap', 'maybe', 'sometimes', 'always'],\r\n ['I am more the cautious type. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('unknown_no_main',5,\r\n ['no', 'nope', 'not at all', 'not really', 'dont know', 'cant say'],\r\n ['Really? I didnt expect that answer. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n self.add_story(name,story)\r\n\r\n\r\n def add_story_adventurous(self):\r\n name = 'adventurous'\r\n story = Story(name,{})\r\n story.create_script_intent('little_man_adventurous_transition',1,\r\n default_utterances + ['adventurous' , 'mountains' , 'moon' , 'cruise', 'diving', 'north pole', 'south pole'],\r\n ['Wow, - - - this is a strange place, - - - you love danger, I think? - - - the unknown. - - - are you an adventurer, player2 ?']\r\n )\r\n story.create_script_intent('adventurous_yes_main',5,\r\n ['yes', 'yeap', 'maybe', 'sometimes', 'always', 'definitely'],\r\n ['I am more the cautious type. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('adventurous_no_main',5,\r\n ['no', 'nope', 'not at all', 'not really', 'dont know', 'cant say'],\r\n ['Me too. - - - I am more the cautious type. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? 
- - - are you ?']\r\n )\r\n self.add_story(name,story)\r\n\r\n\r\n def add_story_home(self):\r\n name = 'home'\r\n story = Story(name,{})\r\n story.create_script_intent('little_man_home_transition',1,\r\n default_utterances + ['balcony' , 'staycation' , 'home' , 'no vacation', 'I hate holidays', 'never travel', 'garden', 'woods'],\r\n ['Oh, - - - thats where Mr. and Mrs. Thirteen would do on their vacation too - - - Isnt that a little bit depressing some times ?']\r\n )\r\n story.create_script_intent('home_yes_main',5,\r\n ['yes', 'yeap', 'maybe', 'sometimes', 'always', 'definitely', 'right'],\r\n ['I am sure - - - there are magical moments - - - in your life, - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n story.create_script_intent('home_no_main',5,\r\n ['no', 'nope', 'not at all', 'not really', 'dont know', 'cant say'],\r\n ['I did not expect that answer from you. - - - As you know I am stuck here 24 7. - - - I am very interested - in what humans do. - - - I will keep that in mind, - - - thank you. - - - Are you a happy person, player3 ? - - - are you ?']\r\n )\r\n self.add_story(name,story)\r\n\r\n def create_character(self):\r\n self.add_story_see_me()\r\n self.add_story_water()\r\n self.add_story_plane()\r\n self.add_story_warm()\r\n self.add_story_cold()\r\n self.add_story_far()\r\n self.add_story_unknown()\r\n self.add_story_adventurous()\r\n self.add_story_home()\r\n self.current_story = self.stories['see_me']\r\n self.intents = {}\r\n self.intents = self.current_story.intents\r\n # self.main_intents = {}\r\n # self.main_intents = self.intents\r\n # print(\"main intents\", self.main_intents)\r\n\r\n def change_story(self,story_name, story_progress=0):\r\n global main_intents\r\n # print(\"main intents\", self.main_intents)\r\n # if story_name == \"see_me\":\r\n # print(\"changing story\")\r\n # self.current_story = self.stories[story_name]\r\n # print(\"here1\")\r\n # self.story_progress = story_progress\r\n # print(\"here2\")\r\n # self.intents = self.main_intents\r\n # # print(\"Main story intents\", self.main_intents)\r\n\r\n # else:\r\n self.current_story = self.stories[story_name]\r\n self.story_progress = story_progress\r\n self.intents = self.current_story.intents\r\n\r\nclass UnivEncoder:\r\n def __init__(self, tf_session, intents):\r\n self.intents = intents\r\n self.session = tf_session\r\n self.embed = hub.Module(\"models/dialogue_system/3\")\r\n self.similarity_input_placeholder = tf.placeholder(tf.string, shape=(None))\r\n self.similarity_sentences_encodings = self.embed(self.similarity_input_placeholder)\r\n self.session.run(tf.global_variables_initializer())\r\n self.session.run(tf.tables_initializer())\r\n\r\n def set_intent(self, intent):\r\n self.intents = intent\r\n\r\n def get_intent(self, utterance, weight):\r\n for k, v in self.intents.items():\r\n if utterance in v.utterances and weight == v.weight:\r\n return k\r\n return 'no_matching_intent'\r\n\r\n ## kk code for using eliza reply start##\r\n def chat_eliza(self, sent):\r\n try:\r\n chat_eliza = Chat(pairs)\r\n response = chat_eliza.respond(sent) \r\n except KeyError:\r\n response = \"Hmm, that doesnt sound like a meaningful sentence, try something else\"\r\n return (response)\r\n\r\n ## kk code for eliza reply end ##\r\n\r\n def match_intent(self, sent, story_progress):\r\n matched_utterance = None\r\n matched_weight = None\r\n prev_max = None\r\n max_index = None\r\n utterance_list = 
[]\r\n weight_list = []\r\n for k,v in self.intents.items():\r\n utterance_list = utterance_list + v.utterances\r\n for idx in range(len(v.utterances)):\r\n weight_list = weight_list + [v.weight]\r\n sentences = [sent]+utterance_list\r\n sentences_embeddings = self.session.run(self.similarity_sentences_encodings, feed_dict={self.similarity_input_placeholder: sentences})\r\n input_embed = sentences_embeddings[0]\r\n \r\n \r\n utterance_embed = sentences_embeddings[1:]\r\n max1 = -2\r\n for index, s in enumerate(utterance_embed):\r\n sim = np.inner(input_embed,s)\r\n if(sim >= max1):\r\n max1 = sim\r\n prev_max = max_index\r\n max_index = index\r\n #print('max_index for:',utterance_list[max_index+1])\r\n #print(\"max:\",max1)\r\n\r\n ## KK code\r\n matched_utterance = utterance_list[max_index]\r\n print(\"matched utterance\", matched_utterance)\r\n print(\"story progress\", story_progress)\r\n # print(\"weight\")\r\n for idx, val in enumerate(utterance_list): \r\n if val== matched_utterance:\r\n if(weight_list[idx]>story_progress):\r\n print(\"index value\", idx)\r\n matched_weight = weight_list[idx]\r\n print(\"matched weight\", matched_weight)\r\n break\r\n\r\n unique_weights = list(dict.fromkeys(weight_list))\r\n unique_weights.append(0)\r\n unique_weights.sort()\r\n print(\"unique list\", unique_weights)\r\n print(\"watched weight\", matched_weight)\r\n\r\n if(matched_weight == None or story_progress == None):\r\n return \"no_matching_intent\"\r\n elif(unique_weights.index(matched_weight)==unique_weights.index(story_progress)+1):\r\n return self.get_intent(matched_utterance, matched_weight)#USE THIS UTTERANCE TO GET THE INTENT AS THIS IS THE UTTERANCE WITH MAXIMUM SIMILARITY\r\n else:\r\n return \"no_matching_intent\"\r\n # return self.get_intent(matched_utterance, matched_weight)#USE THIS UTTERANCE TO GET THE INTENT AS THIS IS THE UTTERANCE WITH MAXIMUM SIMILARITY\r\n\r\n # if matched_utterance is None:\r\n # if weight_list[max_index] > story_progress:\r\n # matched_utterance = utterance_list[max_index]\r\n # matched_weight = weight_list[max_index]\r\n # else:\r\n # if prev_max is not None:\r\n # if weight_list[max_index] > story_progress and weight_list[max_index] <= weight_list[prev_max]:\r\n # matched_utterance = utterance_list[max_index]\r\n # matched_weight = weight_list[max_index]\r\n # return self.get_intent(matched_utterance, matched_weight)#USE THIS UTTERANCE TO GET THE INTENT AS THIS IS THE UTTERANCE WITH MAXIMUM SIMILARITY\r\n\r\n\r\n # def match_intent(self, sent, story_progress):\r\n # matched_utterance = None\r\n # matched_weight = None\r\n # prev_max = None\r\n # max_index = None\r\n # utterance_list = []\r\n # weight_list = []\r\n # for k,v in self.intents.items():\r\n # utterance_list = utterance_list + v.utterances\r\n # for idx in range(len(v.utterances)):\r\n # weight_list = weight_list + [v.weight]\r\n # sentences = [sent]+utterance_list\r\n # sentences_embeddings = self.session.run(self.similarity_sentences_encodings, feed_dict={self.similarity_input_placeholder: sentences})\r\n # input_embed = sentences_embeddings[0]\r\n \r\n \r\n # utterance_embed = sentences_embeddings[1:]\r\n # max1 = -2\r\n # for index, s in enumerate(utterance_embed):\r\n # sim = np.inner(input_embed,s)\r\n # if(sim >= max1):\r\n # max1 = sim\r\n # prev_max = max_index\r\n # max_index = index\r\n # #print('max_index for:',utterance_list[max_index+1])\r\n # #print(\"max:\",max1)\r\n # if matched_utterance is None:\r\n # if weight_list[max_index+1] > story_progress:\r\n # matched_utterance = 
utterance_list[max_index+1]\r\n # matched_weight = weight_list[max_index+1]\r\n # else:\r\n # if prev_max is not None:\r\n # if weight_list[max_index+1] > story_progress and weight_list[max_index+1] < weight_list[prev_max+1]:\r\n # matched_utterance = utterance_list[max_index+1]\r\n # matched_weight = weight_list[max_index+1]\r\n # return self.get_intent(matched_utterance, matched_weight)#USE THIS UTTERANCE TO GET THE INTENT AS THIS IS THE UTTERANCE WITH MAXIMUM SIMILARITY\r\n", "sub_path": "Chatbot/models/dialogue_system/dialogue_system.py", "file_name": "dialogue_system.py", "file_ext": "py", "file_size_in_byte": 39692, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "random.choice", "line_number": 42, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 42, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 42, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 221, "usage_type": "call"}, {"api_name": "json.load", "line_number": 225, "usage_type": "call"}, {"api_name": "tensorflow_hub.Module", "line_number": 574, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 575, "usage_type": "call"}, {"api_name": "tensorflow.string", "line_number": 575, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 577, "usage_type": "call"}, {"api_name": "tensorflow.tables_initializer", "line_number": 578, "usage_type": "call"}, {"api_name": "numpy.inner", "line_number": 619, "usage_type": "call"}]} +{"seq_id": "7520752", "text": "# coding=UTF-8\n#!/usr/bin/env python\nimport pygame,sys,time,random\nfrom pygame.locals import *\nimport numpy as np\nimport copy\nblackColour = pygame.Color(0,0,0)\nwhiteColour = pygame.Color(255,255,255)\nredColour = pygame.Color(255,0,0)\nclass game:\n def __init__(self):\n pygame.init()\n self.fpsClock = pygame.time.Clock()\n self.playSurface = pygame.display.set_mode((300,500))\n pygame.display.set_caption('Raspberry Snake')\n self.snakePosition = [140,240]\n self.snakeSegments = [[140,240]]\n x = random.randrange(0,15)\n y = random.randrange(0,25)\n self.raspberryPosition = [int(x*20),int(y*20)]\n self.raspberrySpawned = 1\n a=random.randint(0,3)\n if a==0:\n self.direction = 'right'\n if a==1:\n self.direction = 'left'\n if a==2:\n self.direction = 'up'\n if a==3:\n self.direction = 'down'\n self.changeDirection = self.direction\n def frame_step(self,input_actions):\n q=0 \n terminal=False \n if sum(input_actions) != 1:\n raise ValueError('Multiple input actions!')\n\n if input_actions[0]==1:\n self.changeDirection = 'right'\n if input_actions[1]==1:\n self.changeDirection = 'left'\n if input_actions[2]==1:\n self.changeDirection = 'up'\n if input_actions[3]==1:\n self.changeDirection = 'down'\n\n if input_actions[0]==1 and not self.direction == 'left':\n self.direction = self.changeDirection\n if input_actions[1]==1 and not self.direction == 'right':\n self.direction = self.changeDirection\n if input_actions[2]==1 and not self.direction == 'down':\n self.direction = self.changeDirection\n if input_actions[3]==1 and not self.direction == 'up':\n self.direction = self.changeDirection\n # 根据方向移动蛇头的坐标\n if self.direction == 'right':\n self.snakePosition[0] += 20\n \n if self.direction == 'left':\n self.snakePosition[0] -= 20\n \n if self.direction == 'up':\n self.snakePosition[1] -= 20\n \n if self.direction == 'down':\n self.snakePosition[1] += 20\n \n # 增加蛇的长度\n 
self.snakeSegments.insert(0,list(self.snakePosition))\n \n # 判断是否吃掉了树莓\n if self.snakePosition[0] == self.raspberryPosition[0] and self.snakePosition[1] == self.raspberryPosition[1]:\n self.raspberrySpawned = 0\n else:\n self.snakeSegments.pop()\n # 如果吃掉树莓,则重新生成树莓\n if self.raspberrySpawned == 0:\n while(True):\n x = random.randrange(0,15)\n y = random.randrange(0,25)\n self.raspberryPosition = [int(x*20),int(y*20)]\n for position in self.snakeSegments:\n if position==self.raspberryPosition:\n q=1\n if q==1:\n q=0\n continue\n else:\n break\n self.raspberrySpawned = 1\n q=0\n self.playSurface.fill(blackColour)\n for position in self.snakeSegments:\n pygame.draw.rect(self.playSurface,whiteColour,Rect(position[0],position[1],20,20))\n pygame.draw.rect(self.playSurface,redColour,Rect(self.raspberryPosition[0], self.raspberryPosition[1],20,20))\n pygame.display.flip()\n image_data = pygame.surfarray.array3d(pygame.display.get_surface())\n\n if self.snakePosition[0] > 280 or self.snakePosition[0] < 0:\n terminal=True\n self.__init__()\n \n if self.snakePosition[1] > 480 or self.snakePosition[1] < 0:\n terminal=True\n self.__init__()\n\n pygame.display.update()\n\n self.fpsClock.tick(30)\n return image_data,terminal\n\n\n", "sub_path": "t2.py", "file_name": "t2.py", "file_ext": "py", "file_size_in_byte": 3999, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pygame.Color", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 15, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 18, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 19, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 22, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 79, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 80, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 94, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 95, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.surfarray.array3d", "line_number": 97, "usage_type": "call"}, {"api_name": "pygame.surfarray", "line_number": 97, "usage_type": "attribute"}, {"api_name": "pygame.display.get_surface", "line_number": 97, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 97, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 107, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 107, "usage_type": "attribute"}]} +{"seq_id": "259353303", "text": "# Name: 
start_FrTM.py\n# Language: python3\n# Libraries: multiprocessing, subprocess, os, sys, re\n# Description: Starts or restarts parallelized Fr-TM-Align jobs\n# Author: Edoardo Sarti\n# Date: Aug 08 2016\n\nimport os, sys, multiprocessing, subprocess, re, time\n\ndef FrTMjob(data):\n\tntm, maindir, checkpoint = data\n\tif not os.path.exists(maindir + '/' + ntm + '/alignments'):\n\t\tos.mkdir(maindir + '/' + ntm + '/alignments')\n\tif not os.path.exists(maindir + '/' + ntm + '/alignments/fasta'):\n\t\tos.mkdir(maindir + '/' + ntm + '/alignments/fasta')\n\tif not os.path.exists(maindir + '/' + ntm + '/alignments/str_alns'):\n\t\tos.mkdir(maindir + '/' + ntm + '/alignments/str_alns')\n\tif not os.path.exists(maindir + '/' + ntm + '/structures'):\n\t\traise NameError(\"ERROR: The folder {0} is badly formatted and does not contain a structures/ subfolder.\\n\".format(maindir + '/' + ntm + '/') +\n\t\t \" Please create one and fill it with all and only the appropriate pdb chains.\")\n\tif os.path.exists(maindir + '/' + ntm + '/struct_codes.dat'):\n\t\tstructcodesfile = open(maindir + '/' + ntm + '/struct_codes.dat', 'r')\n\t\ttext = structcodesfile.read().split('\\n')\n\t\tname2code = {}\n\t\tcode2name = {}\n\t\tfor line in text:\n\t\t\tfields = line.split()\n\t\t\tif len(fields) == 0:\n\t\t\t\tcontinue\n\t\t\tname2code[fields[1]] = [x for x in fields[0].split('.')]\n\t\t\tcode2name[fields[0].split('.')[1]] = fields[1]\n\telse:\n\t\traise NameError(\"ERROR: The folder {0} is badly formatted and does not contain a struct_codes.dat file.\\n\".format(maindir + '/' + ntm) +\n\t\t \" Please generate it. It must contain all and only the names of the pdb chains in the structures/ subfolder\"+\n\t\t \" and each name must be associated with the correct structure code SC.\\n\" +\n\t\t \" The format must be: \\\\t\\\\t\")\n\tfor chain in name2code.keys():\n\t\tif not os.path.exists(maindir + '/' + ntm + '/structures/' + chain + '.pdb'):\n\t\t\traise NameError(\"ERROR: The file {0} corresponding to Structure Code {1}\".format(chain + '.pdb', name2code[chain]) +\n\t\t\t \" was not found in the structures/ subfolder.\")\n\tfor struct in os.listdir(maindir + '/' + ntm + '/structures/'):\n\t\tif not struct[:6] in name2code:\n\t\t\traise NameError(\"ERROR: The file {0} found in the structures/\".format(struct) +\n\t\t\t \" subfolder is not present in the struct_code.dat file.\")\n\tif len(os.listdir(maindir + '/' + ntm + '/structures/')) < 2:\n\t\treturn\n\n\tfor chain_1 in [code2name[x] for x in sorted(code2name.keys())]:\n#\t\tfile_1 = maindir + '/' + ntm + '/structures/' + chain_1 + '.pdb'\n\t\tfile_1 = chain_1 + '.pdb'\n\t\tif not os.path.exists(maindir + '/' + ntm + '/alignments/str_alns/tmp_' + name2code[chain_1][1] + '/'):\n\t\t\tos.mkdir(maindir + '/' + ntm + '/alignments/str_alns/tmp_' + name2code[chain_1][1] + '/')\n\t\tif not os.path.exists(maindir + '/' + ntm + '/alignments/fasta/tmp_' + name2code[chain_1][1] + '/'):\n\t\t\tos.mkdir(maindir + '/' + ntm + '/alignments/fasta/tmp_' + name2code[chain_1][1] + '/')\n\t\tfor chain_2 in [code2name[x] for x in sorted(code2name.keys())]:\n\t\t\tif chain_1 == chain_2:\n\t\t\t\tcontinue\n\t\t\tprint(\"#td \"+ntm+\"\\t\\tchain_1 \"+chain_1+\"\\t\\tchain_2 \"+chain_2)\n#\t\t\tfile_2 = maindir + '/' + ntm + '/structures/' + chain_2 + '.pdb'\n\t\t\tfile_2 = chain_2 + '.pdb'\n#\t\t\tFTA_str_output = maindir + '/' + ntm + '/alignments/str_alns/tmp_' + chain_1 + '/' + chain_1 + '_' + chain_2 + '.tmp'\n\t\t\tFTA_str_output = name2code[chain_1][1] + '_' + 
name2code[chain_2][1] + '.tmp'\n\t\t\tFTA_seq_output = maindir + '/' + ntm + '/alignments/fasta/tmp_' + name2code[chain_1][1] + '/' + name2code[chain_1][1] + '_' + name2code[chain_2][1] + '.tmp'\n\n\t\t\tFTA_stdout_file = open(maindir + '/' + ntm + '/alignments/fasta/tmp_' + name2code[chain_1][1] + '/aln_' + name2code[chain_1][1] + '_' + name2code[chain_2][1] + '.tmp', 'w')\n\t\t\tfnull = open(os.devnull, 'w')\n\t\t\tp = subprocess.Popen(['/v/apps/csb/frtmalign/frtmalign.exe', file_1, file_2, '-o', FTA_str_output], stdout=FTA_stdout_file, stderr=fnull, cwd=maindir+'/'+ntm+'/structures/')\n\t\t\tfnull.close()\n\t\t\tp.wait()\n\t\t\tFTA_stdout_file.close()\n\t\t\tos.rename(maindir + '/' + ntm + '/structures/' + FTA_str_output, maindir + '/' + ntm + '/alignments/str_alns/tmp_' + name2code[chain_1][1] + '/' + FTA_str_output)\n\n\t\t\tFTA_stdout_file = open(maindir + '/' + ntm + '/alignments/fasta/tmp_' + name2code[chain_1][1] + '/aln_' + name2code[chain_1][1] + '_' + name2code[chain_2][1] + '.tmp', 'r')\n\t\t\ttext = FTA_stdout_file.read().split('\\n')\n\t\t\tFTA_stdout_file.close()\n\t\t\tos.remove(maindir + '/' + ntm + '/alignments/fasta/tmp_' + name2code[chain_1][1] + '/aln_' + name2code[chain_1][1] + '_' + name2code[chain_2][1] + '.tmp')\n\t\t\tchkaln = -1000\n\t\t\tfor nl in range(len(text)):\n\t\t\t\tif \"Aligned length\" in text[nl]:\n\t\t\t\t\tfields = re.split('=|,|\\s',text[nl])\n\t\t\t\t\tfields = list(filter(None, fields))\n#\t\t\t\t\tprint(fields)\n\t\t\t\t\tRMSD = float(fields[4])\n\t\t\t\t\tTMscore = float(fields[6])\n\t\t\t\telif chkaln+1 == nl:\n\t\t\t\t\tseq_1 = text[nl]\n\t\t\t\telif chkaln+3 == nl:\n\t\t\t\t\tseq_2 = text[nl]\n\t\t\t\telif \"denotes the residue pairs of distance\" in text[nl]:\n\t\t\t\t\tchkaln = nl\n\t\t\ttmpseq_file = open(FTA_seq_output, 'w')\n\t\t\ttmpseq_file.write(\">\" + chain_1 + \"\\n\" + seq_1.replace('\\x00', '') + \"\\n>\" + chain_2 + \"\\n\" + seq_2.replace('\\x00', '') + \"\\n\\nRMSD\\t{0:.2f}\\nTM-score\\t{1:.5f}\\n\\n\".format(RMSD, TMscore))\n\t\t\ttmpseq_file.close()\n\n\t\tstr_file = open(maindir + '/' + ntm + '/alignments/str_alns/str_' + chain_1 + '.dat', 'w')\n\t\tfor tmp_filename in sorted(os.listdir(maindir + '/' + ntm + '/alignments/str_alns/tmp_' + name2code[chain_1][1] + '/')):\n\t\t\tchain_2_code = re.split('_|\\.', tmp_filename)[-2]\n#\t\t\tprint(chain_1, \"chain_2_code \"+chain_2_code, \"tmp_filename \"+tmp_filename, name2code)\n\t\t\tstr_file.write(\"BEGIN \\nCHAIN_1: \" + chain_1 + \"\\nCHAIN_2: \" + code2name[chain_2_code] +\n\t\t\t \"\\nSequence Alignment Code (SAC): \" + name2code[chain_1][0] + \n\t\t\t \".\" + name2code[chain_1][1] + \".\" + chain_2_code + \"\\n\")\n\t\t\ttmp_file = open(maindir + '/' + ntm + '/alignments/str_alns/tmp_' + name2code[chain_1][1] + '/' + tmp_filename)\n\t\t\ttext = tmp_file.read().split('\\n')\n\t\t\tfor line in text:\n\t\t\t\tstr_file.write(line+'\\n')\n\t\t\tstr_file.write(\"END\\n\\n\\n\")\n\t\t\tos.remove(maindir + '/' + ntm + '/alignments/str_alns/tmp_' + name2code[chain_1][1] + '/' + tmp_filename)\n\t\t\ttmp_file.close()\n\t\ttime.sleep(1)\n\t\tos.rmdir(maindir + '/' + ntm + '/alignments/str_alns/tmp_' + name2code[chain_1][1] + '/')\n\t\tstr_file.close()\n\n\t\tseq_file = open(maindir + '/' + ntm + '/alignments/fasta/seq_' + chain_1 + '.dat', 'w')\n\t\tfor tmp_filename in sorted(os.listdir(maindir + '/' + ntm + '/alignments/fasta/tmp_' + name2code[chain_1][1] + '/')):\n\t\t\tchain_2_code = re.split('_|\\.', tmp_filename)[-2]\n#\t\t\tprint(chain_1, \"chain_2_code \"+chain_2_code, 
\"tmp_filename \"+tmp_filename, name2code)\n\t\t\tseq_file.write(\"BEGIN \\nCHAIN_1: \" + chain_1 + \"\\nCHAIN_2: \" + code2name[chain_2_code] +\n\t\t\t \"\\nSequence Alignment Code (SAC): \" + name2code[chain_1][0] + \n\t\t\t \".\" + name2code[chain_1][1] + \".\" + chain_2_code + \"\\n\")\n\t\t\tFTA_seq_output = maindir + '/' + ntm + '/alignments/fasta/tmp_' + name2code[chain_1][1] + '/' + name2code[chain_1][1] + '_' + chain_2_code + '.tmp'\n\t\t\ttmp_file = open(FTA_seq_output, 'r')\n\t\t\ttext = tmp_file.read().split('\\n')\n\t\t\tfor line in text:\n\t\t\t\tseq_file.write(line+'\\n')\n\t\t\tseq_file.write(\"END\\n\\n\\n\")\n\t\t\tos.remove(maindir + '/' + ntm + '/alignments/fasta/tmp_' + name2code[chain_1][1] + '/' + tmp_filename)\n\t\t\ttmp_file.close()\n\t\ttime.sleep(5)\n#\t\tprint(os.listdir(maindir + '/' + ntm + '/alignments/fasta/tmp_' + name2code[chain_1][1] + '/'))\n\t\tos.rmdir(maindir + '/' + ntm + '/alignments/fasta/tmp_' + name2code[chain_1][1] + '/')\n\t\tseq_file.close()\n\n\nif len(sys.argv) < 2:\n raise NameError(\"Usage: start_FrTM.py [{}]\")\nmaindir = sys.argv[1]\nif not os.path.exists(maindir):\n\traise NameError(\"ERROR: Directory {0} does not exists.\".format(maindir))\n\nnsubdirs = len(sys.argv) - 2\nif nsubdirs > 0:\n\tsubdirs = []\n\tfor i in range(0, nsubdirs):\n\t\tsubdirs.append(int(sys.argv[2+i]))\nelse:\n\tsubdirs = []\n\tfor i in os.listdir(str(sys.argv[1])):\n\t\tif re.match('^\\d*$', str(i)) and os.path.exists(maindir + '/' + str(i) + '/struct_codes.dat'):\n\t\t\tsubdirs.append(int(i))\n\nsuperfamilies = [(str(i), maindir+'/', 0) for i in sorted(subdirs)]\n#print(superfamilies)\n\n#for sf in superfamilies:\n#\tFrTMjob(sf)\n\n\n#exit(1)\n\npool = multiprocessing.Pool(processes=4)\npool_outputs = pool.map(FrTMjob, superfamilies)\n", "sub_path": "old/start_FrTM.py", "file_name": "start_FrTM.py", "file_ext": "py", "file_size_in_byte": 8541, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.path.exists", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 41, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, 
"usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 54, "usage_type": "call"}, {"api_name": "os.devnull", "line_number": 66, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 67, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 71, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 76, "usage_type": "call"}, {"api_name": "re.split", "line_number": 80, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 96, "usage_type": "call"}, {"api_name": "re.split", "line_number": 97, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 107, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 109, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 110, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 114, "usage_type": "call"}, {"api_name": "re.split", "line_number": 115, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 126, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 128, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 130, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 134, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 136, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 140, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 144, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 147, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 147, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "multiprocessing.Pool", "line_number": 160, "usage_type": "call"}]} +{"seq_id": "602006758", "text": "from aocd import get_data, submit1, submit2\nfrom collections import deque, defaultdict\n\nimport re\n\n\nclass REMatcher(object):\n def __init__(self, matchstring):\n self.matchstring = matchstring\n\n def match(self,regexp):\n self.rematch = re.match(regexp, self.matchstring)\n return bool(self.rematch)\n\n def group(self,i):\n return self.rematch.group(i)\n\n\ndef problem1(num_players, num_marbles):\n marbles = [0, 1]\n curr_marble_index = 1\n curr_player = 2\n\n score = {}\n for i in range(num_players):\n score[i] = 0\n\n for marble in range(2, num_marbles):\n if marble % 23 > 0:\n curr_marble_index += 2\n if curr_marble_index > len(marbles):\n curr_marble_index = curr_marble_index % len(marbles)\n\n marbles.insert(curr_marble_index, marble)\n else:\n score[curr_player] += marble\n curr_marble_index = (curr_marble_index - 7) % len(marbles)\n score[curr_player] += marbles.pop(curr_marble_index)\n curr_player = (curr_player + 1) % num_players\n return max(score.values())\n\n\ndef problem2(num_players, num_marbles):\n marbles = deque([0])\n score = defaultdict(int)\n\n for marble in range(1, num_marbles):\n if marble % 23 > 0:\n marbles.rotate(-1)\n marbles.append(marble)\n else:\n marbles.rotate(7)\n score[marble % num_players] += marble + marbles.pop()\n marbles.rotate(-1)\n return max(score.values())\n\ndef main():\n input = get_data(day=9, year=2018)\n re_matcher = REMatcher(input)\n re_matcher.match(r\"(\\d+) players; last marble is worth (\\d+) points\")\n\n num_players = 
int(re_matcher.group(1))\n num_marbles = int(re_matcher.group(2)) + 1\n\n ans = problem1(num_players, num_marbles)\n submit1(ans)\n\n ans = problem2(num_players, num_marbles*100)\n submit2(ans)\n\nmain()\n", "sub_path": "src/year2018/day_9.py", "file_name": "day_9.py", "file_ext": "py", "file_size_in_byte": 1909, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "re.match", "line_number": 12, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 44, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 45, "usage_type": "call"}, {"api_name": "aocd.get_data", "line_number": 58, "usage_type": "call"}, {"api_name": "aocd.submit1", "line_number": 66, "usage_type": "call"}, {"api_name": "aocd.submit2", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "124923497", "text": "from crispy_forms.bootstrap import FormActions\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django import forms\nfrom sportsunleash.apps.members.models import Schools\nfrom sportsunleash.lib.layout import Link\n\n\nclass SchoolForm(forms.ModelForm):\n \"\"\"\n Form to render the schools\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(SchoolForm, self).__init__(*args, **kwargs)\n helper = FormHelper(self)\n helper.form_class = 'form-horizontal'\n helper.label_class = 'col-lg-3'\n helper.field_class = 'col-lg-6'\n helper.layout.append(FormActions(\n Submit('submit', 'Save'),\n Link('school_list', 'Cancel')\n ))\n self.helper = helper\n\n class Meta:\n model = Schools\n fields = ('name', 'address_line_1', 'address_line_2', 'contact_number',\n 'email')\n\n", "sub_path": "sportsunleash/apps/schools/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "django.forms.ModelForm", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 9, "usage_type": "name"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 15, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.FormActions", "line_number": 19, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 20, "usage_type": "call"}, {"api_name": "sportsunleash.lib.layout.Link", "line_number": 21, "usage_type": "call"}, {"api_name": "sportsunleash.apps.members.models.Schools", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "158435146", "text": "from typing import Iterable\nimport random\n\nfrom movieflix.adapters.repository import AbstractRepository\nfrom movieflix.domain.model import Movie\n\n\ndef get_tag_names(repo: AbstractRepository):\n tags = repo.get_tags()\n tag_names = [tag.tag_name for tag in tags]\n\n return tag_names\n\n\ndef get_random_articles(quantity, repo: AbstractRepository):\n article_count = repo.get_number_of_articles()\n\n if quantity >= article_count:\n # Reduce the quantity of ids to generate if the repository has an insufficient number of articles.\n quantity = article_count - 1\n\n # Pick distinct and random articles.\n random_ids = random.sample(range(1, article_count), quantity)\n articles = repo.get_articles_by_id(random_ids)\n\n return articles_to_dict(articles)\n\n\n# ============================================\n# Functions to convert dicts to model entities\n# ============================================\n\ndef article_to_dict(movie: Movie):\n article_dict 
= {\n 'date': movie.release_year,\n 'title': movie.title\n\n # 'image_hyperlink': article.image_hyperlink\n\n }\n return article_dict\n\n\ndef articles_to_dict(movies: Iterable[Movie]):\n return [movie_to_dict(movie) for movie in movies]\n", "sub_path": "movieflix/utilities/services.py", "file_name": "services.py", "file_ext": "py", "file_size_in_byte": 1231, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "movieflix.adapters.repository.AbstractRepository", "line_number": 8, "usage_type": "name"}, {"api_name": "movieflix.adapters.repository.AbstractRepository", "line_number": 15, "usage_type": "name"}, {"api_name": "random.sample", "line_number": 23, "usage_type": "call"}, {"api_name": "movieflix.domain.model.Movie", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 44, "usage_type": "name"}, {"api_name": "movieflix.domain.model.Movie", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "58802422", "text": "'''\n excel表格的数据统计和分析:\n 1.联网安装 xlrd(读取) xlwt(写入)\n xlrd(1.2)\n cmd --> python -m pip install xlrd==0.9.3\n 2.写代码\n 2.1 导入这个工具\n import xlrd\n 2.2 打开工作簿\n 2.3 打开选项卡\n 2.4 读取数据\n任务:\n 每个月的销售总金额:\n 全年的销售总额:\n 每种衣服的销售总额:\n 每个季度销售总额占比:\n 全年每种销售数量占比:\n\n'''\n\nimport xlrd\n# 1. 打开工作簿\n# wd = xlrd.open_workbook(r\"2020年每个月的销售情况.xlsx\",encoding_override=True)\nbook=xlrd.open_workbook(r\"F:\\python自动化测试\\Python自动化\\第七天\\任务\\2020年每个月的销售情况.xlsx\",encoding_override=True)\nsheet1 = book.sheet_by_index(0)\nrows,cols = sheet1.nrows,sheet1.ncols \nfor row in range(rows):\n for col in range(cols):\n print(sheet1.cell(row,col).value,end='')\n print('')\nsumcount=0;\nfor i in range(1,31):\n sumcount+=sheet1.cell(i,4).value\nprint(\"销售量:\",sumcount)\nsumoney =0\nfor j in range(1,31):\n sumoney+=sheet1.cell(j,2).value*sheet1.cell(j,4).value\nprint(\"总销售额:\",sumoney)\nprint(\"平均销售量:\",sumcount/30)\n\ny,n,f,p,t,c =0,0,0,0,0,0\nfor o in range(1,31):\n if sheet1.cell(o,1).value=='羽绒服':\n y +=sheet1.cell(o,4).value\n elif sheet1.cell(o,1).value=='牛仔裤':\n n += sheet1.cell(o, 4).value\n elif sheet1.cell(o, 1).value == '风衣':\n f += sheet1.cell(o, 4).value\n elif sheet1.cell(o, 1).value == '皮草':\n p += sheet1.cell(o, 4).value\n elif sheet1.cell(o, 1).value == 'T血':\n t += sheet1.cell(o, 4).value\n elif sheet1.cell(o, 1).value == '衬衫':\n c += sheet1.cell(o, 4).value\n\n print('羽绒服销售占比:', 253.6 * y / sumoney * 100, '%')\n print('牛仔裤销售占比:', 86.3 * n / sumoney * 100, '%')\n print('风衣销售占比:', 96.8 * f / sumoney * 100, '%')\n print('皮草销售占比:', 135.9 * p / sumoney * 100, '%')\n print('T血销售占比:', 65.8 * t / sumoney * 100, '%')\n print('衬衫销售占比:', 49.3 * c / sumoney * 100, '%')", "sub_path": "销售额.py", "file_name": "销售额.py", "file_ext": "py", "file_size_in_byte": 2170, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "xlrd.open_workbook", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "301938365", "text": "import pathlib, sys\npath = pathlib.Path.cwd()\nsys.path.append(str(path))\n\nimport talib as ta\nimport sqlite3\nimport pandas as pd\nimport numpy as np\nimport datetime as dtt\nimport matplotlib.pyplot as plt\nfrom statsmodels.api import Poisson\nfrom statsmodels.graphics.api import qqplot\nfrom sklearn.naive_bayes import GaussianNB\n# from scipy.stats import poisson\n\nfrom myConstant import Exchange\n\ndef rolling_window(a, window):\n shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)\n print(shape)\n strides = 
a.strides + (a.strides[-1],)\n print(strides)\n return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n\nconn = sqlite3.connect(\"E:\\\\Desktop\\\\deanTrading\\\\.vntrader\\\\info.db\")\n# cursor = conn.cursor()\n\n# cursor.execute(\"select * from dbbardata\")\n# val = cursor.fetchone()\nsymbol = 'BTCUSDT'\nspotexchange = Exchange.BINANCE.value\nfuturesexchange = Exchange.BINANCEFUTURES.value\nsql = f\"select * from dbbardata where symbol='{symbol}' and exchange='{spotexchange}' and interval='1m' order by datetime DESC limit 10000\"\nsql2 = f\"select * from dbbardata where symbol='{symbol}' and exchange='{futuresexchange}' and interval='1m' order by datetime DESC limit 10000\"\n\ndf1 = pd.read_sql(sql, conn)\ndf1.set_index('datetime', inplace=True)\ndf11 = df1.loc[df1.index.drop_duplicates(keep=False), 'close_price']\n\ndf2 = pd.read_sql(sql2, conn)\ndf2.set_index('datetime', inplace=True)\ndf22 = df2.loc[df2.index.drop_duplicates(keep=False), 'close_price']\n\n\ndata = pd.concat((df11, df22), axis=1, join='inner')\ndata.sort_index(inplace=True)\ndata.index = np.linspace(1,len(data.index), num=len(data.index))\ndata.columns = ['spot', 'futures']\ndata['spread'] = data.iloc[:,0] - data.iloc[:,1]\ndata['spread_diff'] = data['spread'].diff().rolling(20).std()\ndata['spread_diff60'] = data['spread'].diff().rolling(60).std()\ndata['q80'] = data['spread_diff60'].quantile(0.8)\ndata['q95'] = data['spread_diff60'].quantile(0.95)\nprint(data['spread_diff60'].quantile(0.99))\n\nfig, ax = plt.subplots(1,1)\nax.plot(data['spread_diff60'], color='g', label='prob')\n# ax2 = ax.twinx()\n# ax2.plot(data['spread'], color='r')\n\nax.plot(data['q80'], color='b')\nax.plot(data['q95'], color='b')\n# ax4 = ax.twinx()\n# ax4.plot(data['prob'], color='r')\n# ax.hist(data['spread_diff'], bins='auto', density=True, cumulative=True)\nplt.ylim([0,7.5])\nplt.show()\n\n", "sub_path": "Digiccy1/analysis/reg02.py", "file_name": "reg02.py", "file_ext": "py", "file_size_in_byte": 2349, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pathlib.Path.cwd", "line_number": 2, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "numpy.lib.stride_tricks.as_strided", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.lib", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 25, "usage_type": "call"}, {"api_name": "myConstant.Exchange.BINANCE", "line_number": 31, "usage_type": "attribute"}, {"api_name": "myConstant.Exchange", "line_number": 31, "usage_type": "name"}, {"api_name": "myConstant.Exchange.BINANCEFUTURES", "line_number": 32, "usage_type": "attribute"}, {"api_name": "myConstant.Exchange", "line_number": 32, "usage_type": "name"}, {"api_name": "pandas.read_sql", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 66, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "62476933", "text": "# -*- coding: utf-8 -*-\nimport re\nimport xml.sax.saxutils\nimport xml.etree.cElementTree as etree\nimport sparv.util as util\n\nRESTART_THRESHOLD_LENGTH = 64000\nSENT_SEP = \"\\n\"\nTOK_SEP = \" \"\n\n\ndef tag_ne(out_ne_ex, out_ne_type, out_ne_subtype, out_ne_name, word, sentence, encoding=util.UTF8, process_dict=None):\n \"\"\"\n Tag named entities using HFST-SweNER.\n SweNER is either run in an already started process defined in\n process_dict, or a new process is started(default)\n - out_ne_ex, out_ne_type and out_ne_subtype are resulting annotation files for the named entities\n - word and sentence are existing annotation files for wordforms and sentences\n - process_dict should never be set from the command line\n \"\"\"\n\n if process_dict is None:\n process = swenerstart(\"\", encoding, verbose=False)\n # else:\n # process = process_dict['process']\n # # If process seems dead, spawn a new one\n # if process.stdin.closed or process.stdout.closed or process.poll():\n # util.system.kill_process(process)\n # process = swenerstart(\"\", encoding, verbose=False)\n # process_dict['process'] = process\n\n # Collect all text\n sentences = [sent.split() for _, sent in util.read_annotation_iteritems(sentence)]\n word_file = util.read_annotation(word)\n stdin = SENT_SEP.join(TOK_SEP.join(word_file[tokid] for tokid in sent)\n for sent in sentences)\n # Escape <, > and &\n stdin = xml.sax.saxutils.escape(stdin)\n\n # keep_process = len(stdin) < RESTART_THRESHOLD_LENGTH and process_dict is not None\n # util.log.info(\"Stdin length: %s, keep process: %s\", len(stdin), keep_process)\n\n # if process_dict is not None:\n # process_dict['restart'] = not keep_process\n\n # # Does not work as of now since swener does not have an interactive mode\n # if keep_process:\n # # Chatting with swener: send a SENT_SEP and read correct number of lines\n # stdin_fd, stdout_fd = process.stdin, process.stdout\n # stdin_fd.write(stdin.encode(encoding) + SENT_SEP)\n # stdin_fd.flush()\n # stout = stdout_fd.readlines()\n\n # else:\n # Otherwise use communicate which buffers properly\n # util.log.info(\"STDIN %s %s\", type(stdin.encode(encoding)), stdin.encode(encoding))\n stdout, _ = process.communicate(stdin.encode(encoding))\n # util.log.info(\"STDOUT %s %s\", type(stdout.decode(encoding)), stdout.decode(encoding))\n\n parse_swener_output(sentences, stdout.decode(encoding), out_ne_ex, out_ne_type, out_ne_subtype, out_ne_name)\n\n\ndef parse_swener_output(sentences, output, out_ne_ex, out_ne_type, out_ne_subtype, out_ne_name):\n \"\"\"Parse the SweNER output and write annotation files.\"\"\"\n\n out_ex_dict = {}\n out_type_dict = {}\n out_subtype_dict = {}\n out_name_dict = {}\n\n # Loop through the NE-tagged sentences and parse each one with ElemenTree\n for sent, tagged_sent in zip(sentences, output.strip().split(SENT_SEP)):\n xml_sent = \"\" + tagged_sent + \"\"\n\n # Filter out tags on the format since they seem to always overlap with elements,\n # making the XML invalid.\n xml_sent = re.sub(r'\\s]+>', '', xml_sent)\n try:\n root = etree.fromstring(xml_sent)\n except:\n util.log.warning(\"Error parsing sentence. 
Skipping.\")\n continue\n\n # Init token counter; needed to get start_id and end_id\n i = 0\n previous_end = 0\n children = list(root.iter())\n\n try:\n\n for count, child in enumerate(children):\n start_id = util.edgeStart(sent[i])\n start_i = i\n\n # If current child has text, increase token counter\n if child.text:\n i += len(child.text.strip().split(TOK_SEP))\n\n # Extract NE tags and save them in dictionaries\n if child.tag != \"sroot\":\n if start_i < previous_end:\n pass\n # util.log.warning(\"Overlapping NE elements found; discarding one.\")\n else:\n end_id = util.edgeEnd(sent[i - 1])\n previous_end = i\n edge = util.mkEdge('ne', [start_id, end_id])\n out_ex_dict[edge] = child.tag\n out_type_dict[edge] = child.get(\"TYPE\")\n out_subtype_dict[edge] = child.get(\"SBT\")\n out_name_dict[edge] = child.text\n\n # If this child has a tail and it doesn't start with a space, or if it has no tail at all despite not being the last child,\n # it means this NE ends in the middle of a token.\n if (child.tail and child.tail.strip() and not child.tail[0] == \" \") or (not child.tail and count < len(children) - 1):\n i -= 1\n # util.log.warning(\"Split token returned by name tagger.\")\n\n # If current child has text in the tail, increase token counter\n if child.tail and child.tail.strip():\n i += len(child.tail.strip().split(TOK_SEP))\n\n if (child.tag == \"sroot\" and child.text and not child.text[-1] == \" \") or (child.tail and not child.tail[-1] == \" \"):\n # The next NE would start in the middle of a token, so decrease the counter by 1\n i -= 1\n except IndexError:\n util.log.warning(\"Error parsing sentence. Skipping.\")\n continue\n\n # Write annotations\n util.write_annotation(out_ne_ex, out_ex_dict)\n util.write_annotation(out_ne_type, out_type_dict)\n util.write_annotation(out_ne_subtype, out_subtype_dict)\n util.write_annotation(out_ne_name, out_name_dict)\n\n\ndef swenerstart(stdin, encoding, verbose):\n \"\"\"Start a SweNER process and return it.\"\"\"\n return util.system.call_binary(\"hfst-swener\", [], stdin, encoding=encoding, verbose=verbose, return_command=True)\n\n\nif __name__ == '__main__':\n util.run.main(tag_ne)\n", "sub_path": "sparv/swener.py", "file_name": "swener.py", "file_ext": "py", "file_size_in_byte": 6200, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "sparv.util.UTF8", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sparv.util", "line_number": 12, "usage_type": "name"}, {"api_name": "sparv.util.read_annotation_iteritems", "line_number": 33, "usage_type": "call"}, {"api_name": "sparv.util", "line_number": 33, "usage_type": "name"}, {"api_name": "sparv.util.read_annotation", "line_number": 34, "usage_type": "call"}, {"api_name": "sparv.util", "line_number": 34, "usage_type": "name"}, {"api_name": "xml.sax.saxutils.sax.saxutils.escape", "line_number": 38, "usage_type": "call"}, {"api_name": "xml.sax.saxutils.sax", "line_number": 38, "usage_type": "attribute"}, {"api_name": "xml.sax.saxutils", "line_number": 38, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 77, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree.fromstring", "line_number": 79, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree", "line_number": 79, "usage_type": "name"}, {"api_name": "sparv.util.log.warning", "line_number": 81, "usage_type": "call"}, {"api_name": "sparv.util.log", "line_number": 81, "usage_type": "attribute"}, {"api_name": "sparv.util", "line_number": 81, 
"usage_type": "name"}, {"api_name": "sparv.util.edgeStart", "line_number": 92, "usage_type": "call"}, {"api_name": "sparv.util", "line_number": 92, "usage_type": "name"}, {"api_name": "sparv.util.edgeEnd", "line_number": 105, "usage_type": "call"}, {"api_name": "sparv.util", "line_number": 105, "usage_type": "name"}, {"api_name": "sparv.util.mkEdge", "line_number": 107, "usage_type": "call"}, {"api_name": "sparv.util", "line_number": 107, "usage_type": "name"}, {"api_name": "sparv.util.log.warning", "line_number": 127, "usage_type": "call"}, {"api_name": "sparv.util.log", "line_number": 127, "usage_type": "attribute"}, {"api_name": "sparv.util", "line_number": 127, "usage_type": "name"}, {"api_name": "sparv.util.write_annotation", "line_number": 131, "usage_type": "call"}, {"api_name": "sparv.util", "line_number": 131, "usage_type": "name"}, {"api_name": "sparv.util.write_annotation", "line_number": 132, "usage_type": "call"}, {"api_name": "sparv.util", "line_number": 132, "usage_type": "name"}, {"api_name": "sparv.util.write_annotation", "line_number": 133, "usage_type": "call"}, {"api_name": "sparv.util", "line_number": 133, "usage_type": "name"}, {"api_name": "sparv.util.write_annotation", "line_number": 134, "usage_type": "call"}, {"api_name": "sparv.util", "line_number": 134, "usage_type": "name"}, {"api_name": "sparv.util.system.call_binary", "line_number": 139, "usage_type": "call"}, {"api_name": "sparv.util.system", "line_number": 139, "usage_type": "attribute"}, {"api_name": "sparv.util", "line_number": 139, "usage_type": "name"}, {"api_name": "sparv.util.run.main", "line_number": 143, "usage_type": "call"}, {"api_name": "sparv.util.run", "line_number": 143, "usage_type": "attribute"}, {"api_name": "sparv.util", "line_number": 143, "usage_type": "name"}]} +{"seq_id": "575943540", "text": "import pytest\n\n\n@pytest.fixture\ndef gopath(tmpdir_factory):\n return tmpdir_factory.mktemp(\"gopath\")\n\n\ndef test_env(cmd, project, gopath):\n cmd.run(f\"export GOPATH={gopath}\")\n\n project.write_devyml(\"\"\"\n up:\n - go: '1.5'\n \"\"\")\n\n cmd.run(\"bud up\")\n\n output = cmd.run(\"go version\")\n assert \"go version go1.5\" in output\n\n\ndef test_warn_gopath_missing(cmd, project, gopath):\n cmd.run(\"unset GOPATH\")\n\n project.write_devyml(\"\"\"\n up:\n - go: '1.5'\n \"\"\")\n\n output = cmd.run(\"bud up\", expect_exit_code=1)\n assert \"The GOPATH environment variable should be set\" in output\n\n\ndef test_with_modules(cmd, project, srcdir):\n # We want to support pre-modules and modules projects in the same environment\n # so we set a GOPATH as it would be for pre-modules setup\n # Devbuddy will set GO111MODULES=on to force-enable Go modules even if we are in the GOPATH\n cmd.run(f\"export GOPATH={srcdir}\")\n\n project.write_devyml(\"\"\"\n up:\n - go:\n version: '1.12'\n modules: true\n \"\"\")\n\n output = cmd.run(\"bud up\")\n\n project.write_file_dedent(\"main.go\", \"\"\"\n package main\n\n import (\n \"fmt\"\n \"github.com/spf13/pflag\"\n )\n\n func main() {\n pflag.Parse()\n fmt.Println(pflag.Arg(0))\n }\n \"\"\")\n\n project.write_file_dedent(\"go.mod\", \"\"\"\n module poipoi\n\n require github.com/spf13/pflag v1.0.3\n \"\"\")\n\n project.write_file_dedent(\"go.sum\", \"\"\"\n github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=\n github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=\n \"\"\")\n\n cmd.run(\"go mod tidy\")\n cmd.run(\"go mod download\")\n\n output = cmd.run(\"go run main.go Test1234\")\n assert 
output == \"Test1234\"\n", "sub_path": "tests/test_task_go.py", "file_name": "test_task_go.py", "file_ext": "py", "file_size_in_byte": 1853, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pytest.fixture", "line_number": 4, "usage_type": "attribute"}]} +{"seq_id": "141287778", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nELU = \"elu\"\nRELU = \"relu\"\nTANH = \"tanh\"\nSIGMOID = \"sigmoid\"\nACTIVATIONS = [SIGMOID, TANH, RELU, ELU]\n\n\ndef print_subplot(i, j, x, y, title):\n a = axes[i, j]\n a.axvline(x=0, color='k')\n a.axhline(y=0, color='k')\n a.plot(x, y)\n a.set_title(title)\n\n\nstart = -10\nend = 10\nsize = 50\n\nx = np.linspace(start, end, size)\nys = {}\n\nfor activation_string in ACTIVATIONS:\n tf.reset_default_graph()\n activation_input = tf.placeholder(tf.float32)\n activation = getattr(tf.nn, activation_string)\n output = activation(activation_input)\n\n with tf.Session() as sess:\n y = sess.run(output, feed_dict={activation_input: x})\n ys[activation_string] = y\n\nfig, axes = plt.subplots(\n 2,\n 2,\n gridspec_kw={'width_ratios': [1, 1], 'height_ratios': [1, 1]},\n figsize=(16, 5))\nprint_subplot(0, 0, x, ys[SIGMOID], SIGMOID.title())\nprint_subplot(0, 1, x, ys[TANH], TANH.title())\nprint_subplot(1, 0, x, ys[RELU], RELU.title())\nprint_subplot(1, 1, x, ys[ELU], ELU.title())\n\nfig.tight_layout()\nplt.show()\n", "sub_path": "model/src/images/activation_functions.py", "file_name": "activation_functions.py", "file_ext": "py", "file_size_in_byte": 1103, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.linspace", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.reset_default_graph", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "550490319", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass OFCPokerNet(nn.Module):\n\n def __init__(self, embedding_dim, action_space, drop_prob, num_layers=2, hidden_size=128):\n super(OFCPokerNet, self).__init__()\n self.embedding_dim = embedding_dim\n self.hidden_size = hidden_size\n self.three_hand_embed = nn.Linear(52, self.embedding_dim)\n self.five_hand_embed = nn.Linear(52, self.embedding_dim)\n self.cur_card_embed = nn.Linear(52, self.hidden_size)\n\n input_dim = 6 * self.embedding_dim + self.hidden_size\n self.fc1 = nn.Linear(input_dim, self.hidden_size)\n self.layers = [nn.Linear(hidden_size, self.hidden_size) for _ in range(num_layers)]\n self.value = nn.Linear(self.hidden_size, 1)\n self.policy = nn.Linear(self.hidden_size, action_space)\n self.dropout = nn.Dropout(p=drop_prob)\n\n def to(self, *args, **kwargs):\n self = super(OFCPokerNet, self).to(*args, **kwargs) \n self.layers = [layer.to(*args, **kwargs) for layer in self.layers]\n return self\n\n 
def forward(self, front, mid, back, cur):\n front_embed = self.three_hand_embed(front).view(-1, self.embedding_dim * 2)\n mid_embed = self.five_hand_embed(mid).view(-1, self.embedding_dim * 2)\n back_embed = self.five_hand_embed(back).view(-1, self.embedding_dim * 2)\n card_embed = self.cur_card_embed(cur)\n x = torch.cat((front_embed, mid_embed, back_embed, card_embed), 1)\n out = F.relu(self.fc1(x))\n out = self.dropout(out)\n for layer in self.layers:\n out = F.relu(layer(out))\n out = self.dropout(out)\n pi = self.policy(out)\n v = self.value(out)\n return F.log_softmax(pi, dim=1), torch.tanh(v)\n", "sub_path": "ofcpoker/pytorch/OFCPokerNNet.py", "file_name": "OFCPokerNNet.py", "file_ext": "py", "file_size_in_byte": 1780, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "torch.nn.Module", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.tanh", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "145765530", "text": "import numpy as np\nimport argparse\nimport matplotlib.gridspec\nimport matplotlib.pyplot as plt\nimport scipy.stats\nimport statsmodels.stats.multitest\n\nfrom behavior import backup\nfrom analysis.batch import customized_plot\n\n\ndef main(force):\n\n backups = backup.get_data(force)\n\n backups = [b for b in backups if b.pvp]\n\n # ----------------- Data ------------------- #\n\n # Look at the parameters\n n_simulations = len(backups)\n n_positions = backups[0].n_positions\n\n # Containers\n d = np.zeros(n_simulations)\n prices = np.zeros(n_simulations)\n scores = np.zeros(n_simulations)\n r = np.zeros(n_simulations)\n s = np.zeros(n_simulations, dtype=bool)\n\n for i, b in enumerate(backups):\n\n # Compute the mean distance between the two firms\n data = 
np.absolute(\n b.positions[:, 0] -\n b.positions[:, 1]) / n_positions\n\n d[i] = np.mean(data)\n\n # Compute the mean price\n prices[i] = np.mean(b.prices[:, :])\n\n # Compute the mean profit\n scores[i] = np.mean(b.profits[:, :])\n\n r[i] = b.r\n s[i] = b.display_opponent_score\n\n # ---------- Plot ----------------------------- #\n\n fig = plt.figure(figsize=(4, 7), dpi=200)\n\n sub_gs = matplotlib.gridspec.GridSpec(nrows=3, ncols=2)\n\n axes = (\n fig.add_subplot(sub_gs[0, 0]),\n fig.add_subplot(sub_gs[0, 1]),\n fig.add_subplot(sub_gs[1, 0]),\n fig.add_subplot(sub_gs[1, 1]),\n fig.add_subplot(sub_gs[2, 0]),\n fig.add_subplot(sub_gs[2, 1])\n )\n\n y_labels = \"Distance\", \"Price\", \"Profit\"\n y_limits = (0, 1), (1, 11), (0, 120)\n\n s_values = (0, 1, ) * 3\n\n arr = (d, d, prices, prices, scores, scores)\n\n # axes[0].text(2, 1.3, \"Display opponent score\", fontsize=12)\n axes[0].set_title(\"$s = 0$\")\n axes[1].set_title(\"$s = 1$\")\n\n for idx in range(len(axes)):\n\n ax = axes[idx]\n\n ax.set_axisbelow(True)\n\n # Violin plot\n data = [arr[idx][(r == r_value) * (s == s_values[idx])] for r_value in (0.25, 0.50)]\n color = ['C0' if r_value == 0.25 else 'C1' for r_value in (0.25, 0.50)]\n\n customized_plot.violin(ax=ax, data=data, color=color, edgecolor=\"white\", alpha=0.8) # color, alpha=0.5)\n\n for ax in axes[0:2]:\n ax.set_yticks(np.arange(0, 1.1, 0.25))\n\n for ax in axes[2:4]:\n ax.set_yticks(np.arange(1, 11.1, 2))\n\n for ax in axes[-2:]:\n ax.set_xticklabels([\"{:.2f}\".format(i) for i in (0.25, 0.50)])\n ax.set_xlabel(\"$r$\")\n\n for ax in axes[:4]:\n ax.tick_params(length=0, axis=\"x\")\n ax.set_xticklabels([])\n\n for ax, y_label, y_lim in zip(axes[0::2], y_labels, y_limits):\n ax.text(-0.35, 0.5, y_label, rotation=\"vertical\", verticalalignment='center',\n horizontalalignment='center', transform=ax.transAxes, fontsize=12)\n ax.set_ylabel(\" \")\n ax.tick_params(axis=\"y\", labelsize=9)\n ax.set_ylim(y_lim)\n\n for ax, y_lim in zip(axes[1::2], y_limits):\n ax.set_ylim(y_lim)\n ax.tick_params(length=0, axis=\"y\")\n ax.set_yticklabels([])\n\n plt.tight_layout()\n\n plt.savefig(\"fig/main_exp.pdf\")\n plt.show()\n\n # ----------- Stats ----------------- #\n\n to_compare = [\n {\n \"measure\": \"distance\",\n \"constant\": \"s = 0\",\n \"var\": \"r\",\n \"data\": [d[(r == r_value) * (s == 0)] for r_value in (0.25, 0.50)]\n }, {\n \"measure\": \"distance\",\n \"constant\": \"s = 1\",\n \"var\": \"r\",\n \"data\": [d[(r == r_value) * (s == 1)] for r_value in (0.25, 0.50)]\n }, {\n \"measure\": \"price\",\n \"constant\": \"s = 0\",\n \"var\": \"r\",\n \"data\": [prices[(r == r_value) * (s == 0)] for r_value in (0.25, 0.50)]\n }, {\n \"measure\": \"price\",\n \"constant\": \"s = 1\",\n \"var\": \"r\",\n \"data\": [prices[(r == r_value) * (s == 1)] for r_value in (0.25, 0.50)]\n }, {\n \"measure\": \"profit\",\n \"constant\": \"s = 0\",\n \"var\": \"r\",\n \"data\": [scores[(r == r_value) * (s == 0)] for r_value in (0.25, 0.50)]\n }, {\n \"measure\": \"profit\",\n \"constant\": \"s = 1\",\n \"var\": \"r\",\n \"data\": [scores[(r == r_value) * (s == 1)] for r_value in (0.25, 0.50)]\n }, {\n \"measure\": \"distance\",\n \"constant\": \"r = 0.25\",\n \"var\": \"s\",\n \"data\": [d[(r == 0.25) * (s == s_value)] for s_value in (0, 1)]\n }, {\n \"measure\": \"distance\",\n \"constant\": \"r = 0.50\",\n \"var\": \"s\",\n \"data\": [d[(r == 0.50) * (s == s_value)] for s_value in (0, 1)]\n }, {\n \"measure\": \"price\",\n \"constant\": \"r = 0.25\",\n \"var\": \"s\",\n \"data\": 
[prices[(r == 0.25) * (s == s_value)] for s_value in (0, 1)]\n }, {\n \"measure\": \"price\",\n \"constant\": \"r = 0.50\",\n \"var\": \"s\",\n \"data\": [prices[(r == 0.50) * (s == s_value)] for s_value in (0, 1)]\n }, {\n \"measure\": \"profit\",\n \"constant\": \"r = 0.25\",\n \"var\": \"s\",\n \"data\": [scores[(r == 0.25) * (s == s_value)] for s_value in (0, 1)]\n }, {\n \"measure\": \"profit\",\n \"constant\": \"r = 0.50\",\n \"var\": \"s\",\n \"data\": [scores[(r == 0.50) * (s == s_value)] for s_value in (0, 1)]\n }\n ]\n\n ps = []\n us = []\n\n for dic in to_compare:\n u, p = scipy.stats.mannwhitneyu(dic[\"data\"][0], dic[\"data\"][1])\n ps.append(p)\n us.append(u)\n\n valid, p_corr, alpha_c_sidak, alpha_c_bonf = \\\n statsmodels.stats.multitest.multipletests(pvals=ps, alpha=0.01, method=\"b\")\n\n for p, u, p_c, v, dic in zip(ps, us, p_corr, valid, to_compare):\n print(\"[Diff in {} when {} depending on {}-value] \"\n \"Mann-Whitney rank test: u {}, p {:.3f}, p corr {:.3f}, significant: {}\"\n .format(dic[\"measure\"], dic[\"constant\"], dic[\"var\"], u, p, p_c, v))\n print()\n\n table = \\\n r\"\\begin{table}[htbp]\" + \"\\n\" + \\\n r\"\\begin{center}\" + \"\\n\" + \\\n r\"\\begin{tabular}{llllllll}\" + \"\\n\" + \\\n r\"Measure & Variable & Constant & $u$ & $p$ (before corr.) \" \\\n r\"& $p$ (after corr.) & Sign. at 1\\% threshold \\\\\" + \"\\n\" + \\\n r\"\\hline \\\\\" + \"\\n\"\n\n for p, u, p_c, v, dic in zip(ps, us, p_corr, valid, to_compare):\n\n p = \"{:.3f}\".format(p) if p >= 0.001 else \"$<$ 0.001\"\n p_c = \"{:.3f}\".format(p_c) if p_c >= 0.001 else \"$<$ 0.001\"\n v = \"yes\" if v else \"no\"\n table += r\"{} & ${}$ & ${}$ & {} & {} & {} & {} \\\\\"\\\n .format(dic[\"measure\"], dic[\"var\"], dic[\"constant\"], u, p, p_c, v) \\\n + \"\\n\"\n\n table += \\\n r\"\\end{tabular}\" + \"\\n\" + \\\n r\"\\end{center}\" + \"\\n\" + \\\n r\"\\caption{Significance tests for comparison using Mann-Withney's u. 
\" \\\n r\"Bonferroni corrections are applied.}\" + \"\\n\" + \\\n r\"\\label{table:significance_tests}\" + \"\\n\" + \\\n r\"\\end{table}\"\n\n print(\"*** Latex-formated table ***\")\n print(table)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Produce figures.')\n parser.add_argument('-f', '--force', action=\"store_true\", default=False,\n help=\"Re-import data\")\n parsed_args = parser.parse_args()\n\n main(force=parsed_args.force)\n", "sub_path": "__old__/analyse.py", "file_name": "analyse.py", "file_ext": "py", "file_size_in_byte": 7470, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "behavior.backup.get_data", "line_number": 14, "usage_type": "call"}, {"api_name": "behavior.backup", "line_number": 14, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.gridspec.GridSpec", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.gridspec.gridspec", "line_number": 53, "usage_type": "attribute"}, {"api_name": "matplotlib.gridspec", "line_number": 53, "usage_type": "name"}, {"api_name": "analysis.batch.customized_plot.violin", "line_number": 85, "usage_type": "call"}, {"api_name": "analysis.batch.customized_plot", "line_number": 85, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "scipy.stats.stats.mannwhitneyu", "line_number": 188, "usage_type": "call"}, {"api_name": "scipy.stats.stats", "line_number": 188, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 188, "usage_type": "name"}, {"api_name": "statsmodels.stats.multitest.stats.multitest.multipletests", "line_number": 193, "usage_type": "call"}, {"api_name": "statsmodels.stats.multitest.stats", "line_number": 193, "usage_type": "attribute"}, {"api_name": "statsmodels.stats.multitest", "line_number": 193, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 232, "usage_type": "call"}]} +{"seq_id": "419057550", "text": "import logging\nimport json\n\nfrom alpaca_trade_api.stream import Stream\nfrom alpaca_trade_api.common import URL\n\n\nlog = logging.getLogger(__name__)\n\n\nasync 
def print_trade(t):\n print('trade', t)\n\n\nasync def print_quote(q):\n print('quote', q)\n\n\nasync def print_trade_update(tu):\n print('trade update', tu)\n\n\ndef main():\n logging.basicConfig(level=logging.INFO)\n \n with open(\"./config.json\",\"rb\") as file:\n config = json.load(file)\n \n feed = 'iex' # <- replace to SIP if you have PRO subscription\n stream = Stream(key_id=config['alpaca_key_id'],\n secret_key=config['alpaca_secret_key'],\n base_url=URL(config['alpaca_base_url']),\n data_feed=feed, raw_data=True)\n stream.subscribe_trade_updates(print_trade_update)\n stream.subscribe_trades(print_trade, 'AAPL')\n stream.subscribe_quotes(print_quote, 'IBM')\n\n @stream.on_bar('MSFT')\n async def _(bar):\n print('bar', bar)\n\n @stream.on_status(\"*\")\n async def _(status):\n print('status', status)\n\n stream.run()\n\n\nif __name__ == \"__main__\":\n main()", "sub_path": "app/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1120, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 24, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 27, "usage_type": "call"}, {"api_name": "alpaca_trade_api.stream.Stream", "line_number": 30, "usage_type": "call"}, {"api_name": "alpaca_trade_api.common.URL", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "577471574", "text": "#! -*- coding:utf-8 -*-\nimport numpy as np\nfrom sklearn import linear_model\n\n# d <- read.csv(file='input/data-salary.txt')\n# res_lm <- lm(Y ~ X, data=d)\n# X_new <- data.frame(X=23:60)\n# conf_95 <- predict(res_lm, X_new, interval='confidence', level=0.95)\n# pred_95 <- predict(res_lm, X_new, interval='prediction', level=0.95)\n\nd = np.genfromtxt(fname='input/data-salary.txt', delimiter=',', names=True, dtype=np.float)\nlm = linear_model.LinearRegression()\nlm.fit(d['X'].reshape(d.size, 1), d['Y'])\n\nprint('Intercept: ' + str(lm.intercept_))\nprint('Coefficients: ' + str(lm.coef_[0]))\n# 一応p.38の1 行目の数値はとは一致した。\n# 信頼区間と予測区間については、まだ書いてない\n", "sub_path": "chap04/lm.py", "file_name": "lm.py", "file_ext": "py", "file_size_in_byte": 710, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "numpy.genfromtxt", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "51485543", "text": "#! 
/usr/bin/env python\n# coding: utf-8\n\nimport os\nimport argparse\nfrom JYTools.JYWorker import RedisQueue\n\n# conf_dir = \"/public/JINGD/conf\"\n# conf_path = os.path.join(conf_dir, \"redis_worker.conf\")\nconf_path = os.environ.get(\"REDIS_WORKER_CONF_PATH\")\nr_queue = RedisQueue(conf_path=conf_path, work_tag=\"JYGroupDAG\")\n\n\n\ndef run_hotspot(normal_recal_bam, hotspot_vcf, sample_no):\n apply_pipeline = {\"task_list\": [{\"task_type\": \"app\", \"work_tag\": \"RunHotspot\",\n \"input_sample_no\": sample_no,\n \"input_hotspot_vcf\": hotspot_vcf,\n \"input_normal_recal_bam\": normal_recal_bam}],\n \"task_type\": \"pipeline\"}\n r_queue.push(sample_no, apply_pipeline)\n\n\ndef main():\n usage = \"Help message\"\n description = \"Run manta pipeline\"\n parser = argparse.ArgumentParser(usage=usage, description=description)\n\n parser.add_argument(\"-n\", \"--normal_recal_bam\", dest=\"normal_recal_bam\", help=\"normal recal bam path\")\n parser.add_argument(\"-v\", \"--hotspot_vcf\", dest=\"hotspot_vcf\", help=\"hot spot vcf path\")\n\n parser.add_argument(\"-s\", \"--sample-no\", dest=\"sample_no\", help=\"sample no\")\n args = parser.parse_args()\n normal_recal_bam = args.normal_recal_bam\n hotspot_vcf = args.hotspot_vcf\n sample_no = args.sample_no\n\n run_hotspot(normal_recal_bam, hotspot_vcf, sample_no)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "anzhen/branch_pipeline/test_hotspot.py", "file_name": "test_hotspot.py", "file_ext": "py", "file_size_in_byte": 1453, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "os.environ.get", "line_number": 10, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "JYTools.JYWorker.RedisQueue", "line_number": 11, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "388173261", "text": "# (C) Datadog, Inc. 2019\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\nimport pytest\n\nfrom . 
import metrics\n\npytestmark = pytest.mark.e2e\n\n\n@pytest.mark.e2e\ndef test_check(dd_agent_check, instance):\n aggregator = dd_agent_check(instance, rate=True)\n\n for metric in metrics.STANDARD:\n aggregator.assert_metric_has_tag(metric, 'server:{}'.format(instance['server']))\n aggregator.assert_metric_has_tag(metric, 'port:{}'.format(instance['port']))\n\n aggregator.assert_all_metrics_covered()\n", "sub_path": "sap_hana/tests/test_e2e.py", "file_name": "test_e2e.py", "file_ext": "py", "file_size_in_byte": 544, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pytest.mark", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 11, "usage_type": "attribute"}]} +{"seq_id": "295427675", "text": "import argparse\nimport re\n\nfrom codegen_outofplacebatching import deindent, get_signatures, gen_unwraps\n\n\ndef get_signature(op, path):\n signatures = get_signatures(path, include_op=True)\n result = [sig for sig in signatures if sig[0] == op]\n if len(result) != 1:\n raise ValueError(\"\")\n return result[0]\n\n\ndef gen_return_sig(return_t):\n if len(return_t) == 1:\n return return_t[0]\n return f'std::tuple<{\".\".join(return_t)}>'\n\n\ndef gen_args_sig(args_t):\n args = [f'{typ} {argname}' for typ, argname in args_t]\n return ', '.join(args)\n\n\ndef gen_args_list(args_t):\n args = [f'{argname}' for _, argname in args_t]\n return ', '.join(args)\n\n\ndef gen_plumbing(signature):\n # \"add.Tensor\"\n op, return_t, args_t = signature\n\n maybe_op_and_variant = op.split('.')\n if len(maybe_op_and_variant) == 1:\n op = maybe_op_and_variant[0]\n variant = ''\n opname = op\n else:\n op, variant = maybe_op_and_variant\n opname = f'{op}_{variant}'\n\n if op.endswith('_'):\n raise ValueError('Codegen doesn\\'t handle in-place ops')\n\n arg_types, arg_names = zip(*args_t)\n unwraps, _ = gen_unwraps(arg_types, arg_names)\n\n result = deindent(f\"\"\"\\\n {gen_return_sig(return_t)} {opname}_plumbing({gen_args_sig(args_t)}) {{\n auto maybe_layer = maybeCurrentDynamicLayer();\n TORCH_INTERNAL_ASSERT(maybe_layer.has_value());\n int64_t cur_level = maybe_layer->layerId();\n\n {unwraps}\n\n // Your logic here\n\n static auto op = c10::Dispatcher::singleton()\n .findSchemaOrThrow(\"aten::{op}\", \"{variant}\");\n return slow_fallback<{','.join(return_t)}>(op, {{ {gen_args_list(args_t)} }});\n }}\n \"\"\")\n return result\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Generate the batch rule plumbing for an op')\n parser.add_argument('op',\n help='the operator name (with overload name)')\n parser.add_argument('path',\n help='link to RegistrationDeclarations.h')\n\n # Sample usage:\n # gen_plumbing.py add.Tensor ~/pytorch/build/aten/src/ATen/RegistrationDeclarations.h\n args = parser.parse_args()\n signature = get_signature(args.op, args.path)\n result = gen_plumbing(signature)\n print(result)\n", "sub_path": "codegen/gen_plumbing.py", "file_name": "gen_plumbing.py", "file_ext": "py", "file_size_in_byte": 2305, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "codegen_outofplacebatching.get_signatures", "line_number": 8, "usage_type": "call"}, {"api_name": "codegen_outofplacebatching.gen_unwraps", "line_number": 48, "usage_type": "call"}, {"api_name": "codegen_outofplacebatching.deindent", "line_number": 50, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 69, 
"usage_type": "call"}]} +{"seq_id": "277050321", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n.. module:: recordation\n :platform: Unix\n :synopsis: the top-level submodule of T_System that contains the classes related to T_System's recording video and audio ability.\n\n.. moduleauthor:: Cem Baybars GÜÇLÜ \n\"\"\"\nimport os # Miscellaneous operating system interfaces\nimport datetime # Basic date and time types\nimport subprocess # Subprocess managements\nimport uuid # The random id generator\n\nfrom shutil import rmtree\nfrom tinydb import Query # TinyDB is a lightweight document oriented database\n\nfrom t_system.db_fetching import DBFetcher\n\nfrom t_system import dot_t_system_dir\nfrom t_system import log_manager\n\nlogger = log_manager.get_logger(__name__, \"DEBUG\")\n\n\nclass Recorder:\n \"\"\"Class to define a recording ability of tracking system.\n\n This class provides necessary initiations and functions named :func:`t_system.recordation.RecordManager.start`\n for creating a Record object and start recording by this object. :func:`t_system.recordation.RecordManager.merge_audio_and_video`\n for merging separate audio and video file to one.\n \"\"\"\n\n def __init__(self, record_formats, camera, hearer):\n \"\"\"Initialization method of :class:`t_system.recordation.Recorder` class.\n\n Args:\n record_formats (list): Formats of the records for video, audio and merged.\n camera: \t Camera object from PiCamera.\n hearer: \t Hearer object.\n \"\"\"\n\n self.current_video_file = \"\"\n self.current_audio_file = \"\"\n self.current_merged_file = \"\"\n\n self.record_formats = {\"video\": record_formats[0], \"audio\": record_formats[1], \"merged\": record_formats[2]}\n\n self.camera = camera\n self.hearer = hearer\n \n def start(self, mode=\"track\"):\n \"\"\"Method to start audio and video recording asynchronously.\n\n Args:\n mode: \t The running mode which is wants to set video name.\n \"\"\"\n logger.debug(\"Record starting...\")\n record = Record(datetime.datetime.now().strftime(\"%d_%m_%Y\"), datetime.datetime.now().strftime(\"%H_%M_%S\"), mode, self.record_formats)\n\n self.__set_record_params(record)\n\n self.camera.start_recording(self.current_video_file, self.record_formats[\"video\"])\n self.hearer.start_recording(self.current_audio_file, self.record_formats[\"audio\"])\n\n def stop(self):\n \"\"\"Method to stop audio and video recording\n \"\"\"\n\n self.camera.stop_recording()\n self.hearer.stop_recording()\n\n # Todo: This is disgusting way to merging audio and silent video. 
Fix this.\n self.merge_audio_and_video()\n\n def merge_audio_and_video(self):\n \"\"\"Method to merge recorded audio and video files.\n \"\"\"\n\n merge_cmd = f'ffmpeg -y -i {self.current_audio_file} -r 24 -i {self.current_video_file} -filter:a aresample=async=1 -c:a flac -strict -2 -c:v copy {self.current_merged_file}'\n\n subprocess.call(merge_cmd, shell=True)\n\n logger.info('Video and Audio Muxing Done')\n\n def __set_record_params(self, record):\n \"\"\"Method to setting current parameter by current recording.\n \"\"\"\n\n self.current_video_file = record.video_file\n self.current_audio_file = record.audio_file\n self.current_merged_file = record.merged_file\n\n\nclass RecordManager:\n \"\"\"Class to define Record manager for handling the recordation database of t_system's vision.\n\n This class provides necessary initiations and functions named :func:`t_system.recordation.RecordManager.get_records`\n for returning the Record objects of existing records with given table(date at the same time) parameter.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialization method of :class:`t_system.recordation.RecordManager` class.\n \"\"\"\n\n self.records_folder = f'{dot_t_system_dir}/records'\n\n if not os.path.exists(self.records_folder):\n os.mkdir(self.records_folder)\n\n self.db = DBFetcher(self.records_folder, \"db\").fetch()\n\n self.records = []\n\n self.__set_records()\n\n def __set_records(self):\n \"\"\"Method to set existing records.\n \"\"\"\n\n for record in self.db.all():\n self.records.append(Record(record[\"date\"], record[\"time\"], record[\"scope\"], record[\"record_formats\"], record[\"id\"], record[\"name\"], record[\"length\"]))\n\n def refresh_records(self):\n \"\"\"Method to refresh existing records on runtime alterations.\n \"\"\"\n\n self.records.clear()\n self.__set_records()\n\n def get_records(self, date=None):\n \"\"\"Method to get existing records in given date. If date is None it returns all records.\n\n Args:\n date (str): Parent date of the record. 
In day_mount_year format.\n \"\"\"\n records = []\n\n if date:\n for record in self.records:\n if record.date == date:\n records.append(record)\n return records\n\n return self.records\n\n def get_record(self, id):\n \"\"\"Method to get existing record in given id.\n\n Args:\n id (str): ID of the record.\n \"\"\"\n\n for record in self.records:\n if record.id == id:\n return record\n\n def get_record_dates(self):\n \"\"\"Method to get date list of existing records.\n \"\"\"\n dates = []\n for record in self.records:\n dates.append(record.date)\n\n dates = list(dict.fromkeys(dates)) # removes duplicated dates.\n\n return dates\n\n def delete_record(self, id):\n \"\"\"Method to get date list of existing records.\n\n Args:\n id (str): ID of the record.\n \"\"\"\n\n for record in self.records:\n if record.id == id:\n record.remove_self()\n self.records.remove(record) # for removing object from list\n return True\n return False\n\n def update_record(self, id, name):\n \"\"\"Method to updating record that has given id.\n\n Args:\n id (str): ID of the record.\n name (str): The name of the record.\n \"\"\"\n\n for record in self.records:\n if record.id == id:\n record.update_name(name)\n return True\n return False\n\n\nclass Record:\n \"\"\"Class to define records of t_systems vision.\n\n This class provides necessary initiations and functions named :func:`t_system.recordation.Record.__db_upsert`\n for saving records to the database safely.\n \"\"\"\n\n def __init__(self, d_m_y, h_m_s, scope, record_formats, id=None, name=None, length=None):\n \"\"\"Initialization method of :class:`t_system.recordation.Record` class.\n\n Args:\n d_m_y (str): Date that is day_mount_year format.\n h_m_s (str): Date that is hour_minute_second format.\n scope (str): The working type during recording.\n record_formats (dict): Formats of the records for video, audio and merged.\n id (str): The id of the record.\n name (str): The name of the record.\n length (str): The length of the record as m:s.\n \"\"\"\n\n self.id = id\n if not id:\n self.id = str(uuid.uuid1())\n\n self.name = name\n if not name:\n self.name = h_m_s\n\n self.date = d_m_y # table name at the same time\n self.time = h_m_s\n self.scope = scope\n self.record_formats = record_formats\n self.length = length\n\n self.records_folder = f'{dot_t_system_dir}/records'\n self.parent_folder = f'{self.records_folder}/{self.date}'\n self.folder = f'{self.parent_folder}/{self.time}'\n\n self.video_file = f'{self.folder}/{self.time}.{self.record_formats[\"video\"]}'\n self.audio_file = f'{self.folder}/{self.time}.{self.record_formats[\"audio\"]}'\n self.merged_file = f'{self.folder}/{self.time}.{self.record_formats[\"merged\"]}'\n\n self.db = DBFetcher(self.records_folder, \"db\").fetch()\n\n self.__check_folders()\n\n if length is None:\n self.length = self.__calc_length()\n\n self.__db_upsert()\n\n def __db_upsert(self, force_insert=False):\n \"\"\"Function to insert(or update) the record to the database.\n\n Args:\n force_insert (bool): Force insert flag.\n\n Returns:\n str: Response.\n \"\"\"\n\n if self.db.search((Query().id == self.id)):\n if force_insert:\n # self.already_exist = False\n self.db.update({'id': self.id, 'name': self.name, 'time': self.time, 'date': self.date, 'scope': self.scope, 'record_formats': self.record_formats, 'length': self.length}, Query().id == self.id)\n\n else:\n # self.already_exist = True\n return \"Already Exist\"\n else:\n self.db.insert({\n 'id': self.id,\n 'name': self.name,\n 'time': self.time,\n 'date': self.date,\n 'scope': 
self.scope,\n 'record_formats': self.record_formats,\n 'length': self.length\n }) # insert the given data\n\n return \"\"\n\n def update_name(self, name):\n \"\"\"Method to updating self name via by given parameter.\n\n Args:\n name (str): The name of the record.\n \"\"\"\n\n self.name = name\n self.__db_upsert(True)\n\n def remove_self(self):\n \"\"\"Method to remove face itself.\n \"\"\"\n\n rmtree(self.folder)\n\n self.db.remove((Query().id == self.id))\n\n def __calc_length(self):\n \"\"\"Method to calculating length of record with using OpenCV.\n \"\"\"\n if os.path.exists(self.merged_file):\n import cv2\n\n cap = cv2.VideoCapture(self.merged_file)\n\n fps = cap.get(cv2.CAP_PROP_FPS) # OpenCV2 version 2 used \"CV_CAP_PROP_FPS\"\n frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n duration = frame_count / fps\n\n minutes = int(duration / 60)\n seconds = round(duration % 60)\n length = f'{minutes}:{seconds}'\n\n cap.release()\n\n return length\n\n return None\n\n def __check_folders(self):\n \"\"\"Method to checking the necessary folders created before. If not created creates them.\n \"\"\"\n\n if not os.path.exists(self.parent_folder):\n os.mkdir(self.parent_folder)\n\n if not os.path.exists(self.folder):\n os.mkdir(self.folder)\n", "sub_path": "t_system/recordation.py", "file_name": "recordation.py", "file_ext": "py", "file_size_in_byte": 10818, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "t_system.log_manager.get_logger", "line_number": 24, "usage_type": "call"}, {"api_name": "t_system.log_manager", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 60, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 83, "usage_type": "call"}, {"api_name": "t_system.dot_t_system_dir", "line_number": 107, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 110, "usage_type": "call"}, {"api_name": "t_system.db_fetching.DBFetcher", "line_number": 112, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 221, "usage_type": "call"}, {"api_name": "t_system.dot_t_system_dir", "line_number": 233, "usage_type": "name"}, {"api_name": "t_system.db_fetching.DBFetcher", "line_number": 241, "usage_type": "call"}, {"api_name": "tinydb.Query", "line_number": 260, "usage_type": "call"}, {"api_name": "tinydb.Query", "line_number": 263, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 295, "usage_type": "call"}, {"api_name": "tinydb.Query", "line_number": 297, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 302, "usage_type": "call"}, {"api_name": "os.path", "line_number": 302, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 305, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 307, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 308, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 325, "usage_type": "call"}, {"api_name": "os.path", "line_number": 325, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 326, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 328, "usage_type": "call"}, {"api_name": "os.path", "line_number": 328, "usage_type": 
"attribute"}, {"api_name": "os.mkdir", "line_number": 329, "usage_type": "call"}]} +{"seq_id": "431193496", "text": "# -*- coding: utf-8 -*-\n#\n# Python wrapper around the CMake build system\n#\n# Copyright (c) Honda Research Institute Europe GmbH\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n#\n\n\n#----------------------------------------------------------------------------\n# Includes\n#----------------------------------------------------------------------------\n\n\nimport collections\nimport glob\nimport logging\nimport os\nimport re\nimport shlex\n\nfrom ToolBOSCore.BuildSystem import Compilers\nfrom ToolBOSCore.Util import FastScript\nfrom ToolBOSCore.Util import Any\n\n\n#----------------------------------------------------------------------------\n# Public API\n#----------------------------------------------------------------------------\n\n\nSwitches = collections.namedtuple( 'Switches', [ 'c', 'cpp' ] )\n\n\ndef getIncludePathsAsString( targetPlatform, targetName ):\n \"\"\"\n Returns a long string with all include paths set for the package\n using include_directories() in CMakeLists.txt (in this package or\n included ones).\n\n This means all paths where the compiler would search for header\n files (beside system defaults), in the form \"-I/path1 -I/path2...\".\n\n If no additional paths are set, an empty string will be returned.\n\n NOTE: CMake supports that include directories may be different for\n various target platforms, and even per executable and/or\n library. 
Therefore you need to specify both of them.\n A rule of thumb is targetName='-global'.\n \"\"\"\n Any.requireIsTextNonEmpty( targetPlatform )\n Any.requireIsTextNonEmpty( targetName )\n\n fileName = os.path.join( 'build/%s/CMakeFiles/%s.dir/flags.make' %\n ( targetPlatform, targetName ) )\n\n Any.requireIsDirNonEmpty( 'build/%s' % targetPlatform )\n Any.requireIsFileNonEmpty( fileName )\n\n # read-in ground truth information\n logging.debug( 'parsing %s' % fileName )\n content = FastScript.getFileContent( fileName, splitLines=True )\n raw_C = ''\n raw_CPP = ''\n regexp_C = re.compile( '^(?:C_FLAGS|C_INCLUDES)\\s=\\s+(.*)$' )\n regexp_CPP = re.compile( '^(?:CXX_FLAGS|CXX_INCLUDES)\\s=\\s+(.*)$' )\n result = ''\n\n for line in content:\n tmp = regexp_C.search( line )\n\n if tmp:\n raw_C = tmp.group( 1 )\n # logging.debug( 'raw C flags: %s' % raw_C )\n\n tmp = regexp_CPP.search( line )\n\n if tmp:\n raw_CPP = tmp.group( 1 )\n # logging.debug( 'raw CPP flags: %s' % raw_CPP )\n\n for candidate in ( shlex.split( raw_C ) + shlex.split( raw_CPP ) ):\n if candidate.startswith( '-I' ):\n result += candidate + ' '\n\n return result\n\n\ndef getIncludePathsAsList( targetPlatform, targetName ):\n \"\"\"\n Returns a list with all include paths set for the package\n using include_directories() in CMakeLists.txt (in this package or\n included ones).\n\n This means all paths where the compiler would search for header\n files (beside system defaults).\n\n If no additional paths are set, an empty list will be returned.\n\n NOTE: CMake supports that include directories may be different for\n various target platforms, and even per executable and/or\n library. Therefore you need to specify both of them.\n A rule of thumb is targetName='-global'.\n \"\"\"\n Any.requireIsTextNonEmpty( targetPlatform )\n Any.requireIsTextNonEmpty( targetName )\n\n result = []\n\n # we are adding a trailing blank so that the \" -I\" replacement will\n # also work on the first element\n raw = getIncludePathsAsString( targetPlatform, targetName )\n tmp = (' ' + raw ).replace( ' -I', ' ' )\n\n for token in tmp.split():\n result.append( token.strip() )\n\n\n # remove empty entries (if present)\n try:\n result.remove( '' )\n except ValueError:\n pass\n\n return frozenset( result )\n\n\ndef getStdSwitches( targetPlatform, targetName ):\n \"\"\"\n Returns a string with the compiler std switch.\n\n NOTE: CMake supports that compiler definitions may be different for\n various target platforms, and even per executable and/or\n library. 
Therefore you need to specify both of them.\n A rule of thumb is targetName='-global'.\n \"\"\"\n Any.requireIsTextNonEmpty( targetPlatform )\n Any.requireIsTextNonEmpty( targetName )\n\n # We need defaults because the macro parser needs the switch to\n # correctly parse c++ code.\n\n\n fileName = os.path.join( 'build/%s/CMakeFiles/%s.dir/flags.make' %\n ( targetPlatform, targetName ) )\n\n Any.requireIsDirNonEmpty( 'build/%s' % targetPlatform )\n Any.requireIsFileNonEmpty( fileName )\n\n # read-in ground truth information\n logging.debug( 'parsing %s', fileName )\n content = FastScript.getFileContent( fileName, splitLines=True )\n raw_C_CFLAGS = ''\n raw_CPP_CFLAGS = ''\n regexp_C_CFLAGS = re.compile( r'^C_FLAGS\\s=\\s+(.*)$' )\n regexp_CPP_CFLAGS = re.compile( r'^CXX_FLAGS\\s=\\s+(.*)$' )\n\n for line in content:\n tmp = regexp_C_CFLAGS.search( line )\n\n if tmp:\n raw_C_CFLAGS = tmp.group( 1 )\n\n tmp = regexp_CPP_CFLAGS.search( line )\n\n if tmp:\n raw_CPP_CFLAGS = tmp.group( 1 )\n\n # get the default language standards\n standards = Compilers.getDefaultLanguageStandard(targetPlatform)\n cStdSwitch = '-std={}'.format( standards[ 'c' ] )\n cppStdSwitch = '-std={}'.format( standards[ 'c++' ] )\n\n # look if the user specified different standards in the C_FLAGS/CPP_FLAGS\n # CMake variables\n candidates = shlex.split( raw_C_CFLAGS )\n for candidate in candidates:\n if candidate.startswith( '-std=' ):\n cStdSwitch = candidate\n\n candidates = shlex.split( raw_CPP_CFLAGS )\n for candidate in candidates:\n if candidate.startswith( '-std=' ):\n cppStdSwitch = candidate\n\n return Switches( c=cStdSwitch, cpp=cppStdSwitch )\n\n\ndef getCDefinesAsString( targetPlatform, targetName ):\n \"\"\"\n Returns a long string with all compiler definitions set for the\n package using the addDefinitions() directive.\n\n This means all definitions passed to the compiler in the given path\n (beside system defaults), in the form \"-DDEFINE1 -DFOO=BAR...\".\n\n If no additional definitions are set, an empty string will be returned.\n\n NOTE: CMake supports that compiler definitions may be different for\n various target platforms, and even per executable and/or\n library. 
Therefore you need to specify both of them.\n A rule of thumb is targetName='-global'.\n \"\"\"\n Any.requireIsTextNonEmpty( targetPlatform )\n Any.requireIsTextNonEmpty( targetName )\n\n fileName = os.path.join( 'build/%s/CMakeFiles/%s.dir/flags.make' %\n ( targetPlatform, targetName ) )\n\n Any.requireIsDirNonEmpty( 'build/%s' % targetPlatform )\n Any.requireIsFileNonEmpty( fileName )\n\n # read-in ground truth information\n logging.debug( 'parsing %s' % fileName )\n content = FastScript.getFileContent( fileName, splitLines=True )\n raw_C = ''\n raw_CPP = ''\n raw_C_CFLAGS = ''\n raw_CPP_CFLAGS = ''\n regexp_C = re.compile( '^C_DEFINES\\s=\\s+(.*)$' )\n regexp_CPP = re.compile( '^CXX_DEFINES\\s=\\s+(.*)$' )\n regexp_C_CFLAGS = re.compile( '^C_FLAGS\\s=\\s+(.*)$' )\n regexp_CPP_CFLAGS = re.compile( '^CXX_FLAGS\\s=\\s+(.*)$' )\n result = ''\n\n for line in content:\n tmp = regexp_C.search( line )\n\n if tmp:\n raw_C = tmp.group( 1 )\n # logging.debug( 'raw C defines: %s' % raw_C )\n\n tmp = regexp_CPP.search( line )\n\n if tmp:\n raw_CPP = tmp.group( 1 )\n # logging.debug( 'raw CPP defines: %s' % raw_CPP )\n\n tmp = regexp_C_CFLAGS.search(line)\n\n if tmp:\n raw_C_CFLAGS = tmp.group(1)\n\n tmp = regexp_CPP_CFLAGS.search(line)\n\n if tmp:\n raw_CPP_CFLAGS = tmp.group(1)\n\n candidates = ( shlex.split( raw_C ) +\n shlex.split( raw_CPP ) +\n shlex.split( raw_C_CFLAGS ) +\n shlex.split( raw_CPP_CFLAGS ) )\n\n for candidate in candidates:\n if candidate.startswith( '-D' ):\n result += candidate + ' '\n\n return result\n\n\ndef getCDefinesAsList( targetPlatform, targetName ):\n \"\"\"\n Returns a list with all compiler definitions set for the\n package using the addDefinitions() directive.\n\n If no additional definitions are set, an empty list will be returned.\n\n NOTE: CMake supports that compiler definitions may be different for\n various target platforms, and even per executable and/or\n library. 
Therefore you need to specify both of them.\n A rule of thumb is targetName='-global'.\n \"\"\"\n Any.requireIsTextNonEmpty( targetPlatform )\n Any.requireIsTextNonEmpty( targetName )\n\n result = []\n regexp = re.compile( '-D\\s*(.*)' )\n\n for token in getCDefinesAsString( targetPlatform, targetName ).split():\n\n if token.startswith( '-D' ):\n tmp = regexp.search( token )\n item = (tmp.group(1)).strip()\n result.append( item )\n\n return frozenset(result)\n\n\ndef getHeaderAndLanguageMap( targetPlatform ):\n \"\"\"\n Returns a dictionary mapping header files to the set of language\n files that use it.\n \"\"\"\n platformBuildDir = os.path.join( 'build', targetPlatform )\n targetBuildDirsWildcard = os.path.join( platformBuildDir, 'CMakeFiles', '*.dir' )\n targetBuildDirs = glob.glob( targetBuildDirsWildcard )\n result = {}\n\n\n for buildDir in targetBuildDirs:\n\n try:\n result.update( _parseDependDotMake( buildDir, platformBuildDir ) )\n\n except IOError:\n # most likely the depend.make does not exist for this target,\n # this might happen if there are no dependencies by the target\n # or if this is a pseudo-target such as \"doc\" coming from\n # FindDoxygen.cmake\n logging.debug( 'ignoring target: %s', buildDir )\n\n return result\n\n\ndef _parseDependDotMake( targetBuildDir, platformBuildDir ):\n \"\"\"\n Returns a dictionary mapping header files to the set of language\n files that use it.\n\n The dictionary is obtained parsing the file\n build//CMakeFiles/.dir/depend.make\n \"\"\"\n Any.requireIsTextNonEmpty( targetBuildDir )\n Any.requireIsTextNonEmpty( platformBuildDir )\n\n dependDotMakePath = os.path.join( targetBuildDir, 'depend.make' )\n\n lines = FastScript.getFileContent( dependDotMakePath, splitLines=True )\n result = collections.defaultdict( set )\n\n languageNormalizationMap = {\n '.c' : 'c',\n '.C' : 'c++',\n '.CC' : 'c++',\n '.CPP': 'c++',\n '.CXX': 'c++',\n '.cc' : 'c++',\n '.cpp': 'c++',\n '.cxx': 'c++',\n }\n\n for l in lines:\n # skip comments and empty lines\n if Any.isTextNonEmpty( l ) and not l.startswith( '#' ):\n # lines are in the format\n # /path/to/obj/file.{c,cpp,cc,cxx}.o: /path/to/dependencyfile.{c,cpp,cc,cxx,h,hpp,hxx,hh}\n objFile, depFile = l.split( ':' )\n srcFile, objExt = os.path.splitext( objFile.strip( ) )\n srcName, srcExt = os.path.splitext( srcFile )\n depFile = depFile.strip( )\n _, depFileExt = os.path.splitext( depFile )\n language = languageNormalizationMap[ srcExt ]\n\n if depFileExt.lower( ) in ('.h', '.hxx', '.hpp', '.hh'):\n if not os.path.isabs( depFile ):\n relPath = os.path.join( platformBuildDir, depFile )\n absPath = os.path.abspath( relPath )\n else:\n absPath = depFile\n result[ absPath ].add( language )\n\n\n return result\n\n\n# EOF\n", "sub_path": "include/ToolBOSCore/Tools/CMake.py", "file_name": "CMake.py", "file_ext": "py", "file_size_in_byte": 13714, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "collections.namedtuple", "line_number": 59, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any.requireIsTextNonEmpty", "line_number": 78, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 78, "usage_type": "name"}, {"api_name": "ToolBOSCore.Util.Any.requireIsTextNonEmpty", "line_number": 79, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 79, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": 
"attribute"}, {"api_name": "ToolBOSCore.Util.Any.requireIsDirNonEmpty", "line_number": 84, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 84, "usage_type": "name"}, {"api_name": "ToolBOSCore.Util.Any.requireIsFileNonEmpty", "line_number": 85, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 85, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 88, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.FastScript.getFileContent", "line_number": 89, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.FastScript", "line_number": 89, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 92, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 93, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 109, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any.requireIsTextNonEmpty", "line_number": 132, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 132, "usage_type": "name"}, {"api_name": "ToolBOSCore.Util.Any.requireIsTextNonEmpty", "line_number": 133, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 133, "usage_type": "name"}, {"api_name": "ToolBOSCore.Util.Any.requireIsTextNonEmpty", "line_number": 164, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 164, "usage_type": "name"}, {"api_name": "ToolBOSCore.Util.Any.requireIsTextNonEmpty", "line_number": 165, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 165, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "ToolBOSCore.Util.Any.requireIsDirNonEmpty", "line_number": 174, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 174, "usage_type": "name"}, {"api_name": "ToolBOSCore.Util.Any.requireIsFileNonEmpty", "line_number": 175, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 175, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 178, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.FastScript.getFileContent", "line_number": 179, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.FastScript", "line_number": 179, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 182, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 183, "usage_type": "call"}, {"api_name": "ToolBOSCore.BuildSystem.Compilers.getDefaultLanguageStandard", "line_number": 197, "usage_type": "call"}, {"api_name": "ToolBOSCore.BuildSystem.Compilers", "line_number": 197, "usage_type": "name"}, {"api_name": "shlex.split", "line_number": 203, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 208, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any.requireIsTextNonEmpty", "line_number": 231, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 231, "usage_type": "name"}, {"api_name": "ToolBOSCore.Util.Any.requireIsTextNonEmpty", "line_number": 232, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 232, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path", "line_number": 234, "usage_type": "attribute"}, {"api_name": "ToolBOSCore.Util.Any.requireIsDirNonEmpty", "line_number": 237, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 237, "usage_type": "name"}, {"api_name": 
"ToolBOSCore.Util.Any.requireIsFileNonEmpty", "line_number": 238, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 238, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 241, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.FastScript.getFileContent", "line_number": 242, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.FastScript", "line_number": 242, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 247, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 248, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 249, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 250, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 276, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 277, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 278, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 279, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any.requireIsTextNonEmpty", "line_number": 300, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 300, "usage_type": "name"}, {"api_name": "ToolBOSCore.Util.Any.requireIsTextNonEmpty", "line_number": 301, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 301, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 304, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 321, "usage_type": "call"}, {"api_name": "os.path", "line_number": 321, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 322, "usage_type": "call"}, {"api_name": "os.path", "line_number": 322, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 323, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 337, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any.requireIsTextNonEmpty", "line_number": 350, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 350, "usage_type": "name"}, {"api_name": "ToolBOSCore.Util.Any.requireIsTextNonEmpty", "line_number": 351, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 351, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 353, "usage_type": "call"}, {"api_name": "os.path", "line_number": 353, "usage_type": "attribute"}, {"api_name": "ToolBOSCore.Util.FastScript.getFileContent", "line_number": 355, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.FastScript", "line_number": 355, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 356, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any.isTextNonEmpty", "line_number": 371, "usage_type": "call"}, {"api_name": "ToolBOSCore.Util.Any", "line_number": 371, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 375, "usage_type": "call"}, {"api_name": "os.path", "line_number": 375, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 376, "usage_type": "call"}, {"api_name": "os.path", "line_number": 376, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 378, "usage_type": "call"}, {"api_name": "os.path", "line_number": 378, "usage_type": "attribute"}, {"api_name": "os.path.isabs", "line_number": 382, "usage_type": "call"}, {"api_name": "os.path", "line_number": 382, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 383, "usage_type": "call"}, {"api_name": "os.path", "line_number": 383, "usage_type": "attribute"}, 
{"api_name": "os.path.abspath", "line_number": 384, "usage_type": "call"}, {"api_name": "os.path", "line_number": 384, "usage_type": "attribute"}]} +{"seq_id": "456377726", "text": "from typing import Any, Dict, List, cast\n\nimport torch\nfrom torchdata.datapipes.iter import IterDataPipe, Mapper, CSVDictParser\nfrom torchvision.prototype.datasets.utils import (\n Dataset,\n DatasetConfig,\n DatasetInfo,\n OnlineResource,\n KaggleDownloadResource,\n)\nfrom torchvision.prototype.datasets.utils._internal import (\n hint_sharding,\n hint_shuffling,\n)\nfrom torchvision.prototype.features import Label, Image\n\n\nclass FER2013(Dataset):\n def _make_info(self) -> DatasetInfo:\n return DatasetInfo(\n \"fer2013\",\n homepage=\"https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge\",\n categories=(\"angry\", \"disgust\", \"fear\", \"happy\", \"sad\", \"surprise\", \"neutral\"),\n valid_options=dict(split=(\"train\", \"test\")),\n )\n\n _CHECKSUMS = {\n \"train\": \"a2b7c9360cc0b38d21187e5eece01c2799fce5426cdeecf746889cc96cda2d10\",\n \"test\": \"dec8dfe8021e30cd6704b85ec813042b4a5d99d81cb55e023291a94104f575c3\",\n }\n\n def resources(self, config: DatasetConfig) -> List[OnlineResource]:\n archive = KaggleDownloadResource(\n cast(str, self.info.homepage),\n file_name=f\"{config.split}.csv.zip\",\n sha256=self._CHECKSUMS[config.split],\n )\n return [archive]\n\n def _prepare_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:\n label_id = data.get(\"emotion\")\n\n return dict(\n image=Image(torch.tensor([int(idx) for idx in data[\"pixels\"].split()], dtype=torch.uint8).reshape(48, 48)),\n label=Label(int(label_id), categories=self.categories) if label_id is not None else None,\n )\n\n def _make_datapipe(\n self,\n resource_dps: List[IterDataPipe],\n *,\n config: DatasetConfig,\n ) -> IterDataPipe[Dict[str, Any]]:\n dp = resource_dps[0]\n dp = CSVDictParser(dp)\n dp = hint_sharding(dp)\n dp = hint_shuffling(dp)\n return Mapper(dp, self._prepare_sample)\n", "sub_path": "torchvision/prototype/datasets/_builtin/fer2013.py", "file_name": "fer2013.py", "file_ext": "py", "file_size_in_byte": 2025, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "torchvision.prototype.datasets.utils.Dataset", "line_number": 19, "usage_type": "name"}, {"api_name": "torchvision.prototype.datasets.utils.DatasetInfo", "line_number": 21, "usage_type": "call"}, {"api_name": "torchvision.prototype.datasets.utils.DatasetInfo", "line_number": 20, "usage_type": "name"}, {"api_name": "torchvision.prototype.datasets.utils.DatasetConfig", "line_number": 33, "usage_type": "name"}, {"api_name": "torchvision.prototype.datasets.utils.KaggleDownloadResource", "line_number": 34, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 35, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 33, "usage_type": "name"}, {"api_name": "torchvision.prototype.datasets.utils.OnlineResource", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 41, "usage_type": "name"}, {"api_name": "torchvision.prototype.features.Image", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.uint8", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torchvision.prototype.features.Label", "line_number": 46, "usage_type": "call"}, 
{"api_name": "typing.List", "line_number": 51, "usage_type": "name"}, {"api_name": "torchdata.datapipes.iter.IterDataPipe", "line_number": 51, "usage_type": "name"}, {"api_name": "torchvision.prototype.datasets.utils.DatasetConfig", "line_number": 53, "usage_type": "name"}, {"api_name": "torchdata.datapipes.iter.CSVDictParser", "line_number": 56, "usage_type": "call"}, {"api_name": "torchvision.prototype.datasets.utils._internal.hint_sharding", "line_number": 57, "usage_type": "call"}, {"api_name": "torchvision.prototype.datasets.utils._internal.hint_shuffling", "line_number": 58, "usage_type": "call"}, {"api_name": "torchdata.datapipes.iter.Mapper", "line_number": 59, "usage_type": "call"}, {"api_name": "torchdata.datapipes.iter.IterDataPipe", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "561296021", "text": "import cv2 as cv\nimport numpy as np\nimport argparse\nfrom matplotlib import pyplot as plt\n\nparser = argparse.ArgumentParser(description='Code for Creating Bounding boxes around lego pieces.')\nparser.add_argument('--input', help='Path to input image.', default='IMG_6101.JPG')\nargs = parser.parse_args()\n\nsrc = cv.imread(cv.samples.findFile(args.input))\nif src is None:\n print('Could not open or find the image:', args.input)\n exit(0)\n\n# Convert image to gray and blur it\nsrc_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)\nsrc_gray = cv.blur(src_gray, (3,3))\n\n# Calculate contours\ncanny_threshold = 50\ncanny_output = cv.Canny(src_gray, canny_threshold, canny_threshold * 3)\n\ncontours, _ = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n\ncontours_poly = [None]*len(contours)\nboundRect = [None]*len(contours)\nfor i, c in enumerate(contours):\n contours_poly[i] = cv.approxPolyDP(c, 3, True)\n boundRect[i] = cv.boundingRect(contours_poly[i])\n\n# Draw the contours\ncontours_drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)\nfor i in range(len(contours)):\n color = (255, 255, 255)\n cv.drawContours(contours_drawing, contours_poly, i, color)\n\n# Draw the original with bounding rectangle\nrectangle_drawing = src.copy()\nfor i in range(len(contours)):\n color = (255, 0, 0)\n cv.rectangle(rectangle_drawing, (int(boundRect[i][0]), int(boundRect[i][1])), \\\n (int(boundRect[i][0]+boundRect[i][2]), int(boundRect[i][1]+boundRect[i][3])), color, 2)\n\n\nplt.subplot(231)\nplt.imshow(src)\nplt.title('Original Image')\nplt.xticks([]), plt.yticks([])\n\nplt.subplot(232)\nplt.imshow(src_gray, cmap = 'gray')\nplt.title('Greyed and blured')\nplt.xticks([]), plt.yticks([])\n\nplt.subplot(233)\nplt.imshow(canny_output)\nplt.title('Canny output')\nplt.xticks([]), plt.yticks([])\n\nplt.subplot(234)\nplt.imshow(contours_drawing, cmap = 'gray')\nplt.title('Contours')\nplt.xticks([]), plt.yticks([])\n\nplt.subplot(235)\nplt.imshow(rectangle_drawing)\nplt.title('Contours with rectangles')\nplt.xticks([]), plt.yticks([])\n\nplt.show()", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2067, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.samples.findFile", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.samples", "line_number": 
10, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.blur", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.Canny", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.approxPolyDP", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.boundingRect", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 32, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 62, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}]} +{"seq_id": "565047641", "text": "import telebot\r\nimport extensions\r\nfrom config import TOKEN, keys\r\n\r\nbot = telebot.TeleBot(TOKEN)\r\n\r\n\r\n@bot.message_handler(commands=['start', 'help'])\r\ndef help(message: telebot.types.Message):\r\n text = \"Чтобы начать работу введите команду бота в следующем формате: \" \\\r\n \"\\n<имя валюты> <в какую валюту перевести> <количество переводимой валюты>\" \\\r\n \"\\nУвидеть список всех доступных валют: /values\"\r\n bot.reply_to(message, text)\r\n\r\n\r\n@bot.message_handler(commands=['values'])\r\ndef values(message: telebot.types.Message):\r\n text = \"Доступные валюты:\"\r\n for key in keys.keys():\r\n text = '\\n'.join((text, key, ))\r\n bot.reply_to(message, text)\r\n\r\n\r\n@bot.message_handler(content_types=['text', ])\r\ndef convert(message: telebot.types.Message):\r\n try:\r\n check_parametrs = message.text.split(' ')\r\n if len(check_parametrs) < 3:\r\n raise Exception('Введено параметров меньше необходимого')\r\n if len(check_parametrs) > 3:\r\n raise Exception('Введено параметров больше необходимого')\r\n except Exception as e:\r\n text = e\r\n else:\r\n quote_k, base_k, amount_k = message.text.strip().lower().split(' ')\r\n text = extensions.RequestAPI.get_price(quote_k, base_k, amount_k)\r\n bot.send_message(message.chat.id, text)\r\n\r\n\r\nbot.polling()\r\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "telebot.TeleBot", "line_number": 5, "usage_type": "call"}, {"api_name": "config.TOKEN", "line_number": 5, "usage_type": "argument"}, {"api_name": "telebot.types", "line_number": 9, "usage_type": "attribute"}, {"api_name": "telebot.types", "line_number": 17, "usage_type": "attribute"}, {"api_name": "config.keys.keys", "line_number": 19, "usage_type": "call"}, {"api_name": "config.keys", "line_number": 19, "usage_type": "name"}, {"api_name": "telebot.types", "line_number": 25, "usage_type": "attribute"}, {"api_name": "extensions.RequestAPI.get_price", "line_number": 36, "usage_type": "call"}, {"api_name": "extensions.RequestAPI", "line_number": 36, "usage_type": "attribute"}]} +{"seq_id": "514805546", "text": "#!/usr/bin/env python2\n\n##Here is a better example, an yes, the 
variable names suck. thats me\\.::\nimport pygame\nimport math\nimport sys \nimport random\n\nfrom pygame.time import Clock\nfrom pygame import Color, mouse\npygame.init()\n\ncircles = set()\n\ntspeed = 1\nspeed = tspeed\ncount = 0\nn_launchers = 3\n\ndef random_color(count):\n color = Color(\"white\")\n color.hsva = (count%360, 90, 80, 60)\n return color\n\nclass World(object):\n def __init__(self, x, y, x_accel=0, y_accel=0):\n self.x = x\n self.y = y\n self.center_x = self.x / 2\n self.center_y = self.y / 2\n self.mass = 2.0\n self.points = []\n self.lock = False\n self.color = random_color(60)\n\n def update(self):\n for x, y, signo in self.points:\n pygame.draw.circle(screen, self.color, (int(x), int(y)), int(10) )\n\n def calculate_gravity_for_particle(self, particle):\n\n for x, y, signo in self.points:\n if not particle.deleted:\n self._gravity_to_point(x, y, particle, signo)\n else:\n break\n\n def add_point(self):\n if not self.lock:\n x, y = pygame.mouse.get_pos()\n \n if pygame.mouse.get_pressed()[0]:\n signo = 1\n else:\n signo = -1\n\n self.points.append((x, y, signo))\n self.lock = True\n\n def unlock(self):\n self.lock = False\n\n\n def _gravity_to_point(self, x, y, particle, signo ):\n dx = particle.x - x\n dy = particle.y - y\n d = pow(dx, 2) + pow(dy, 2)\n ds = math.sqrt(d)\n force = self._calculate_gravity(d)\n if force is not None:\n force *= signo\n particle.x_accel += force * dx / ds\n particle.y_accel += force * dy / ds\n else:\n particle.delete()\n\n def _calculate_gravity(self, d):\n if d <= 10:\n force = None\n else:\n force = int( self.mass) / d\n #force = -1.0 * int( self.mass) / d\n return force\n\n\nclass Circle(object):\n def __init__(self, size, world, x=0, y=0, angle=0, speed=1, color = (0,0,0)):\n self.world = world\n self.color = color\n self.size = size/2\n self.speed_x = math.sin(angle) * speed\n self.speed_y = math.cos(angle) * speed\n self.x_accel = 0\n self.y_accel = 0\n self.x = x\n self.y = y #posicion inicial\n self.deleted=False\n\n def update(self, screen):\n x_size, y_size = screen.get_size()\n\n foo = self.world.calculate_gravity_for_particle(self)\n self.speed_x += self.x_accel\n self.speed_y += self.y_accel\n self.x += self.speed_x\n self.y += self.speed_y\n if not ( (0 < self.x and self.x < x_size) and (0 < self.y and self.y < y_size)):\n self.delete()\n return\n pygame.draw.circle(screen, self.color, (int(self.x), int(self.y)), int(self.size) )\n \n def delete(self):\n if not self.deleted:\n global circles\n circles.remove(self)\n self.deleted = True\n \n def __repr__(self):\n return \"\" % (self.x, self.y, self.angle)\n \n\n\nclass Launcher(object):\n \n def __init__(self, x, y, world, queue):\n self.x = x\n self.y = y\n self.world = world\n self.queue = queue\n self.cicles = 0\n self.rot_speed = 0\n self.mov_speed = 0\n self.q = 0\n self.rotation = 0\n self.counter = random.randint(0, 0xFFFFFF)\n self.color_change_speed = random.randint(1,3)\n\n def explode(self):\n for n in xrange(self.q):\n angle = ((n*360.0/self.q) + self.rotation) % 360\n color = random_color(self.counter)\n # self.counter += self.color_change_speed\n self.queue.add(Circle(4, self.world, x=self.x+random.randint(-20, 20), y=self.y, angle=angle, speed=self.mov_speed, color=color))\n \n def change(self):\n if not self.cicles:\n #self.rot_speed = random.uniform(-1, 1)\n self.mov_speed = 1 # random.uniform(1, 10)\n self.cicles = random.randint(500, 1000)\n self.q = 1 # random.randint(1, 6)\n self.rotation += (self.rot_speed % 360)\n self.cicles -= 1\n\n\nx , y = 800, 600 
\nscreen = pygame.display.set_mode((x, y))\nclock = Clock()\nworld = World(x, y, 0, 0.1)\n\nlaunchers = []\n\nfor n in xrange(1, n_launchers + 1 ):\n launchers.append( Launcher(n * x/ (n_launchers+1), n * y /(n_launchers + 1), world, circles ))\n launchers.append( Launcher(n * x/ (n_launchers+1), n * y /(n_launchers + 1), world, circles ))\n launchers.append( Launcher(n * x/ (n_launchers+1), n * y /(n_launchers + 1), world, circles ))\n launchers.append( Launcher(n * x/ (n_launchers+1), n * y /(n_launchers + 1), world, circles ))\n launchers.append( Launcher(n * x/ (n_launchers+1), n * y /(n_launchers + 1), world, circles ))\n launchers.append( Launcher(n * x/ (n_launchers+1), n * y /(n_launchers + 1), world, circles ))\n\n\n\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: \n sys.exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n world.add_point()\n elif event.type == pygame.MOUSEBUTTONUP:\n world.unlock()\n\n screen.fill((0,0,0))\n circle_list = list(circles)\n world.update()\n for c in circle_list:\n c.update(screen)\n for launcher in launchers:\n launcher.change()\n if len(circles) < 5000: \n for launcher in launchers:\n launcher.explode()\n \n \n pygame.display.update()\n clock.tick(50)\n\n", "sub_path": "mouse_repulsor.py", "file_name": "mouse_repulsor.py", "file_ext": "py", "file_size_in_byte": 5681, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pygame.init", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 52, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 68, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 91, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 92, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 110, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 110, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 135, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 136, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 143, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 149, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 156, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 156, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 157, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 174, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 174, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 175, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 176, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 177, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 179, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 194, "usage_type": "call"}, {"api_name": "pygame.display", 
"line_number": 194, "usage_type": "attribute"}]} +{"seq_id": "505886577", "text": "# My book is Dante's inferno (technically a poem)\nimport requests\nimport string\nimport random\nurl = 'https://www.gutenberg.org/files/1001/1001-h/1001-h.htm'\n\nfull_text = requests.get(url).text\n\nstart = 'Inferno: Canto I'\nend = 'End of the Project Gutenberg EBook'\n\n# Remove start and end material, newlines and punctuation\n\nfull_text = full_text[full_text.find(start):full_text.find(end)]\n\nfull_text = full_text.replace('\\n', ' ').replace('\\r', ' ')\nfor s in string.punctuation:\n full_text = full_text.replace(s, '')\n\nfull_text = full_text.split()\n\n\ndef generate_dict(txt):\n \"\"\"Create triplets dictionary in format (wd_one, wd_two):wd_three\"\"\"\n d = {}\n for idx in range(len(txt)-2):\n wd_one, wd_two, wd_three = txt[idx], txt[idx+1], txt[idx+2]\n d.setdefault((wd_one, wd_two), []).append(wd_three)\n return d\n\n\ndef generate_text(trigrams_dict, length):\n \"\"\"Generate text from trigrams dict with given number of words\"\"\"\n start_loc = random.randint(0, len(trigrams_dict))\n start_key = list(trigrams_dict.keys())[start_loc]\n results = [start_key[0], start_key[1]]\n for _ in range(length-2):\n next_word_choices = trigrams_dict[start_key]\n next_word = next_word_choices[random.randint(0,\n len(next_word_choices)-1)]\n start_key = (start_key[1], next_word)\n results.append(next_word)\n # Lines tend to be about 7 words long\n reshaped_results = []\n for i, j in enumerate(results):\n if i>0 and i % 7 == 0:\n reshaped_results.append('\\n')\n reshaped_results.append(j.title())\n else:\n reshaped_results.append(j.lower())\n return ' '.join(reshaped_results)\n\n\ndef print_chapter(length):\n \"\"\"Print chapter of inferno-like text with given length\"\"\"\n trigrams_dict = generate_dict(full_text)\n print(generate_text(trigrams_dict, length))\n\n\nif __name__ == '__main__':\n print_chapter(100)\n", "sub_path": "students/kuhnbt/lesson4/kata_script.py", "file_name": "kata_script.py", "file_ext": "py", "file_size_in_byte": 1977, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "string.punctuation", "line_number": 17, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 34, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "359797222", "text": "from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .models import Idea\nfrom login_app.models import User\nfrom django.db.models import Count\n\ndef index(request):\n context = {\n 'ideas': Idea.objects.annotate(num_likes=Count('user_likes')).order_by('-num_likes'),\n }\n return render(request,'snack_index.html',context)\n\ndef create_idea(request):\n errors = Idea.objects.basic_validator(request.POST)\n if errors:\n for k, v in errors.items(): \n messages.error(request, v)\n return redirect('/snacks')\n else:\n user = User.objects.get(id = request.session['user_id'])\n idea = Idea.objects.create(\n title = request.POST['title'],\n description = request.POST['description'],\n user = user,\n )\n idea.user_likes.add(user)\n return redirect('/snacks')\n\ndef display_idea(request, idea_id):\n context = {\n 'snack': Idea.objects.get(id = idea_id),\n 'this_user': User.objects.get(id = request.session['user_id']),\n }\n return render(request, 'snack.html', context)\n\n\ndef like(request, idea_id):\n 
user = User.objects.get(id = request.session['user_id'])\n idea = Idea.objects.get(id = idea_id)\n idea.user_dislikes.remove(user)\n idea.user_likes.add(user)\n return redirect('/snacks')\n\ndef dislike(request, idea_id):\n user = User.objects.get(id = request.session['user_id'])\n idea = Idea.objects.get(id = idea_id)\n idea.user_likes.remove(user)\n idea.user_dislikes.add(user)\n return redirect('/snacks')\n\ndef delete(request, idea_id):\n idea = Idea.objects.get(id = idea_id)\n idea.delete()\n return redirect('/snacks')\n\n \n\n\n\n", "sub_path": "snack_app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1712, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "models.Idea.objects.annotate", "line_number": 9, "usage_type": "call"}, {"api_name": "models.Idea.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "models.Idea", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 9, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Idea.objects.basic_validator", "line_number": 14, "usage_type": "call"}, {"api_name": "models.Idea.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.Idea", "line_number": 14, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 17, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 18, "usage_type": "call"}, {"api_name": "login_app.models.User.objects.get", "line_number": 20, "usage_type": "call"}, {"api_name": "login_app.models.User.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "login_app.models.User", "line_number": 20, "usage_type": "name"}, {"api_name": "models.Idea.objects.create", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Idea.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.Idea", "line_number": 21, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Idea.objects.get", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Idea.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.Idea", "line_number": 31, "usage_type": "name"}, {"api_name": "login_app.models.User.objects.get", "line_number": 32, "usage_type": "call"}, {"api_name": "login_app.models.User.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "login_app.models.User", "line_number": 32, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "login_app.models.User.objects.get", "line_number": 38, "usage_type": "call"}, {"api_name": "login_app.models.User.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "login_app.models.User", "line_number": 38, "usage_type": "name"}, {"api_name": "models.Idea.objects.get", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Idea.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "models.Idea", "line_number": 39, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 42, "usage_type": "call"}, {"api_name": "login_app.models.User.objects.get", "line_number": 45, "usage_type": "call"}, {"api_name": "login_app.models.User.objects", 
"line_number": 45, "usage_type": "attribute"}, {"api_name": "login_app.models.User", "line_number": 45, "usage_type": "name"}, {"api_name": "models.Idea.objects.get", "line_number": 46, "usage_type": "call"}, {"api_name": "models.Idea.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.Idea", "line_number": 46, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 49, "usage_type": "call"}, {"api_name": "models.Idea.objects.get", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Idea.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.Idea", "line_number": 52, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "617117439", "text": "import pymongo\nimport pymysql\nimport time\nimport xlwt\nimport oss2\nimport config\n\n\nclass ExportMarkOrQualitySettle(object):\n def __init__(self):\n \"\"\"初始化\"\"\"\n # 1. 创建mysql连接\n self.db_mysql = pymysql.connect(**config.mysql_config)\n self.cursor = self.db_mysql.cursor()\n\n # 2. 创建mongo连接\n dict_mongo = config.mongo_config\n self.db_mongo = pymongo.MongoClient(dict_mongo[\"address\"], dict_mongo[\"port\"])\n # 1) mongo认证过程,要使用认证库admin!\n self.db_auth = self.db_mongo.admin\n self.db_auth.authenticate(dict_mongo[\"user\"], dict_mongo[\"password\"])\n # 2) 连接业务库crowd\n self.db_client = config.mongo_config[\"database\"]\n\n # 3. 创建oss连接\n self.dict_oss = config.oss_config\n self.auth = oss2.Auth(self.dict_oss[\"accessKeyId\"], self.dict_oss[\"accessKeySecret\"])\n self.bucket = oss2.Bucket(self.auth, self.dict_oss[\"endpoint\"], self.dict_oss[\"bucketName\"])\n\n def close_databases(self):\n \"\"\"关闭数据库连接\"\"\"\n # 关闭mysql连接\n self.cursor.close()\n self.db_mysql.close()\n # 关闭mongo连接\n self.db_mongo.close()\n\n def export_settle(self, taskid, accTaskId):\n \"\"\"结算主方法: 导出、上传、返回\"\"\"\n # 一、 数据导出过程\n # 结算主方法sql:查询用户ID, 用户名称, 总条数, 合格总框数\n sql_count = \"\"\"\n SELECT\n t.tagUser,\n tt.userName,\n count(DISTINCT dataId) AS cnt,\n sum(qualifiedBoxCount) AS qualifiedBoxCount\n FROM\n (\n SELECT\n tagUser,\n dataId,\n tagCount,\n (case when unqualifiedBoxCount > 0 then qualifiedBoxCount else tagCount end) qualifiedBoxCount\n FROM\n `task_datadetail_b_\"\"\"+taskid+\"\"\"` AS t3\n WHERE\n checkFlag = '1'\n AND EXISTS (\n SELECT\n 1\n FROM\n task_accept_b AS t1\n INNER JOIN `task_acceptdetail_b_\"\"\"+accTaskId+\"\"\"` AS t2 ON t1.taskId = t2.taskId\n AND t1.acceptId = t2.acceptId\n WHERE\n 1 = 1\n AND t2.dataId = t3.dataId\n AND (\n (\n t1.accResult = '0'\n AND t2.acceptStatus != '4'\n )\n OR (\n t1.accResult != '0'\n AND t2.acceptStatus = '1'\n )\n )\n )\n ) AS t\n INNER JOIN user_info_m AS tt\n on t.tagUser = tt.userId\n GROUP BY\n tagUser, tt.userName\n \"\"\"\n self.cursor.execute(sql_count)\n count_info = self.cursor.fetchall()\n # 关闭数据库连接\n self.close_databases()\n # excel处理\n workbook = xlwt.Workbook()\n sheet = workbook.add_sheet('sheet1', cell_overwrite_ok=True)\n # 表头\n sheet.write(0, 0, '用户ID')\n sheet.write(0, 1, '用户名称')\n sheet.write(0, 2, '总条数')\n sheet.write(0, 3, '合格总框数')\n # 数据行\n rowIdx = 1\n for ci in count_info:\n sheet.write(rowIdx, 0, ci[0]) # 'tagUser'\n sheet.write(rowIdx, 1, ci[1]) # 'userName'\n sheet.write(rowIdx, 2, ci[2]) # 'cnt'\n sheet.write(rowIdx, 3, ci[3]) # 'qualifiedBoxCount'\n rowIdx = rowIdx+1\n # 定义时间戳:time获取当前时间戳\n now_time = int(time.time())\n time_array = time.localtime(now_time)\n normal_time = time.strftime(\"%Y-%m-%d_%H_%M_%S\", time_array)\n # 
输出的excel文件名一定是整个业务流程中的标注任务的任务ID加上时间戳\n fileName = ('%s' % taskid) + '_' + normal_time + '.xls'\n filePath = config.path\n workbook.save(\"%s%s\" % (filePath, fileName))\n\n # 二、 文件上传过程\n self.bucket.put_object_from_file(self.dict_oss[\"prefx\"] + fileName, filePath + fileName)\n upload_url = self.bucket.sign_url('GET', self.dict_oss[\"prefx\"] + fileName, 86400)\n\n # 三、 返回json过程\n json_obj = {\n \"type\": \"FILE\",\n \"file\": {\n \"name\": fileName,\n \"url\": upload_url\n }\n }\n\n return json_obj\n\n def task_judge(self, task_id):\n \"\"\"判断任务关系\"\"\"\n # 1. 查询当前任务类别\n sql_task = \"\"\"\n SELECT\n ti.checkTaskFlg AS taskType,\n ti.isAccept\n FROM\n `task_info_b` AS ti\n WHERE\n ti.taskId = %s\n \"\"\"\n self.cursor.execute(sql_task, task_id)\n results = self.cursor.fetchall()\n if len(results) > 0:\n taskType = results[0][0]\n isAccept = results[0][1]\n\n # 2. 如果当前任务的类别是标注任务(taskType == '0')并且该任务需要验收, 要向下查, 下一个任务一定是自检任务\n # 可以确定业务流程是 \"标注 → 自检 → 验收\";\n if taskType == '0' and isAccept == '1':\n sql_dest = \"\"\"\n SELECT\n ti.taskId\n FROM\n `task_info_b` AS ti\n WHERE\n ti.checkTaskId = %s\n \"\"\"\n self.cursor.execute(sql_dest, task_id)\n result = self.cursor.fetchall()\n accept_taskId = result[0][0]\n\n # 调用结算主方法\n rectData = self.export_settle(task_id, accept_taskId)\n return rectData\n\n # 3. 如果当前任务不需要验收, 就需要一直向下查, 直到查询无结果为止\n if taskType == '0' and isAccept == '0':\n sql_dest = \"\"\"\n SELECT\n ti.checkTaskFlg AS taskType,\n ti.taskId\n FROM\n `task_info_b` AS ti\n WHERE\n ti.checkTaskId = %s\n \"\"\"\n self.cursor.execute(sql_dest, task_id)\n result = self.cursor.fetchall()\n taskType_check = result[0][0]\n taskId_check = result[0][1]\n # 如果下一个任务是自检任务, 并且此时不需要验收, 即此任务是标注的自检任务\n if taskType_check == '4':\n self.cursor.execute(sql_dest, taskId_check)\n result_marselfcheck = self.cursor.fetchall()\n taskId_quacheck = result_marselfcheck[0][1]\n # 此时一定有下一个任务即质检任务, 所以再向下查一次\n self.cursor.execute(sql_dest, taskId_quacheck)\n result_quaselfcheck = self.cursor.fetchall()\n # 判断如果查询结果为空, 则无下一个任务\n if len(result_quaselfcheck) == 0:\n accept_taskId = taskId_quacheck\n # 如果结果不为空, 则存在下一个任务即质检的自检任务\n else:\n taskId_quaselfcheck = result_quaselfcheck[0][1]\n accept_taskId = taskId_quaselfcheck\n # 如果下一个任务是质检任务, 则需要判断是否还存在质检的自检任务\n else:\n self.cursor.execute(sql_dest, taskId_check)\n result_quaselfcheck = self.cursor.fetchall()\n if len(result_quaselfcheck) == 0:\n accept_taskId = taskId_check\n else:\n taskId_quaselfcheck = result_quaselfcheck[0][1]\n accept_taskId = taskId_quaselfcheck\n\n # 调用结算主方法\n rectData = self.export_settle(task_id, accept_taskId)\n return rectData\n\n # 4. 
如果当前任务的类别是质检任务(taskType == '1')并且该任务需要验收, 首先要向下查, 并判断下一个任务是否有自检任务,\n # 如果有自检任务, 则accept_taskId = 下一个任务ID, 如果没有, 则accept_taskId = 当前任务的ID;\n # 其次要向上查, 使用递归, 直到查询的checkTaskFlg的值为空时, 当前的任务ID即为标注任务的ID\n if taskType == '1' and isAccept == '1':\n # ***查询下一个任务(不一定存在, 需做判断是否还有自检任务)***\n sql_dest = \"\"\"\n SELECT\n ti.taskId\n FROM\n `task_info_b` AS ti\n WHERE\n ti.checkTaskId = %s\n \"\"\"\n self.cursor.execute(sql_dest, task_id)\n result = self.cursor.fetchall()\n if len(result) == 0:\n accept_taskId = task_id\n else:\n taskId_dest = result[0][0]\n self.cursor.execute(sql_dest, taskId_dest)\n result_dest = self.cursor.fetchall()\n if len(result_dest) == 0:\n accept_taskId = taskId_dest\n # ***查询上一个任务(一定存在, 需要判断上一个任务是自检任务还是标注任务)***\n sql_source = \"\"\"\n SELECT\n ti.checkTaskId,\n ti.checkTaskFlg AS taskType\n FROM\n `task_info_b` AS ti\n WHERE\n ti.taskId = %s\n \"\"\"\n self.cursor.execute(sql_source, task_id)\n result_source = self.cursor.fetchall()\n taskId_source = result_source[0][0]\n taskType_source = result_source[0][1]\n if taskType_source == '0':\n taskId_mark = taskId_source\n elif taskType_source == '1':\n taskId_mark = taskId_source\n else:\n self.cursor.execute(sql_source, taskId_source)\n result_mark = self.cursor.fetchall()\n taskId_mark = result_mark[0][0]\n\n # 调用结算主方法\n rectData = self.export_settle(taskId_mark, accept_taskId)\n return rectData\n\n # 5. 如果当前任务的类别是自检任务(taskType == '4'), 并且不需要验收, 可以确定当前任务是标注的自检任务, 需要:\n # 1) 向上查一次, 确定标注任务的ID\n if taskType == '4' and isAccept == '0':\n sql_source = \"\"\"\n SELECT\n ti.checkTaskId\n FROM\n `task_info_b` AS ti\n WHERE\n ti.taskId = %s\n \"\"\"\n self.cursor.execute(sql_source, task_id)\n result_source_mark = self.cursor.fetchall()\n taskId_source_mark = result_source_mark[0][0]\n # 2) 向下查一次, 一定存在质检任务\n sql_dest = \"\"\"\n SELECT\n ti.taskId\n FROM\n `task_info_b` AS ti\n WHERE\n ti.checkTaskId = %s\n \"\"\"\n self.cursor.execute(sql_dest, task_id)\n dest = self.cursor.fetchall()\n # 如果查询结果为空, 则证明是中途开启的自检, 需要使用标注任务的ID即'taskId_source_mark'去查询质检任务的任务ID\n if len(dest) == 0:\n self.cursor.execute(sql_dest, taskId_source_mark)\n checkId = self.cursor.fetchall()\n quacheckId = checkId[0][0]\n # 查询质检任务的下一个任务(不一定存在, 需做判断是否还有质检的自检任务)\n sql_dest = \"\"\"\n SELECT\n ti.taskId\n FROM\n `task_info_b` AS ti\n WHERE\n ti.checkTaskId = %s\n \"\"\"\n self.cursor.execute(sql_dest, quacheckId)\n selfId = self.cursor.fetchall()\n if len(selfId) == 0:\n accept_taskId = quacheckId\n else:\n quaselfId = selfId[0][0]\n accept_taskId = quaselfId\n # 如果查询不为空, 说明质检任务关联的是标注的自检任务, 不是中途开启的自检任务\n else:\n quacheckId = dest[0][0]\n # 查询质检任务的下一个任务(不一定存在, 需做判断是否还有质检的自检任务)\n sql_dest = \"\"\"\n SELECT\n ti.taskId\n FROM\n `task_info_b` AS ti\n WHERE\n ti.checkTaskId = %s\n \"\"\"\n self.cursor.execute(sql_dest, quacheckId)\n selfId = self.cursor.fetchall()\n if len(selfId) == 0:\n accept_taskId = quacheckId\n else:\n quaselfId = selfId[0][0]\n accept_taskId = quaselfId\n\n # 调用结算主方法\n rectData = self.export_settle(taskId_source_mark, accept_taskId)\n return rectData\n\n # 6. 
如果当前任务是自检任务, 并且需要验收, 需要向上查一次, 查询上一个任务是标注任务还是质检任务\n if taskType == '4' and isAccept == '1':\n # 向上查一次, 得到上一个任务的任务ID\n sql_source = \"\"\"\n SELECT\n ti.checkTaskId\n FROM\n `task_info_b` AS ti\n WHERE\n ti.taskId = %s\n \"\"\"\n self.cursor.execute(sql_source, task_id)\n result = self.cursor.fetchall()\n taskId_source = result[0][0]\n # 使用上一个任务ID查询出上一个任务的任务类别\n sql_source_type = \"\"\"\n SELECT\n ti.checkTaskFlg\n FROM\n `task_info_b` AS ti\n WHERE\n ti.taskId = %s\n \"\"\"\n self.cursor.execute(sql_source_type, taskId_source)\n result_type = self.cursor.fetchall()\n source_type = result_type[0][0]\n # source_type的结果不是'0',就是'1', 即不是标注任务, 就是质检任务\n # 如果上一个任务是标注任务, 且现在需要验收, 则业务流程是 \"标注 → 自检 → 验收\"\n if source_type == '0':\n taskId_mark = taskId_source\n accept_taskId = task_id\n # 如果上一个任务是质检任务, 继续向上查, 并判断质检的上一个任务是标注任务还是标注的自检任务\n else:\n sql_mark = \"\"\"\n SELECT\n ti.checkTaskId\n FROM\n `task_info_b` AS ti\n WHERE\n ti.taskId = %s\n \"\"\"\n self.cursor.execute(sql_mark, taskId_source)\n result_isMark = self.cursor.fetchall()\n taskId_isMark = result_isMark[0][0]\n # 根据此ID, 查询此任务的类别\n sql_mark_type = \"\"\"\n SELECT\n ti.checkTaskFlg\n FROM\n `task_info_b` AS ti\n WHERE\n ti.taskId = %s\n \"\"\"\n self.cursor.execute(sql_mark_type, taskId_isMark)\n result_mark_type = self.cursor.fetchall()\n taskType_isMark = result_mark_type[0][0]\n # 如果质检任务的上一个任务是自检任务, 则需要再向上查一次\n if taskType_isMark == '4':\n self.cursor.execute(sql_mark, taskId_isMark)\n rect = self.cursor.fetchall()\n markId = rect[0][0]\n taskId_mark = markId\n # 如果质检任务的上一个任务是标注任务, 则结束查询\n else:\n taskId_mark = taskId_isMark\n accept_taskId = task_id\n\n # 调用结算主方法\n rectData = self.export_settle(taskId_mark, accept_taskId)\n return rectData\n\n else:\n json_obj = {\n \"type\": \"FILE\",\n \"file\": {\n \"name\": None,\n \"url\": None\n }\n }\n\n return json_obj\n", "sub_path": "restfulApi/markOrQualitySettle.py", "file_name": "markOrQualitySettle.py", "file_ext": "py", "file_size_in_byte": 18297, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "83", "api": [{"api_name": "pymysql.connect", "line_number": 13, "usage_type": "call"}, {"api_name": "config.mysql_config", "line_number": 13, "usage_type": "attribute"}, {"api_name": "config.mongo_config", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 18, "usage_type": "call"}, {"api_name": "config.mongo_config", "line_number": 23, "usage_type": "attribute"}, {"api_name": "config.oss_config", "line_number": 26, "usage_type": "attribute"}, {"api_name": "oss2.Auth", "line_number": 27, "usage_type": "call"}, {"api_name": "oss2.Bucket", "line_number": 28, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 91, "usage_type": "call"}, {"api_name": "time.time", "line_number": 107, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 108, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 109, "usage_type": "call"}, {"api_name": "config.path", "line_number": 112, "usage_type": "attribute"}]} +{"seq_id": "630994336", "text": "from tkinter import *\r\nimport matplotlib as plt\r\nimport numpy as np\r\nimport matplotlib.animation as animation\r\nfrom matplotlib.figure import Figure\r\nfrom matplotlib.backends.backend_tkagg import (\r\n FigureCanvasTkAgg, NavigationToolbar2Tk)\r\nimport threading\r\nimport os\r\nimport random\r\nimport time\r\n\r\nspath='data.txt'\r\nspath2='data2.txt'\r\n\r\n#a=2 # type: int\r\n\r\ndef 
load_params(fname,list,list_dtypes,list_values):\r\n if(os.path.isfile(fname)):\r\n f = open(fname, 'r')\r\n a = f.read()\r\n for i in range(list.__len__()):\r\n x=a.find(list[i])\r\n if(x!=-1):\r\n y=a.find(\":\",x)\r\n if(y!=-1):\r\n z=a.find(\"\\n\",y)\r\n if(z!=-1):\r\n list_values.append(list_dtypes[i](a[y+1:z]))\r\n\r\ndef save_params(fname,list,list_dtypes,list_values):\r\n if(os.path.isfile(fname)):\r\n os.remove(fname)\r\n f = open(fname, 'w+')\r\n if(list.__len__()==list_dtypes.__len__() and list_dtypes.__len__()==list_values.__len__()):\r\n for i in range(list.__len__()):\r\n s_value=list[i]+\":\"+str(list_values[i])+\"\\n\"\r\n f.write(s_value)\r\n\r\n\r\nclass app:\r\n\r\n def checkpath_thread(self):\r\n #print('thread start')\r\n while(True):\r\n if(self.new_path==True):\r\n if(os.path.isfile(self.s_indatapath)):\r\n self.ed_indatapath['bg']= 'green'\r\n else:\r\n self.ed_indatapath['bg'] = 'red'\r\n\r\n if(os.path.isfile(self.s_outdatapath)):\r\n self.ed_outdatapath['bg']= 'green'\r\n else:\r\n self.ed_outdatapath['bg'] = 'red'\r\n\r\n if(os.path.isfile(self.s_inputpath)):\r\n self.ed_inputpath['bg']= 'green'\r\n else:\r\n self.ed_inputpath['bg'] = 'red'\r\n\r\n self.new_path=False\r\n\r\n\r\n def press_button(self,event):\r\n fpath=self.dpath.get(1.0,END)\r\n print(fpath)\r\n fpath=fpath.rstrip()\r\n fpath=fpath.lstrip()\r\n try:\r\n a = np.genfromtxt(fpath)\r\n except:\r\n print(\"error\")\r\n else:\r\n print(a)\r\n self.lbl['text']=a\r\n #v = StringVar()\r\n #Label(self.lbl, textvariable=v).pack()\r\n #v.set(a)\r\n\r\n\r\n def on_changed(self,event):\r\n fpath=self.ed_indatapath.get(1.0, END)\r\n fpath=fpath.rstrip()\r\n fpath=fpath.lstrip()\r\n if(self.s_indatapath!=fpath):\r\n self.s_indatapath = fpath\r\n self.new_path=True\r\n\r\n fpath=self.ed_outdatapath.get(1.0, END)\r\n fpath=fpath.rstrip()\r\n fpath=fpath.lstrip()\r\n if(self.s_outdatapath!=fpath):\r\n self.s_outdatapath = fpath\r\n self.new_path=True\r\n\r\n fpath=self.ed_inputpath.get(1.0, END)\r\n fpath=fpath.rstrip()\r\n fpath=fpath.lstrip()\r\n if(self.s_inputpath!=fpath):\r\n self.s_inputpath = fpath\r\n self.new_path=True\r\n\r\n def add_data_thread(self):\r\n while True:\r\n self.tdata=np.append(self.tdata,random.randint(0,10))\r\n time.sleep(1)\r\n\r\n def draw_thread(self,i):\r\n self.trainingplot.clear()\r\n self.trainingplot.plot(self.tdata)\r\n\r\n\r\n def __init__(self):\r\n self.root = Tk()\r\n self.root.minsize(width=600,height=500)\r\n\r\n self.s_indatapath=''\r\n self.s_outdatapath=''\r\n self.s_inputpath=''\r\n\r\n self.new_path=True\r\n\r\n self.lbl_indatapath= Label(self.root,height=1,width=12,font='Arial 11',bg=\"white\", fg=\"black\",text='in_data fname :',anchor=W, justify=LEFT)\r\n self.lbl_outdatapath= Label(self.root,height=1,width=12,font='Arial 11',bg=\"white\", fg=\"black\",text='out_data fname:',anchor=W, justify=LEFT)\r\n self.lbl_inputpath= Label(self.root,height=1,width=12,font='Arial 11',bg=\"white\", fg=\"black\",text='input fname :',anchor=W, justify=LEFT)\r\n\r\n self.frm=Frame(self.root,bg='white',bd=5,height=200, width=300)\r\n\r\n #self.btn = Button(self.root, # родительское окно\r\n # text=\"Click me\", # надпись на кнопке\r\n # width=10, height=5, # ширина и высота\r\n # bg=\"white\", fg=\"black\") # цвет фона и надписи\r\n self.ed_indatapath = Text(self.root, height=1, width=15, font='Arial 11', wrap=WORD)\r\n self.ed_outdatapath = Text(self.root, height=1, width=15, font='Arial 11', wrap=WORD)\r\n self.ed_inputpath = Text(self.root, height=1, width=15, font='Arial 11', 
wrap=WORD)\r\n\r\n self.ed_indatapath.insert(1.0, 'in_data.txt')\r\n self.ed_outdatapath.insert(1.0, 'out_data.txt')\r\n self.ed_inputpath.insert(1.0, 'input.txt')\r\n\r\n self.ed_indatapath.bind('', self.on_changed)\r\n self.ed_outdatapath.bind('', self.on_changed)\r\n self.ed_inputpath.bind('', self.on_changed)\r\n self.ed_indatapath.bind('