diff --git "a/1276.jsonl" "b/1276.jsonl" new file mode 100644--- /dev/null +++ "b/1276.jsonl" @@ -0,0 +1,397 @@ +{"seq_id": "1524653", "text": "import os\nimport socket\n\nimport config\nfrom handler import Handler\n\nclass Server:\n def __init__(self, root, ncpu):\n self.root = root\n self.ncpu = ncpu\n self.workers = []\n\n\n def start(self):\n print('Server start')\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server_socket.bind((config.HOST, config.PORT)) \n server_socket.listen(config.LISTENERS) \n\n\n for worker in range(self.ncpu):\n pid = os.fork()\n if pid:\n self.workers.append(pid)\n else:\n print('Run worker: {}'.format(os.getpid()))\n while True:\n client_socket, client_address = server_socket.accept() \n request = client_socket.recv(config.REQ_SIZE) \n\n \n if request.strip() == 0:\n client_socket.close()\n continue\n\n handler = Handler(self.root, request)\n response = handler.get_response()\n client_socket.sendall(response)\n\n client_socket.close()\n\n server_socket.close()\n\n\n for pid in self.workers:\n os.waitpid(pid, 0)\n", "sub_path": "prefork_server/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1426, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "socket.socket", "line_number": 16, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 16, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 16, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 17, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 17, "usage_type": "attribute"}, {"api_name": "config.HOST", "line_number": 18, "usage_type": "attribute"}, {"api_name": "config.PORT", "line_number": 18, "usage_type": "attribute"}, {"api_name": "config.LISTENERS", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.fork", "line_number": 23, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 27, "usage_type": "call"}, {"api_name": "config.REQ_SIZE", "line_number": 30, "usage_type": "attribute"}, {"api_name": "handler.Handler", "line_number": 37, "usage_type": "call"}, {"api_name": "handler.get_response", "line_number": 38, "usage_type": "call"}, {"api_name": "os.waitpid", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "342743269", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef clean_plot():\n aqi_data1 = pd.read_csv('Real Time AQI of Major Cities in the World.csv', na_values=['-'])\n aqi_data = aqi_data1.copy()\n for i in aqi_data.index:\n aqi_data['City'][i] = aqi_data1['City'][i].split('(')[0]\n # save as csv by pd without import csv\n aqi_data.to_csv('Cleaned: Real Time AQI of Major Cities in the World.csv', index=False)\n\n top50_cities = aqi_data.sort_values(by=['AQI'], ascending=False).head(50)\n top50_cities.plot(kind='bar', x='City', y='AQI',\n title='Top 50 Major City with the Worst AQI in the World', figsize=(20, 10))\n\n plt.show()\n\n\nclean_plot()\n", "sub_path": "code folder/Projects/AQI Data Processing.py", "file_name": "AQI Data Processing.py", "file_ext": "py", "file_size_in_byte": 690, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 17, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "5033152", "text": "from easydict import EasyDict as edict\n\n__C = edict()\n\ncfg = __C\n\n#\n# Training options\n#\n\n__C.TRAIN = edict()\n\n\n# Images to use per minibatch\n__C.TRAIN.IMS_PER_BATCH = 100\n\n# Iterations between snapshots\n__C.TRAIN.SNAPSHOT_ITERS = 100000\n\n# solver.prototxt specifies the snapshot path prefix, this adds an optional\n# infix to yield the path: [_]_iters_XYZ.caffemodel\n__C.TRAIN.SNAPSHOT_INFIX = ''\n#\n# Testing options\n#\n\n__C.TEST = edict()\n\n\n\n#\n# MISC\n#\n__C.RNG_SEED = 3\n", "sub_path": "pulseEx1/lib/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 485, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "easydict.EasyDict", "line_number": 3, "usage_type": "call"}, {"api_name": "easydict.EasyDict", "line_number": 11, "usage_type": "call"}, {"api_name": "easydict.EasyDict", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "545713582", "text": "import win32com.client\n\ndef Excel():\n return win32com.client.Dispatch('Excel.Application')\n\ndef hello_world():\n \"\"\"\n Create a 'Hello World' Excel workbook.\n \"\"\"\n xl = Excel()\n\n wb = xl.Workbooks.Add()\n wb.Worksheets[0].Cells(1,1).Value = 'Hello World'\n\n xl.Visible = True\n\ndef main():\n \"\"\"\n Demonstrations of using win32com to create Excel files.\n \"\"\"\n from argparse import ArgumentParser\n\n cli = ArgumentParser(description=main.__doc__)\n args = cli.parse_args()\n hello_world()\n\nif __name__ == '__main__':\n main()\n", "sub_path": "demos/win32xl.py", "file_name": "win32xl.py", "file_ext": "py", "file_size_in_byte": 562, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "win32com.client.client.Dispatch", "line_number": 4, "usage_type": "call"}, {"api_name": "win32com.client.client", "line_number": 4, "usage_type": "attribute"}, {"api_name": "win32com.client", "line_number": 4, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "181545676", "text": "import web\nimport os\n#resolve absolute directory path\nroot_dir = os.path.abspath(os.path.dirname(__file__))\n\n#render templates from folder\ntemplate_dir = root_dir + '/templates'\nrender = web.template.render(template_dir, base='layout')\n\n#absolute path to sqlite db\ndb_dir = root_dir + '/news.db'\n\n#debugging purposes\n#web.config.debug = True\n\nurls = (\n '/', 'index',\n '/news', 'news',\n '/discography','discography',\n '/about', 'about'\n)\n\nclass index:\n def GET(self):\n return render.index()\n\nclass news:\n def GET(self):\n db = web.database(dbn='sqlite', db=db_dir)\n #retrieve only the twenty most recent articles\n articles = db.select('articles', order='epochtime DESC', limit = 20)\n update_time = db.select('articles', what='dbtime', \n limit = 1, order = 'dbtime DESC')[0].dbtime\n return render.news(articles,update_time)\n\nclass discography:\n def GET(self):\n return render.discography()\n\nclass about:\n def GET(self):\n return render.about()\n\nif __name__ == \"__main__\":\n #development\n app = web.application(urls, globals())\n app.run()\nelse:\n #mod_wsgi\n app = web.application(urls, globals(), autoreload=False)\n application = app.wsgifunc()\n", "sub_path": "code.py", "file_name": "code.py", "file_ext": "py", "file_size_in_byte": 1266, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "os.path.abspath", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 4, "usage_type": "call"}, {"api_name": "web.template.render", "line_number": 8, "usage_type": "call"}, {"api_name": "web.template", "line_number": 8, "usage_type": "attribute"}, {"api_name": "web.database", "line_number": 29, "usage_type": "call"}, {"api_name": "web.application", "line_number": 46, "usage_type": "call"}, {"api_name": "web.application", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "478003627", "text": "import numpy as np\nimport torch\nimport random\nfrom torch.utils.data import Dataset\n\n\nclass MyDataset(Dataset):\n def __init__(self, ap, meta_data, voice_len=1.6, num_speakers_in_batch=64,\n num_utter_per_speaker=10, skip_speakers=False, verbose=False):\n \"\"\"\n Args:\n ap (TTS.tts.utils.AudioProcessor): audio processor object.\n meta_data (list): list of dataset instances.\n seq_len (int): voice segment length in seconds.\n verbose (bool): print diagnostic information.\n \"\"\"\n self.items = meta_data\n self.sample_rate = ap.sample_rate\n self.voice_len = voice_len\n self.seq_len = int(voice_len * self.sample_rate)\n self.num_speakers_in_batch = num_speakers_in_batch\n self.num_utter_per_speaker = num_utter_per_speaker\n self.skip_speakers = skip_speakers\n self.ap = ap\n self.verbose = verbose\n self.__parse_items()\n if self.verbose:\n print(\"\\n > DataLoader initialization\")\n print(f\" | > Number of instances : {len(self.items)}\")\n print(f\" | > Sequence length: {self.seq_len}\")\n print(f\" | > Num speakers: {len(self.speakers)}\")\n\n def load_wav(self, filename):\n audio = self.ap.load_wav(filename, sr=self.ap.sample_rate)\n return audio\n\n def load_data(self, idx):\n text, wav_file, speaker_name = self.items[idx]\n wav = np.asarray(self.load_wav(wav_file), dtype=np.float32)\n mel = self.ap.melspectrogram(wav).astype(\"float32\")\n # sample seq_len\n\n assert text.size > 0, self.items[idx][1]\n assert wav.size > 0, self.items[idx][1]\n\n sample = {\n \"mel\": mel,\n \"item_idx\": self.items[idx][1],\n \"speaker_name\": speaker_name,\n }\n return sample\n\n def __parse_items(self):\n \"\"\"\n Find unique speaker ids and create a dict mapping utterances from speaker id\n \"\"\"\n speakers = list({item[-1] for item in self.items})\n self.speaker_to_utters = {}\n self.speakers = []\n for speaker in speakers:\n speaker_utters = [item[1] for item in self.items if item[2] == speaker]\n if len(speaker_utters) < self.num_utter_per_speaker and self.skip_speakers:\n print(\n f\" [!] Skipped speaker {speaker}. 
Not enough utterances {self.num_utter_per_speaker} vs {len(speaker_utters)}.\"\n )\n else:\n self.speakers.append(speaker)\n self.speaker_to_utters[speaker] = speaker_utters\n\n def __len__(self):\n return int(1e10)\n\n def __sample_speaker(self):\n speaker = random.sample(self.speakers, 1)[0]\n if self.num_utter_per_speaker > len(self.speaker_to_utters[speaker]):\n utters = random.choices(\n self.speaker_to_utters[speaker], k=self.num_utter_per_speaker\n )\n else:\n utters = random.sample(\n self.speaker_to_utters[speaker], self.num_utter_per_speaker\n )\n return speaker, utters\n\n def __sample_speaker_utterances(self, speaker):\n \"\"\"\n Sample all M utterances for the given speaker.\n \"\"\"\n feats = []\n labels = []\n for _ in range(self.num_utter_per_speaker):\n # TODO:dummy but works\n while True:\n if len(self.speaker_to_utters[speaker]) > 0:\n utter = random.sample(self.speaker_to_utters[speaker], 1)[0]\n else:\n self.speakers.remove(speaker)\n speaker, _ = self.__sample_speaker()\n continue\n wav = self.load_wav(utter)\n if wav.shape[0] - self.seq_len > 0:\n break\n self.speaker_to_utters[speaker].remove(utter)\n\n offset = random.randint(0, wav.shape[0] - self.seq_len)\n mel = self.ap.melspectrogram(wav[offset : offset + self.seq_len])\n feats.append(torch.FloatTensor(mel))\n labels.append(speaker)\n return feats, labels\n\n def __getitem__(self, idx):\n speaker, _ = self.__sample_speaker()\n return speaker\n\n def collate_fn(self, batch):\n labels = []\n feats = []\n for speaker in batch:\n feats_, labels_ = self.__sample_speaker_utterances(speaker)\n labels.append(labels_)\n feats.extend(feats_)\n feats = torch.stack(feats)\n return feats.transpose(1, 2), labels\n", "sub_path": "TTS/speaker_encoder/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 4578, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 7, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 39, "usage_type": "attribute"}, {"api_name": "random.sample", "line_number": 74, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 76, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 80, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 95, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 122, "usage_type": "call"}]} +{"seq_id": "193316732", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom os.path import dirname, realpath, exists\nfrom setuptools import setup, find_packages\nimport sys\n\n\nauthor = u\"Paul Müller\"\nauthors = [author]\ndescription = 'user-friendly quantitative phase imaging analysis'\nname = 'drymass'\nyear = \"2017\"\n\nsys.path.insert(0, realpath(dirname(__file__))+\"/\"+name)\nfrom _version import version\n\nif version.count(\"dev\") or sys.argv.count(\"test\"):\n # specific versions are not desired for\n # - development version\n # - running pytest\n release_deps = [\"qpformat\",\n \"qpimage\",\n \"qpsphere\"]\nelse:\n release_deps = [\"qpformat==0.1.4\",\n \"qpimage==0.1.6\",\n \"qpsphere==0.1.4\",\n ]\n\nsetup(\n name=name,\n author=author,\n author_email='dev@craban.de',\n url='https://github.com/RI-imaging/DryMass',\n 
version=version,\n packages=find_packages(),\n package_dir={name: name},\n license=\"MIT\",\n description=description,\n long_description=open('README.rst').read() if exists('README.rst') else '',\n install_requires=[\"matplotlib\",\n \"numpy\",\n \"scikit-image>=0.13.1\",\n ] + release_deps,\n setup_requires=['pytest-runner'],\n tests_require=[\"pytest\"],\n entry_points={\n \"console_scripts\": [\n \"dm_analyze_sphere = drymass.cli:cli_analyze_sphere\",\n \"dm_convert = drymass.cli:cli_convert\",\n \"dm_extract_roi = drymass.cli:cli_extract_roi\",\n ],\n },\n python_requires='>=3.5, <4',\n keywords=[\"digital holographic microscopy\",\n \"optics\",\n \"quantitative phase imaging\",\n \"refractive index\",\n \"scattering\",\n ],\n classifiers= [\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Intended Audience :: Science/Research'\n ],\n platforms=['ALL'],\n )\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 2002, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "sys.path.insert", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 14, "usage_type": "call"}, {"api_name": "_version.version.count", "line_number": 17, "usage_type": "call"}, {"api_name": "_version.version", "line_number": 17, "usage_type": "name"}, {"api_name": "sys.argv.count", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 30, "usage_type": "call"}, {"api_name": "_version.version", "line_number": 35, "usage_type": "name"}, {"api_name": "setuptools.find_packages", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "449657749", "text": "import pygame.font\n\n\n# Class for play button.\nclass Button():\n def __init__(self, alien_invasion_settings, game_window, message):\n self.game_window = game_window\n self.game_window_rect = self.game_window.get_rect()\n \"\"\"Set the dimensions and properties of the button.\"\"\"\n self.button_width = 180\n self.button_height = 70\n self.button_color = (193, 237, 109)\n self.button_text_color = (47, 59, 47)\n self.font = pygame.font.SysFont(None, 38)\n \"\"\"Build button and center it.\"\"\"\n self.rect = pygame.Rect(0, 0, self.button_width, self.button_height)\n self.rect.center = self.game_window_rect.center\n \"\"\"Pygame renders strings of text as images.\"\"\"\n self.prepare_message(message)\n\n # Method for turning message into a rendered image and center the text on\n # the button.\n def prepare_message(self, message):\n \"\"\"The call to font.render() turns text into an image. 
A Boolean value\n is used to turn antialiasing on.\"\"\"\n self.message_image = self.font.render(\n message, True, self.button_text_color, self.button_color)\n \"\"\"Center text image on the button.\"\"\"\n self.message_image_rect = self.message_image.get_rect()\n self.message_image_rect.center = self.rect.center\n\n # Method for first drawing a blank button and then the message.\n def draw_button(self):\n self.game_window.fill(self.button_color, self.rect)\n self.game_window.blit(self.message_image, self.message_image_rect)\n", "sub_path": "button.py", "file_name": "button.py", "file_ext": "py", "file_size_in_byte": 1545, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "pygame.font.font.SysFont", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.font.font", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.font", "line_number": 14, "usage_type": "name"}, {"api_name": "pygame.font.Rect", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "448600535", "text": "import socket,random,tempfile,os\n#still not sure what this is for\nukeyvals=\"0123456789abcdef\"\ndef getRandomUKey():\n global ukeyvals\n return \"\".join([random.choice(ukeyvals) for i in range(32)])\nboundaryvals=\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\ndef getRandomBoundary():\n global boundaryvals\n return \"----\"+\"\".join([random.choice(boundaryvals) for x in range(34)])\ndef createPayload(file_path,ukey=None,boundary=None,filename=None,out_file=None,out_dir=None):\n if filename is None:filename=os.path.basename(file_path)\n if ukey is None:ukey=getRandomUKey()\n fh=None;wfh=None\n if out_file is None:fh,out_file=tempfile.mkstemp(dir=out_dir)\n else:wfh=open(out_file,'wb')\n if boundary is None:boundary=getRandomBoundary()\n if fh is not None:wfh=os.fdopen(fh)\n act_bound=\"--\"+boundary\n act_bound=act_bound.encode()\n wfh.write(act_bound)\n wfh.write(b\"\\r\\nContent-Disposition: form-data; name=\\\"u_key\\\"\\r\\n\\r\\n\")\n wfh.write(ukey.encode())\n wfh.write(b\"\\r\\n\")\n wfh.write(act_bound)\n wfh.write(\"\\r\\nContent-Disposition: form-data; name=\\\"files[]\\\"; filename=\\\"{0}\\\"\\r\\nContent-Type: application/octet-stream\\r\\n\\r\\n\".format(filename).encode())\n with open(file_path,'rb')as inp:\n wfh.write(inp.read())\n wfh.write(b\"\\r\\n\")\n wfh.write(act_bound)\n wfh.write(b\"--\\r\\n\")\n wfh.close()\n return out_file\ndef uploadFile():\n conn=socket.create_connection((\"expirebox.com\",443))\n", "sub_path": "expirebox.py", "file_name": "expirebox.py", "file_ext": "py", "file_size_in_byte": 1468, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "random.choice", "line_number": 6, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "tempfile.mkstemp", "line_number": 15, "usage_type": "call"}, {"api_name": "os.fdopen", "line_number": 18, "usage_type": "call"}, {"api_name": "socket.create_connection", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "70994681", "text": "# -*- coding: utf-8 -*-\nimport os, sys, time, datetime, json\nimport csv\nimport helpers.file_helper as file_helper\nimport helpers.log_helper as 
log_helper\nimport helpers.sql_helper as sql_helper\nimport helpers.send_slack as send_slack\nimport helpers.logger_helper as logger_helper\nimport config\nfrom enums import status, color\n\nnow = datetime.datetime.now()\nnow_str = now.strftime(\"%Y-%m-%d %H:%M:%S\")\nnow_id = now.strftime(\"%Y%m%d%H%M%S\")\nlogger = logger_helper.mylog('validate_katalon_report').getlog()\n\ndef validate_purchase_ui(log_id, steps, country, report_path,report_result, sql_insert_data_list=[], is_send_slack=False):\n with open(report_path) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n\n step_go = True\n for (step, item) in steps.items():\n if step_go:\n is_success = False\n for row in readCSV:\n if row[0] == item[1] and row[6] == 'PASSED':\n is_success = True\n break\n if is_success:\n logger.info('katalon passed: country=%s, step=%s, case=%s' % (country, step, item[1]))\n sql_insert_data_list.append((log_id, country, step, item[0], status.Success, '', now_str))\n else:\n logger.info('katalon failed: country=%s, step=%s, case=%s' % (country, step, item[1]))\n if is_send_slack:\n try:\n title = ', this is Katalon failed notification with purchase availability'\n attachments = [\n {\n \"pretext\": \"--------------\",\n \"title\": \"Country=%s, Step=%s, Case=%s\" % (country, step, item[1]), \n \"text\": \"Refer To: %s\" % report_result, \n \"color\": color.Fail,\n \"ts\": int(time.time())\n }\n ]\n send_slack.send_slack(config.slack[\"token\"], config.slack[\"channel\"], title, attachments)\n except Exception as e:\n logger.error('send slack error:' + str(e))\n finally:\n logger.info('end send slack...')\n sql_insert_data_list.append((log_id, country, step, item[0], status.Failed, report_path, now_str))\n step_go = False\n else:\n logger.info('Skip: country=%s, step=%s, case=%s' % (country, step, item[1]))\n sql_insert_data_list.append((log_id, country, step, item[0], status.NotValid_Skip, '', now_str))\n return sql_insert_data_list\n\nif __name__ == \"__main__\":\n #init data what need (format data&time, env, country)\n args = sys.argv[1:]\n if not args:\n logger.info(\"not args\")\n env='Preview'\n country='HK'\n report_id='7'\n else:\n env = args[0]\n country = args[1]\n report_id = args[2]\n logger.info(\"env:{0}, country:{1}, report_id:{2}\".format(env, country, report_id))\n\n sql_insert_data_list = []\n\n is_success_purchase_ui = validate_purchase_ui(env, country, report_id, sql_insert_data_list)\n logger.info(\"sql_insert_data_list: %s\" % sql_insert_data_list)\n ", "sub_path": "validate_katalon_report.py", "file_name": "validate_katalon_report.py", "file_ext": "py", "file_size_in_byte": 3418, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "datetime.datetime.now", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "attribute"}, {"api_name": "helpers.logger_helper.mylog", "line_number": 15, "usage_type": "call"}, {"api_name": "helpers.logger_helper", "line_number": 15, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 19, "usage_type": "call"}, {"api_name": "enums.status.Success", "line_number": 31, "usage_type": "attribute"}, {"api_name": "enums.status", "line_number": 31, "usage_type": "name"}, {"api_name": "enums.color.Fail", "line_number": 42, "usage_type": "attribute"}, {"api_name": "enums.color", "line_number": 42, "usage_type": "name"}, {"api_name": "time.time", "line_number": 43, "usage_type": "call"}, {"api_name": 
"helpers.send_slack.send_slack", "line_number": 46, "usage_type": "call"}, {"api_name": "helpers.send_slack", "line_number": 46, "usage_type": "name"}, {"api_name": "config.slack", "line_number": 46, "usage_type": "attribute"}, {"api_name": "enums.status.Failed", "line_number": 51, "usage_type": "attribute"}, {"api_name": "enums.status", "line_number": 51, "usage_type": "name"}, {"api_name": "enums.status.NotValid_Skip", "line_number": 55, "usage_type": "attribute"}, {"api_name": "enums.status", "line_number": 55, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 60, "usage_type": "attribute"}]} +{"seq_id": "316693765", "text": "import pandas as pd\nimport json\n# import sys\n# import matplotlib\nfrom pandas import DataFrame, read_csv \n# import ast\n# from ldap3 import Server, Connection, SUBTREE, ALL, ObjectDef, Reader\n\nprojectid = \"test-project-175520\"\njson_file = open('/home/shawon/Documents/google-cloud/big-query-test/big-query-test-service-account.json')\nprivate_key = json.load(json_file)\n\napplications = r'/home/shawon/Puppet/puppet-greenhouse/applications.csv'\ncandidates = r'/home/shawon/Puppet/puppet-greenhouse/candidates.csv'\njobs = r'/home/shawon/Puppet/puppet-greenhouse/jobs.csv'\noffers = r'/home/shawon/Puppet/puppet-greenhouse/offers.csv'\n\napp_df = pd.read_csv(applications)\ncan_df = pd.read_csv(candidates)\njobs_df = pd.read_csv(jobs)\noffers_df = pd.read_csv(offers)\n\n# create_table('greenhouse.application', schema, projectid)\napp_df.to_gbq(app_df, 'greenhouse_data.application_data', projectid, private_key=private_key, verbose=True)\ngbq.to_gbq(dataframe=df, destination_table='lake1.pond1', project_id='pubsub-bq-pipe-1', chunksize=10000, verbose=True, reauth=False, if_exists='replace', private_key=\"./pubsub-bq-pipe-1-a865bbaa5f48.json\", auth_local_webserver=False)\n\n\n# hired_apps = app_df[app_df['status'] == \"hired\"]\n\n# Gets application ids into a list\n# def app_ids(): \n# app_ids = hired_apps[\"id\"].tolist()\n# app_ids = [int(x) for x in app_ids]\n# app_ids.sort()\n# return app_ids\n\n# # Gets only hired candidates based on applications with status of hired. Pulls candidates based on application_id value\n# def candidates():\n# can_ids = hired_apps[\"candidate_id\"].tolist()\n# can_ids = [int(x) for x in can_ids]\n# can_ids.sort()\n# hired_cans = can_df.loc[can_df['id'].isin(can_ids)]\n# return hired_cans\n\n# # Sorts applications based on \n# def applications():\n# sorted_apps = hired_apps.sort_values('id', ascending=True)\n# return sorted_apps\n\n# def jobs():\n# sorted_jobs = jobs_df.sort_values('id', ascending=True)\n# jobs_custom_fields = \n# return sorted_jobs\n\n# def jobs_id_pull():\n# job_id_list = []\n# job_entry_list = []\n# app_jobs = applications()['jobs']\n# for entry in app_jobs:\n# entry_string = entry.translate({ord(c): None for c in '[]'})\n# entry_dict = ast.literal_eval(entry_string)\n# job_id_list.append(entry_dict['id'])\n# job_entry_list.append(entry)\n# job_ids = {'job_id': job_id_list, 'jobs': job_entry_list}\n# job_ids_frame = pd.DataFrame.from_dict(job_ids)\n# return job_ids_frame\n\n# # Gets offer entries that have hired applications tied to them together. 
Also removes deprecated offers and sorts numerically based on application_id value\n# def offers():\n# extended_offers = offers_df.loc[offers_df['application_id'].isin(app_ids())]\n# extended_offers = extended_offers[extended_offers['status'] == 'accepted']\n# extended_offers = extended_offers.sort_values('application_id', ascending=True)\n# return extended_offers\n\n# def merge_dfs():\n# app_labels = ['id', 'candidate_id', 'jobs']\n# offer_labels = ['application_id', 'starts_at', 'keyed_custom_fields']\n# candidate_labels = ['id', 'addresses', 'phone_numbers', 'first_name', 'last_name', 'email_addresses']\n# job_labels = ['id', 'keyed_custom_fields', 'requisition_id', 'departments']\n# reduced_apps = applications().loc[:, app_labels]\n# reduced_offs = offers().loc[:, offer_labels]\n# reduced_candidates = candidates().loc[:, candidate_labels]\n# reduced_jobs = jobs().loc[:, job_labels]\n# job_id_frame = jobs_id_pull()\n# apps_jobs = pd.merge(reduced_apps, job_id_frame, on='jobs')\n# apps_jobs = apps_jobs.drop_duplicates()\n# apps_jobs = apps_jobs.sort_values('id', ascending=True)\n# apps_and_offs = pd.merge(apps_jobs, reduced_offs, left_on='id', right_on='application_id')\n# apps_offs_cans = pd.merge(apps_and_offs, reduced_candidates, left_on='candidate_id', right_on='id')\n# merged_dfs = pd.merge(apps_offs_cans, reduced_jobs, left_on='job_id', right_on='id')\n# merged_dfs.drop(['id_x', 'id_y', 'id'], axis=1, inplace=True)\n# merged_dfs.to_csv('merged_dfs.csv')\n\n# merge_dfs()\n", "sub_path": "greenhouse/gh_pandas.py", "file_name": "gh_pandas.py", "file_ext": "py", "file_size_in_byte": 4148, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "json.load", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "609705665", "text": "#!/usr/bin/env python -O\n# -*- coding: utf-8 -*-\n#\n# tests.dao.programdb.test_ramstkfailuredefinition.py is part of The RAMSTK\n# Project\n#\n# All rights reserved.\n\"\"\" Test class for testing the RAMSTKFailureDefinition module algorithms and models. \"\"\"\n\nimport pytest\n\nfrom ramstk.dao.programdb.RAMSTKFailureDefinition import RAMSTKFailureDefinition\n\n__author__ = 'Doyle Rowland'\n__email__ = 'doyle.rowland@reliaqual.com'\n__organization__ = 'ReliaQual Associates, LLC'\n__copyright__ = 'Copyright 2017 Doyle \"weibullguy\" Rowland'\n\nATTRIBUTES = {\n 'revision_id': 1,\n 'definition_id': 1,\n 'definition': 'Failure Definition'\n}\n\n\n@pytest.mark.integration\ndef test_ramstkfailuredefinition_create(test_dao):\n \"\"\" __init__() should create an RAMSTKFailureDefinition model. \"\"\"\n _session = test_dao.RAMSTK_SESSION(\n bind=test_dao.engine, autoflush=False, expire_on_commit=False)\n DUT = _session.query(RAMSTKFailureDefinition).first()\n\n assert isinstance(DUT, RAMSTKFailureDefinition)\n\n # Verify class attributes are properly initialized.\n assert DUT.__tablename__ == 'ramstk_failure_definition'\n assert DUT.revision_id == 1\n assert DUT.definition_id == 1\n assert DUT.definition == 'Failure Definition'\n\n\n@pytest.mark.integration\ndef test_get_attributes(test_dao):\n \"\"\" get_attributes() should return a tuple of attribute values. 
\"\"\"\n _session = test_dao.RAMSTK_SESSION(\n bind=test_dao.engine, autoflush=False, expire_on_commit=False)\n DUT = _session.query(RAMSTKFailureDefinition).first()\n\n assert DUT.get_attributes() == ATTRIBUTES\n\n\n@pytest.mark.integration\ndef test_set_attributes(test_dao):\n \"\"\" set_attributes() should return a zero error code on success. \"\"\"\n _session = test_dao.RAMSTK_SESSION(\n bind=test_dao.engine, autoflush=False, expire_on_commit=False)\n DUT = _session.query(RAMSTKFailureDefinition).first()\n\n ATTRIBUTES['definition'] = 'Test Failure Definition'\n\n _error_code, _msg = DUT.set_attributes(ATTRIBUTES)\n\n assert _error_code == 0\n assert _msg == (\"RAMSTK SUCCESS: Updating RAMSTKFailureDefinition {0:d} \"\n \"attributes.\".format(DUT.definition_id))\n\n\n@pytest.mark.integration\ndef test_set_attributes_missing_key(test_dao):\n \"\"\" set_attributes() should return a 40 error code when passed too few attributes. \"\"\"\n _session = test_dao.RAMSTK_SESSION(\n bind=test_dao.engine, autoflush=False, expire_on_commit=False)\n DUT = _session.query(RAMSTKFailureDefinition).first()\n\n ATTRIBUTES.pop('definition')\n\n _error_code, _msg = DUT.set_attributes(ATTRIBUTES)\n\n assert _error_code == 40\n assert _msg == (\"RAMSTK ERROR: Missing attribute 'definition' in attribute \"\n \"dictionary passed to \"\n \"RAMSTKFailureDefinition.set_attributes().\")\n", "sub_path": "tests/dao/programdb/test_ramstkfailuredefinition.py", "file_name": "test_ramstkfailuredefinition.py", "file_ext": "py", "file_size_in_byte": 2826, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "ramstk.dao.programdb.RAMSTKFailureDefinition.RAMSTKFailureDefinition", "line_number": 31, "usage_type": "argument"}, {"api_name": "ramstk.dao.programdb.RAMSTKFailureDefinition.RAMSTKFailureDefinition", "line_number": 33, "usage_type": "argument"}, {"api_name": "pytest.mark", "line_number": 26, "usage_type": "attribute"}, {"api_name": "ramstk.dao.programdb.RAMSTKFailureDefinition.RAMSTKFailureDefinition", "line_number": 47, "usage_type": "argument"}, {"api_name": "pytest.mark", "line_number": 42, "usage_type": "attribute"}, {"api_name": "ramstk.dao.programdb.RAMSTKFailureDefinition.RAMSTKFailureDefinition", "line_number": 57, "usage_type": "argument"}, {"api_name": "pytest.mark", "line_number": 52, "usage_type": "attribute"}, {"api_name": "ramstk.dao.programdb.RAMSTKFailureDefinition.RAMSTKFailureDefinition", "line_number": 73, "usage_type": "argument"}, {"api_name": "pytest.mark", "line_number": 68, "usage_type": "attribute"}]} +{"seq_id": "117063833", "text": "# -*- coding=utf-8 -*-\n#U版账户与支付的接口\nimport xml.dom.minidom\nimport xlrd\nimport xlutils.copy\n\nclass message(object):\n def __init__(self,src=\"\",dst=\"\"):\n self.src=src\n self.dst=dst\n def __str__(self):\n return \"src=\"+self.src+\" dst=\"+self.dst\n\nclass check_win_function(object):\n def __init__(self):\n self.excel_map={}\n self.xml_map={}\n self.messages=[]\n self.sheet_index=9\n self._xml=\"kbss2pay_unix.xml\"\n self._excel=\"KBSS_统一账户系统对接系统关系清单.xlsx\"\n self.tmp_excel=\"U版账户与支付临时文件.xls\"\n\n def parse_xml(self):\n dom=xml.dom.minidom.parse(self._xml)\n self.messages=dom.getElementsByTagName(\"message\")\n print(\"message count=\",len(self.messages))\n for msg in self.messages:\n #print(msg.getAttribute(\"src\"),msg.getAttribute(\"dst\"))\n m=message(msg.getAttribute(\"src\").strip(),msg.getAttribute(\"dst\").strip())\n if not m.dst in self.xml_map.keys():\n 
self.xml_map[m.dst]=m\n #if not m.dst in self.excel_map.keys():\n # print(m)\n print(\"message unique count=\",len(self.xml_map))\n\n def parese_excel(self):\n workbook=xlrd.open_workbook(self._excel)\n sheet=workbook.sheets()[self.sheet_index]\n for i in range(1,sheet.nrows):\n dst=str(sheet.cell_value(i,1))\n src=str(sheet.cell_value(i,2))\n if dst.find(\".\")!=-1:\n dst=dst[:dst.find(\".\")].strip()\n if src.find(\".\")!=-1:\n src=src[:src.find(\".\")].strip()\n #print(\"dst=\",dst,\"src=\",src)\n msg=message(src,dst)\n if not msg.dst in self.excel_map.keys():\n self.excel_map[msg.dst]=msg\n print(\"excel lbm count=\",len(self.excel_map))\n #for item in self.excel_map.items():\n # print(item)\n def check(self):\n workbook=xlrd.open_workbook(self._excel)\n wb=xlutils.copy.copy(workbook)\n sheet1=wb.get_sheet(self.sheet_index)\n index=workbook.sheets()[self.sheet_index].nrows+1\n sheet2=workbook.sheets()[self.sheet_index]\n for i in range(1,sheet2.nrows):\n dst=str(sheet2.cell_value(i,1)).strip()\n if dst.find(\".\")!=-1:\n dst=dst[:dst.find(\".\")].strip()\n msg=self.xml_map.get(dst,None)\n sheet1.write(i,2,msg.src if msg is not None else \"not found\")\n print(dst,self.xml_map.get(dst,\"not found\"))\n print(\"key in xml not in excel:\")\n sheet1.write(index,0,\"需增加的接口\")\n index+=1\n for key in self.xml_map.keys():\n if not key in self.excel_map.keys():\n print(self.xml_map[key])\n index+=1\n sheet1.write(index,1,self.xml_map[key].dst)\n sheet1.write(index,2,self.xml_map[key].src)\n\n index+=1\n sheet1.write(index,0,\"需增删除的接口\")\n print(\"key in excel not in xml:\")\n for key in self.excel_map.keys():\n if not key in self.xml_map.keys():\n print(self.excel_map[key])\n index+=1\n sheet1.write(index,1,self.excel_map[key].dst)\n sheet1.write(index,2,self.excel_map[key].src)\n wb.save(self.tmp_excel)\n\nif __name__==\"__main__\":\n runner=check_win_function()\n runner.parese_excel()\n runner.parse_xml()\n runner.check()\n", "sub_path": "tools/process_funcation/check_function/9check_kbss2pay_unix.py", "file_name": "9check_kbss2pay_unix.py", "file_ext": "py", "file_size_in_byte": 3473, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "xml.dom.minidom.dom.minidom.parse", "line_number": 25, "usage_type": "call"}, {"api_name": "xml.dom.minidom.dom", "line_number": 25, "usage_type": "attribute"}, {"api_name": "xml.dom.minidom", "line_number": 25, "usage_type": "name"}, {"api_name": "xlrd.open_workbook", "line_number": 38, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 55, "usage_type": "call"}, {"api_name": "xlutils.copy.copy.copy", "line_number": 56, "usage_type": "call"}, {"api_name": "xlutils.copy.copy", "line_number": 56, "usage_type": "attribute"}, {"api_name": "xlutils.copy", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "383503029", "text": "from datetime import datetime\n\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom tweets.constants import TweetPhotoStatus, TWEET_PHOTO_STATUS_CHOICES\nfrom tweets.listeners import push_tweet_to_cache\nfrom likes.models import Like\nfrom utils.memcached_helper import MemcachedHelper\nfrom django.db.models.signals import post_save\nfrom utils.listeners import invalidate_object_cache\n\nclass Tweet(models.Model):\n\n user = models.ForeignKey(\n User,\n on_delete=models.SET_NULL,\n null=True,\n help_text=\"who post the this tweet\",\n )\n\n content = 
models.CharField(max_length=255)\n created_at = models.DateTimeField(auto_now_add=True)\n class Meta:\n index_together = (('user', 'created_at'),)\n ordering = ('user', '-created_at')\n\n @property\n def hours_to_now(self):\n # time zones should all be utc\n return (datetime.utcnow() - self.created_at).seconds // 3600\n\n def __str__(self):\n return f'{self.created_at} {self.user}: {self.content}'\n\n @property\n def like_set(self):\n return Like.objects.filter(\n content_type=ContentType.objects.get_for_model(Tweet),\n object_id=self.id,\n ).order_by('-created_at')\n\n @property\n def cached_user(self):\n return MemcachedHelper.get_object_through_cache(User, self.user_id)\n\npost_save.connect(invalidate_object_cache, sender=Tweet)\npost_save.connect(push_tweet_to_cache, sender=Tweet)\n\n\nclass TweetPhoto(models.Model):\n tweet = models.ForeignKey(Tweet, on_delete=models.SET_NULL, null=True)\n user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n file = models.FileField()\n order = models.IntegerField(default=0)\n\n #photo status\n status = models.IntegerField(\n default = TweetPhotoStatus.PENDING,\n choices=TWEET_PHOTO_STATUS_CHOICES,\n )\n\n is_deleted = models.BooleanField(default=False)\n deleted_at = models.DateTimeField(null=True)\n created_at = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n index_together = (\n ('user', 'created_at',),\n ('is_deleted', 'created_at',),\n ('status', 'created_at',),\n ('tweet', 'order',),\n )\n\n def __str__(self):\n return f'{self.tweet.id}: {self.file}'\n\n", "sub_path": "tweets/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2373, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "django.db.models.Model", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 15, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 16, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}, {"api_name": "likes.models.Like.objects.filter", "line_number": 38, "usage_type": "call"}, {"api_name": "likes.models.Like.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "likes.models.Like", "line_number": 38, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects.get_for_model", "line_number": 39, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 39, "usage_type": "name"}, {"api_name": "utils.memcached_helper.MemcachedHelper.get_object_through_cache", "line_number": 45, "usage_type": 
"call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 45, "usage_type": "argument"}, {"api_name": "utils.memcached_helper.MemcachedHelper", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.signals.post_save.connect", "line_number": 47, "usage_type": "call"}, {"api_name": "utils.listeners.invalidate_object_cache", "line_number": 47, "usage_type": "argument"}, {"api_name": "django.db.models.signals.post_save", "line_number": 47, "usage_type": "name"}, {"api_name": "django.db.models.signals.post_save.connect", "line_number": 48, "usage_type": "call"}, {"api_name": "tweets.listeners.push_tweet_to_cache", "line_number": 48, "usage_type": "argument"}, {"api_name": "django.db.models.signals.post_save", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 51, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 52, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 53, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 53, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.db.models.FileField", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "tweets.constants.TweetPhotoStatus.PENDING", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tweets.constants.TweetPhotoStatus", "line_number": 59, "usage_type": "name"}, {"api_name": "tweets.constants.TWEET_PHOTO_STATUS_CHOICES", "line_number": 60, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 63, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}]} +{"seq_id": "418281712", "text": "\n# coding: utf-8\n\n# In[14]:\n\nimport numpy as np \nimport os \nimport tensorflow as tf \nimport datetime\nimport time\nfrom matplotlib import pyplot as plt \nfrom PIL import Image\nimport glob\n\n# In[15]:\n\nMODEL_NAME=\"./\"\nPATH_TO_CKPT = MODEL_NAME + 'frozen_inference_graph.pb' \n\n\n# In[20]:\n\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n\n# 
In[21]:\n\ntest_img_base_path=\"./Sample\"\nimgs_files=os.path.join(test_img_base_path,\"*\",\"*.png\")\nimgs_list=glob.glob(imgs_files)\nnum_imgs=len(imgs_list)\nprint(\"Images num:\"+str(num_imgs))\ninference_path=\"./inference_result\"\nnew_files=[]\nif not os.path.exists(inference_path):\n os.mkdir(inference_path)\ntotal_time = 0\n\n\n# In[22]:\n\nwith detection_graph.as_default(): \n with tf.Session(graph=detection_graph) as sess: \n image_tensor = detection_graph.get_tensor_by_name('ImageTensor:0') \n prediction = detection_graph.get_tensor_by_name('SemanticPredictions:0') \n start_time=datetime.datetime.now()\n print(\"STARTING ...\")\n for image_path in imgs_list:\n image_np = Image.open(image_path)\n image_np_expanded = np.expand_dims(image_np, axis=0) \n # Definite input and output Tensors for detection_graph \n out_name=os.path.join(inference_path,image_path.split(\"/\")[-2],image_path.split(\"/\")[-1])\n time1 = time.time()\n prediction_out= sess.run( \n prediction,feed_dict={image_tensor: image_np_expanded}) \n time2 = time.time()\n total_time += float(time2-time1)\n result=Image.fromarray(np.array(prediction_out[0]*200).astype(np.uint8))\n if not os.path.exists(os.path.join(inference_path,out_name.split(\"/\")[-2])):\n os.mkdir(os.path.join(inference_path,out_name.split(\"/\")[-2]))\n result.save(out_name)\n end_time=datetime.datetime.now()\n \n print(\"START TIME :\"+str(start_time))\n print(\"END TIME :\"+str(end_time))\n print(\"THE TOTAL TIME COST IS:\"+str(total_time))\n print(\"THE average TIME COST IS:\"+str(float(total_time)/float(num_imgs)))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "segement_files.py", "file_name": "segement_files.py", "file_ext": "py", "file_size_in_byte": 2249, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "tensorflow.Graph", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.GraphDef", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.gfile.GFile", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tensorflow.import_graph_def", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 55, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 59, "usage_type": "call"}, {"api_name": "time.time", "line_number": 62, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 64, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 64, "usage_type": "name"}, 
{"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 68, "usage_type": "attribute"}]} +{"seq_id": "135338184", "text": "from eth_keys.datatypes import PrivateKey\nfrom eth_typing import Address\nfrom eth_utils import (\n to_int,\n)\nfrom rlp.exceptions import (\n DeserializationError,\n)\n\nfrom eth.abc import (\n SignedTransactionAPI,\n TransactionBuilderAPI,\n UnsignedTransactionAPI,\n)\nfrom eth.exceptions import UnrecognizedTransactionType\nfrom eth.vm.forks.muir_glacier.transactions import (\n MuirGlacierTransaction,\n MuirGlacierUnsignedTransaction,\n)\n\nfrom eth._utils.transactions import (\n create_transaction_signature,\n)\n\n\nclass BerlinLegacyTransaction(MuirGlacierTransaction):\n pass\n\n\nclass BerlinUnsignedLegacyTransaction(MuirGlacierUnsignedTransaction):\n def as_signed_transaction(self,\n private_key: PrivateKey,\n chain_id: int = None) -> BerlinLegacyTransaction:\n v, r, s = create_transaction_signature(self, private_key, chain_id=chain_id)\n return BerlinLegacyTransaction(\n nonce=self.nonce,\n gas_price=self.gas_price,\n gas=self.gas,\n to=self.to,\n value=self.value,\n data=self.data,\n v=v,\n r=r,\n s=s,\n )\n\n\nclass BerlinTransactionBuilder(TransactionBuilderAPI):\n \"\"\"\n Responsible for serializing transactions of ambiguous type.\n\n It dispatches to either the legacy transaction type or the new typed\n transaction, depending on the nature of the encoded/decoded transaction.\n \"\"\"\n legacy_signed = BerlinLegacyTransaction\n legacy_unsigned = BerlinUnsignedLegacyTransaction\n\n @classmethod\n def deserialize(cls, encoded: bytes) -> SignedTransactionAPI:\n if len(encoded) == 0:\n raise DeserializationError(\n \"Encoded transaction was empty, which makes it invalid\",\n encoded,\n )\n\n if isinstance(encoded, bytes):\n transaction_type = to_int(encoded[0])\n if transaction_type == 1:\n raise UnrecognizedTransactionType(transaction_type, \"TODO: Implement EIP-2930\")\n elif transaction_type in range(0, 0x80):\n raise UnrecognizedTransactionType(transaction_type, \"Unknown transaction type\")\n else:\n raise DeserializationError(\n f\"Typed Transaction must start with 0-0x7f, but got {hex(transaction_type)}\",\n encoded,\n )\n else:\n return cls.legacy_signed.deserialize(encoded)\n\n @classmethod\n def serialize(cls, obj: SignedTransactionAPI) -> bytes:\n return cls.legacy_signed.serialize(obj)\n\n @classmethod\n def create_unsigned_transaction(cls,\n *,\n nonce: int,\n gas_price: int,\n gas: int,\n to: Address,\n value: int,\n data: bytes) -> UnsignedTransactionAPI:\n return cls.legacy_unsigned(nonce, gas_price, gas, to, value, data)\n\n @classmethod\n def new_transaction(\n cls,\n nonce: int,\n gas_price: int,\n gas: int,\n to: Address,\n value: int,\n data: bytes,\n v: int,\n r: int,\n s: int) -> SignedTransactionAPI:\n return cls.legacy_signed(nonce, gas_price, gas, to, value, data, v, r, s)\n", "sub_path": 
"eth/vm/forks/berlin/transactions.py", "file_name": "transactions.py", "file_ext": "py", "file_size_in_byte": 3492, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "eth.vm.forks.muir_glacier.transactions.MuirGlacierTransaction", "line_number": 26, "usage_type": "name"}, {"api_name": "eth.vm.forks.muir_glacier.transactions.MuirGlacierUnsignedTransaction", "line_number": 30, "usage_type": "name"}, {"api_name": "eth_keys.datatypes.PrivateKey", "line_number": 32, "usage_type": "name"}, {"api_name": "eth._utils.transactions.create_transaction_signature", "line_number": 34, "usage_type": "call"}, {"api_name": "eth.abc.TransactionBuilderAPI", "line_number": 48, "usage_type": "name"}, {"api_name": "rlp.exceptions.DeserializationError", "line_number": 61, "usage_type": "call"}, {"api_name": "eth_utils.to_int", "line_number": 67, "usage_type": "call"}, {"api_name": "eth.exceptions.UnrecognizedTransactionType", "line_number": 69, "usage_type": "call"}, {"api_name": "eth.exceptions.UnrecognizedTransactionType", "line_number": 71, "usage_type": "call"}, {"api_name": "rlp.exceptions.DeserializationError", "line_number": 73, "usage_type": "call"}, {"api_name": "eth.abc.SignedTransactionAPI", "line_number": 59, "usage_type": "name"}, {"api_name": "eth.abc.SignedTransactionAPI", "line_number": 81, "usage_type": "name"}, {"api_name": "eth_typing.Address", "line_number": 90, "usage_type": "name"}, {"api_name": "eth.abc.UnsignedTransactionAPI", "line_number": 92, "usage_type": "name"}, {"api_name": "eth_typing.Address", "line_number": 101, "usage_type": "name"}, {"api_name": "eth.abc.SignedTransactionAPI", "line_number": 106, "usage_type": "name"}]} +{"seq_id": "174983025", "text": "\"\"\"\nCopyright 2020 Ye Bai by1993@qq.com\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport os\nimport argparse\nimport logging\nimport yaml\nimport torch\nfrom torch.utils.data import DataLoader\n\nimport utils\nfrom dataload import datasets, samplers, data_utils, collates\n\n\nif \"LAS_LOG_LEVEL\" in os.environ:\n LOG_LEVEL = os.environ[\"LAS_LOG_LEVEL\"]\nelse:\n LOG_LEVEL = \"INFO\"\nif LOG_LEVEL == \"DEBUG\":\n logging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s')\nelse:\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n\ndef get_args():\n parser = argparse.ArgumentParser(description=\"\"\"\n Usage: train.py \"\"\")\n parser.add_argument(\"config\", help=\"path to config file\")\n parser.add_argument('--type', type=str, default='pretrain',\n help='Continue training from last_model.pt.')\n parser.add_argument('--continue-training', type=utils.str2bool, default=False,\n help='Continue training from last_model.pt.')\n args = parser.parse_args()\n return args\n\n\n\nif __name__ == \"__main__\":\n timer = utils.Timer()\n\n args = get_args()\n timer.tic()\n config = 
utils.AttrDict(yaml.load(open(args.config)))\n dataconfig = config[\"data\"]\n trainingconfig = config[\"training\"]\n modelconfig = config[\"model\"]\n feat_range = [int(i) for i in dataconfig['feat_range'].split(',')]\n\n ngpu = 1\n if \"multi_gpu\" in trainingconfig and trainingconfig[\"multi_gpu\"] == True:\n ngpu = torch.cuda.device_count()\n\n if args.type == 'pretrain':\n training_set = datasets.SpeechDataset(dataconfig[\"trainset\"], feat_range=feat_range)\n valid_set = datasets.SpeechDataset(dataconfig[\"devset\"], reverse=True, feat_range=feat_range)\n trainingsampler = samplers.TimeBasedSampler(training_set, trainingconfig[\"batch_time\"]*ngpu, ngpu, shuffle=True)\n validsampler = samplers.TimeBasedSampler(valid_set, trainingconfig[\"batch_time\"]*ngpu, ngpu, shuffle=False) # for plot longer utterance\n\n tr_loader = DataLoader(training_set, collate_fn=collates.waveCollate,\n batch_sampler=trainingsampler, shuffle=False,\n num_workers=dataconfig[\"fetchworker_num\"])\n cv_loader = DataLoader(valid_set, collate_fn=collates.waveCollate,\n batch_sampler=validsampler, shuffle=False,\n num_workers=dataconfig[\"fetchworker_num\"])\n\n from frameworks.CPC_Models import CPC_Model as Model\n from solvers import CPC_Solver as Solver\n\n model = Model.create_model(modelconfig['sp'], modelconfig['cpc'])\n\n elif args.type == 'finetune':\n\n tokenizer = data_utils.SubwordTokenizer(dataconfig[\"vocab_path\"], add_blk=modelconfig['add_blk'])\n modelconfig[\"decoder\"][\"vocab_size\"] = tokenizer.unit_num()\n label_range = [int(i) for i in dataconfig['label_range'].split(',')]\n\n training_set = datasets.SpeechDataset(dataconfig[\"trainset\"], feat_range=feat_range, label_range=label_range)\n valid_set = datasets.SpeechDataset(dataconfig[\"devset\"], reverse=True, feat_range=feat_range, label_range=label_range)\n trainingsampler = samplers.TimeBasedSampler(training_set, trainingconfig[\"batch_time\"]*ngpu, ngpu, shuffle=True)\n validsampler = samplers.TimeBasedSampler(valid_set, trainingconfig[\"batch_time\"]*ngpu, ngpu, shuffle=False) # for plot longer utterance\n collect = collates.WaveSampleCollate(tokenizer, add_eos=modelconfig[\"add_eos\"],\n label_type=trainingconfig[\"label_type\"])\n tr_loader = DataLoader(training_set, collate_fn=collect, batch_sampler=trainingsampler,\n shuffle=False, num_workers=dataconfig[\"fetchworker_num\"])\n cv_loader = DataLoader(valid_set, collate_fn=collect, batch_sampler=validsampler,\n shuffle=False, num_workers=dataconfig[\"fetchworker_num\"])\n\n from frameworks.Speech_Models import GRU_CTC_Model as Model\n from solvers import CTC_Solver as Solver\n\n model = Model.create_model(modelconfig[\"signal\"],\n modelconfig[\"encoder\"],\n modelconfig[\"decoder\"][\"vocab_size\"])\n\n if trainingconfig['load_splayer']:\n logging.info(\"Load pretrained splayer from {}.\".format(trainingconfig[\"load_splayer\"]))\n pkg = torch.load(trainingconfig[\"load_splayer\"])\n model.load_splayer(pkg[\"model\"])\n utils.freeze(model.splayer)\n\n logging.info(\"\\nModel info:\\n{}\".format(model))\n\n if args.continue_training:\n logging.info(\"Load package from {}.\".format(os.path.join(trainingconfig[\"exp_dir\"], \"last.pt\")))\n pkg = torch.load(os.path.join(trainingconfig[\"exp_dir\"], \"last.pt\"))\n model.restore(pkg[\"model\"])\n\n if \"multi_gpu\" in trainingconfig and trainingconfig[\"multi_gpu\"] == True:\n logging.info(\"Let's use {} GPUs!\".format(torch.cuda.device_count()))\n model = torch.nn.DataParallel(model)\n\n if torch.cuda.is_available():\n model = 
model.cuda()\n\n solver = Solver(model, trainingconfig, tr_loader, cv_loader)\n\n if args.continue_training:\n logging.info(\"Restore solver states...\")\n solver.restore(pkg)\n logging.info(\"Start training...\")\n solver.train()\n logging.info(\"Total time: {:.4f} secs\".format(timer.toc()))\n", "sub_path": "src/train_CPC.py", "file_name": "train_CPC.py", "file_ext": "py", "file_size_in_byte": 6075, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 28, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 37, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 41, "usage_type": "call"}, {"api_name": "utils.str2bool", "line_number": 46, "usage_type": "attribute"}, {"api_name": "utils.Timer", "line_number": 54, "usage_type": "call"}, {"api_name": "utils.AttrDict", "line_number": 58, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.cuda.device_count", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 66, "usage_type": "attribute"}, {"api_name": "dataload.datasets.SpeechDataset", "line_number": 69, "usage_type": "call"}, {"api_name": "dataload.datasets", "line_number": 69, "usage_type": "name"}, {"api_name": "dataload.datasets.SpeechDataset", "line_number": 70, "usage_type": "call"}, {"api_name": "dataload.datasets", "line_number": 70, "usage_type": "name"}, {"api_name": "dataload.samplers.TimeBasedSampler", "line_number": 71, "usage_type": "call"}, {"api_name": "dataload.samplers", "line_number": 71, "usage_type": "name"}, {"api_name": "dataload.samplers.TimeBasedSampler", "line_number": 72, "usage_type": "call"}, {"api_name": "dataload.samplers", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 74, "usage_type": "call"}, {"api_name": "dataload.collates.waveCollate", "line_number": 74, "usage_type": "attribute"}, {"api_name": "dataload.collates", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 77, "usage_type": "call"}, {"api_name": "dataload.collates.waveCollate", "line_number": 77, "usage_type": "attribute"}, {"api_name": "dataload.collates", "line_number": 77, "usage_type": "name"}, {"api_name": "frameworks.CPC_Models.CPC_Model.create_model", "line_number": 84, "usage_type": "call"}, {"api_name": "frameworks.CPC_Models.CPC_Model", "line_number": 84, "usage_type": "name"}, {"api_name": "dataload.data_utils.SubwordTokenizer", "line_number": 88, "usage_type": "call"}, {"api_name": "dataload.data_utils", "line_number": 88, "usage_type": "name"}, {"api_name": "dataload.datasets.SpeechDataset", "line_number": 92, "usage_type": "call"}, {"api_name": "dataload.datasets", "line_number": 92, "usage_type": "name"}, {"api_name": "dataload.datasets.SpeechDataset", "line_number": 93, "usage_type": "call"}, {"api_name": "dataload.datasets", "line_number": 93, "usage_type": "name"}, {"api_name": "dataload.samplers.TimeBasedSampler", "line_number": 94, "usage_type": "call"}, {"api_name": "dataload.samplers", "line_number": 94, "usage_type": "name"}, 
{"api_name": "dataload.samplers.TimeBasedSampler", "line_number": 95, "usage_type": "call"}, {"api_name": "dataload.samplers", "line_number": 95, "usage_type": "name"}, {"api_name": "dataload.collates.WaveSampleCollate", "line_number": 96, "usage_type": "call"}, {"api_name": "dataload.collates", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 100, "usage_type": "call"}, {"api_name": "frameworks.Speech_Models.GRU_CTC_Model.create_model", "line_number": 106, "usage_type": "call"}, {"api_name": "frameworks.Speech_Models.GRU_CTC_Model", "line_number": 106, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 112, "usage_type": "call"}, {"api_name": "utils.freeze", "line_number": 114, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 116, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.cuda.device_count", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 124, "usage_type": "attribute"}, {"api_name": "torch.nn.DataParallel", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 125, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 127, "usage_type": "attribute"}, {"api_name": "solvers.CTC_Solver", "line_number": 130, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 133, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 135, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "48088024", "text": "#!/usr/bin/python\n#-*- coding: utf-8 -*-\nimport sys\nimport subprocess\nfrom utils import COMMON_PARSER, MODIFY_PARSER, get_command\n\ntry:\n args = COMMON_PARSER.parse_args([sys.argv[1]])\n\n if args.command == 'add':\n args = MODIFY_PARSER.parse_args(sys.argv[2:4])\n print(args.type)\n print(args.name)\n handler = __import__(\n '.'.join(['supports', args.type]),\n fromlist=['add'])\n handler.add(sys.argv[4:], args.name)\n elif args.command == 'remove':\n pass\n elif args.command == 'update':\n args = MODIFY_PARSER.parse_args(sys.argv[2:4])\n handler = __import__(\n '.'.join(['supports', args.type]),\n fromlist=['add'])\n handler.update(sys.argv[4:], args.name)\n else:\n cmds = get_command(args.command).split(' ')\n subprocess.call(cmds)\nexcept:\n import traceback\n traceback.print_exc()\n print(\"Failed to execute the command\")\n", "sub_path": "cmdhelper/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 974, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "utils.COMMON_PARSER.parse_args", "line_number": 8, "usage_type": "call"}, {"api_name": "utils.COMMON_PARSER", "line_number": 8, "usage_type": "name"}, {"api_name": 
"sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "utils.MODIFY_PARSER.parse_args", "line_number": 11, "usage_type": "call"}, {"api_name": "utils.MODIFY_PARSER", "line_number": 11, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "utils.MODIFY_PARSER.parse_args", "line_number": 21, "usage_type": "call"}, {"api_name": "utils.MODIFY_PARSER", "line_number": 21, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}, {"api_name": "utils.get_command", "line_number": 27, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 28, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "415136824", "text": "\"\"\"rename project to team\n\nRevision ID: a23d4d63432d\nRevises: e9f89fb022ef\nCreate Date: 2016-12-26 21:57:55.276849\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a23d4d63432d'\ndown_revision = 'e9f89fb022ef'\nbranch_labels = None\ndepends_on = None\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('teams',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('users_teams',\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('team_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['team_id'], ['teams.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], )\n )\n op.drop_table('users_projects')\n op.drop_table('projects')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('projects',\n sa.Column('id', sa.INTEGER(), server_default=sa.text(\"nextval('projects_id_seq'::regclass)\"), nullable=False),\n sa.Column('name', sa.VARCHAR(), autoincrement=False, nullable=True),\n sa.PrimaryKeyConstraint('id', name='projects_pkey'),\n sa.UniqueConstraint('name', name='projects_name_key'),\n postgresql_ignore_search_path=False\n )\n op.create_table('users_projects',\n sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),\n sa.Column('project_id', sa.INTEGER(), autoincrement=False, nullable=True),\n sa.ForeignKeyConstraint(['project_id'], ['projects.id'], name='users_projects_project_id_fkey'),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], name='users_projects_user_id_fkey')\n )\n op.drop_table('users_teams')\n op.drop_table('teams')\n ### end Alembic commands ###\n", "sub_path": "alembic/versions/a23d4d63432d_rename_project_to_team.py", "file_name": "a23d4d63432d_rename_project_to_team.py", "file_ext": "py", "file_size_in_byte": 1904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "21", "api": [{"api_name": "alembic.op.create_table", "line_number": 20, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 20, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 26, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 26, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 32, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 33, "usage_type": "name"}, {"api_name": "alembic.op.create_table", "line_number": 39, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 39, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.text", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 43, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 46, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 46, "usage_type": "name"}, 
{"api_name": "sqlalchemy.Column", "line_number": 47, "usage_type": "call"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 47, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 49, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 50, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 52, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 52, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 53, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "290252358", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCutword for sentiment dataset\n\ninput format:\n id sentitype text\n\nUsage:\n cut -t [pos] -c [hidden] -f --template