diff --git "a/1764.jsonl" "b/1764.jsonl" new file mode 100644--- /dev/null +++ "b/1764.jsonl" @@ -0,0 +1,720 @@ +{"seq_id":"151329167","text":"import numpy as np\r\n\r\n\r\ndef get_dataset(npz_path, use_cath=True):\r\n raw_data_path = npz_path.parent / npz_path.name.replace('.npz', '.npy')\r\n ids_path = npz_path.parent / npz_path.name.replace('.npz', '_ids.txt')\r\n\r\n # Load raw data from two seperate files if already created\r\n # IDs are stored as txt and embeddings are stored as npz (uncompressed)\r\n # Row-Indices in .npz correspond to line number in IDs file to keep track of ID-Embedding pairs\r\n if raw_data_path.is_file() and ids_path.is_file():\r\n raw_data = np.load(raw_data_path)\r\n with open(ids_path, 'r') as id_f:\r\n ids = [line.strip() for line in id_f]\r\n dataset = {seq_id: np.expand_dims(raw_data[idx], axis=0)\r\n for idx, seq_id in enumerate(ids)}\r\n\r\n # Otherwise, if only npy file (compressed dictionary) exists:\r\n # Load dictionary, split Key/Value pairs and write Keys as txt and\r\n # concatenated embeddings as npz\r\n else:\r\n dataset = dict(np.load(npz_path, mmap_mode='r'))\r\n ids, raw_data = zip(*dataset.items())\r\n\r\n _write_files(ids_path, raw_data_path, ids, raw_data, use_cath)\r\n dataset = {seq_id: np.expand_dims(embd, axis=0)\r\n for seq_id, embd in dataset.items()}\r\n\r\n return dataset\r\n\r\n\r\ndef get_dataset_uncompressed(npy_file, id_file):\r\n raw_data = np.load(npy_file)\r\n with open(id_file, 'r') as read_in:\r\n ids = [line.strip() for line in read_in]\r\n\r\n dataset = {seq_id: np.expand_dims(raw_data[idx], axis=0)\r\n for idx, seq_id in enumerate(ids)}\r\n\r\n return dataset\r\n\r\n\r\ndef write_dataset_speedup(path, data, use_cath=False):\r\n ids, raw_data = zip(*data.items())\r\n\r\n raw_data_path = path.parent / path.name.replace('.npz', '.npy')\r\n ids_path = path.parent / path.name.replace('.npz', '_ids.txt')\r\n\r\n _write_files(ids_path, raw_data_path, ids, raw_data, use_cath)\r\n\r\n\r\ndef _write_files(ids_path, raw_data_path, ids, raw_data, use_cath=False):\r\n\r\n with open(ids_path, 'w+') as id_f:\r\n for seq_id in ids:\r\n if use_cath:\r\n seq_id = seq_id.split('|')[2].split('/')[0]\r\n id_f.write(seq_id + '\\n')\r\n np.save(raw_data_path, raw_data)\r\n","sub_path":"npy2npz.py","file_name":"npy2npz.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"225935326","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 11 13:42:12 2017\n\n@author: jeromescelza\n\"\"\"\n\nimport pymysql\nimport requests\nimport json\nimport pprint\nimport pandas as pd\nimport datetime\nimport math\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nfrom IPython.display import display\nimport time\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n\n#pd.set_option('display.height', 200cd\n#pd.set_option('display.max_rows', 800)\n\nconn = pymysql.connect(host='127.0.0.1', port=3000, user='jeroscel', passwd='Defense43*55', db='device_ingest')\n\ncur = conn.cursor()\n\n#View all TABLE names in MySQL db\ncur.execute('USE device_ingest')\ncur.execute(\"SHOW TABLES\")\ntables = cur.fetchall()\n\n\nsub_int = [52, 53, 55, 56, 57, 58]\nnow = datetime.datetime.now()\nstrt_date_temp = datetime.datetime(now.year, now.month, now.day, hour=14)\nsub_strt_date = strt_date_temp - datetime.timedelta(days=4)\nsub_stp_date = sub_strt_date + datetime.timedelta(days=1)\n\n\n\n\nfitbitD = pd.read_sql('SELECT * FROM 
fitbit_sleep_logs', conn)\nposition_fit = -1\nsubplot_count = 111\ni = 55\n\nfits = fitbitD.loc[(fitbitD['subject_id'] == i)]\nfit_range = fits['date_of_sleep_datetime']\nfit_duration = fits.loc[((fit_range >= str(sub_strt_date)) & (fit_range <= str(sub_stp_date)))]\nfit_duration.index = range(0,len(fit_duration))\n\nfitbit_sleep_metrics = ['minutes_asleep', 'minutes_awake', 'time_in_bed', 'restless_duration', 'total_minutes_asleep']\n\nfitbit_duration_check = fit_duration['sleep_duration']\ncounter = 0\nposition_fit = position_fit + 1\n \nif i in fitbit_duration_check <= 18000000 or fitbit_duration_check.empty:\n# compliance_list.iloc[position_fit,4] = 'FAIL'\n print('FAIL')\nelse:\n# compliance_list.iloc[position_fit,4] = 'PASS'\n print(\"PASS\")\n x = range(0,len(fitbit_sleep_metrics))\n y = fit_duration.iloc[0][fitbit_sleep_metrics]\n plt.bar(x, y)\n plt.title('Fitbit Summary Subject %d' % i)\n my_xticks = fitbit_sleep_metrics\n plt.xticks(x, my_xticks, rotation=45)\n \n subplot_count = subplot_count + 1\n \n plt.show()","sub_path":"fitbit_working.py","file_name":"fitbit_working.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"119396794","text":"__author__ = \"Manoel Horta Ribeiro\"\n__email__ = \"manoelhortaribeiro@gmail.com\"\n\nimport argparse\nimport json\nimport os\n\nparser = argparse.ArgumentParser(description=\"\"\"This script creates the folder structure for the other YouTube loader \n scripts. Optionally, it receives an YouTube v3 API key which will be\n used by some of the scripts.\"\"\")\n\nparser.add_argument(\"--apikey\", dest=\"apikey\", type=str, default=\"Your API key here\",\n help=\"YouTube v3 API key.\")\n\nparser.add_argument(\"--apikeydst\", dest=\"keydst\", type=str, default=\"./data/youtube/api_key.json\",\n help=\"YouTube v3 API key path.\")\n\nparser.add_argument(\"--cm\", dest=\"cm\", type=str, default=\"./data/youtube/cm/\",\n help=\"Folder to store the comments.\")\n\nparser.add_argument(\"--cp\", dest=\"cp\", type=str, default=\"./data/youtube/cp/\",\n help=\"Folder to store the captions.\")\n\nparser.add_argument(\"--rc\", dest=\"rc\", type=str, default=\"./data/youtube/rc/\",\n help=\"Folder to store the graphs of recommended videos.\")\n\nparser.add_argument(\"--yt\", dest=\"yt\", type=str, default=\"./data/youtube/yt/\",\n help=\"Folder to store stats and info about videos.\")\n\nparser.add_argument(\"--vd\", dest=\"vd\", type=str, default=\"./data/youtube/vd/\",\n help=\"Folder to store recommended videos.\")\nargs = parser.parse_args()\n\nos.makedirs(args.cm, exist_ok=True)\nos.makedirs(args.cp, exist_ok=True)\nos.makedirs(args.rc, exist_ok=True)\nos.makedirs(args.yt, exist_ok=True)\nos.makedirs(args.vd, exist_ok=True)\n\nwith open(args.keydst, \"w\") as f:\n json.dump({\"key\": args.apikey}, f)\n","sub_path":"youtube_tools/loader_setup.py","file_name":"loader_setup.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"487672156","text":"import jieba\nfrom gensim.models import Word2Vec\nimport numpy as np\nimport os.path\nimport math\nfrom sklearn.cluster import KMeans\nfrom collections import defaultdict\n\ndef load_corpus(path):\n corpus = []\n with open(path, encoding='utf8') as f:\n for line in f:\n corpus.append(jieba.lcut(line))\n return corpus\n\ndef train_word2vec_model(corpus, dim=100):\n model = Word2Vec(corpus, vector_size=dim)\n 
model.save('model.w2v')\n return model\n\ndef load_sentences(path):\n sentences = set()\n with open(path, encoding='utf8') as f:\n for line in f:\n sentences.add(\" \".join(jieba.cut(line.strip())))\n return sentences\n\ndef sentences_to_vector(sentences, model):\n vectors = []\n for sentence in sentences:\n words = sentence.split()\n vector = np.zeros(model.vector_size)\n for word in words:\n try:\n vector += model.wv[word]\n except KeyError:\n vector += np.zeros(model.vector_size)\n vectors.append(vector / len(words))\n return np.array(vectors)\n\n\ndef main():\n # 检查当前目录下词向量模型是否存在\n if os.path.isfile('model.w2v'):\n model = Word2Vec.load('model.w2v')\n else:\n corpus = load_corpus('corpus.txt')\n model = train_word2vec_model(corpus)\n\n sentences = load_sentences('titles.txt')\n vectors = sentences_to_vector(sentences, model)\n\n n_clusters = int(math.sqrt(len(sentences)))\n kmeans = KMeans(n_clusters)\n kmeans.fit(vectors)\n\n sentences_label_dict = defaultdict(list)\n for sentence, label in zip(sentences, kmeans.labels_):\n sentences_label_dict[label].append(sentence)\n for label, sentences in sentences_label_dict.items():\n print('cluster id is %d' % label)\n for i in range(min(4, len(sentences))):\n print(sentences[i].replace(\" \", \"\"))\n print(\"----------\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"57-小关-杭州/week4/homework_week4.py","file_name":"homework_week4.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"149708357","text":"from pynput.keyboard import Listener\nimport time\nimport threading\nimport webbrowser\n\nclass ComboListener:\n\n def __init__(self):\n self.cur_keys = []\n # 修改keymap,快捷键组合对应的是URL\n self.keymap = {\n 'gh': 'https://github.com/',\n 'wk': 'https://www.wikipedia.org/'\n }\n self._run()\n\n def _on_press(self, key):\n try:\n self.cur_keys.append(key.char)\n\n except AttributeError:\n self.cur_keys.append(key.name)\n\n def _cleaner(self):\n while True:\n time.sleep(0.7)\n self.cur_keys.clear()\n\n def _run(self):\n l = Listener(on_press=self._on_press)\n l.daemon = True\n l.start()\n\n t = threading.Thread(target=self._cleaner)\n t.daemon = True\n t.start()\n\n def get_combo(self):\n if len(self.cur_keys) >= 2:\n combo = self.cur_keys[-2:]\n # [a,a,a,a]\n return combo\n\n # 修改解析快捷键的模块,当监测到特定快捷键组合,则利用webbrowser打开指定的URL\n def parsed_combo(self):\n combo = self.get_combo()\n if combo:\n key = ''.join(combo)\n if key in self.keymap.keys():\n # 使用上一课用到的webbrowser,打开一个URL\n webbrowser.open_new_tab(self.keymap[key])\n print(\"URL has been opened. 
{}\".format(self.keymap[key]))\n # 当URL成功开启后,清空cur_keys,防止因程序运行过快而打开多个同样的页面\n self.cur_keys.clear()\n\nif __name__ == \"__main__\":\n cl = ComboListener()\n while True:\n # 在while True循环中,只需要运行combo监测函数即可\n cl.parsed_combo()\n\n","sub_path":"实用主义学习文档/04脚本/9.定制快捷键输入.py","file_name":"9.定制快捷键输入.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"6293273","text":"import pprint\nimport maya.cmds as cmds\n\ndef create_layer_override(node, attr_dict, layer):\n \n overide_attr_list = ['{0}.{1}'.format(node, attr) for attr in attr_dict.keys()]\n \n # create layer override\n pm.editRenderLayerAdjustment(overide_attr_list, layer=layer) \n \n for attr, value in attr_dict.iteritems():\n #print(attr, value)\n pm.setAttr('{0}.{1}'.format(node, attr), value)\n \n \n\ndef set_attr(node, attr_dict, layer):\n \n for attr, value in attr_dict.iteritems():\n pm.setAttr('{0}.{1}'.format(node, attr), value)\n \n \n\ndef set_render_element_override(render_element_dict, override_dict):\n \n for name, value in override_dict.iteritems():\n \n render_element = render_element_dict.get(name)\n \n if render_element is None:\n pm.warning('did not find render element, continue')\n continue\n \n try:\n render_element = pm.PyNode(render_element)\n \n except pm.MayaNodeError as e:\n pm.warning(e) \n \n # create layer override\n pm.editRenderLayerAdjustment('{0}.enabled'.format(render_element))\n\n # set attr \n render_element.enabled.set(value)\n \n\ndef setup_render_layers():\n\n # just a remainder to assign the environt material to the environt on the truck and trailer layer.\n try:\n environment_material = pm.PyNode('environment_material')\n \n except pm.MayaNodeError as e:\n pm.warning('Please make sure that a vray material called environment_material exists. White diffuse, no reflection')\n return\n \n if not isinstance(environment_material, pm.nodetypes.VRayMtl):\n pm.warning('Please make sure that a vray material called environment_material exists. 
White diffuse, no reflection')\n return\n \n \n \n render_element_string_dict = {\n 'multimatte_1':'MultiMatteElement',\n 'multimatte_2':'MultiMatteElement',\n 'multimatte_3':'MultiMatteElement',\n 'multimatte_4':'MultiMatteElement',\n 'lighting':'lightingChannel',\n 'gi':'giChannel',\n 'reflection':'reflectChannel',\n 'refraction':'refractChannel',\n 'specular':'specularChannel',\n 'contact_ao':'ExtraTexElement'}\n \n \n \n # add render elements\n render_element_dict = {}\n \n for render_element_name, class_name in render_element_string_dict.iteritems():\n render_element_unicode = mel.eval('vrayAddRenderElement {0}'.format(class_name))\n render_element = pm.PyNode(render_element_unicode)\n render_element_dict[render_element_name] = render_element\n render_element.setName(render_element_name)\n \n if class_name == 'MultiMatteElement':\n render_element.vray_name_multimatte.set(render_element_name)\n \n \n \n # set multimatte attr\n multimatte_list = []\n multimatte_list.append(render_element_dict.get('multimatte_1'))\n multimatte_list.append(render_element_dict.get('multimatte_2'))\n multimatte_list.append(render_element_dict.get('multimatte_3'))\n multimatte_list.append(render_element_dict.get('multimatte_4'))\n \n for index, multimatte in enumerate(multimatte_list):\n \n multimatte.vray_usematid_multimatte.set(1)\n multimatte.vray_redid_multimatte.set(index*3+1)\n multimatte.vray_greenid_multimatte.set(index*3+2)\n multimatte.vray_blueid_multimatte.set(index*3+3)\n \n \n # add env render layer\n pm.select('|bridge_main_grp')\n env_render_layer = pm.createRenderLayer(name='env')\n \n \n # add truck render layer\n pm.select('P2952_FH13_Enhanced_Plus', '|bridge_main_grp', 'ref_trailer_2952_v003:trailer_main_grp', 'VRayLightDome1')\n truck_trailer_render_layer = pm.createRenderLayer(name='truck_trailer')\n \n \n # add contact ao render layer\n pm.select('P2952_FH13_Enhanced_Plus', '|bridge_main_grp|render_road_grp', 'ref_trailer_2952_v003:trailer_main_grp')\n contact_ao_render_layer = pm.createRenderLayer(name='contact_ao')\n \n \n \n \n \n # truck_trailer_render_layer\n \n truck_trailer_render_layer_vray_op_bridge_garage_dict = {\n 'giVisibility':0,\n 'primaryVisibility':0,\n 'useIrradianceMap':0,\n 'generateGI':0,\n 'receiveGI':0,\n 'generateCaustics':0,\n 'receiveCaustics':0}\n \n \n truck_trailer_render_layer_vray_op_render_road_dict = {\n 'matteSurface':1,\n 'alphaContribution':-1}\n \n \n truck_trailer_render_layer_render_element_dict = {\n 'multimatte_1':1,\n 'multimatte_2':1,\n 'multimatte_3':1,\n 'multimatte_4':1,\n 'lighting':1,\n 'gi':1,\n 'reflection':1,\n 'refraction':1,\n 'specular':1,\n 'contact_ao':0}\n \n \n # contact_ao_render_layer\n \n contact_ao_render_layer_multi_op_dict = {\n 'giVisibility':0,\n 'primaryVisibility':0,\n 'useIrradianceMap':0,\n 'generateGI':0,\n 'receiveGI':0,\n 'generateCaustics':0,\n 'receiveCaustics':0,\n 'giVisibility':0,\n 'primaryVisibility':0,\n 'reflectionVisibility':0,\n 'refractionVisibility':0,\n 'shadowVisibility':0}\n \n contact_ao_render_layer_render_element_dict = {\n 'multimatte_1':0,\n 'multimatte_2':0,\n 'multimatte_3':0,\n 'multimatte_4':0,\n 'lighting':0,\n 'gi':0,\n 'reflection':0,\n 'refraction':0,\n 'specular':0,\n 'contact_ao':1}\n \n \n # env_render_layer\n \n env_render_layer_render_element_dict = {\n 'multimatte_1':0,\n 'multimatte_2':0,\n 'multimatte_3':0,\n 'multimatte_4':0,\n 'lighting':0,\n 'gi':0,\n 'reflection':0,\n 'refraction':0,\n 'specular':0,\n 'contact_ao':0}\n \n \n \n \n # create overrides\n # 
----------------------------------------------------\n \n # truck_trailer_render_layer\n \n pm.editRenderLayerGlobals(currentRenderLayer=truck_trailer_render_layer)\n \n \n create_layer_override( node = 'vray_op_bridge_garage',\n attr_dict = truck_trailer_render_layer_vray_op_bridge_garage_dict, \n layer = truck_trailer_render_layer)\n \n \n create_layer_override( node='vray_op_render_road',\n attr_dict = truck_trailer_render_layer_vray_op_render_road_dict, \n layer=truck_trailer_render_layer)\n \n \n set_render_element_override(render_element_dict=render_element_dict, override_dict=truck_trailer_render_layer_render_element_dict)\n \n \n # contact_ao_render_layer\n \n pm.editRenderLayerGlobals(currentRenderLayer=contact_ao_render_layer)\n \n create_layer_override( node='vray_op_FH13_Enhanced_Plus_body',\n attr_dict = contact_ao_render_layer_multi_op_dict,\n layer=contact_ao_render_layer)\n \n create_layer_override( node='vray_op_FH13_Enhanced_Plus_wheels',\n attr_dict = contact_ao_render_layer_multi_op_dict,\n layer=contact_ao_render_layer)\n \n create_layer_override( node='vray_op_FH13_Enhanced_Plus_headlamps',\n attr_dict = contact_ao_render_layer_multi_op_dict,\n layer=contact_ao_render_layer)\n \n create_layer_override( node='vray_op_FH13_Enhanced_Plus_windshield',\n attr_dict = contact_ao_render_layer_multi_op_dict,\n layer=contact_ao_render_layer)\n \n \n create_layer_override( node='ref_trailer_2952_v003:vray_op_trailer_body',\n attr_dict = contact_ao_render_layer_multi_op_dict,\n layer=contact_ao_render_layer)\n \n \n create_layer_override( node='ref_trailer_2952_v003:vray_op_trailer_wheels',\n attr_dict = contact_ao_render_layer_multi_op_dict,\n layer=contact_ao_render_layer)\n \n \n set_render_element_override(render_element_dict=render_element_dict, override_dict=contact_ao_render_layer_render_element_dict)\n \n \n \n # contact_ao_render_layer\n \n pm.editRenderLayerGlobals(currentRenderLayer=env_render_layer)\n set_render_element_override(render_element_dict=render_element_dict, override_dict=env_render_layer_render_element_dict)\n \n \n #pprint.pprint(dir(contact_ao_render_layer))\n #pprint.pprint(contact_ao_render_layer.listAdjustments())\n\n\nsetup_render_layers()","sub_path":"petfactory/rendering/vray/setup_render_layer.py","file_name":"setup_render_layer.py","file_ext":"py","file_size_in_byte":8803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"240969900","text":"import pytest\nfrom nougat import TestClient\nfrom nougat_router import *\nfrom functools import wraps\n\n\nclass TestFeature:\n\n @pytest.mark.asyncio\n async def test_format_int(self, app, router, port):\n\n class MainRestRouting(RestRouting):\n\n @get('/')\n async def static_route(self):\n return 0\n\n router.add(MainRestRouting)\n app.use(router)\n\n async with TestClient(app, port) as client:\n res = await client.get('/?name=world')\n assert res.text == '0'\n\n res = await client.get('/')\n assert res.text == '0'\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"519825598","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n\"\"\"Collision detection\"\"\"\r\nimport pygame\r\nfrom pygame.locals import *\r\nimport sys\r\n\r\n# 画面サイズ\r\nSCREEN_W, SCREEN_H = 640, 480\r\n\r\n# マウスカーソルサイズ\r\nMOUSE_CURSOR_SIZE = 10\r\n\r\n# 句形サイズ\r\nRECT_SIZE = 30\r\n\r\n\r\nclass Mouse:\r\n 
\"\"\"マウスクラス\"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"コンストラクタ\"\"\"\r\n\r\n def get_pos(self):\r\n \"\"\"マウス位置取得\"\"\"\r\n return pygame.mouse.get_pos()\r\n\r\n def get_rect(self):\r\n \"\"\"マウス座標を矩形で取得\"\"\"\r\n x, y = pygame.mouse.get_pos()\r\n return Rect(x, y, 1, 1)\r\n\r\n\r\nclass CollideRect():\r\n \"\"\"矩形クラス\"\"\"\r\n\r\n def __init__(self, x, y, w, h, mouse):\r\n \"\"\"コンストラクタ\"\"\"\r\n self.rect = Rect(x, y, w, h)\r\n self.mouse = mouse\r\n\r\n def update(self):\r\n \"\"\"状態の更新\"\"\"\r\n if self.rect.left < 0 or self.rect.right > SCREEN_W:\r\n self.vx = -self.vx\r\n if self.rect.top < 0 or self.rect.bottom > SCREEN_H:\r\n self.vy = -self.vy\r\n # 画面からはみ出さないようにする\r\n self.rect = self.rect.clamp(Rect(0, 0, SCREEN_W, SCREEN_H))\r\n\r\n # 衝突検出\r\n if self.rect.colliderect(self.mouse.get_rect()):\r\n self.color = (200, 100, 100)\r\n else:\r\n self.color = (100, 100, 100)\r\n\r\n def draw(self, screen):\r\n \"\"\"描画\"\"\"\r\n pygame.draw.rect(screen, self.color, self.rect)\r\n\r\n\r\nclass Main:\r\n \"\"\"メインクラス\"\"\"\r\n rects = []\r\n\r\n def __init__(self):\r\n \"\"\"初期化\"\"\"\r\n # pygame初期化\r\n pygame.init()\r\n\r\n # 画面サイズを設定\r\n screen = pygame.display.set_mode((SCREEN_W, SCREEN_H))\r\n\r\n # タイトルバーの文字列を設定\r\n pygame.display.set_caption(u\"衝突検出\")\r\n\r\n # ゲームの初期化\r\n self.init_game()\r\n\r\n # clockオブジェクトの取得\r\n clock = pygame.time.Clock()\r\n\r\n # ゲームループ\r\n while True:\r\n # FPSを60に固定\r\n clock.tick(60)\r\n # 画面を青色で塗りつぶす\r\n screen.fill((0, 0, 0))\r\n self.update()\r\n self.draw(screen)\r\n # 画面を更新\r\n pygame.display.update()\r\n self.key_handler()\r\n\r\n def init_game(self):\r\n \"\"\"ゲーム初期化\"\"\"\r\n # マウスカーソル\r\n self.mouse = Mouse()\r\n\r\n # 画面を矩形で埋める\r\n for i in range(0, 20):\r\n for j in range(0, 15):\r\n x = i * 32\r\n y = j * 32\r\n cr = CollideRect(x+1, y+1, RECT_SIZE, RECT_SIZE, self.mouse)\r\n self.rects.append(cr)\r\n\r\n def update(self):\r\n \"\"\"ゲーム更新\"\"\"\r\n # オブジェクト更新\r\n for obj in self.rects:\r\n obj.update()\r\n\r\n def draw(self, screen):\r\n \"\"\"ゲーム描画\"\"\"\r\n for obj in self.rects:\r\n obj.draw(screen)\r\n\r\n # 句形の描画\r\n x, y = self.mouse.get_pos()\r\n pygame.draw.rect( \\\r\n screen, (255, 255, 255), \\\r\n Rect(x - MOUSE_CURSOR_SIZE // 2, \\\r\n y - MOUSE_CURSOR_SIZE // 2, \\\r\n MOUSE_CURSOR_SIZE, MOUSE_CURSOR_SIZE))\r\n\r\n def key_handler(self):\r\n \"\"\"キーハンドラ\"\"\"\r\n for event in pygame.event.get():\r\n # 終了イベント\r\n if event.type == QUIT:\r\n sys.exit()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n \"\"\"エントリポイント\"\"\"\r\n Main()\r\n\r\n","sub_path":"15_collision_detection.py","file_name":"15_collision_detection.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"346866695","text":"import sys, os\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport numpy as np\nimport spectra\nfrom numpy.polynomial import polynomial as P\nfrom scipy.signal import find_peaks_cwt\nfrom scipy.interpolate import interp1d\n\nREF_CALIB = 4600.0\nTSCALE = 0\n\nNOISE_THRESHOLD = 5.5 #5.5\nstop = False\n\nR_0 = 8.0 # kpc - GC-Sun distance\nV_0 = 200 # km / sun's velocity about GC\n\nrotation_velocities = {}\nwith open('data/rot_values.txt', 'r') as f:\n for l in f.readlines():\n n,v = l.split()\n rotation_velocities[n] = float(v)\n\n\ndef get_max_velocity(spec):\n global NOISE_THRESHOLD, stop, TSCALE\n stop = False\n def min_reached(x,y):\n global stop\n if stop: return False\n if y > NOISE_THRESHOLD and x < 300:\n return True\n if x > 
0:\n stop = True\n return False\n spec_norm = [(x,TSCALE*(y-P.polyval(x,spec['baseline']))) for (x,y) in spec['smooth_normed']]\n x_err = spectra.smooth_err([x for (x,y) in spec['original']])\n\n return max([(x, x_err[i]) for i, (x,y) in enumerate(spec_norm) if min_reached(x,y)], key=lambda x: x[0])\n\n\ndef main(prefix):\n global TSCALE, V_0, R_0\n calib_spec = \"MWR_S7\"\n print (\"setting up calibration...(%s)\" % calib_spec)\n calibspec = spectra.load_clean(calib_spec)\n integral = calibspec['integral']\n print (\"Integral: %5.2f \" % integral)\n TSCALE = REF_CALIB/float(integral)\n\n print (TSCALE)\n\n print (\"reading spectra...\")\n files = spectra.list_clean()\n\n rotspec_files = [f for f in files if f.startswith(prefix) or f.startswith('Rot')]\n rotspecs = spectra.loads_clean(rotspec_files)\n\n rotation_data = []\n\n smoothing_window = int(input('smooting window = '))\n\n for spec in rotspecs:\n if 'S7' in spec['source']: continue\n print(spec['source'])\n print('\\tl=%s, b=%s' % spec['coords'])\n\n l,b = spec['coords']\n l,b = float(l),float(b)\n d = 8.5 * np.sin( np.deg2rad(float(l)))\n\n vel,err = get_max_velocity(spec)\n\n x, y = zip(*spec['original'])\n x_s, y_s = x, spectra.smooth(y, window=smoothing_window)-P.polyval(x,spec['baseline'])\n dy_s = spectra.smooth_err(y,window=smoothing_window)\n peak_idxs = find_peaks_cwt(y_s, np.arange(10, 20), noise_perc=20)\n\n res = [(x_s[idx], y_s[idx]) for idx in peak_idxs if -200 < x[idx] < 200]\n if len(res) > 0:\n x_p, y_p = zip(*res)\n\n m_x,m_y = max(res, key=lambda k: k[1])\n for i, (x1, y1) in enumerate(zip(x_s,y_s)):\n if x1 < m_x:\n continue\n if y1/m_y < 0.025:\n if y_s[i+20]/m_y < 0.025:\n vel = x1\n break\n #vel = m_x\n else:\n x_p,y_p=[],[]\n plt.figure(figsize=(12,5))\n plt.fill_between(x, y_s-dy_s,y_s+dy_s, alpha=0.4, color='b')\n plt.plot(x,y_s,'-k')\n plt.plot(x_p,y_p,'dr')\n #plt.show()\n\n vel = rotation_velocities[spec['source']]#float(input('where is max vel? 
'))\n # now find error value\n closest_peak = min([(x1, y1, abs(x1-vel)) for x1,y1 in zip(x_p,y_p)], key=lambda k: k[-1])\n err = closest_peak[-1]/2.\n # find distance from 25% maximum\n for i, (x1,y1) in enumerate(zip(x_s, y_s)):\n if y1/closest_peak[1] < 0.25 and x1 > closest_peak[0]:\n err = abs(x1-closest_peak[0])/2.\n break\n\n plt.axhline(0, xmin=-200, xmax=200, linestyle='--',color='r')\n plt.axvline(vel, linestyle='--',color='r')\n plt.ylim(-1,200)\n plt.title('rotspec %s,%s d=%5.2f datapoint clean plot - %s (%5.1f km/s)' % (spec['coords'][0], spec['coords'][-1],d, spec['source'], vel))\n plt.savefig('rotspecs/clean/%s.png' % spec['source'])\n plt.cla()\n\n\n rotation_velocity = lambda v_rmax: v_rmax + V_0 * np.sin(np.deg2rad(l))\n\n\n rotation_data.append((d, rotation_velocity(vel), err))\n print('\\td=%5.1f, v=%5.1f +- %5.1f' % (d,vel,err))\n\n\n x,y,e = zip(*sorted(rotation_data, key=lambda k: k[0]))\n\n fig, ax = plt.subplots(figsize=(10,6))\n (_, caps, _) = ax.errorbar(x,y,e, fmt='o', markersize=5, color='k', capsize=4, label='measured')\n for cap in caps:\n cap.set_markeredgewidth(1)\n\n ax.xaxis.set_major_locator(ticker.MultipleLocator(1.0))\n ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.1))\n ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))\n f2 = interp1d(x, y, kind='cubic')\n ax.plot(x, f2(x), '-k')\n\n\n ax.set_xlabel('Distance from Galactic Center r [kpc]')\n ax.set_ylabel('Velocity v [km/s]')\n ax.set_title('Rotation of Neutral Hydrogen in the Milky Way')\n ax.grid()\n ax.legend()\n\n plt.savefig('rotspecs/final.png')\n\nif __name__ == '__main__':\n if len(sys.argv) == 2:\n main(sys.argv[1])\n else:\n print (\"usage: python3 rotspec.py [source prefix]\")\n","sub_path":"analysis/rotspec.py","file_name":"rotspec.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"159568980","text":"#!/usr/bin/python3\n## (C) COPYRIGHT Ingenic Limited.\n## ALL RIGHTS RESERVED\n##\n## File : auto_flush.py\n## Authors : zhluo@aries\n## Create Time: 2017-09-12:10:00:54\n## Description:\n## \n##\nfrom time import sleep\nimport sys\nimport time\n\nfor i in range(10):\n sys.stdout.write(' \\r') # clear screen\n sys.stdout.write(str(10-i)+\"\\r\")\n #sleep(1)\n\ndef progress(width, percent):\n print (\"%s %d%%\\r\" % (('%%-%ds' % width) % (width * percent / 100 * '='), percent))\n if percent >= 100:\n print\n sys.stdout.flush()\n \nfor i in range(100):\n progress(100, (i+1))\n time.sleep(1)\n","sub_path":"python/auto_flush.py","file_name":"auto_flush.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"57849438","text":"\"\"\"\nModule for cleaning main dataframe.\n\"\"\"\nimport pandas\nimport pandas as pd\n\nfrom settings import config\nfrom settings.config import reports\n\n\ndef main():\n cd = CleaningData()\n cd.delete_extra_cols(config.extra_cols)\n cd.delete_rows_without_target(config.target_cols[0])\n cd.delete_empty_cols()\n cd.filling_missing_data()\n cd.delete_empty_rows()\n cd.delete_type_object_cols()\n cd.save_data()\n\n\nclass CleaningData:\n\n def __init__(self):\n self.dataframe = pandas.read_parquet(reports['RawData'])\n\n def delete_extra_cols(self, cols: list) -> pd.DataFrame:\n \"\"\"\n Deletes extra columns from dataset.\n :param cols: list of columns for delete\n :return: Dataframe without extra cols\n \"\"\"\n try:\n print('data_cleaning.py: Delete extra cols 
from config.extra_cols list...')\n self.dataframe = self.dataframe.drop(cols, axis=1)\n return self.dataframe\n except IndexError:\n print('error: Cols not in index of cols')\n\n def delete_rows_without_target(self, target) -> None:\n \"\"\"\n Removes lines with no target value.\n :param target: target cols\n :return: None\n \"\"\"\n self.dataframe[target] = self.dataframe[target].astype(float)\n print('data_cleaning.py: Delete rows without target...')\n self.dataframe = self.dataframe[\n self.dataframe[target].notnull()\n ]\n\n def delete_empty_cols(self) -> pd.DataFrame:\n \"\"\"\n Removes columns with poorly populated data.\n :return: Dataframe with reach populated cols\n \"\"\"\n full_cols = []\n for col in self.dataframe.columns:\n if self.dataframe[col].isnull().sum() / len(self.dataframe) \\\n < config.BAD_FULLNESS_RATE:\n full_cols.append(col)\n print('data_cleaning.py: Delete empty cols...')\n self.dataframe = self.dataframe[full_cols]\n return self.dataframe\n\n def filling_missing_data(self) -> pd.DataFrame:\n \"\"\"\n Filling in missing data based on available data.\n :return: Dataframe with reach populated rows\n \"\"\"\n print('data_cleaning.py: Filing missing data...')\n self.dataframe = self.dataframe.fillna(method='backfill')\n return self.dataframe\n\n def delete_empty_rows(self) -> pd.DataFrame:\n \"\"\"\n Removes rows with Nan.\n :return: Dataframe with reach populated rows\n \"\"\"\n print('data_cleaning.py: Delete empty rows...')\n self.dataframe = self.dataframe.dropna(axis=0)\n return self.dataframe\n\n def cols_to_datetime(self, cols: list) -> pd.DataFrame:\n \"\"\"\n Transforming objects of dataframes to datetime.\n :param cols: List of objects cols\n :return: None\n \"\"\"\n for col in cols:\n try:\n print('data_cleaning.py: Cols to datetime...')\n self.dataframe[col] = pandas.to_datetime(self.dataframe[col])\n return self.dataframe\n except KeyError:\n pass\n\n def delete_type_object_cols(self) -> None:\n \"\"\"\n Deletes all non-float columns.\n :return: None\n \"\"\"\n print('data_cleaning.py: Delete type object cols...')\n for i in self.dataframe.columns:\n try:\n self.dataframe[i] = self.dataframe[i].astype(float)\n except (ValueError, TypeError):\n self.dataframe.drop(i, axis=1, inplace=True)\n\n def save_data(self) -> None:\n address = f'{config.DATA_DIRECTORY}/data_clean.parquet'\n print(f'Saving to {address}...')\n self.dataframe.to_parquet(address)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"data/data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"619657914","text":"import logging\nimport time\n\nimport httpx\nfrom tqdm import tqdm\n\nfrom ...config import AUTO_RETRY, QUALITY\nfrom .hls_download import hls_yield\n\ndef sanitize_filename(f):\n return ''.join(' - ' if _ in '<>:\"/\\\\|?*' else _ for _ in f)\n\n\ndef single_threaded_download(url, _path, tqdm_bar_init, headers):\n logger = logging.getLogger(\"Download @ \".format(_path.stem))\n\n session = httpx.Client()\n verify = headers.pop('ssl_verification', True)\n response_headers = session.head(\n url,\n allow_redirects=True,\n headers=headers)\n content_length = int(response_headers.headers.get('content-length') or 0)\n tqdm_bar = tqdm_bar_init(content_length)\n\n with open(_path, 'ab') as sw:\n d = sw.tell()\n tqdm_bar.update(d)\n while content_length > d:\n try:\n with session.stream('GET', url, allow_redirects=True, headers={'Range': 
'bytes=%d-' % d, **(headers or {})}, timeout=3) as content_stream:\n for chunks in content_stream.iter_bytes():\n size = len(chunks)\n d += size\n tqdm_bar.update(size)\n sw.write(chunks)\n except httpx.HTTPError as e:\n \"\"\"\n A delay to avoid rate-limit(s).\n \"\"\"\n logger.error(\n 'Downloading error due to \"{!r}\", retrying.'.format(e))\n time.sleep(AUTO_RETRY)\n tqdm_bar.close()\n\n\ndef hls_download(\n quality_dict,\n _path,\n episode_identifier,\n _tqdm=True,\n preferred_quality=QUALITY):\n\n session = httpx.Client()\n _tqdm_bar = None\n\n with open(_path, 'ab') as sw:\n for content in hls_yield(\n session,\n quality_dict,\n preferred_quality=preferred_quality):\n if _tqdm and not _tqdm_bar:\n _tqdm_bar = tqdm(\n desc=\"[HLS] %s \" %\n episode_identifier,\n total=content.get(\n 'total',\n 0),\n unit='ts')\n sw.write(content.get('bytes'))\n if _tqdm:\n _tqdm_bar.update(1)\n\n if _tqdm:\n _tqdm_bar.close()\n","sub_path":"animdl/core/codebase/downloader/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"193991231","text":"import pygame as pg\nimport local\nimport json\nfrom gamelogger import *\npg.init()\nRES={}\njson_config=None\nconfig={}\nRESIMG={}\nORE={}\nRESITEMS={}\nVERSION='Omega 0.0.0 (Load Failed)'\nMUSIC_ON=False\n\ndef load_config():\n global json_config,config,ORE\n f=open('data/gamedata.json',encoding='utf-8')\n json_config=json.load(f)\n config.update(json_config[0]['items'])\n ORE.update(json_config[0]['ore'])\n VERSION=json_config[1]['version']\n f.close()\n logger('basic config load complete.','info')\n return json_config\ndef loadres(file):\n return pg.image.load(file+'.png').convert_alpha()\ndef startload():\n global RESIMG,RESITEMS\n for itemname,value in config.items():\n texture=loadres(value['texture'])\n typegetting=vars(local)[value['type']]\n vardict={'name':itemname,'texture':texture}\n vardict.update(value['vars'])\n RESITEMS.update({itemname:typegetting(**vardict)})\n for imgname,path in json_config[0]['texture'].items():\n texture=loadres(path)\n RESIMG.update({imgname:texture})\n logger('texture load complete','info')\n \ndef getitemconfig(name):\n global json_config\n return json_config[0]['items'][name]\ndef getresour(name):\n global RESIMG\n return RESIMG[name]\ndef getitem(name):\n return getitem_uncopy(name).copy()\ndef getitem_uncopy(name):\n global RESITEMS\n return RESITEMS[name]\ndef getjsonconfig():\n global json_config\n return json_config","sub_path":"configmanager.py","file_name":"configmanager.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"236618884","text":"\n\nfrom xai.brain.wordbase.verbs._rue import _RUE\n\n#calss header\nclass _RUED(_RUE, ):\n\tdef __init__(self,): \n\t\t_RUE.__init__(self)\n\t\tself.name = \"RUED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"rue\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_rued.py","file_name":"_rued.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"297310874","text":"\"\"\"\nThe GC-content of a DNA string is given by the percentage of symbols in the string that are\n'C' or 'G'. For example, the GC-content of \"AGCTATAG\" is 37.5%. 
Note that the reverse\ncomplement of any DNA string has the same GC-content.\n\nDNA strings must be labeled when they are consolidated into a database. A commonly used\nmethod of string labeling is called FASTA format. In this format, the string is introduced\nby a line that begins with '>', followed by some labeling information. Subsequent lines\ncontain the string itself; the first line to begin with '>' indicates the label of the\nnext string.\n\nIn Rosalind's implementation, a string in FASTA format will be labeled by the ID\n\"Rosalind_xxxx\", where \"xxxx\" denotes a four-digit code between 0000 and 9999.\n\nGiven: At most 10 DNA strings in FASTA format (of length at most 1 kbp each).\n\nReturn: The ID of the string having the highest GC-content, followed by the GC-content\nof that string. Rosalind allows for a default error of 0.001 in all decimal answers\nunless otherwise stated; please see the note on absolute error below.\n\n>Rosalind_6404 CCTGCGGAAGATCGGCACTAGAATAGCCAGAACCGTTTCTCTGAGGCTTCCGGCCTTCCC TCCCACTAATAATTCTGAGG\n>Rosalind_5959 CCATCGGTAGCGCATCCTTAGTCCAATTAAGTCCCTATCCAGGCGCTCCGCCGAAGGTCT ATATCCATTTGTCAGCAGACACGC\n>Rosalind_0808 CCACCCTCGTGGTATGGCTAGGCATTCAGGAACCGGAGAACGCTTCAGACCAGCCCGGAC TGGGAACCTGCGGGCAGTAGGTGGAAT\n\"\"\"\n\nprint(\"Enter each input separately, and then type 'done' to continue\")\ncontents = []\ncount = 0\nwhile count < 10:\n x = input(\"Enter input: \")\n if x == \"done\":\n if count == 0:\n print(\"There must be an input\")\n continue\n else:\n count = 10\n continue\n else:\n x = ''.join(x.split())\n # check if x starts with >\n if x.startswith('>Rosalind_'):\n contents.append(x)\n count += 1\n else:\n print(\"Ignored input '\" + x + \"' as it does not have to proper input structure: >Rosalind_XXXX\")\n\n# print(contents)\n\nsum_ = []\ncount = 0\nwhile count < len(contents):\n cg = 0\n total = 0\n string = contents[count]\n string = string[14:]\n # print(string)\n if len(string) > 1000:\n print(\"Sequences must be less than 1 kbp each\")\n quit()\n else:\n for char in string:\n if char == 'C' or char == 'G':\n cg += 1\n total += 1\n elif char == 'A' or char == 'T':\n total += 1\n else:\n print(\"Sequence(s) include input other than ATCG\")\n quit()\n # cg/total *100 for cg percentage\n sum = (cg/total)*100\n sum_.append(sum)\n count += 1\n\n# find highest cg value\nhighest = max(sum_)\n# find index of highest cg value\nmax_index = sum_.index(highest)\n\n# fasta of same index\nfasta_max = contents[max_index]\nfasta_max = fasta_max[1:14]\nprint(fasta_max, highest)","sub_path":"computing-gc-content.py","file_name":"computing-gc-content.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"345592739","text":"import pymysql\n\nconn_dcloud_base = pymysql.connect(host=\"192.168.8.205\", port=3306, user=\"root\", password=\"admin123!@#qwe\",\n database=\"dcloud_base\", charset=\"utf8\")\nconn_dcloud_data = pymysql.connect(host=\"192.168.8.205\", port=3306, user=\"root\", password=\"admin123!@#qwe\",\n database=\"dcloud_data\", charset=\"utf8\")\n\ncursor_dcloud_base = conn_dcloud_base.cursor()\n\ncursor_dcloud_data = conn_dcloud_data.cursor()\n\n\ndef connectDcloudBaseMyDBAndSelect(sql):\n cursor_dcloud_base.execute(sql)\n # 数据库提交命令\n # self.conn.commit() # 执行update操作时需要写这个,否则就会更新不成功\n rs = cursor_dcloud_base.fetchall() # 获取执行sql后的结果\n # for r in rs:\n # # print(r)\n # mn_id = r[0]\n # break\n # print(mn_id)\n return rs\n\ndef connectDcloudDataMyDBAndSelect(sql):\n 
cursor_dcloud_data.execute(sql)\n # 数据库提交命令\n # self.conn.commit() # 执行update操作时需要写这个,否则就会更新不成功\n rs = cursor_dcloud_data.fetchall() # 获取执行sql后的结果\n # for r in rs:\n # # print(r)\n # mn_id = r[0]\n # break\n # print(mn_id)\n return rs\n\n\n\n\n#根据支干/断面名称查询水质类别\n#支干/断面名称\n# zhigan_name = \"昌平区测试设备1号\"\nzhigan_name =\"支干/断面名称202001010119210000000000000000\"\nzhigan_id = \"f11ca149004c4520a7645742610a5ec3\"\n#根据支干/断面查询tb_station表获取对应数据的\nmysql_yuju = \"\"\"\n SELECT * FROM tb_station WHERE NAME='%s'\n \"\"\"% zhigan_name\n\nmysql_yuju_id = \"\"\"\n SELECT * FROM tb_station WHERE ID='%s'\n \"\"\"% zhigan_id\n# cursor_dcloud_base.execute(mysql_yuju)\n# # 数据库提交命令\n# # self.conn.commit() # 执行update操作时需要写这个,否则就会更新不成功\n# rs = cursor_dcloud_base.fetchall() # 获取执行sql后的结果\n# for r in rs:\n# # print(r)\n# mn_id = r[0]\n# break\n\n# tbStation = connectDcloudBaseMyDBAndSelect(mysql_yuju)[0]\ntbStation = connectDcloudBaseMyDBAndSelect(mysql_yuju_id)[0]\n# print(tbStation)\nprint(\"ID:%s\" % tbStation[0])\nprint(\"AREA_ID(所属省ID):%s\" % tbStation[1])\nprint(\"CITY_ID(所属流域ID):%s\" % tbStation[2])\nprint(\"MCUSN(MN号):%s\" % tbStation[3])\nprint(\"NAME(支干/断面):%s\" % tbStation[4])\nprint(\"UPLOAD_INTERVAL:%s\" % tbStation[5])\nprint(\"ADDRESS(站点地址):%s\" % tbStation[6])\n\nprint(\"STANDARD_ID:%s\" % tbStation[7])\nprint(\"STANDARD_FILE_NAME(执行标准):%s\" % tbStation[8])\nprint(\"STANDARD_FILE_RUL(执行标准URL路径):%s\" % tbStation[9])\nprint(\"STANDARD_FILE_NEW_NAME:%s\" % tbStation[10])\nprint(\"OPERTOR(联系人):%s\" % tbStation[11])\nprint(\"TELEPHONE(联系方式):%s\" % tbStation[12])\nprint(\"file_id(站点图片ID):%s\" % tbStation[13])\n\nprint(\"remarks(备注):%s\" % tbStation[14])\nprint(\"CREATETIME:%s\" % tbStation[15])\nprint(\"LONGITUDE:%s\" % tbStation[16])\nprint(\"LATITUDE:%s\" % tbStation[17])\n\nmn_id = tbStation[0]\narea_id = tbStation[1]\ncityid = tbStation[2]\n\nfileid = tbStation[13]\n\n#根据所属省ID查询所属省域名称\nmysql_yuju = \"\"\"\n SELECT AREA_NAME FROM tb_area WHERE ID='%s'\n \"\"\"% area_id\n\nareaname = connectDcloudBaseMyDBAndSelect(mysql_yuju)[0]\nprint(\"CITY_NAME(所属省名称):%s\" % areaname)\n\n\n\n#根据所属流域ID查询所属流域名称\nmysql_yuju = \"\"\"\n SELECT CITY_NAME FROM tb_city WHERE ID='%s'\n \"\"\"% cityid\n\ncityname = connectDcloudBaseMyDBAndSelect(mysql_yuju)[0]\nprint(\"CITY_NAME(所属流域名称):%s\" % cityname)\n\n\n#根据支干/断面的ID查询td_station_state表获取对应STATION_ID为mn_id数据的 WATER_TYPE字段\nmysql_yuju = \"\"\"\n SELECT * FROM td_station_state WHERE STATION_ID='%s' ORDER BY ID ASC\n \"\"\" % mn_id\n# cursor_dcloud_base.execute(mysql_yuju)\n# # 数据库提交命令\n# # self.conn.commit() # 执行update操作时需要写这个,否则就会更新不成功\n# rs = cursor_dcloud_base.fetchall() # 获取执行sql后的结果\n# for r in rs:\n# # print(r)\n# mn_id = r[0]\n# break\ntdstationstate = connectDcloudBaseMyDBAndSelect(mysql_yuju)[0]\nprint(tdstationstate)\nwater_type = tdstationstate[5]\noffline_time = tdstationstate[3]\nprint(water_type)\n\n\n#根据站点ID查询td_real_upload中的数据(实时数据)\nmysql_yuju = \"\"\"\n SELECT * FROM td_real_update WHERE STATION_ID='%s'\n \"\"\"% mn_id\n# cursor_dcloud_base.execute(mysql_yuju)\n# # 数据库提交命令\n# # self.conn.commit() # 执行update操作时需要写这个,否则就会更新不成功\n# rs = cursor_dcloud_base.fetchall() # 获取执行sql后的结果\n# for r in rs:\n# # print(r)\n# mn_id = r[0]\n# break\n\nyinzi_list = connectDcloudBaseMyDBAndSelect(mysql_yuju)\nyinzi_jieguo_list = []\nfor yinzi in yinzi_list:\n # print(yinzi)\n yinzi_shujushijian = yinzi[2]\n yinzi_chaobiaoshijian =yinzi[3]\n yinzi_code = yinzi[4]\n yinzi_value = yinzi[5]\n\n # print(yinzi_code)\n #根据因子code查询tb_factor得到因子名称\n mysql_yuju = \"\"\"\n SELECT * FROM 
tb_factor WHERE FACTOR_CODE='%s'\n \"\"\" % yinzi_code\n tbfactor = connectDcloudBaseMyDBAndSelect(mysql_yuju)[0]\n # print(tbfactor)\n yinzi_name = tbfactor[2]\n # print(yinzi_name)\n yinzi_shangxian = tbfactor[9]\n yinzi_xiaxian = tbfactor[10]\n # # print(yinzi_value)\n print(\"%s:%s 上限:%s,下限:%s,超标时间:%s\" % (yinzi_name,yinzi_value,yinzi_shangxian,yinzi_xiaxian,yinzi_chaobiaoshijian))\n\n\n#超标限值\n\n\nprint(\"--------------------------------------------------------------\")\nprint(\"支干/断面对应的省:%s\"% areaname)\nprint(\"支干/断面对应的流域:%s\"% cityname)\nprint(\"支干/断面对应的支干/断面:%s\"% tbStation[4])\nprint(\"支干/断面对应的最后在线时间:%s\"% offline_time)\nprint(\"--------------------------------------------------------------\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"WWSBGJTest/util/fuwu/地图专题/超标站点详情.py","file_name":"超标站点详情.py","file_ext":"py","file_size_in_byte":5951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"600766509","text":"from xcp2k.inputsection import InputSection\n\n\nclass _krylov2(InputSection):\n def __init__(self):\n InputSection.__init__(self)\n self.Nkrylov = None\n self.Nblock = None\n self.Eps_krylov = None\n self.Eps_std_diag = None\n self.Check_mos_conv = None\n self._name = \"KRYLOV\"\n self._keywords = {'Nkrylov': 'NKRYLOV', 'Nblock': 'NBLOCK', 'Eps_krylov': 'EPS_KRYLOV', 'Eps_std_diag': 'EPS_STD_DIAG', 'Check_mos_conv': 'CHECK_MOS_CONV'}\n\n","sub_path":"xcp2k/classes/_krylov2.py","file_name":"_krylov2.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"22667797","text":"import torch\nfrom torch.utils.data import Dataset\n# import random\nimport os\nimport numpy as np\nimport pandas as pd\n\nimport nrrd \nimport SimpleITK as sitk\n\nfrom config import Config as cfg\n\n# from mylib.voxel_transform import rotation, reflection, crop, random_center\n# from mylib.utils import _triple, categorical_to_one_hot\n\nclass SITK_Transform():\n def __init__(self, image, mask, move, noise_sd, rot, empty_mask=False):\n self.image = sitk.GetImageFromArray(image)\n self.mask = sitk.GetImageFromArray(mask)\n self.noise_sd = noise_sd\n self.rot = rot \n self.move = move\n self.empty_mask = empty_mask\n \n def find_centre(self):\n centre_idx = np.array(self.image.GetSize()) / 2.\n offset = np.random.randint(-self.move, self.move + 1, size=3)\n return np.array(self.image.TransformContinuousIndexToPhysicalPoint(centre_idx)) + offset\n\n def find_centroid(self):\n stats = sitk.LabelShapeStatisticsImageFilter()\n stats.Execute(sitk.Cast(self.mask, sitk.sitkUInt8))\n centroid_coords = stats.GetCentroid(1)\n return np.asarray(centroid_coords, dtype=np.float64)\n\n def add_noise(self):\n \"\"\"add noise and convert to np array\"\"\"\n noise = np.random.normal(0, self.noise_sd, self.image.GetSize()[::-1]).astype(np.float32)\n # noise = sitk.GetImageFromArray(noise)\n # noise.CopyInformation(self.image)\n self.image = sitk.GetArrayFromImage(self.image) + noise\n\n # noise = np.random.normal(0, self.noise_sd, self.image.shape).astype(np.float32)\n # noise = sitk.GetImageFromArray(noise).CopyInformation(x)\n # return self.image + noise\n\n def reflect(self, axis):\n if axis != -1:\n ref1 = np.flip(self.image, axis)\n ref2 = np.flip(self.mask, axis)\n else:\n ref1 = np.copy(self.image)\n ref2 = np.copy(self.mask)\n return ref1, ref2\n\n def sitk_transform(self, to_copy=True):\n \n image_ref = sitk.Image(self.image.GetSize(), 
sitk.sitkFloat32)\n # mask_ref = sitk.Image(self.mask.GetSize(), sitk.sitkFloat32)\n # TODO: Set spacing or origin???\n \n # transform_group = sitk.Transform(3, sitk.sitkComposite)\n img_center = self.find_centre()\n affineT = sitk.AffineTransform(3)\n affineT.SetCenter(img_center)\n affineT.Rotate(0, 1, self.rot)\n # affineT.ToTensor()\n\n if not self.empty_mask:\n translation = sitk.TranslationTransform(3, (self.find_centroid() - img_center).tolist())\n transform_group = sitk.CompositeTransform(translation)\n transform_group.AddTransform(affineT)\n else:\n transform_group = affineT\n\n img_fill_val = float(sitk.GetArrayViewFromImage(self.image).min())\n # msk_fill_val = float(sitk.GetArrayViewFromImage(self.mask).min())\n\n self.image = sitk.Resample(self.image, image_ref, transform_group, sitk.sitkLinear, img_fill_val)\n self.add_noise()\n # self.mask = sitk.GetArrayFromImage(sitk.Resample(self.mask, mask_ref, transform_group, sitk.sitkLinear, msk_fill_val))\n\n # self.image = sitk.GetArrayFromImage(self.image)\n self.mask = sitk.GetArrayFromImage(self.mask)\n self.image, self.mask = self.reflect(0) if np.random.random(1)[0] > 0.5 else self.reflect(1) # TODO: which orientation to reflect?\n\n # if to_copy:\n # self.image = np.stack([self.image,self.image,self.image],0)\n\n return torch.from_numpy(self.image.astype(np.float32)), torch.from_numpy(self.mask.astype(np.float32))\n\nclass LIDCSegDataset(Dataset):\n def __init__(self, data_path=cfg.DATASET_IRC, train=True, copy_channels=True):\n super().__init__()\n self.data_path = data_path\n self.copy_channels = copy_channels\n\n df = pd.read_csv(os.path.join(data_path, 'fmap.csv'))\n if train: \n self.names = df[df['split']==\"train\"]['image'].values\n else:\n self.names = df[df['split']==\"test\"]['image'].values\n\n def __getitem__(self, index):\n \n img, _ = nrrd.read(os.path.join(self.data_path, \"images\", self.names[index].split('/')[-1]))\n msk, _ = nrrd.read(os.path.join(self.data_path, \"masks\", self.names[index].split('/')[-1]))\n\n # img = sitk.ReadImage(os.path.join(self.data_path, \"images\", self.names[index].split('/')[-1]))\n # msk = sitk.ReadImage(os.path.join(self.data_path, \"masks\", self.names[index].split('/')[-1]))\n \n if len(msk.shape) == 0:\n msk = np.zeros(img.shape)\n transform = SITK_Transform(img, msk, cfg.MOVE, cfg.NOISE_SD, cfg.ROT, empty_mask=True)\n else:\n transform = SITK_Transform(img, msk, cfg.MOVE, cfg.NOISE_SD, cfg.ROT)\n \n img, msk = transform.sitk_transform(self.copy_channels)\n\n # img, msk = torch.from_numpy(img.astype(np.float32)), torch.from_numpy(msk.astype(np.float32))\n\n return img, msk\n\n def __len__(self):\n return len(self.names)\n\n\n# original script defined in \n# https://github.com/M3DV/ACSConv/blob/c2ad11dd46718598459fc4e928f456d88fae3789/experiments/lidc/lidc_dataset.py\n\n# class Transform:\n# def __init__(self, size, move=None, train=True, copy_channels=True):\n# self.size = _triple(size)\n# self.move = move\n# self.copy_channels = copy_channels\n# self.train = train\n\n# def __call__(self, voxel, seg):\n# shape = voxel.shape\n# voxel = voxel/255. 
- 1\n# if self.train:\n# if self.move is not None:\n# center = random_center(shape, self.move)\n# else:\n# center = np.array(shape) // 2\n# voxel_ret = crop(voxel, center, self.size)\n# seg_ret = crop(seg, center, self.size)\n \n# angle = np.random.randint(4, size=3)\n# voxel_ret = rotation(voxel_ret, angle=angle)\n# seg_ret = rotation(seg_ret, angle=angle)\n\n# axis = np.random.randint(4) - 1\n# voxel_ret = reflection(voxel_ret, axis=axis)\n# seg_ret = reflection(seg_ret, axis=axis)\n# else:\n# center = np.array(shape) // 2\n# voxel_ret = crop(voxel, center, self.size)\n# seg_ret = crop(seg, center, self.size)\n \n# if self.copy_channels:\n# return np.stack([voxel_ret,voxel_ret,voxel_ret],0).astype(np.float32), \\\n# np.expand_dims(seg_ret,0).astype(np.float32)\n# else:\n# return np.expand_dims(voxel_ret, 0).astype(np.float32), \\\n# np.expand_dims(seg_ret,0).astype(np.float32)\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":6694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"454270113","text":"import pyparsing as pp\n\nfrom pydbml.classes import ReferenceBlueprint\n\nfrom .common import _\nfrom .common import _c\nfrom .common import c\nfrom .common import n\nfrom .generic import name\n\npp.ParserElement.setDefaultWhitespaceChars(' \\t\\r')\n\nrelation = pp.oneOf(\"> - <\")\nref_inline = pp.Literal(\"ref:\") - relation('type') - name('table') - '.' - name('field')\n\n\ndef parse_inline_relation(s, l, t):\n '''\n ref: < table.column\n '''\n return ReferenceBlueprint(type_=t['type'],\n table2=t['table'],\n col2=t['field'])\n\n\nref_inline.setParseAction(parse_inline_relation)\n\non_option = (\n pp.CaselessLiteral('no action')\n | pp.CaselessLiteral('restrict')\n | pp.CaselessLiteral('cascade')\n | pp.CaselessLiteral('set null')\n | pp.CaselessLiteral('set default')\n)\nupdate = pp.CaselessLiteral(\"update:\").suppress() + _ + on_option\ndelete = pp.CaselessLiteral(\"delete:\").suppress() + _ + on_option\n\nref_setting = _ + (update('update') | delete('delete')) + _\n\nref_settings = (\n '['\n + ref_setting\n + (\n ','\n + ref_setting\n )[...]\n + ']' + c\n)\n\n\ndef parse_ref_settings(s, l, t):\n '''\n [delete: cascade]\n '''\n result = {}\n if 'update' in t:\n result['on_update'] = t['update'][0]\n if 'delete' in t:\n result['on_delete'] = t['delete'][0]\n if 'comment' in t:\n result['comment'] = t['comment'][0]\n return result\n\n\nref_settings.setParseAction(parse_ref_settings)\n\ncomposite_name = (\n '(' + pp.White()[...]\n - name + pp.White()[...]\n + (\n pp.White()[...] + \",\"\n + pp.White()[...] 
+ name\n + pp.White()[...]\n )[...]\n + ')'\n)\nname_or_composite = name | pp.Combine(composite_name)\n\nref_body = (\n name('table1')\n - '.'\n - name_or_composite('field1')\n - relation('type')\n - name('table2')\n - '.'\n - name_or_composite('field2') + c\n + ref_settings('settings')[0, 1]\n)\n\n\nref_short = _c + pp.CaselessLiteral('ref') + name('name')[0, 1] + ':' - ref_body\nref_long = _c + (\n pp.CaselessLiteral('ref') + _\n + name('name')[0, 1] + _\n + '{' + _\n - ref_body + _\n - '}'\n)\n\n\ndef parse_ref(s, l, t):\n '''\n ref name: table1.col1 > table2.col2\n or\n ref name {\n table1.col1 < table2.col2\n }\n '''\n init_dict = {\n 'type_': t['type'],\n 'table1': t['table1'],\n 'col1': t['field1'],\n 'table2': t['table2'],\n 'col2': t['field2']\n }\n if 'name' in t:\n init_dict['name'] = t['name']\n if 'settings' in t:\n init_dict.update(t['settings'])\n\n # comments after settings have priority\n if 'comment' in t:\n init_dict['comment'] = t['comment'][0]\n if 'comment' not in init_dict and 'comment_before' in t:\n comment = '\\n'.join(c[0] for c in t['comment_before'])\n init_dict['comment'] = comment\n\n ref = ReferenceBlueprint(**init_dict)\n return ref\n\n\nref_short.setParseAction(parse_ref)\nref_long.setParseAction(parse_ref)\n\nref = ref_short | ref_long + (n | pp.StringEnd())\n","sub_path":"pydbml/definitions/reference.py","file_name":"reference.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"497446025","text":"import requests\nfrom pymongo import MongoClient\nconn = MongoClient('localhost', 27017)\ndb = conn.self\n\n\nclass DealIp(object):\n def __init__(self):\n self.ip_url = 'http://331935043663215668.standard.hutoudaili.com/?num=1&area_type=3&scheme=1&anonymity=3'\n\n # 一次只请求一个\n def get_html_returnip(self):\n html = requests.get(self.ip_url)\n ip = html.text\n return ip\n\n def add_new_and_remove_old(self):\n http = self.get_html_returnip()\n db.ip.remove()\n db.ip.insert_one({'http': http})\n return self.get_ip()\n\n def get_ip(self):\n try:\n http = db.ip.find_one()['http']\n return http\n except Exception as e:\n print(e)\n return self.add_new_and_remove_old()\n","sub_path":"dealIp.py","file_name":"dealIp.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"70027036","text":"from django.shortcuts import render, redirect\nfrom .models import Product, Contact, Orders, OrderUpdate\nfrom django.contrib.auth.forms import UserCreationForm\n\nfrom django.contrib.auth import authenticate, login, logout\n\nfrom django.contrib import messages\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import Group\n\nfrom .forms import CreateUserForm, CustomerForm\nfrom .decorators import unauthenticated_user, allowed_users, admin_only\nfrom math import ceil\nimport json\nfrom django.views.decorators.csrf import csrf_exempt\nfrom PayTm import Checksum\nfrom django.http import HttpResponse\nMERCHANT_KEY = '_cNH09mTKV7Qa7SI'\n\n@unauthenticated_user \ndef registerPage(request):\n form = CreateUserForm()\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n if form.is_valid():\n user = form.save()\n username = form.cleaned_data.get('username')\n\n group = Group.objects.get(name='customer')\n user.groups.add(group)\n\n messages.success(request, 'Account was created for ' + username)\n\n return redirect('login')\n\n context = {'form': 
form}\n return render(request, 'shop/register.html', context)\n \n\n@unauthenticated_user \ndef loginPage(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n\n user = authenticate(request, username=username, password=password)\n\n if user is not None:\n login(request, user)\n return redirect('/')\n \n else:\n messages.info(request, 'Username OR password is incorrect')\n \n context = {}\n return render(request, 'shop/login.html', context)\n \n \n\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n@login_required(login_url='login')\n@admin_only\ndef index(request):\n #products = Product.objects.all()\n #print(products)\n #n = len(products)\n #nSlides = (n//4 + ceil((n/4)-(n//4))) #calculation of the number of slides\n #params = {'no_of_slides': nSlides, 'range': range(1, nSlides), 'product': products} \n#I am going to make a list, where it contains the sliders list, so basically i will have a list of list for which that list will represent a slider for it will contain the category of each products\n#the list of lists will be called allProds( it contains all products)\n\n#its just the category which are different, that's why there are two lists, \n# but the number of slides will remain the same!\n #allProds = [[products, range(1, nSlides), nSlides], \n # [products, range(1, nSlides), nSlides]]\n allProds = []\n catprods = Product.objects.values('category', 'id')\n #this method will returns a list of a given object's own enumerable property values, \n #in the same order as that provided by a for...in loop\n # 'category', 'id' are object whose enumerable own property values are to be returned.\n cats = {item['category'] for item in catprods}\n #I have created a setter just above which has a set comprehension in other word,\n #has a list/set comprihension where i will fetch item category for items in catprods in the method above.\n for cat in cats:\n prod = Product.objects.filter(category=cat)\n n = len(prod)\n nSlides = n // 4 + ceil((n / 4) - (n // 4)) #calculation of the number of slides\n allProds.append([prod, range(1, nSlides), nSlides])\n params = {'allProds':allProds}\n #In the parameters, first of all, i am going to add the number of slides, \n #nSlides function and the function range for range of slides it will contain,\n #then i am going to pass the template for products\n return render(request, 'shop/index.html', params)\n\ndef userPage(request):\n context = {}\n return render(request, 'shop/index.html', context)\n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['customer'])\ndef accountSettings(request):\n customer = request.user.customer\n form = CustomerForm(instance=customer)\n\n if request.method == 'POST':\n form = CustomerForm(request.POST, request.FILES, instance=customer)\n if form.is_valid():\n form.save()\n\n context = {'form':form}\n return render(request, 'shop/account_settings.html', context)\n\n\ndef searchMatch(query, item):\n '''return true only if the query matches the required items!'''\n if query in item.desc.lower() or query in item.product_name.lower() or query in item.category.lower():\n return True\n else:\n return False\n\n\ndef search(request):\n query = request.GET.get('search')\n allProds = []\n catprods = Product.objects.values('category', 'id')\n cats = {item['category'] for item in catprods}\n for cat in cats:\n prodtemp = Product.objects.filter(category=cat)\n prod = [item for item in prodtemp if searchMatch(query, item)]\n\n n = 
len(prod)\n        nSlides = n // 4 + ceil((n / 4) - (n // 4))\n        if len(prod) != 0:\n            allProds.append([prod, range(1, nSlides), nSlides])\n    params = {'allProds':allProds, \"msg\": \"\"}\n    if len(allProds) == 0 or len(query) < 4:\n        params = {'msg': \"Please make sure to enter the right searches!\"}\n    return render(request, 'shop/search.html', params)\n\ndef about(request):\n    return render(request, 'shop/about.html')\n\ndef privacy(request):\n    return render(request, 'shop/privacy.html')\n\ndef faq(request):\n    return render(request, 'shop/faq.html')\n\n\ndef contact(request):\n    thank = False\n    if request.method==\"POST\":\n        name = request.POST.get('name', '')\n        email = request.POST.get('email', '')\n        phone = request.POST.get('phone', '')\n        desc = request.POST.get('desc', '')\n        contact = Contact(name=name, email=email, phone=phone, desc=desc)\n        contact.save()\n        thank = True\n    return render(request, 'shop/contact.html', {'thank': thank})\n\n@login_required(login_url='login')\ndef tracker(request):\n    if request.method==\"POST\": #if the request is a POST, it will read the orderId and email; otherwise it will render the blank tracker form.\n        orderId = request.POST.get('orderId', '')\n        email = request.POST.get('email', '')\n        try: #The try block lets me test this block of code for errors: if a matching order is found, it builds and returns the JSON response, otherwise it reports that no item was found.\n            order = Orders.objects.filter(order_id=orderId, email=email)\n            if len(order)>0:\n                update = OrderUpdate.objects.filter(order_id=orderId)\n                updates = []\n                for item in update:\n                    updates.append({'text': item.update_desc, 'time': item.timestamp})\n                response = json.dumps({\"status\":\"success\", \"updates\": updates, \"itemsJson\": order[0].items_json}, default=str)\n                return HttpResponse(response)\n            else:\n                return HttpResponse('{\"status\":\"noitem\"}')\n        except Exception as e: #The except block here lets me handle the error!
\n            return HttpResponse('{\"status\":\"error\"}')\n\n    return render(request, 'shop/tracker.html')\n\n\n\n\n@login_required(login_url='login')\ndef productView(request, myid):\n\n    # retrieve the product using the id = myid\n    product = Product.objects.filter(id=myid)\n    return render(request, 'shop/prodView.html', {'product':product[0]})\n\n@login_required(login_url='login')\ndef checkout(request):\n    if request.method==\"POST\":\n        items_json = request.POST.get('itemsJson', '')\n        name = request.POST.get('name', '')\n        amount = request.POST.get('amount', '')\n        email = request.POST.get('email', '')\n        address = request.POST.get('address1', '') + \" \" + request.POST.get('address2', '')\n        city = request.POST.get('city', '')\n        state = request.POST.get('state', '')\n        zip_code = request.POST.get('zip_code', '')\n        phone = request.POST.get('phone', '')\n        order = Orders(items_json=items_json, name=name, email=email, address=address, city=city,\n                       state=state, zip_code=zip_code, phone=phone, amount=amount)\n        order.save()\n        update = OrderUpdate(order_id=order.order_id, update_desc=\"The order has been placed\") #I want to make it such that whenever someone places an order, it should appear that the order has been placed\n        update.save()\n        thank = True\n        id = order.order_id\n        # return render(request, 'shop/checkout.html', {'thank':thank, 'id': id})\n        # Request Paytm to transfer the amount to your account after payment by the user\n        param_dict = {\n\n            'MID':'IsoNWP77745394082690',\n            'ORDER_ID': str(order.order_id),\n            'TXN_AMOUNT': str(amount),\n            'CUST_ID': email,\n            'INDUSTRY_TYPE_ID': 'Retail',\n            'WEBSITE': 'WEBSTAGING',\n            'CHANNEL_ID': 'WEB',\n            'CALLBACK_URL':'http://127.0.0.1:8000/shop/handlerequest/',\n\n        }\n        param_dict['CHECKSUMHASH'] = Checksum.generate_checksum(param_dict, MERCHANT_KEY)\n        return render(request, 'shop/paytm.html', {'param_dict': param_dict})\n\n    return render(request, 'shop/checkout.html')\n\n@login_required(login_url='login')\n@csrf_exempt\ndef handlerequest(request):\n    # Paytm will send you a POST request here\n    form = request.POST\n    response_dict = {}\n    for i in form.keys():\n        response_dict[i] = form[i]\n        if i == 'CHECKSUMHASH':\n            checksum = form[i]\n\n    verify = Checksum.verify_checksum(response_dict, MERCHANT_KEY, checksum)\n    if verify:\n        if response_dict['RESPCODE'] == '01':\n            print('order successful')\n        else:\n            print('order was not successful because ' + response_dict['RESPMSG'])\n    return render(request, 'shop/paymentstatus.html', {'response': response_dict})\n","sub_path":"shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"364481878","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/2/8 11:11:03\n# @Author : dlg\n# @File : my_queue.py\n\"\"\"\n\nBasic implementation of queue functionality\n\n\"\"\"\n\nimport queue\n\nclass my_queue(object):\n\n    def __init__(self, size):\n        \"\"\"\n        :param size: queue capacity\n        \"\"\"\n        self.size = size\n        self.index_left = 0\n        self.index_right = 0\n        self.arr = [0] * size # initialize the backing array\n        self.count = 0 # track the current number of elements in the queue\n\n    def push(self, data):\n        \"\"\"\n        Enqueue at the right end\n        :param data:\n        \"\"\"\n        if self.count < self.size:\n            self.arr[self.index_right] = data\n            self.index_right = (self.index_right + 1) % self.size\n            self.count += 1\n        else:\n            print('queue is full.')\n    def pop(self):\n        \"\"\"\n        Dequeue from the left end\n        :return:\n        \"\"\"\n        if self.count > 0:\n            temp_data = self.arr[self.index_left]\n            self.index_left = (self.index_left + 1) % self.size\n            self.count -= 1\n            return temp_data\n        else:\n            print('queue is 
empty.')\n\n def print(self):\n temp_index_left = self.index_left\n while self.count > 0:\n print(self.arr[temp_index_left])\n temp_index_left = (temp_index_left + 1) % self.size\n print('---------------------------------------')\n\nif __name__ == '__main__':\n q = my_queue(3)\n q.push(1)\n q.push(2)\n q.push(3)\n q.push(4)\n\n print(q.pop())\n print(q.pop())\n q.push(5)\n print(q.pop())\n q.push(6)\n print(q.pop())\n q.push(7)\n print(q.pop())\n q.push(8)\n print(q.pop())\n\n q.push(1)\n q.push(2)\n q.push(3)\n q.push(4)\n\n print(q.pop())\n print(q.pop())\n q.push(5)\n print(q.pop())\n q.push(6)\n print(q.pop())\n q.push(7)\n print(q.pop())\n q.push(8)\n print(q.pop())\n print(q.pop())\n print(q.pop())\n print(q.pop())\n\n\n","sub_path":"python/pycharm_workspace/python_cookbook/chapter01/my_queue.py","file_name":"my_queue.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"108492560","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: /Users/alfredo/python/vpygora/pygora/tests/test_pygora.py\n# Compiled at: 2010-09-16 20:46:02\nimport sys\nif '../' not in sys.path:\n sys.path.append('../')\nimport unittest, os\nfrom pygora import Pygora\nimport shutil\n\nclass TestPygora(unittest.TestCase):\n\n def setUp(self):\n if os.path.isdir('/tmp/pygora'):\n shutil.rmtree('/tmp/pygora')\n os.mkdir('/tmp/pygora')\n source_file = open('/tmp/pygora/source.py', 'w')\n test_file = open('/tmp/pygora/test_source.py', 'w')\n ignore_file = open('/tmp/pygora/ignoreme.py', 'w')\n ignore_file.write('some source code')\n ignore_file.close()\n source_file.write('# comment line\\n def a_func():\\n \"docstring\"\\n pass\\n ')\n source_file.close()\n test_file.write('def test_source():\\n pass')\n test_file.close()\n\n def tearDown(self):\n try:\n os.remove('/tmp/pygora')\n except Exception:\n pass\n\n def test_test_lines(self):\n \"\"\"Test lines should always be cero\"\"\"\n goat = Pygora(path='.')\n expected = 0\n actual = goat.test_lines\n self.assertEqual(actual, expected)\n\n def test_source_lines(self):\n \"\"\"Source lines should always be cero\"\"\"\n goat = Pygora(path='.')\n expected = 0\n actual = goat.source_lines\n self.assertEqual(actual, expected)\n\n def test_path(self):\n \"\"\"Path should always be the cwd\"\"\"\n expected = os.getcwd()\n goat = Pygora()\n actual = goat.path\n self.assertEqual(actual, expected)\n\n def test_skip_line_comment(self):\n \"\"\"Return False if line starts with #\"\"\"\n goat = Pygora()\n line = '# a commented out line'\n actual = goat.skip_line(line)\n self.assertFalse(actual)\n\n def test_skip_line_docstring(self):\n \"\"\"Do NOT Return False if line starts with quote\"\"\"\n goat = Pygora()\n line = '\"a docstring\" '\n actual = goat.skip_line(line)\n self.assertTrue(actual)\n\n def test_skip_line_empty(self):\n \"\"\"Return False if line is empty\"\"\"\n goat = Pygora()\n line = ' '\n actual = goat.skip_line(line)\n self.assertFalse(actual)\n\n def test_recognize_Test(self):\n \"\"\"Return Test if the file is a test file\"\"\"\n goat = Pygora(path='/tmp/test_source.py')\n actual = goat.test_lines\n expected = 10\n self.assertEqual(actual, expected)\n\n def test_line_count(self):\n \"\"\"Return the total number of lines for a file\"\"\"\n goat = Pygora()\n expected = '3'\n actual = goat.line_count('/tmp/pygora/source.py')\n 
self.assertEqual(actual, expected)\n\n    def test_line_count_test(self):\n        \"\"\"Return the total number of lines for a test file\"\"\"\n        goat = Pygora()\n        expected = '2'\n        actual = goat.line_count('/tmp/pygora/test_source.py')\n        self.assertEqual(actual, expected)\n\n\nif __name__ == '__main__':\n    unittest.main()","sub_path":"pycfiles/pygora_phchcc-0.0.13-py3-none-any/test_pygora.py","file_name":"test_pygora.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"289943839","text":"\"\"\"Bookmark User Relationship and optional field\n\nRevision ID: 56d37c48fd9c\nRevises: dd790f425d0c\nCreate Date: 2019-01-09 17:22:23.175554\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '56d37c48fd9c'\ndown_revision = 'dd790f425d0c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('user', sa.Column('settings', sa.String(length=5), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_column('user', 'settings')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/56d37c48fd9c_bookmark_user_relationship_and_optional_.py","file_name":"56d37c48fd9c_bookmark_user_relationship_and_optional_.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"106172006","text":"import RPi.GPIO as GPIO\nimport time\n# Use BCM GPIO numbering\nGPIO.setmode(GPIO.BCM)\n# Declare output ports\nout1 = 26\nout2 = 19\nout3 = 13\n# Declare input ports\nin1 = 21\nin2 = 20\n# Set out1, out2, and out3 as output ports\nGPIO.setup(out1, GPIO.OUT)\nGPIO.setup(out2, GPIO.OUT)\nGPIO.setup(out3, GPIO.OUT)\n# Set in1 and in2 as input ports and activate the internal pull-up resistors\nGPIO.setup(in1, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(in2, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n# Make threaded callback function\ndef LEDwithButton(pin):\n    if (pin==in1):\n        GPIO.output(out1, 0)\n        time.sleep(.5)\n        GPIO.output(out1, 1)\n        time.sleep(.5)\n        GPIO.output(out1, 0)\n        print('LED')\n    elif (pin==in2):\n        GPIO.output(out2, 0)\n        time.sleep(.5)\n        GPIO.output(out2, 1)\n        time.sleep(.5)\n        GPIO.output(out2, 0)\n\nGPIO.add_event_detect(in1, GPIO.RISING, callback=LEDwithButton, bouncetime=100)\nGPIO.add_event_detect(in2, GPIO.RISING, callback=LEDwithButton, bouncetime=100)\n\ntry:\n    while True:\n        GPIO.output(out3, 0)\n        time.sleep(0.5)\n        GPIO.output(out3, 1)\n        time.sleep(.5)\nexcept KeyboardInterrupt:\n    print('\\nExiting')\nexcept Exception as e:\n    print('\\n' + str(e))\n\nGPIO.cleanup()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"231592687","text":"\"\"\"Define automations for plants.\"\"\"\n\n# pylint: disable=attribute-defined-outside-init,unused-argument\n\nfrom typing import Union\n\nfrom automation import Automation, Feature # type: ignore\n\n\nclass PlantAutomation(Automation):\n    \"\"\"Define an automation for plants.\"\"\"\n\n\nclass LowMoisture(Feature):\n    \"\"\"Define a feature to notify us of low moisture.\"\"\"\n\n    @property\n    def current_moisture(self) -> int:\n        \"\"\"Define a property to get the current moisture.\"\"\"\n        return int(self.hass.get_state(self.entities['current_moisture']))\n\n
def initialize(self) -> None:\n \"\"\"Initialize.\"\"\"\n self._low_moisture = False\n\n self.hass.listen_state(\n self.low_moisture_detected,\n self.entities['current_moisture'],\n constrain_input_boolean=self.enabled_toggle)\n\n def low_moisture_detected( # pylint: disable=too-many-arguments\n self, entity: Union[str, dict], attribute: str, old: str, new: str,\n kwargs: dict) -> None:\n \"\"\"Notify when the plant's moisture is low.\"\"\"\n if (not (self._low_moisture)\n and int(new) < int(self.properties['moisture_threshold'])):\n self.hass.log(\n 'Notifying people at home that plant is low on moisture')\n\n self._low_moisture = True\n self.handles[\n self.hass.\n friendly_name] = self.hass.notification_manager.repeat(\n '{0} is Dry 💧'.format(self.hass.friendly_name),\n '{0} is at {1}% moisture and needs water.'.format(\n self.hass.friendly_name, self.current_moisture),\n self.properties['notification_interval'],\n target='home')\n else:\n self._low_moisture = False\n if self.hass.friendly_name in self.handles:\n self.handles.pop(self.hass.friendly_name)()\n","sub_path":"appdaemon/settings/apps/plants.py","file_name":"plants.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"497121581","text":"from rest_framework.authtoken.views import obtain_auth_token\nfrom django.urls import path, include\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\n# this library is to import the json web token views\nfrom user_app.api import views\n\nurlpatterns = [\n path('login/', obtain_auth_token, name='login'),\n path('logout/', views.logout_view, name='logut'),\n path('register/', views.registration_view, name='register'),\n \n # These are the custom urls for jwt(json web token)\n # api/token/ - url takes username and password and generates the access token and refresh token as well\n # access token is required to access the authenticated views [IsAuthenticatedOrReadOnly]\n # access token life is 5 minutes and it expires after that\n # refresh token is to regenerate the new access token for those particular credentials\n # so the url api/token/refresh/ - takes refresh as input and then gives new access token\n # the life of refresh token is for 24 hours\n path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n\n # Json web tokens are not stored in our database but is stored in the users local storage not in the servers'\n # database\n]\n","sub_path":"user_app/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"120853602","text":"import time\nimport datetime\nimport os\n\n# clears data by removing interpunction, digits, line endings, redudant spaces and converting to lower case\ndef clearData(data): # need to pass language name in parameter to check custom interpunction\n standardInterpunction = [\"`\", \"~\", \"!\", \"@\", \"#\", \"$\", \"%\", \"^\", \"&\", \"*\", \"(\", \")\", \"-\", \"_\", \"=\", \"+\", \"[\", \"{\", \"]\", \"}\", \"\\\\\", \"|\", \";\", \":\", \"'\", \"\\\"\", \",\", \"<\", \".\", \">\", \"/\", \"?\", \"*\"]\n digits = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"0\"]\n customInterpunction = list()\n\n outputData = \"\"\n for literal in data:\n if not isinstance(literal, str):\n continue\n if 
literal in standardInterpunction:\n continue\n if literal in customInterpunction:\n continue\n if literal in digits:\n continue\n if literal == \"\\n\" or literal == \"\\r\" or literal == \"\\r\\n\":\n outputData = outputData + \" \"\n \n outputData = outputData + literal.lower()\n\n return outputData\n\ntry:\n # loading up files and clearing data\n filesList = os.listdir(\"text samples\")\n for fileName in filesList:\n f = open(\"text samples/\" + fileName, \"r\", encoding=\"utf8\", newline=\"\\n\")\n fileContent = f.read()\n f.close()\n clearedData = clearData(fileContent)\n # print(\"clear data\")\n\n f = open(\"cleared data/\" + fileName, \"w\", encoding=\"utf8\")\n f.write(clearedData)\n f.close()\n \n print(\"Finished succesfully!\")\n\nexcept Exception as ex:\n time = time.time()\n timestamp = datetime.datetime.fromtimestamp(time).strftime('%Y%m%d%H%M%S')\n logFile = open(\"error logs/errorlog\" + timestamp + \".log\", \"w\")\n logFile.write(str(ex))\n logFile.close()\n print(\"Error: \" + str(ex))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"104702499","text":"import unittest\nimport sample_unittest\n\nclass TestSample(unittest.TestCase):\n def test_add(self):\n actual = sample_unittest.add(1,2)\n expected = 4\n self.assertEqual(actual, expected)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_sample_unittest2.py","file_name":"test_sample_unittest2.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"595763668","text":"import os\nfrom functools import wraps\nfrom flask import Blueprint, abort, request, jsonify, render_template, make_response, g\nfrom flask.ext.httpauth import HTTPBasicAuth\nfrom sqlalchemy.inspection import inspect\n\nfrom models import *\nfrom config import basedir\nfrom app.lib.api.factory import resource_factory\n\nadmin = Blueprint('admin', __name__, template_folder=os.path.join(basedir, 'app/lib/admin/templates'))\n\n# ---------------------------------------\n# Security Setup\n# ---------------------------------------\n\nauth = HTTPBasicAuth()\n\n\n# Auth module callback functions\n# -------------------------------\n@auth.verify_password\ndef verify_password(username_or_token, password):\n # first try to authenticate by token\n user = User.verify_auth_token(username_or_token)\n if not user:\n # try to authenticate with username/password\n user = User.query.filter_by(username=username_or_token).first()\n if not user or not user.verify_password(password):\n return False\n g.user = user\n return True\n\n\n@auth.error_handler\ndef unauthorized():\n return make_response(jsonify({'error': 'Unauthorized access'}), 403)\n\n\n# Token Auth route\n# -------------------------------\n@admin.route('/users/token', methods=['GET', 'POST'])\n@auth.login_required\ndef get_auth_token():\n token = g.user.generate_auth_token()\n return jsonify({'token': token.decode('ascii')})\n\n\n# User Decorators\n# -------------------------------\ndef get_user(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n auth = request.authorization\n if request.method != 'OPTIONS':\n if auth:\n password = verify_password(auth.username, auth.password)\n else:\n g.user = None\n return f(*args, **kwargs)\n return decorated\n\n\n# Role Decorators\n# -------------------------------\ndef roles_required(f):\n @wraps(f)\n def 
decorated_function(*args, **kwargs):\n        roles = kwargs['relationship'] if 'relationship' in kwargs else kwargs['resource']\n        id = kwargs['rel_id'] if 'rel_id' in kwargs else None\n        id = kwargs['relationship_id'] if 'relationship_id' in kwargs else id\n        id = kwargs['id'] if 'id' in kwargs else id\n\n        if not g.user.roles_required([roles]):\n            if id and id not in [entity.id for entity in getattr(g.user, roles)]:\n                return make_response(jsonify({'error': 'Unauthorized access'}), 403)\n            elif not id:\n                return make_response(jsonify({'error': 'Unauthorized access'}), 403)\n            else:\n                return make_response(jsonify({'error': 'Unauthorized access'}), 403)\n        return f(*args, **kwargs)\n\n    return decorated_function\n\n\ndef roles_accepted(f):\n    @wraps(f)\n    def decorated_function(*args, **kwargs):\n        roles = kwargs['relationship'] if 'relationship' in kwargs else kwargs['resource']\n        id = kwargs['rel_id'] if 'rel_id' in kwargs else None\n        id = kwargs['relationship_id'] if 'relationship_id' in kwargs else id\n        id = kwargs['id'] if 'id' in kwargs else id\n\n        if not g.user.roles_accepted([roles]):\n            if id and id not in [entity.id for entity in getattr(g.user, roles)]:\n                return make_response(jsonify({'error': 'Unauthorized access'}), 403)\n            elif not id:\n                return make_response(jsonify({'error': 'Unauthorized access'}), 403)\n            else:\n                return make_response(jsonify({'error': 'Unauthorized access'}), 403)\n        return f(*args, **kwargs)\n    return decorated_function\n\n\ndef admin_required(f):\n    @wraps(f)\n    def decorated_function(*args, **kwargs):\n        if not g.user.roles_required(['admin']):\n            return make_response(jsonify({'error': 'Unauthorized access'}), 403)\n        return f(*args, **kwargs)\n    return decorated_function\n\n\n# ---------------------------------------\n# Admin Template Endpoints\n# ---------------------------------------\n\n@admin.route('/