diff --git "a/6250.jsonl" "b/6250.jsonl" new file mode 100644--- /dev/null +++ "b/6250.jsonl" @@ -0,0 +1,711 @@ +{"seq_id":"42839191075","text":"#-*-coding:utf-8-*-\r\n\r\nimport codecs\r\n\r\n#所有词性\r\nww = []\r\n#所有的词性\r\npos = []\r\n\r\nww_pos_text = open('ww_pos.txt', 'w')\r\nyuliao_text = open('yuliao.txt', 'w')\r\npos_text = open('pos.txt', 'w')\r\nww_text = open('ww.txt', 'w')\r\nfin = codecs.open(\"199801.txt\", \"r\")\r\nwhile(True):\r\n text = fin.readline()\r\n # print(text)\r\n if(text == \"\"):\r\n break\r\n tmp = text.split()\r\n if tmp:\r\n tmp.pop(0)\r\n print(tmp)\r\n for each in tmp:\r\n yuliao_text.write(each + \" \")\r\n yuliao_text.write('\\n')\r\n\r\n n = len(tmp)\r\n for i in range(0, n):\r\n word = tmp[i].split('/')\r\n ww_pos_text.write(word[0] + '/' + word[1] + '\\n')\r\n if(word[1] not in pos):\r\n pos.append(word[1])\r\n pos_text.write(word[1])\r\n pos_text.write('\\n')\r\n word[0].replace('[', '')\r\n if(word[0] not in ww):\r\n ww.append(word[0])\r\n ww_text.write(word[0])\r\n ww_text.write('\\n')\r\nww_text.close()\r\npos_text.close()\r\nyuliao_text.close()\r\nww_pos_text.close()\r\n\r\n\r\n\r\n","repo_name":"yorrsirr/HMM-Viterbi","sub_path":"renmin.py","file_name":"renmin.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74470846025","text":"import os\nimport json\nimport base64\n\nclass Load_Streaming_Data(object):\n\n def __init__( self, app, auth,render_template,request,\n app_files,sys_files,redis_old_handle, redis_new_handle,gm ):\n self.app = app\n self.auth = auth\n self.render_template = render_template\n self.request = request\n self.app_files = app_files\n self.sys_files = sys_files\n self.redis_old_handle = redis_old_handle\n self.redis_new_handle = redis_new_handle\n self.gm = gm\n temp = self.gm.match_terminal_relationship( \"MINUTE_ACQUISITION\")[0]\n self.minute_store = temp[\"measurement\"]\n \n a1 = auth.login_required( self._one_minute )\n app.add_url_rule('/irrigation_streaming_data/display_minute_irrigation/',\n \"display_minute_irrigation\",a1,methods=[\"GET\"])\n\n def _one_minute(self,stream_index): \n sel_prop = {}\n sel_prop[\"flow\"] = {}\n irrigation_data = []\n temp_data = self.redis_new_handle.lrange(self.minute_store, 0,1440) \n for i in temp_data:\n irrigation_data.append(json.loads(i))\n return self.render_template(\"streaming_data/streaming_data\",title=\"Irrigation Streaming Data\",\n header_name = \"Irrigation Streaming Data\", data = irrigation_data, start_index = stream_index) \n\n\n\n\nif __name__ == \"__main__\":\n pass\n\n\n'''\nsel_prop = {}\nsel_prop[\"flow\"] = {}\nsel_prop[\"flow\"][\"header\"] = \"Flow Rate History GPM\"\nsel_prop[\"flow\"][\"queue\"] = \"/ajax/sel_strip_chart/QUEUES:SPRINKLER:FLOW:\"\nsel_prop[\"flow\"][\"limit_low\"] = 0\nsel_prop[\"flow\"][\"limit_high\"] = 40\nsel_prop[\"flow\"][\"sel_function\"] = '/ajax/flow_sensor_names'\nsel_prop[\"flow\"][\"sel_label\"] = \"Flow Sensors\"\nsel_prop[\"flow\"][\"x_axis\"] = \"Time\"\nsel_prop[\"flow\"][\"y_axis\"] = \"GPM\"\n@app.route('/sel_chart/',methods=[\"GET\"])\n@authDB.requires_auth\ndef sel_chart(filename):\n if sel_prop.has_key(filename ):\n header_name = sel_prop[filename][\"header\"]\n queue = sel_prop[filename][\"queue\"]\n limit_low = sel_prop[filename][\"limit_low\"]\n limit_high = sel_prop[filename][\"limit_high\"]\n sel_function = sel_prop[filename][\"sel_function\"]\n sel_label = sel_prop[filename][\"sel_label\"]\n x_axis = 
sel_prop[filename][\"x_axis\"]\n y_axis = sel_prop[filename][\"y_axis\"]\n\n \n return render_template(\"sel_chart\", queue= queue, header_name = header_name,limit_low = limit_low,limit_high=limit_high, \n sel_function = sel_function,sel_label = sel_label, x_axis=x_axis,y_axis=y_axis )\n\n\n \n'''","repo_name":"NanoDataCenter/nano_data_center","sub_path":"code/future_web/flask_web_modular_py3/load_streaming_data_py3.py","file_name":"load_streaming_data_py3.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"17216249663","text":"from flask_restful import fields\n\n\"\"\"\n Fields lets to filter and format response data\n attribute: allows to map output name to internal name\n Nested fields can be mapped like: fields.String(attribute='people_list.0.person_dictionary.name')\n default: In case data object dont have the attribute, we can specify default value to it like: fields.String(default='Guest')\n\"\"\"\n\n\"\"\"\n CUSTOM FORMATTING\n -----------------\n Subclass the 'fields.Raw' class and implement the format function.\n\"\"\"\n\n\nclass Rating(fields.Raw):\n def format(self, value):\n return '{}/10'.format(value)\n\n\nfilm_fields = {\n 'id': fields.String(attribute='film_id'),\n 'name': fields.String,\n 'genres': fields.List(fields.String),\n 'rating': Rating(attribute='rating'),\n 'language': fields.String,\n 'uri': fields.Url(endpoint='film', absolute=True)\n}\n\n\n# List of Nested objects\n# film_list_fields = {\n# fields.List(fields.Nested(film_fields))\n# }\n","repo_name":"mochatek/flask_restful","sub_path":"app/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"4229432781","text":"\nimport os\nfrom bs4 import BeautifulSoup\nfrom parse_content import ContentParser\n\n\nclass Chapter(object):\n\n def __init__(self, html_file):\n # print(\" + Init Chapter from html {}\".format(html_file))\n self.chapter_name = os.path.basename(html_file)\n html = open(html_file, 'rb').read()\n bs = BeautifulSoup(html, features='html.parser')\n\n self.content = ContentParser(os.path.dirname(html_file))\n self.content.parse(bs.body)\n # print(\" + Found {} contents.\".format(len(self.content.contents)))\n\n def write_txt(self, fout):\n # first, set label with html file name\n fout.write(\"---------------[Label: {}]--------------\\n\".format(self.chapter_name))\n self.content.write_txt(fout)\n\n def write_pdf(self, pdf_writer):\n pdf_writer.write_new_page()\n pdf_writer.write_label(self.chapter_name)\n self.content.write_pdf(pdf_writer)\n\nclass Book(object):\n\n def __init__(self, opf_file):\n print(\" + Init Book from opf {}\".format(opf_file))\n\n xml = open(opf_file, 'rb').read()\n bs = BeautifulSoup(xml, features='xml')\n\n chapter_files = []\n opf_dir = os.path.dirname(opf_file)\n\n # find all chapter html files\n for item in bs.package.manifest.findAll('item'): \n href = item['href']\n if href and href.endswith('html') or href.endswith(\"htm\"):\n chapter_files.append(os.path.join(opf_dir, href))\n print(\" + Found {} chapter html files.\".format(len(chapter_files)))\n\n # Init Chapter one by one\n self.chapters = []\n for chapter_file in chapter_files:\n self.chapters.append(Chapter(chapter_file))\n\n # outline guides\n self.guides = []\n for guide in bs.package.guide.findAll('reference'):\n href = guide['href']\n title = guide['title']\n 
self.guides.append([title, href])\n print(\" + Found {} guide references.\".format(len(self.guides)))\n\n def write_txt(self, fout):\n for chapter in self.chapters:\n chapter.write_txt(fout)\n\n def write_pdf(self, pdf_writer):\n pdf_writer.set_guides(self.guides)\n for chapter in self.chapters:\n chapter.write_pdf(pdf_writer)\n\nif __name__ == \"__main__\":\n c = Chapter(\"chapter2.html\")\n fout = open(\"tmp.txt\", 'w')\n c.write_txt(fout)\n fout.close()","repo_name":"huochaitiantang/epub2pdf","sub_path":"book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17311012898","text":"import os, sys\nfrom flask import Flask, request\nfrom utils import wit_response\nfrom pymessenger import Bot\nfrom pymessenger import Button\n\napp = Flask(__name__)\n\nPAGE_ACCESS_TOKEN = \"YOUR_PAGE_ACCESS_TOKEN\"\n\nbot = Bot(PAGE_ACCESS_TOKEN)\n\n@app.route(\"/\", methods=['GET'])\ndef verify():\n\tif request.args.get(\"hub.mode\") == \"subscribe\" and request.args.get(\"hub.challenge\"):\n\t\tif not request.args.get(\"hub.verify_token\") == \"hello\":\n\t\t\treturn \"Verification token mismatch\", 403\n\t\treturn request.args[\"hub.challenge\"], 200\n\treturn \"Hello world\", 200\n\n\n@app.route('/', methods=['POST'])\ndef webhook():\n\tdata = request.get_json()\n\tlog(data)\n\n\tif data['object'] == 'page':\n\t\tfor entry in data['entry']:\n\t\t\tfor messaging_event in entry['messaging']:\n\n\n\t\t\t\tsender_id = messaging_event['sender']['id']\n\t\t\t\trecipient_id = messaging_event['recipient']['id']\n\n\t\t\t\tif messaging_event.get('message'):\n\t\t\t\t\tif 'text' in messaging_event['message']:\n\t\t\t\t\t\tmessaging_text = messaging_event['message']['text']\n\t\t\t\t\telse:\n\t\t\t\t\t\tmessaging_text = 'no text'\n\n\t\t\t\t\tresponse = None\n\n\t\t\t\t\tentity, value = wit_response(messaging_text)\n\t\t\t\t\tif entity == 'wit_greetings':\n\t\t\t\t\t\tresponse = \"Hi, how are u?\"\n\t\t\t\t\telif entity == 'wit_mood':\n\t\t\t\t\t\tresponse = \"I am too, do u want some info about company?\"\n\t\t\t\t\telif entity == 'wit_confirmation':\n\t\t\t\t\t\tresponse = bot.send_button_message(sender_id, text, buttons)\n\t\t\t\t\telif entity == 'wit_negation':\n\t\t\t\t\t\tresponse = \"Okey, see u next time. 
Good bye!\"\n\n\t\t\t\t\tif response == None:\n\t\t\t\t\t\tresponse = \"Sry, but i dont understand\"\n\n\t\t\t\t\tbot.send_text_message(sender_id, response)\n\n\treturn \"ok\", 200\n\ndef log(message):\n\tprint(message)\n\tsys.stdout.flush()\n\n\nbuttons = []\nbutton = Button(title='Site', type='web_url', url='your site')\nbuttons.append(button)\ntext = \"Click here\"\n\n\nif __name__ == \"__main__\":\n\tapp.run(debug = True, port = 80)","repo_name":"ZaharBoxing/FBBot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35709846332","text":"# https://atcoder.jp/contests/abc132/submissions/6251344\nimport numpy as np\nN, K = map(int, input().split())\nmod = 1000000007\n\nm = N ** 0.5\ncnt = [N // i - N // (i + 1) for i in range(1, int(m) + 1)]\ncnt = np.array((cnt + [1 for _ in range(N - sum(cnt))])[::-1])\nnxt = cnt[:]\nfor _ in range(K - 1):\n cnt = nxt * np.cumsum(cnt)[::-1] % mod\n\nprint(sum(cnt) % mod)\n","repo_name":"iwataka/atcoder","sub_path":"beginner/132/small-products.py","file_name":"small-products.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41855466058","text":"# Import packages\nimport os\nimport nipype.pipeline.engine as pe\nimport nipype.interfaces.utility as util\nimport scipy.ndimage as nd\nimport numpy as np\nimport nibabel as nb\n\ndef check_if_file_is_empty(in_file):\n \"\"\"\n Raise exception if regressor fie is empty.\n\n Parameters\n ----------\n\n in_file : nii file (string)\n regressor file\n\n Returns\n -------\n\n in_file : string\n return same file\n\n \"\"\"\n import nibabel as nb\n import numpy as np\n nii = nb.load(in_file)\n data = nii.get_data()\n if data.size == 0 or np.all(data==0) or np.all(data==np.nan):\n raise ValueError('File {0} is empty. 
Use a lower threshold or turn '\n 'off regressors.'.format(in_file))\n return in_file\n\n\ndef pick_wm_prob_0(probability_maps):\n\n \"\"\"\n Returns the csf probability map from the list of segmented probability maps\n\n Parameters\n ----------\n\n probability_maps : list (string)\n List of Probability Maps\n\n Returns\n -------\n\n file : string\n Path to segment_prob_0.nii.gz is returned\n\n \"\"\"\n\n if isinstance(probability_maps, list):\n if len(probability_maps) == 1:\n probability_maps = probability_maps[0]\n for filename in probability_maps:\n if filename.endswith(\"prob_0.nii.gz\"):\n return filename\n return None\n\n\ndef pick_wm_prob_1(probability_maps):\n\n \"\"\"\n Returns the gray matter probability map from the list of segmented probability maps\n\n Parameters\n ----------\n\n probability_maps : list (string)\n List of Probability Maps\n\n Returns\n -------\n\n file : string\n Path to segment_prob_1.nii.gz is returned\n\n \"\"\"\n\n if isinstance(probability_maps, list):\n if len(probability_maps) == 1:\n probability_maps = probability_maps[0]\n for filename in probability_maps:\n if filename.endswith(\"prob_1.nii.gz\"):\n return filename\n return None\n\n\ndef pick_wm_prob_2(probability_maps):\n\n \"\"\"\n Returns the white matter probability map from the list of segmented probability maps\n\n Parameters\n ----------\n\n probability_maps : list (string)\n List of Probability Maps\n\n Returns\n -------\n\n file : string\n Path to segment_prob_2.nii.gz is returned\n\n \"\"\"\n\n if isinstance(probability_maps, list):\n if len(probability_maps) == 1:\n probability_maps = probability_maps[0]\n for filename in probability_maps:\n if filename.endswith(\"prob_2.nii.gz\"):\n return filename\n return None\n\n\ndef pick_wm_class_0(tissue_class_files):\n\n \"\"\"\n Returns the csf tissu class file from the list of segmented tissue class files\n\n Parameters\n ----------\n\n tissue_class_files : list (string)\n List of tissue class files\n\n Returns\n -------\n\n file : string\n Path to segment_seg_0.nii.gz is returned\n\n \"\"\"\n\n if isinstance(tissue_class_files, list):\n if len(tissue_class_files) == 1:\n tissue_class_files = tissue_class_files[0]\n for filename in tissue_class_files:\n if filename.endswith(\"seg_0.nii.gz\"):\n return filename\n return None\n\n\ndef pick_wm_class_1(tissue_class_files):\n\n \"\"\"\n Returns the gray matter tissue class file from the list of segmented tissue class files\n\n Parameters\n ----------\n\n tissue_class_files : list (string)\n List of tissue class files\n\n Returns\n -------\n\n file : string\n Path to segment_seg_1.nii.gz is returned\n\n \"\"\"\n\n if isinstance(tissue_class_files, list):\n if len(tissue_class_files) == 1:\n tissue_class_files = tissue_class_files[0]\n for filename in tissue_class_files:\n if filename.endswith(\"seg_1.nii.gz\"):\n return filename\n return None\n\n\ndef pick_wm_class_2(tissue_class_files):\n\n \"\"\"\n Returns the white matter tissue class file from the list of segmented tissue class files\n\n Parameters\n ----------\n\n tissue_class_files : list (string)\n List of tissue class files\n\n Returns\n -------\n\n file : string\n Path to segment_seg_2.nii.gz is returned\n\n \"\"\"\n\n if isinstance(tissue_class_files, list):\n if len(tissue_class_files) == 1:\n tissue_class_files = tissue_class_files[0]\n for filename in tissue_class_files:\n if filename.endswith(\"seg_2.nii.gz\"):\n return filename\n return None\n\n# This functionality is adapted from poldracklab/niworkflows:\n# 
https://github.com/poldracklab/niworkflows/blob/master/niworkflows/interfaces/utils.py\n# https://fmriprep.readthedocs.io/\n# https://poldracklab.stanford.edu/\n# We are temporarily maintaining our own copy for more granular control.\n\ndef mask_erosion(roi_mask = None, skullstrip_mask = None, mask_erosion_mm = None, mask_erosion_prop = None):\n\n \"\"\"\n Returns eroded segment mask and skull-stripped brain mask\n\n Parameters\n ----------\n\n roi_mask : string\n Path to binarized segment mask\n\n skullstrip_mask : string\n Path to skull-stripped brain mask\n\n mask_erosion_prop : float\n Proportion of erosion skull-stripped brain mask\n\n Returns\n -------\n\n output_roi_mask : string\n Path to eroded segment mask\n\n eroded_skullstrip_mask : string\n Path to eroded skull-stripped brain mask\n\n \"\"\"\n skullstrip_mask_img = nb.load(skullstrip_mask)\n skullstrip_mask_data = skullstrip_mask_img.get_fdata()\n\n roi_mask_img = nb.load(roi_mask)\n roi_mask_data = roi_mask_img.get_fdata()\n erode_in = (mask_erosion_mm is not None and mask_erosion_mm > 0 or\n mask_erosion_prop is not None and mask_erosion_prop < 1 and mask_erosion_prop > 0)\n if erode_in:\n if mask_erosion_mm:\n iter_n = max(int(mask_erosion_mm / max(skullstrip_mask_img.header.get_zooms())), 1)\n skullstrip_mask_data = nd.binary_erosion(skullstrip_mask_data, iterations=iter_n)\n else :\n orig_vol = np.sum(skullstrip_mask_data > 0)\n while np.sum(skullstrip_mask_data > 0) / (orig_vol*1.0) > mask_erosion_prop :\n skullstrip_mask_data = nd.binary_erosion(skullstrip_mask_data, iterations=1)\n\n roi_mask_data[~skullstrip_mask_data] = 0\n\n hdr = roi_mask_img.get_header()\n output_roi_mask_img = nb.Nifti1Image(roi_mask_data, header=hdr,\n affine=roi_mask_img.get_affine())\n output_roi_mask = os.path.join(os.getcwd(), 'segment_tissue_eroded_mask.nii.gz')\n output_roi_mask_img.to_filename(output_roi_mask)\n\n hdr = skullstrip_mask_img.get_header()\n output_skullstrip_mask_img = nb.Nifti1Image(skullstrip_mask_data, header=hdr,\n affine=skullstrip_mask_img.get_affine())\n eroded_skullstrip_mask = os.path.join(os.getcwd(), 'eroded_skullstrip_mask.nii.gz')\n\n output_skullstrip_mask_img.to_filename(eroded_skullstrip_mask)\n\n return output_roi_mask, eroded_skullstrip_mask\n\n\n# This functionality is adapted from poldracklab/niworkflows:\n# https://github.com/poldracklab/niworkflows/blob/master/niworkflows/interfaces/utils.py\n# https://fmriprep.readthedocs.io/\n# https://poldracklab.stanford.edu/\n# We are temporarily maintaining our own copy for more granular control.\n\ndef erosion(roi_mask = None, erosion_mm = None, erosion_prop = None):\n\n\n \"\"\"\n Returns eroded tissue segment mask\n\n Parameters\n ----------\n\n roi_mask : string\n Path to binarized segment (ROI) mask\n\n erosion_prop : float\n Proportion of erosion segment mask\n\n Returns\n -------\n\n eroded_roi_mask : string\n Path to eroded segment mask\n\n \"\"\"\n\n roi_mask_img = nb.load(roi_mask)\n roi_mask_data = roi_mask_img.get_fdata()\n orig_vol = np.sum(roi_mask_data > 0)\n\n erode_out = (erosion_mm is not None and erosion_mm > 0 or\n erosion_prop is not None and erosion_prop < 1 and erosion_prop > 0)\n if erode_out:\n if erosion_mm:\n iter_n = max(int(erosion_mm / max(roi_mask_img.header.get_zooms())), 1)\n iter_n = int(erosion_mm / max(roi_mask_img.header.get_zooms()))\n roi_mask_data = nd.binary_erosion(roi_mask_data, iterations=iter_n)\n else:\n while np.sum(roi_mask_data > 0) / (orig_vol*1.0) > erosion_prop :\n roi_mask_data = nd.binary_erosion(roi_mask_data, 
iterations=1)\n\n hdr = roi_mask_img.get_header()\n output_img = nb.Nifti1Image(roi_mask_data, header=hdr,\n affine=roi_mask_img.get_affine())\n eroded_roi_mask = os.path.join(os.getcwd(), 'segment_tissue_mask.nii.gz')\n\n output_img.to_filename(eroded_roi_mask)\n\n return eroded_roi_mask\n\n\ndef hardcoded_antsJointLabelFusion(anatomical_brain, anatomical_brain_mask, template_brain_list, template_segmentation_list):\n \n \"\"\"\n run antsJointLabelFusion.sh\n\n Parameters\n ----------\n\n anatomical_brain : string (nifti file)\n Target image to be labeled.\n\n anatomical_brain_mask: string (nifti file)\n Target mask image\n\n template_brain_list: list\n Atlas to be warped to target image.\n\n template_segmentation_list: list \n Labels corresponding to atlas.\n\n Returns\n -------\n\n multiatlas_Intensity : string (nifti file)\n\n multiatlas_Labels : string (nifti file)\n\n\n \"\"\"\n\n import os\n import subprocess\n\n cmd = [\"${ANTSPATH}${ANTSPATH:+/}antsJointLabelFusion.sh\"] \n cmd.append(\" -d 3 -o ants_multiatlas_ -t {0} -x {1} -y b -c 0\".format(anatomical_brain, anatomical_brain_mask))\n \n if (not len(template_brain_list) == len(template_segmentation_list)):\n err_msg = '\\n\\n[!] C-PAC says: '\\\n 'Please check ANTs Prior-based Segmentation setting. ' \\\n 'For performing ANTs Prior-based segmentation method '\\\n 'the number of specified segmentations should be identical to the number of atlas image sets.'\\\n '\\n\\n'\n raise Exception(err_msg) \n else:\n for index in range(len(template_brain_list)):\n cmd.append(\" -g {0} -l {1}\".format(template_brain_list[index], template_segmentation_list[index]))\n\n # write out the actual command-line entry for testing/validation later\n command_file = os.path.join(os.getcwd(), 'command.txt')\n with open(command_file, 'wt') as f:\n f.write(' '.join(cmd))\n \n str = \"\"\n bash_cmd = str.join(cmd) \n\n try:\n retcode = subprocess.check_output(bash_cmd, shell=True) \n except Exception as e:\n raise Exception('[!] antsJointLabel segmentation method did not complete successfully.'\n '\\n\\nError details:\\n{0}\\n{1}\\n'.format(e, e.output))\n\n multiatlas_Intensity = None\n multiatlas_Labels = None\n\n files = [f for f in os.listdir('.') if os.path.isfile(f)]\n\n for f in files:\n if \"Intensity\" in f:\n multiatlas_Intensity = os.getcwd() + \"/\" + f\n if \"Labels\" in f:\n multiatlas_Labels = os.getcwd() + \"/\" + f\n\n if not multiatlas_Labels:\n raise Exception(\"\\n\\n[!] 
No multiatlas labels file found.\"\n \"antsJointLabelFusion may not have completed successfully.\\n\\n\")\n\n return multiatlas_Intensity, multiatlas_Labels\n\n\ndef pick_tissue_from_labels_file(multiatlas_Labels, csf_label, \n left_gm_label, left_wm_label,\n right_gm_label, right_wm_label):\n\n\n \"\"\"\n pick tissue mask from multiatlas labels file\n based off of FreeSurferColorLUT https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/AnatomicalROI/FreeSurferColorLUT\n or user provided label value\n\n Parameters\n ----------\n\n multiatlas_Labels : string (nifti file)\n\n csf_label: integer \n label value corresponding to CSF in multiatlas file\n\n left_gm_label: integer \n label value corresponding to Left Gray Matter in multiatlas file\n\n left_wm_label: integer \n label value corresponding to Left White Matter in multiatlas file\n\n right_gm_label: integer \n label value corresponding to Right Gray Matter in multiatlas file\n\n right_wm_label: integer \n label value corresponding to Right White Matter in multiatlas file\n\n Returns\n -------\n\n csf_mask : string (nifti file)\n\n gm_mask : string (nifti file)\n\n wm_mask : string (nifti file)\n\n \"\"\"\n import os\n import nibabel as nb\n import numpy as np\n\n img = nb.load(multiatlas_Labels)\n data = img.get_data()\n\n # pick tissue mask from multiatlas labels file\n # based off of FreeSurferColorLUT or user provided label values\n # hard-coded csf/gm/wm label values are based off of FreeSurferColorLUT\n\n csf = data.copy()\n if csf_label == None:\n csf[csf != 24] = 0\n csf[csf == 24] = 1\n else:\n csf[csf != csf_label] = 0\n csf[csf == csf_label] = 1\n\n gm = data.copy()\n if left_gm_label == None and right_gm_label == None:\n gm[np.logical_and(gm != 42, gm != 3)] = 0 \n gm[np.logical_or(gm == 42, gm == 3)] = 1\n else:\n gm[np.logical_and(gm != right_gm_label, gm != left_gm_label)] = 0 \n gm[np.logical_or(gm == right_gm_label, gm == left_gm_label)] = 1\n\n wm = data.copy()\n if left_wm_label == None and right_wm_label == None:\n wm[np.logical_and(wm != 41, wm != 2)] = 0\n wm[np.logical_or(wm == 41, wm == 2)] = 1\n else:\n wm[np.logical_and(wm != right_wm_label, wm != left_wm_label)] = 0\n wm[np.logical_or(wm == right_wm_label, wm == left_wm_label)] = 1\n\n \n save_img_csf = nb.Nifti1Image(csf, header=img.get_header(), affine=img.get_affine())\n save_img_gm = nb.Nifti1Image(gm, header=img.get_header(), affine=img.get_affine())\n save_img_wm = nb.Nifti1Image(wm, header=img.get_header(), affine=img.get_affine())\n\n save_img_csf.to_filename('csf_mask.nii.gz')\n save_img_gm.to_filename('gm_mask.nii.gz')\n save_img_wm.to_filename('wm_mask.nii.gz')\n\n csf_mask = os.path.join(os.getcwd(),'csf_mask.nii.gz')\n gm_mask = os.path.join(os.getcwd(),'gm_mask.nii.gz')\n wm_mask = os.path.join(os.getcwd(),'wm_mask.nii.gz')\n\n return csf_mask, gm_mask, wm_mask","repo_name":"persianyagami90xs/C-PAC","sub_path":"CPAC/seg_preproc/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":14246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"19049334107","text":"import json_parsing as jp\nimport proto_parsing as pp\nfrom tqdm import tqdm\nimport os\n\ndef compare_reading():\n js_list, pr_list = [], []\n for fname in tqdm(os.listdir('json_outputs'), postfix='json reading'):\n js_list.append(jp.read_json('json_outputs/' + fname))\n for fname in tqdm(os.listdir('proto_outputs'), postfix='proto reading'):\n pr_list.append(pp.read_proto('proto_outputs/' + fname))\n return js_list, 
pr_list\n\ndef compare_writing(js_list, pr_list):\n for js in tqdm(js_list, postfix='json writing'):\n jp.write_json(js, 'js_out.json')\n for pr in tqdm(pr_list, postfix='proto writing'):\n pp.write_proto(pr, 'pr_out.pb')\n\ndef main():\n js_list, pr_list = compare_reading()\n compare_writing(js_list, pr_list)\n\nif __name__ == '__main__':\n main()","repo_name":"sfcurre/bme_final_project","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22998140954","text":"import os\nimport unittest\n\nimport requests\n\nfeishu_webhook = os.getenv(\"TRUMPET_FEISHU_WEBHOOK\")\ndingtalk_webhook = os.getenv(\"TRUMPET_DINGTALK_WEBHOOK\")\n\n# https://ding-doc.dingtalk.com/doc#/serverapi2/qf2nxq\n# https://www.feishu.cn/hc/zh-cn/articles/360024984973-%E5%9C%A8%E7%BE%A4%E8%81%8A%E4%B8%AD%E4%BD%BF%E7%94%A8%E6%9C%BA%E5%99%A8%E4%BA%BA\n\n\nclass TestStringMethods(unittest.TestCase):\n def test_dingtalk_to_feishu(self):\n url = f\"http://127.0.0.1:8080/transformers/dingtalk-to-feishu?trumpet_to={feishu_webhook}\"\n cases = [\n {\"msgtype\": \"text\", \"text\": {\"content\": \"快乐小神仙\"}},\n {\n \"markdown\": {\n \"title\": \"哈哈哈 触发了 job test, 构建号:620\",\n \"text\": \"###### 项目 [Unob](https://coding.net/p/unob)\\n[哈哈哈](https://coding.net/u/ljaSkNTntD) 触发了 job \\n> [test](https://coding.net/p/unob/ci/job/260491) 构建号:[620](https://coding.net/p/proj/ci/job/260491/build/620/pipeline)\",\n },\n \"msgtype\": \"markdown\",\n },\n {\n \"markdown\": {\n \"text\": \"###### 项目 [Unob](https://coding.net/p/unob)\\n[哈哈哈](https://coding.net/u/ljaSkNTntD) 触发了 job \\n> [test](https://coding.net/p/unob/ci/job/260491) 构建号:[620](https://coding.net/p/proj/ci/job/260491/build/620/pipeline)\",\n },\n \"msgtype\": \"markdown\",\n },\n ]\n for case in cases:\n resp = requests.post(url, json=case)\n print(resp.status_code)\n print(resp.headers)\n print(resp.text)\n assert resp.ok\n\n def test_feishu_to_dingtalk(self):\n url = f\"http://127.0.0.1:8080/transformers/feishu-to-dingtalk?trumpet_to={dingtalk_webhook}\"\n cases = [{\"msg_type\": \"text\", \"content\": {\"text\": \"快乐小神仙\"}}]\n for case in cases:\n resp = requests.post(url, json=case)\n print(resp.status_code)\n print(resp.headers)\n print(resp.text)\n assert resp.ok\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"elonzh/trumpet","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"19412368344","text":"from google.appengine.ext import db\nfrom google.appengine.api import users\n\nimport cgi\nfrom datetime import datetime\nimport models\nimport views\nimport common\n\nclass CalendarHandler(views.BaseHandler):\n def get(self):\n \"\"\" Renders the Calendar page of the website \"\"\"\n calendars = models.Calendar.all()\n calendars.filter(\"owner = \", common.get_current_user())\n calendars.order(\"name\")\n self.render('calendar.html', 'calendar', {'calendars': calendars})\n\n\nclass CalendarCreateHandler(views.BaseHandler):\n def get(self):\n \"\"\" Renders the Create Calendar page of the website \"\"\"\n projects = models.Project.all()\n projects.filter(\"owner = \", common.get_current_user())\n projects.order(\"name\")\n \n rand_col = common.get_rand_colour()\n self.render('calendar-create.html', 'calendar', {'rand_col':rand_col,\n 'projects':projects})\n \n def post(self):\n 
\"\"\" Process the new Calendar data, to store in the database \"\"\"\n name = cgi.escape(self.request.get('name'))\n share_type = self.request.get('share_type')\n colour = cgi.escape(self.request.get('colour'))\n \n try:\n project = db.key(self.request.get('project'))\n except AttributeError:\n # Set project to None, as project was set to 'none' by user\n project = None\n \n # Save the data to the database\n calendar = models.Calendar()\n calendar.owner = common.get_current_user()\n calendar.name = name\n calendar.share_type = share_type\n calendar.colour = colour\n calendar.visible = True\n calendar.project = project\n calendar.put()\n\n # Redirect the user to the Calendar page after saving the calendar\n self.redirect('/calendar')\n\n\nclass CalendarModifyHandler(views.BaseHandler):\n def get(self, cal_key):\n \"\"\" Renders the Modify Calendar form to the user \"\"\"\n calendar = db.get(cal_key)\n \n projects = models.Project.all()\n projects.filter(\"owner = \", common.get_current_user())\n projects.order(\"name\")\n \n self.render('calendar-modify.html', 'calendar', {'projects':projects,\n 'calendar':calendar})\n \n def post(self, cal_key):\n \"\"\" Process the new Calendar data, to store in the database \"\"\"\n name = cgi.escape(self.request.get('name'))\n share_type = self.request.get('share_type')\n colour = cgi.escape(self.request.get('colour'))\n \n try:\n project = db.key(self.request.get('project'))\n except AttributeError:\n # Set project to None, as project was set to 'none' by user\n project = None\n \n # Save the data to the database\n calendar = db.get(cal_key)\n calendar.owner = common.get_current_user()\n calendar.name = name\n calendar.share_type = share_type\n calendar.colour = colour\n calendar.visible = calendar.visible\n calendar.project = project\n calendar.put()\n \n # Redirect the user to the Calendar page after saving the calendar\n self.redirect('/calendar')\n\n\nclass EventCreateHandler(views.BaseHandler):\n def get(self):\n \"\"\" Renders the Create Event form to the user \"\"\"\n calendars = models.Calendar.all()\n calendars.filter(\"owner = \", common.get_current_user())\n calendars.order(\"name\")\n \n self.render('event-create.html', 'calendar', {'calendars':calendars})\n \n def post(self):\n \"\"\" Process the Event data to store in the database \"\"\"\n name = cgi.escape(self.request.get('name'))\n start_date = self.request.get('start_date')\n start_time = self.request.get('start_time')\n end_date = self.request.get('end_date')\n end_time = self.request.get('end_time')\n calendar = db.Key(self.request.get('calendar'))\n location = cgi.escape(self.request.get('location'))\n notes = cgi.escape(self.request.get('notes'))\n sharing = self.request.get('sharing')\n \n if self.request.get('all_day') == \"1\":\n all_day = True\n start = start_date\n end = end_date\n time_format = \"%d/%m/%Y\"\n else:\n all_day = False\n start = \"%s %s\" % (start_date, start_time)\n end = \"%s %s\" % (end_date, end_time)\n time_format = \"%d/%m/%Y %I:%M%p\"\n\n # Change strings into datetime for database\n start_dt = datetime.strptime(start, time_format)\n end_dt = datetime.strptime(start, time_format)\n\n # Store the event in the database\n event = models.Event()\n event.name = name #db.StringProperty()\n event.start_time = start_dt #db.DateTimeProperty()\n event.end_time = end_dt #db.DateTimeProperty()\n event.all_day = all_day #db.BooleanProperty()\n event.calendar = calendar #db.ReferenceProperty()\n event.location = location #db.StringProperty(multiline=True)\n 
event.notes = notes #db.TextProperty()\n event.sharing = sharing #db.StringProperty()\n event.put()\n \n #self.response.out.write(start_dt)\n #self.response.out.write(\"  ,  \")\n #self.response.out.write(end_dt)\n #self.response.out.write(\"<br /><br />
\")\n #self.response.out.write((name, start_date, start_time, end_date, end_time))\n #self.response.out.write(str(all_day))\n \n # Redirect user to Calendar page after saving the event\n self.redirect('/calendar')\n \nclass EventModifyHandler(views.BaseHandler):\n def get(self, event_key):\n \"\"\" Render the Modify Event form to the user \"\"\"\n event = db.get(event_key)\n \n calendars = models.Calendar.all()\n calendars.filter(\"owner = \", common.get_current_user())\n calendars.order(\"name\")\n \n self.render('event-modify.html', 'calendar', {'event':event,\n 'calendars':calendars})\n\n def post(self, event_key):\n pass\n \n\nclass EventViewHandler(views.BaseHandler):\n def get(self, event_key):\n \"\"\" Render the Event Details page of the website \"\"\"\n event = db.get(event_key)\n calendar = db.get(event.calendar.key())\n \n self.render('event-view.html', 'calendar', {'event':event,\n 'calendar':calendar})\n ","repo_name":"joeladdison/schedule-pro","sub_path":"handlers/calendar.py","file_name":"calendar.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74812111946","text":"from __future__ import print_function\nfrom __future__ import unicode_literals\nfrom localization import N_\nfrom outputable import Outputable\nfrom changes import FileDiff\nimport comment\nimport filtering\nimport interval\nimport missing\nimport subprocess\n\n__metric_eloc__ = {\"java\": 500, \"c\": 500, \"cpp\": 500, \"h\": 300, \"hpp\": 300, \"php\": 500, \"py\": 500, \"glsl\": 1000,\n \"rb\": 500, \"js\": 500, \"sql\": 1000, \"xml\": 1000}\n\nclass MetricsLogic:\n\tdef __init__(self):\n\t\tself.eloc = {}\n\t\tls_tree_r = subprocess.Popen(\"git ls-tree --name-only -r \" + interval.get_ref(), shell=True, bufsize=1,\n\t\t stdout=subprocess.PIPE).stdout\n\n\t\tfor i in ls_tree_r.readlines():\n\t\t\ti = i.strip().decode(\"unicode_escape\", \"ignore\")\n\t\t\ti = i.encode(\"latin-1\", \"replace\")\n\t\t\ti = i.decode(\"utf-8\", \"replace\").strip(\"\\\"\").strip(\"'\").strip()\n\n\t\t\tif FileDiff.is_valid_extension(i) and not filtering.set_filtered(FileDiff.get_filename(i)):\n\t\t\t\tif not missing.add(i):\n\t\t\t\t\tfile_r = open(i.strip(), \"rb\")\n\t\t\t\t\textension = FileDiff.get_extension(i)\n\t\t\t\t\tlines = MetricsLogic.get_eloc(file_r, extension)\n\n\t\t\t\t\tif __metric_eloc__.get(extension, None) != None and __metric_eloc__[extension] < lines:\n\t\t\t\t\t\tself.eloc[i.strip()] = lines\n\n\t@staticmethod\n\tdef get_eloc(file_r, extension):\n\t\tis_inside_comment = False\n\t\teloc_counter = 0\n\n\t\tfor j in file_r.readlines():\n\t\t\tj = j.decode(\"utf-8\", \"replace\")\n\t\t\t(_, is_inside_comment) = comment.handle_comment_block(is_inside_comment, extension, j)\n\n\t\t\tif not is_inside_comment and not comment.is_comment(extension, j):\n\t\t\t\teloc_counter += 1\n\n\t\treturn eloc_counter\n\nELOC_INFO_TEXT = N_(\"The following files are suspiciously big (in order of severity)\")\nMETRICS_MISSING_INFO_TEXT = N_(\"No metrics violations were found in the repository\")\n\nclass Metrics(Outputable):\n\tdef output_text(self):\n\t\tmetrics_logic = MetricsLogic()\n\n\t\tif not metrics_logic.eloc:\n\t\t\tprint(\"\\n\" + _(METRICS_MISSING_INFO_TEXT) + \".\")\n\t\telse:\n\t\t\tprint(\"\\n\" + _(ELOC_INFO_TEXT) + \":\")\n\t\t\tfor i in sorted(set([(j, i) for (i, j) in metrics_logic.eloc.items()]), reverse = True):\n\t\t\t\tprint(i[1] + \" (\" + str(i[0]) + \" eloc)\")\n\n\tdef 
output_html(self):\n\t\tmetrics_logic = MetricsLogic()\n\t\tmetrics_xml = \"<div><div class=\\\"box\\\">\"\n\n\t\tif not metrics_logic.eloc:\n\t\t\tmetrics_xml += \"<p>\" + _(METRICS_MISSING_INFO_TEXT) + \".</p>\"\n\t\telse:\n\t\t\tmetrics_xml += \"<p>\" + _(ELOC_INFO_TEXT) + \".</p>\"\n\t\t\tfor i in sorted(set([(j, i) for (i, j) in metrics_logic.eloc.items()]), reverse = True):\n\t\t\t\tmetrics_xml += \"<p>\" + i[1] + \" (\" + str(i[0]) + \" eloc)</p>\"\n\n\t\tmetrics_xml += \"</div></div>\"\n\t\tprint(metrics_xml)\n\n\tdef output_xml(self):\n\t\tmetrics_logic = MetricsLogic()\n\n\t\tif not metrics_logic.eloc:\n\t\t\tprint(\"\\t<metrics>\\n\\t\\t<message>\" + _(METRICS_MISSING_INFO_TEXT) + \"</message>\\n\\t</metrics>\")\n\t\telse:\n\t\t\teloc_xml = \"\"\n\t\t\tfor i in sorted(set([(j, i) for (i, j) in metrics_logic.eloc.items()]), reverse = True):\n\t\t\t\teloc_xml += \"\\t\\t\\t\\t\\t<estimated-lines-of-code>\\n\"\n\t\t\t\teloc_xml += \"\\t\\t\\t\\t\\t\\t<name>\" + i[1] + \"</name>\\n\"\n\t\t\t\teloc_xml += \"\\t\\t\\t\\t\\t\\t<value>\" + str(i[0]) + \"</value>\\n\"\n\t\t\t\teloc_xml += \"\\t\\t\\t\\t\\t</estimated-lines-of-code>\\n\"\n\n\t\t\tprint(\"\\t\\t<metrics>\\n\\t\\t\\t<violations>\\n\\t\\t\\t\\t<message>\" + _(ELOC_INFO_TEXT) +\n\t\t\t \"</message>\\n\\t\\t\\t\\t<files>\\n\" + eloc_xml + \"\\t\\t\\t\\t</files>\\n\\t\\t\\t</violations>\\n\\t\\t</metrics>\")\n","repo_name":"geekan/cowry","sub_path":"code/python/gitstat/gitinspector-0.3.1/gitinspector/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"81"} +{"seq_id":"73269479304","text":"#diretorio da minha máquina\r\n\r\n\r\nimport openpyxl\r\n\r\nbook1 = openpyxl.Workbook()\r\n\r\nbook2 = openpyxl.load_workbook('dados.xlsx')\r\n#print(book2.sheetnames)\r\n\r\nfuncionarios = book1['Sheet']\r\n\r\ntabelinha = book2['Planilha1']\r\nprint\r\n\r\nlinhas = []\r\n #pegando os dados de linhas, atribuindo as linhas da tabela à lista \"linhas\"\r\nfor rows in tabelinha.iter_rows(min_row=2):\r\n linha = [] #executado a cada linha\r\n for cell in rows: #executado a cada celula\r\n linha.append(cell.value)\r\n linhas.append(linha)\r\n\r\nprint(linhas)\r\ndef tratar_funcionario():\r\n funcionarios.append(["EMPRESA", "R.E", "NOME", "STATUS", "DOC P"])\r\n documentos = ["Ficha de registro", "Contrato de trabalho", "Docs Diversos", "Sindicato Carta", "VT Opção", "ASO", "LGPD"]\r\n dados = []\r\n lista = []\r\n funcionario = []\r\n\r\n print('Digite os dados: ')\r\n\r\n empresa = str(input(\"Empresa: \\n-- > \"))\r\n dados.append(empresa)\r\n\r\n re = str(input(\"R.E: \\n-- > \"))\r\n dados.append(re)\r\n\r\n nome = str(input(\"Nome: \\n-- > \"))\r\n dados.append(nome)\r\n\r\n status = str(input(\"Status: \\n-- > \"))\r\n dados.append(status)\r\n\r\n for a in range(len(documentos)):\r\n lista = dados.copy()\r\n lista.append(documentos[a])\r\n funcionarios.append(lista)\r\n\r\nbook1.save('Tabela_de_teste_1.xlsx')","repo_name":"Lucaszeera/Projects","sub_path":"Back-End/Diretorio_python/criar_planilha.py","file_name":"criar_planilha.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72705388104","text":"import cv2\nfrom helper import (\n get_sample_paths,\n get_frames,\n get_calibration_markers_list,\n find_circle_marker,\n)\nfrom external.circle_detector import find_pupil_circle_marker\n\n\ndef get_raw_data(sample):\n for world_index, frame in enumerate(get_frames(sample)):\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # ellipses_list = find_circle_marker(gray)\n ellipses_list = find_pupil_circle_marker(gray, scale=1)\n marker_list = []\n for ellipses_ in ellipses_list:\n ellipses = ellipses_[\"ellipses\"]\n img_pos = ellipses[0][0]\n marker_list.append(\n {\n \"world_index\": world_index,\n \"ellipses_center\": img_pos,\n \"marker_type\": ellipses_[\"marker_type\"],\n }\n )\n\n yield marker_list, frame\n\n\ndef draw(img):\n cv2.imshow(\"frame\", img)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n return\n\n\nif __name__ == \"__main__\":\n # Point the camera to that marker 
to see if it detects the marker\n for marker_list, frame in get_raw_data(0):\n no_draw = False\n for marker in marker_list:\n center = (\n round(int(float(marker[\"ellipses_center\"][0]))),\n round(int(float(marker[\"ellipses_center\"][1]))),\n )\n frame = cv2.circle(frame, center, 10, (0, 0, 255), 2)\n draw(frame)\n no_draw = True\n if no_draw == False:\n draw(frame)\n","repo_name":"Lifestohack/eye_tracking","sub_path":"webcam_marker_detect.py","file_name":"webcam_marker_detect.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"799938235","text":"'''\nCreated on 05/08/2014\n\n@author: Ismail Faizi\n'''\nfrom protorpc import messages\nimport endpoints\nfrom models.i18n import Language\n\nclass OAuthInfo():\n CLIENT_IDS = [\n '1019526038686.apps.googleusercontent.com',\n '1019526038686-5jg4a5s4rrqfadms9cmmko6lnrqb80v6.apps.googleusercontent.com',\n endpoints.API_EXPLORER_CLIENT_ID\n ]\n SCOPES = [\n 'https://www.googleapis.com/auth/userinfo.email'\n ]\n AUDIENCES = CLIENT_IDS\n\nclass AdminUtils():\n\n @classmethod\n def calc_ingredient_name(cls, ingredient):\n label_name = ingredient.get_label_name()\n inci_name = None\n if len(ingredient.inci_names):\n inci_name = ingredient.inci_names[0]\n\n # build the name as \"(INCI name|Label name) E-number\"\n if len(ingredient.e_numbers) and not label_name and not inci_name:\n return [Language.get_by_code('en'),\n ingredient.e_numbers[0]]\n if len(ingredient.e_numbers) and inci_name:\n return [Language.get_nc_lang(),\n '(%s) %s' % (inci_name, ingredient.e_numbers[0])]\n if len(ingredient.e_numbers) and label_name:\n return [label_name.language.get(),\n '(%s) %s' % (label_name.name, ingredient.e_numbers[0])]\n\n # build the name as \"INCI name\"\n if inci_name:\n return [Language.get_nc_lang(),\n inci_name]\n\n # build the name as \"Label name\"\n if label_name:\n return [label_name.language.get(), \n label_name.name]\n\n return [Language.get_unknown(),\n 'UNKNOWN']\n\n### Cross-API Messages\nclass UserResponse(messages.Message):\n user_key = messages.StringField(1, required=True)\n\nclass LanguageResponse(messages.Message):\n language_key = messages.StringField(1, required=True)\n name = messages.StringField(2)\n code = messages.StringField(3)\n\n","repo_name":"shubashri/src","sub_path":"api/internal/admin/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14460390391","text":"def inicio():\n print(\"\"\"\n -------------------------------------------\n |Bienvenido al programa de saltos de linea|\n -------------------------------------------\n \n \"\"\")\n\n\ndef final():\n print(\"\"\"\n \n -----------------------------------------\n | Muchas gracias por usar el programa |\n | Contactame por @josuebarrenoz en |\n |Github, Telegram y otras redes sociales|\n -----------------------------------------\n \"\"\")\n \n \ndef run():\n inicio();\n archivo = str(input(\"Escribe el nombre del archivo con su extension: \"));\n ruta = f\"/home/wade/proy/experimentos/py3{archivo}\";\n with open(ruta, 'r', encoding=\"utf8\") as f:\n lines = f.readlines()\n\n #lines = [line.replace(r'\\n', ' ').replace(r' ', ' ') for line in lines];\n #print(lines);\n #lines = lines.rstrip()\n \n lines = [line.replace(\"\\n\", \" \") for line in lines];\n s = \"\".join(lines);\n \n with open(r'Resultados.txt', 'w', encoding=\"utf8\") as 
f:\n f.writelines(s)\n \n final();\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"josuebarrenoz/Experimentos","sub_path":"py3/salto.py","file_name":"salto.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"31341595676","text":"import pytest\nimport numpy as np\nimport numpy.testing as npt\nimport ukat.utils.tools as tools\nfrom ukat.utils import arraystats\n\n\nclass TestConvertToPiRange:\n # Gold Standard = [mean, std, minimum, maximum]\n # Input: {np.arange(12).reshape((2, 2, 3)) - 6 * np.ones((2, 2, 3))}\n gold_standard = [-7.401486830834377e-17, 1.9718077939258474,\n -3.141592653589793, 3.1415926535897922]\n\n # Create arrays for testing\n array = np.arange(12).reshape((2, 2, 3)) - 6 * np.ones((2, 2, 3))\n array_positive = np.arange(12).reshape((2, 2, 3))\n array_negative = -np.arange(12).reshape((2, 2, 3))\n array_pi_range = array / 2\n\n def test_pi_range_result(self):\n pi_range_calculated = tools.convert_to_pi_range(self.array)\n stats = arraystats.ArrayStats(pi_range_calculated).calculate()\n npt.assert_allclose([stats[\"mean\"][\"3D\"], stats[\"std\"][\"3D\"],\n stats[\"min\"][\"3D\"], stats[\"max\"][\"3D\"]],\n self.gold_standard, rtol=1e-6, atol=1e-4)\n\n def test_if_ranges(self):\n # Test for values > 3.2\n result_over = tools.convert_to_pi_range(self.array_positive)\n assert np.amax(result_over) < np.amax(self.array_positive)\n # Test for values < -3.2\n result_under = tools.convert_to_pi_range(self.array_negative)\n assert np.amin(result_under) > np.amin(self.array_negative)\n # Test for values > 3.2 and < -3.2\n result_under_over = tools.convert_to_pi_range(self.array)\n assert np.amax(result_under_over) < np.amax(self.array)\n assert np.amin(result_under_over) > np.amin(self.array)\n\n def test_else_range(self):\n result = tools.convert_to_pi_range(self.array_pi_range)\n assert (result == self.array_pi_range).all()\n\n def test_input_array_type_assertion(self):\n # Empty array\n with pytest.raises(ValueError):\n tools.convert_to_pi_range(np.array([]))\n # No input argument\n with pytest.raises(TypeError):\n tools.convert_to_pi_range(None)\n # String\n with pytest.raises(TypeError):\n tools.convert_to_pi_range(\"abcdef\")\n\n\nclass TestResizeArray:\n # Create arrays for testing\n array_2d = np.arange(100).reshape((10, 10))\n array_3d = np.arange(500).reshape((10, 10, 5))\n array_4d = np.arange(5000).reshape((10, 10, 5, 10))\n\n def test_no_resize(self):\n resized_array_2d = tools.resize_array(self.array_2d)\n resized_array_3d = tools.resize_array(self.array_3d, factor=1)\n resized_array_4d = tools.resize_array(self.array_4d, target_size=10)\n assert (resized_array_2d == self.array_2d).all()\n assert (resized_array_3d == self.array_3d).all()\n assert (resized_array_4d == self.array_4d).all()\n\n def test_output_shapes(self):\n resized_array_1 = tools.resize_array(self.array_3d, factor=2)\n resized_array_2 = tools.resize_array(self.array_4d, target_size=20)\n assert np.shape(resized_array_1)[0] != 10\n assert np.shape(resized_array_1)[0] == 20\n assert np.shape(resized_array_1)[0] == np.shape(resized_array_1)[1]\n assert np.shape(resized_array_2)[0] != 10\n assert np.shape(resized_array_2)[0] == 20\n assert np.shape(resized_array_1)[0] == np.shape(resized_array_2)[0]\n assert np.shape(resized_array_1)[1] == np.shape(resized_array_2)[1]\n assert np.shape(resized_array_1)[2] != 10\n assert np.shape(resized_array_1)[2] == np.shape(self.array_3d)[2]\n assert 
np.shape(resized_array_2)[3] != 20\n assert np.shape(resized_array_2)[3] == np.shape(self.array_4d)[3]\n\n def test_input_array_type_assertion(self):\n # Empty array\n with pytest.raises(IndexError):\n tools.resize_array(np.array([]))\n # No input argument\n with pytest.raises(IndexError):\n tools.resize_array(None)\n # String\n with pytest.raises(IndexError):\n tools.resize_array(\"abcdef\")\n\n\nclass TestMaskSlices:\n shape = (2, 2, 3)\n # Create mask where all pixels from all slices are True\n full_mask = np.full(shape, True)\n # Create mask where only the pixels from slice index 1 are True\n one_slice_mask = np.full(shape, False)\n one_slice_mask[:, :, 1] = True\n # Create mask where only the pixels from slices index 1 and 2 are True\n two_slice_mask = np.full(shape, False)\n two_slice_mask[:, :, 1] = True\n two_slice_mask[:, :, 2] = True\n\n def test_single_slice(self):\n # #1: shape + single slice (int)\n final_mask = tools.mask_slices(self.shape, 1)\n assert (final_mask == self.one_slice_mask).all()\n\n def test_multiple_slices(self):\n # #2: shape + multiple slices (list)\n final_mask = tools.mask_slices(self.shape, [1, 2])\n assert (final_mask == self.two_slice_mask).all()\n\n def test_masked_single_slice(self):\n # #3: shape + single slice + mask\n final_mask = tools.mask_slices(self.shape, 1, self.full_mask)\n assert (final_mask == self.one_slice_mask).all()\n\n def test_masked_masked_slices(self):\n # #4: shape + multiple slices + mask\n final_mask = tools.mask_slices(self.shape, [1, 2], self.full_mask)\n assert (final_mask == self.two_slice_mask).all()\n\n def test_shape_assertion(self):\n # Wrong type `shape`\n non_tuple_shape = [2, 2, 3]\n with pytest.raises(ValueError):\n tools.mask_slices(non_tuple_shape, 1)\n # `mask` with different dimensions than `shape`\n mismatched_mask = np.full((2, 3, 3), True)\n with pytest.raises(AssertionError):\n tools.mask_slices(self.shape, 1, mismatched_mask)\n\n def test_slices_type_assertion(self):\n # Wrong type `slices`\n with pytest.raises(ValueError):\n tools.mask_slices(self.shape, np.array([1, 2]))\n # Not all elements of slices are `ints`\n with pytest.raises(ValueError):\n tools.mask_slices(self.shape, [1, 2.2])\n with pytest.raises(ValueError):\n tools.mask_slices(self.shape, 2.2)\n\n def test_slice_ranges_assertion(self):\n # Slices out of range tests\n with pytest.raises(ValueError):\n tools.mask_slices(self.shape, [0, 3])\n with pytest.raises(ValueError):\n tools.mask_slices(self.shape, 3)\n\n def test_mask_type_assertion(self):\n # Wrong dtype mask\n wrong_type_mask = \"not a numpy array\"\n with pytest.raises(AssertionError):\n tools.mask_slices(self.shape, 1, wrong_type_mask)\n\n def test_mask_dtype_assertion(self):\n # Wrong dtype mask\n wrong_dtype_mask = np.full(self.shape, 2)\n with pytest.raises(AssertionError):\n tools.mask_slices(self.shape, 1, wrong_dtype_mask)\n","repo_name":"UKRIN-MAPS/ukat","sub_path":"ukat/utils/tests/test_tools.py","file_name":"test_tools.py","file_ext":"py","file_size_in_byte":6805,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"38058549892","text":"import os, sys, argparse, json\nimport pandas as pd\nfrom datetime import datetime\n\nnow = datetime.now()\nparser = argparse.ArgumentParser(description='Extract top5 policies per state',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--path', nargs='?', default='schemes/', help='path where the schemes to be evaluated are 
stored')\nparser.add_argument('--output', nargs='?', default='output/', help='relative path where you want to store the output')\nparser.add_argument('--typ', nargs='?', default='mla', help='relative path where you want to store the output')\nargs = parser.parse_args()\n\nyears = ['2009', '2014', '2019']\nsheets = ['agriculture.xlsx', 'health.xlsx', 'humanDevelopment.xlsx']\nscheme_names = ['agriculture', 'health_hygiene', 'humanDevelopment']\ndef MPs(year, typ):\n\treadFrom = '../../'+ typ +'/output/' + typ + year + '.xlsx'\n\tprint(readFrom)\n\treader = pd.read_excel(readFrom, sheet_name = sheets)\n\tfor idx, sheet in enumerate(sheets):\n\t\tfor i in range(1, 6):\n\t\t\tfor j in range(reader[sheet][i].shape[0]):\n\t\t\t\tif not pd.isna(reader[sheet][i][j]): reader[sheet][i][j] = {eval(reader[sheet][i][j])[0]['Name '].strip(): eval(reader[sheet][i][j])[3]['Alliance'].strip()}\n\t\treader[scheme_names[idx]] = reader.pop(sheet)\n\treturn reader\n\n# df = MPs(years[0])\n# print(df)\n# print(df[scheme_names[0]][0].to_list())","repo_name":"Akshay4Gupta/Center-State_Policy_Emphasis","sub_path":"Analysis/statement/byStatements/readExcel.py","file_name":"readExcel.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6446761945","text":"import pandas as pd\r\nimport yfinance as yf\r\nimport numpy as np\r\n\r\nimport os\r\nfrom tqdm import tqdm\r\nimport time\r\nfrom itertools import combinations\r\n\r\nfrom arch import arch_model\r\n\r\n\r\n# Build optimizer\r\nclass Optimizer():\r\n def __init__(self, assets, interval='1wk', simulations=4000, maximize_return=True):\r\n # Define constants\r\n assets.sort()\r\n self.assets = assets\r\n self.SIMULATIONS = simulations\r\n self.COMBOS = list(combinations(assets, 2))\r\n self.maximize_return = maximize_return\r\n if interval=='1wk':\r\n self.N = 52\r\n else:\r\n self.N = 252\r\n \r\n # Gather asset data\r\n self.df = yf.download(tickers=assets, interval=interval)['Adj Close']\r\n self.df.ffill(inplace=True)\r\n self.df = np.log1p(self.df.pct_change())\r\n\r\n # Gather market data for beta calculations\r\n self.market = yf.download(tickers=assets+['^GSPC'], interval=interval)['Adj Close']\r\n self.market.ffill(inplace=True)\r\n self.market = np.log1p(self.market.pct_change())\r\n\r\n\r\n def garchModeling(self):\r\n garch = {}\r\n for asset in tqdm(self.assets):\r\n tseries = self.df[asset].dropna() * 100\r\n GARCH = arch_model(tseries, p=1,q=1, dist='t')\r\n output = GARCH.fit(disp='off')\r\n garch[asset] = output.conditional_volatility[-1] / 100\r\n\r\n return garch\r\n\r\n def optimize(self, garch):\r\n correlation_matrix = self.df.corr()\r\n portfolios = pd.DataFrame(\r\n columns=['Mu','Sigma','Sharpe','Beta'] + self.assets\r\n )\r\n\r\n for simulation in tqdm(range(self.SIMULATIONS)):\r\n WEIGHTS = np.random.dirichlet(np.ones(len(self.assets)),size=1).reshape(-1,)\r\n WEIGHTS = {a: b for a, b in zip(self.assets, WEIGHTS)}\r\n\r\n # Store weights\r\n portfolios.loc[simulation, self.assets] = np.round(pd.Series(WEIGHTS)*100,2)\r\n\r\n # Calculate portfolio expected return\r\n portfolio_return = np.sum(self.df.mean() * pd.Series(WEIGHTS))\r\n portfolio_return = (np.power((1+portfolio_return), self.N)-1) * 100\r\n portfolios.loc[simulation, 'Mu'] = np.round(portfolio_return, 2)\r\n\r\n # Begin calculating portfolio standard deviation\r\n portfolio_std = [WEIGHTS[_] * np.square(garch[_]) for _ in self.assets]\r\n\r\n # Further calculate portfolio 
standard deviation\r\n for combo in self.COMBOS:\r\n portfolio_std.append(\r\n 2 * WEIGHTS[combo[0]] * WEIGHTS[combo[1]] * correlation_matrix.loc[combo[0], combo[1]] * garch[combo[0]] * garch[combo[1]]\r\n )\r\n\r\n portfolio_std = np.sqrt(np.sum(portfolio_std))\r\n portfolio_std = (portfolio_std*np.sqrt(self.N))*100\r\n portfolios.loc[simulation, 'Sigma'] = np.round(portfolio_std, 2)\r\n\r\n # Calculate sharpe ratio\r\n sharpe = portfolio_return / portfolio_std\r\n portfolios.loc[simulation, 'Sharpe'] = np.round(sharpe, 2)\r\n\r\n # Calculate portfolio beta\r\n beta = np.sum(pd.Series(WEIGHTS) * np.array([self.market.cov().loc[asset,'^GSPC']/self.market['^GSPC'].var() for asset in self.assets]))\r\n portfolios.loc[simulation, 'Beta'] = np.round(beta,2)\r\n\r\n # Find the top 2.5% of portfolios\r\n if self.maximize_return:\r\n top_portfolios = portfolios.loc[(portfolios.Sharpe >= np.quantile(portfolios.Sharpe, 0.975)) & (portfolios.Mu >= np.quantile(portfolios.Mu, 0.975))]\r\n else:\r\n top_portfolios = portfolios.loc[(portfolios.Sharpe >= np.quantile(portfolios.Sharpe, 0.975)) & (portfolios.Sigma <= np.quantile(portfolios.Sigma, 0.025))]\r\n \r\n return top_portfolios\r\n\r\n\r\n def run(self, iterations=10):\r\n dataframe = pd.DataFrame()\r\n\r\n print('\\n')\r\n print('Modeling data.')\r\n time.sleep(1)\r\n garch = self.garchModeling()\r\n\r\n print('\\n') \r\n print('Gathering portfolios.')\r\n time.sleep(1)\r\n for x in range(iterations):\r\n top_portfolios = self.optimize(garch)\r\n dataframe = pd.concat([dataframe, top_portfolios])\r\n\r\n print('\\n') \r\n print(f'Saving data to {os.getcwd()}')\r\n time.sleep(1)\r\n dataframe.to_csv('portfolios.csv',index=False)\r\n \r\n print('Done.')","repo_name":"salguero23/Portfolio-Optimization-ALPHA","sub_path":"portfolioOptimizer.py","file_name":"portfolioOptimizer.py","file_ext":"py","file_size_in_byte":4421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23981446244","text":"def solution(n):\n ele = list()\n if n == 1:\n return 1\n for i in range(1, int(n ** 0.5)+1):\n if n % i == 0:\n ele.append(i)\n ele.append(n // i)\n answer = sum(set(ele))\n return answer","repo_name":"Ui-Seok/Solve-Problems","sub_path":"프로그래머스/lv1/12928. 
약수의 합/약수의 합.py","file_name":"약수의 합.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30733217295","text":"import json\nimport logging\nimport os\nfrom typing import Optional\n\nimport botocore\nfrom boto3.dynamodb.types import TypeDeserializer\nfrom boto3.dynamodb.types import TypeSerializer\nfrom pydantic import BaseModel\n\nfrom app.utility import init_logger\nfrom app.utility.aws import Aws\n\n\nclass LTIPlatformConfig(BaseModel):\n PK: str\n auth_token_url: str\n auth_login_url: str\n client_id: str\n lti_deployment_id: str\n iss: str\n key_set_url: str\n learn_application_key: Optional[str] = None\n learn_application_secret: Optional[str] = None\n\n\nclass LTIPlatformStorage:\n def __init__(self):\n self.TABLE_NAME = os.getenv(\"TABLE_NAME\")\n aws = Aws()\n self.ddbclient = aws.dynamodb\n\n def __new__(cls):\n if not hasattr(cls, \"instance\"):\n cls.instance = super(LTIPlatformStorage, cls).__new__(cls)\n return cls.instance\n\n\nclass LTIPlatform:\n def __init__(\n self,\n lti_storage: LTIPlatformStorage,\n config: Optional[LTIPlatformConfig] = None,\n ):\n init_logger(\"LTIPlatform\")\n self._storage: LTIPlatformStorage = lti_storage\n self.config = LTIPlatformConfig(\n auth_token_url=config.auth_token_url if config is not None else \"\",\n auth_login_url=config.auth_login_url if config is not None else \"\",\n client_id=config.client_id if config is not None else \"\",\n lti_deployment_id=config.lti_deployment_id if config is not None else \"\",\n iss=config.iss if config is not None else \"\",\n key_set_url=config.key_set_url if config is not None else \"\",\n PK=\"\",\n )\n\n def __log(self):\n return logging.getLogger(\"LTIPlatform\")\n\n def load(self, client_id: str, iss: str, lti_deployment_id: Optional[str]):\n response = self._storage.ddbclient.get_item(\n TableName=self._storage.TABLE_NAME,\n Key={\"PK\": {\"S\": f\"CONFIG#{client_id}#{iss}#{lti_deployment_id}\"}},\n )\n if \"Item\" in response is not None:\n deserializer = TypeDeserializer()\n record = deserializer.deserialize({\"M\": response[\"Item\"]})\n self.config = LTIPlatformConfig(**record)\n else:\n msg = f\"No PlatformConfig record found for CONFIG#{client_id}#{iss}#{lti_deployment_id}.\"\n self.__log().warning(msg)\n raise Exception(msg)\n return self\n\n def save(self):\n if (\n self.config is None\n or self.config.auth_token_url is None\n or self.config.auth_login_url is None\n or self.config.client_id is None\n or self.config.iss is None\n or self.config.key_set_url is None\n ):\n raise Exception(\"InvalidParameterException\")\n\n self.config.PK = f\"CONFIG#{self.config.client_id}#{self.config.iss}#{self.config.lti_deployment_id}\"\n try:\n serializer = TypeSerializer()\n item = serializer.serialize(self.config.dict())[\"M\"]\n self._storage.ddbclient.put_item(TableName=self._storage.TABLE_NAME, Item=item)\n except botocore.exceptions.ClientError as error:\n msg = f\"Error persisting PlatformConfig for {self.config.PK}. 
{str(error)}\"\n            self.__log().error(msg)\n            raise Exception(msg)\n        return self\n","repo_name":"blackboard/BBDN-lti-1p3-tool-example","sub_path":"app/models/platform_config.py","file_name":"platform_config.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"26353708908","text":"from .base_producer import BaseProducer\n\nTOPIC = 'ECOMMERCE_REPORT_BATCH_REQUEST'\n\n\nclass BatchReportProducer(BaseProducer):\n    def __init__(self, topic=TOPIC):\n        super(BatchReportProducer, self).__init__(topic, 0)\n\n    async def send(self, message):\n        await self._send(message)","repo_name":"SergioVenicio/kafka-alura","sub_path":"app/producers/batch_report_producer.py","file_name":"batch_report_producer.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"5969929942","text":"from functools import reduce\n\nlists = [100,1001,100]\n\nnumbs = lists\nlists = [100,1001,444]\nprint(numbs)\nprint(lists)\n\nlists2 = [1]\nprint(lists + lists2)\nprint(lists)\nprint(lists2)\n\nlistsSort = [\n    {'age':28,'count':50,'name':'aaa'},\n    {'age':70,'count':20,'name':'bbb'},\n    {'age':90,'count':59,'name':'ccc'},\n    {'age':17,'count':70,'name':'ddd'},\n    {'age':13,'count':93,'name':'eee'},\n]\n\n# def callbacks(item):\n#     return item['age'] #return a value that can be compared\n#\n# listsSort.sort(key=callbacks)\n\nlistsSort.sort(key=lambda item:item['age'])\n\nprint(listsSort)\n\n\n\nprint(list(filter(lambda item: item['age'] > 25, listsSort)))\n\n\ndef mapCallback(item):\n    item['age'] += 1\n    return item\n\n\nprint(list(map(mapCallback , listsSort)))\n\n\ndef reduceCount(x,y):\n    return x+y['age']\nprint(reduce(reduceCount,listsSort,0)) #callback, iterable, initial value; with no initial value reduce starts from items 0 and 1, otherwise from the initial value and item 0\n\nprint(abs(-4))","repo_name":"liaozhongxun/lzo-py-project","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"4248790021","text":"# Try - ❌\n## So hard...........\n\nfrom collections import deque\na = input()\n\nstack = []\nq = deque(a)\nop = []\n\nwhile q:\n    print(op)\n    print(stack)\n    x = q.popleft()\n    if x.isdigit():\n        stack.append(x)\n        if op:\n            if op[-1] == '*' or op[-1] == '/':\n                stack.append(op.pop())\n    else:\n        if x == ')':\n            while True:\n                l = op.pop()\n                print(l)\n                if l == '(':\n                    break\n                else:\n                    stack.append(l)\n        else:\n            op.append(x)\n\nwhile op:\n    stack.append(op.pop())\n    \n    \nprint(stack)\n\n# Only hacked to fit the first example...\n","repo_name":"HyunlangBan/inflean_python_algorithm","sub_path":"section_5/후위표기식/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"4123169615","text":"# Given a set of candidate numbers (candidates) (without duplicates) and a target number (target), find all unique\n# combinations in candidates where the candidate numbers sums to target.\n#\n# The same repeated number may be chosen from candidates unlimited number of times.\n#\n# Note:\n#\n# All numbers (including target) will be positive integers.\n# The solution set must not contain duplicate combinations.\n#\n# Example 1:\n# Input: candidates = [2,3,6,7], target = 7,\n# A solution set is:\n# [\n# [7],\n# [2,2,3]\n# ]\n#\n# Example 2:\n# Input: candidates = [2,3,5], target = 8,\n# A solution set is:\n# [\n# [2,2,2,2],\n# [2,3,3],\n# 
[3,5]\n# ]\n\n# *** returns number of ways to make combination\n# def combination_sum(candidatesr, target):\n#     nrows = len(candidatesr) + 1\n#     ncols = target + 1\n#     dp = [[1] + [0] * target for _ in range(nrows)]\n#\n#     for i in range(1, nrows):\n#         for j in range(1, ncols):\n#             if j >= candidatesr[i - 1]:\n#                 dp[i][j] = dp[i - 1][j] + dp[i][j - candidatesr[i - 1]]\n#             else:\n#                 dp[i][j] = dp[i - 1][j]\n#     for e in dp:\n#         print(e)\n#     return dp[-1][-1]\n\n\n# *** returns number of ways to make combination - simplified dp\n# def combination_sum(candidates, target):\n#     nrows = len(candidates) + 1\n#     ncols = target + 1\n#     dp = [1] + [0] * target\n#     print(dp)\n#\n#     for i in range(1, nrows):\n#         for j in range(1, ncols):\n#             if j >= candidates[i - 1]:\n#                 dp[j] += dp[j - candidates[i - 1]]\n#     print(dp)\n#     return dp[-1]\n\n\n# *** returns the array of combinations\ndef combination_sum(candidates, target):\n    nrows = len(candidates) + 1\n    ncols = target + 1\n    dp = [[[]]] + [[] for _ in range(target)]\n    # print(dp)\n\n    for i in range(1, nrows):\n        for j in range(1, ncols):\n            if j >= candidates[i - 1]:\n                for e in dp[j - candidates[i - 1]]:\n                    dp[j].append(e + [candidates[i - 1]])\n    # print(dp)\n    return dp[-1]\n\n\ndef combinationSum(candidates, target):\n    candidates, result = sorted(candidates), []\n\n    def dfs(target, stack):\n        # found a valid combination\n        if target == 0:\n            return result.append(stack)\n\n        # for every candidate\n        for candidate in candidates:\n            if candidate > target:\n                break # anything else would be negative (see sort)\n            elif stack and candidate < stack[-1]:\n                continue # don't allow dupes, take coins in non-decreasing order\n            else:\n                dfs(target - candidate, stack + [candidate])\n\n    dfs(target, [])\n    return result\n\n\n# print(combination_sum([2, 3, 6, 7], 7))\nprint(combination_sum([2, 3, 5], 8))\nprint(combinationSum([2, 3, 5], 8))\n","repo_name":"vpc20/python-dynamic-programming","sub_path":"CombinationSum.py","file_name":"CombinationSum.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"38586685571","text":"# -*- coding:utf-8 -*-\r\n\"\"\" \r\n    @author: xiaolinzi\r\n    @file: data.py \r\n    @time: 2018/05/07\r\n\"\"\"\r\nimport os\r\nimport random\r\n\r\ndef get_random(max_num,min_num):\r\n    max_num = float(max_num)\r\n    min_num = float(min_num)\r\n    rap = max_num - min_num\r\n    return random.random() * rap + min_num\r\n\r\ndef get_data(file_name):\r\n    dir_name = os.path.dirname(__file__)\r\n    file_path = os.path.join(dir_name, file_name)\r\n    with open(file_path, 'r') as f:\r\n        data = f.read()\r\n    tmp_data = data.split()\r\n    arr1 = tmp_data[:20] # max values\r\n    arr2 = tmp_data[20:] # min values\r\n    res = []\r\n    for max_num, min_num in zip(arr1, arr2):\r\n        result = get_random(max_num,min_num)\r\n        res.append(round(result,5))\r\n\r\n    return res\r\n\r\ndef save_data(file_name,data):\r\n    dir_name = os.path.dirname(__file__)\r\n    file_path = os.path.join(dir_name,file_name)\r\n    str_res = ''\r\n    for day in data:\r\n        str_res += ','.join([str(tmp) for tmp in day]) + \",Happy\"+'\\n'\r\n    with open(file_path,'w') as f:\r\n        f.write(str_res)\r\n\r\n\r\nif __name__ == '__main__':\r\n    file_name = 'happy2.txt'\r\n    datas = []\r\n    for _ in range(400):\r\n        res = get_data(file_name)\r\n        datas.append(res)\r\n\r\n    file_name = 'data_happy2.txt'\r\n    
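# Rough sketch of the output written below (inferred from save_data above): each\r\n    # of the 400 rows is the 20 sampled values joined by commas followed by the\r\n    # literal label \"Happy\", e.g. \"0.12345,0.6789,...,Happy\".\r\n    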
save_data(file_name,datas)\r\n\r\n","repo_name":"Capitalkiller/bishe","sub_path":"FinalFiles/模拟数据/makeData/makeData.py","file_name":"makeData.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17468641516","text":"from gym import spaces\nimport numpy as np\nimport random\nfrom itertools import groupby\nfrom itertools import product\n\n\n\nclass TicTacToe():\n\n def __init__(self):\n \"\"\"initialise the board\"\"\"\n \n # initialise state as an array\n self.state = [np.nan for _ in range(9)] # initialises the board position, can initialise to an array or matrix\n print('States initialized---',self.state)\n \n # all possible numbers\n self.all_possible_numbers = [i for i in range(1, len(self.state) + 1)] # , can initialise to an array or matrix\n print('Initialize all states---',self.all_possible_numbers)\n \n self.reset()\n\n\n\n def is_winning(self, curr_state):\n \"\"\"Takes state as an input and returns whether any row, column or diagonal has winning sum\n Example: Input state- [1, 2, 3, 4, nan, nan, nan, nan, nan]\n Output = False\"\"\"\n \n #print('Current State---', curr_state)\n if self.checkrow(curr_state) or self.checkcol(curr_state) or self.checkdiag(curr_state):\n print ('current state is a winning position')\n return True\n else:\n print ('current state is not a winning position')\n return False\n \n\n def is_terminal(self, curr_state):\n # Terminal state could be winning state or when the board is filled up\n\n if self.is_winning(curr_state) == True:\n return True, 'Win'\n\n elif len(self.allowed_positions(curr_state)) ==0:\n return True, 'Tie'\n\n else:\n return False, 'Resume'\n\n\n def allowed_positions(self, curr_state):\n \"\"\"Takes state as an input and returns all indexes that are blank\"\"\"\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]\n\n\n def allowed_values(self, curr_state):\n \"\"\"Takes the current state as input and returns all possible (unused) values that can be placed on the board\"\"\"\n\n used_values = [val for val in curr_state if not np.isnan(val)]\n agent_values = [val for val in self.all_possible_numbers if val not in used_values and val % 2 !=0]\n env_values = [val for val in self.all_possible_numbers if val not in used_values and val % 2 ==0]\n\n return (agent_values, env_values)\n\n\n def action_space(self, state):\n \"\"\"Takes the current state as input and returns all possible actions, i.e, all combinations of allowed positions and allowed values\"\"\"\n \n agent_actions = product(self.allowed_positions(state), self.allowed_values(state)[0])\n env_actions = product(self.allowed_positions(state), self.allowed_values(state)[1])\n #print('Action space: env ---',list(env_actions))\n return (agent_actions, env_actions)\n\n\n\n def state_transition(self, state, curr_action):\n \"\"\"Takes current state and action and returns the board position just after agent's move.\n Example: Input state- [1, 2, 3, 4, nan, nan, nan, nan, nan], action- [7, 9] or [position, value]\n Output = [1, 2, 3, 4, nan, nan, nan, 9, nan]\n \"\"\"\n #print('Current state-->',state, ' curr_action--->',curr_action)\n if np.isnan(state[curr_action[0]]):\n #print('In if')\n state[curr_action[0]]=curr_action[1] ### TODO: need to add +1\n print('After state transition-->',state)\n return state\n\n\n def step(self, curr_state, curr_action):\n \"\"\"Takes current state and action and returns the next state, reward and whether the state is terminal. 
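Reward convention\n        used in the code below: +10 when the agent wins, -10 when the environment\n        wins, 0 for a tie, and -1 for a move that does not end the game.\n        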
Hint: First, check the board position after\n        agent's move, whether the game is won/loss/tied. Then incorporate environment's move and again check the board status.\n        Example: Input state- [1, 2, 3, 4, nan, nan, nan, nan, nan], action- [7, 9] or [position, value]\n        Output = ([1, 2, 3, 4, nan, nan, nan, 9, nan], -1, False)\"\"\"\n        reward=0\n        stateTerm=False\n        curr_state1=curr_state.copy()\n        #print ('In step---curr_state->', curr_state, ' curr_state1->',curr_state1)\n        result, result_val = self.is_terminal(curr_state1)\n        print ('In step: initially - After state transition check state terminal-->',result, ' result_val-->',result_val)\n        # initialise defaults so an already-terminal input state is returned as-is\n        # instead of raising a NameError on new_state further down\n        new_state = curr_state1\n        stateTerm = result\n        if result == False and result_val == 'Resume':\n            new_state = self.state_transition(curr_state1, curr_action)\n            result, result_val = self.is_terminal(new_state)\n            print ('In step: after agent action - After state transition check state terminal-->',result, ' result_val-->',result_val)\n            if result == True and result_val == 'Win':\n                reward = 10\n                stateTerm = True\n            elif result == True and result_val == 'Tie':\n                reward = 0\n                stateTerm = True\n            else:\n                reward = -1\n                stateTerm = False\n        \n        if stateTerm == False:\n            agent_actions, env_actions = self.action_space(new_state)\n            agent_actions_list = list(agent_actions) \n            env_actions_list = list(env_actions)\n            #print('Environment - action space---->',env_actions_list)\n            new_env_action = env_actions_list[random.randrange(len(env_actions_list))]\n            print ('Current action of Environment->', new_env_action)\n            new_state = self.state_transition(new_state, new_env_action)\n            result, result_val = self.is_terminal(new_state)\n            print ('In step - after env action - After state transition check state terminal-->',result, ' result_val-->',result_val)\n            if result == True and result_val == 'Win':\n                reward = -10 ## Here env wins, agent loses\n                stateTerm = True\n            elif result == True and result_val == 'Tie':\n                reward = 0\n                stateTerm = True\n            else:\n                stateTerm = False\n        #print('new state->', new_state,' reward->', reward, ' state terminal->',stateTerm, ' current_state->',curr_state)\n        return (new_state, reward, stateTerm)\n\n    def reset(self):\n        return self.state\n    \n    \"\"\" New methods added \"\"\"\n    def checkrow(self, state):\n        if (state[0]+state[1]+state[2] == 15) or (state[3]+state[4]+state[5] == 15) or (state[6]+state[7]+state[8] == 15):\n            return True\n        else:\n            return False\n    \n    def checkcol(self, state):\n        if (state[0]+state[3]+state[6] == 15) or (state[1]+state[4]+state[7] == 15) or (state[2]+state[5]+state[8] == 15):\n            return True\n        else:\n            return False\n    \n    def checkdiag(self, state):\n        if (state[0]+state[4]+state[8] == 15) or (state[2]+state[4]+state[6] == 15):\n            return True\n        else:\n            return False","repo_name":"datarpita/PythonWorkspace","sub_path":"ReinforcementLearning/Assignment/TCGame_Env1.py","file_name":"TCGame_Env1.py","file_ext":"py","file_size_in_byte":6844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"13959084591","text":"#!/usr/bin/python\n\nimport requests\n#import anydbm\n#import jinja2\nimport json\nimport os\nimport time\nimport re\nimport sys\nimport logging\n\ncliqr_ccm = 'bds-ccc.auslab.cisco.com'\ncliqr_user = 'loyevans_e'\ncliqr_key = ''\n\n\ndeployment = sys.argv[1]\n\n#######################\n\ndef get_deployed_apps():\n\n    r2 = 'curl -s -k -H \"Accept:application/json\" -H \"Content-Type:application/json\" -u ' + cliqr_user + ':' + cliqr_key + ' -X GET https://' + cliqr_ccm + '/v1/jobs/'\n    f = os.popen(r2)\n    result = f.read()\n    f.close()\n    formed = 
json.loads(result)\n\n s = '{\"deployed\" : [\\n'\n for i in formed['jobs']:\n if i['deploymentInfo'] == None:\n continue\n print (i)\n print (\"=====================================\")\n print (i['deploymentInfo'])\n s += ' {\"Name\": \"' + i['deploymentInfo']['deploymentName'] + '\", \"App\": \"' + i['appName'] + '\", \"Status\": \"' + i['deploymentInfo']['deploymentStatus'] + '\"},\\n'\n s = s[:-2] + '\\n'\n s += \"]}\\n\"\n\n return s \n\ndef get_deployed_app_ips(deployment):\n\n r2 = 'curl -s -k -H \"Accept:application/json\" -H \"Content-Type:application/json\" -u ' + cliqr_user + ':' + cliqr_key + ' -X GET https://' + cliqr_ccm + '/v1/jobs/'\n f = os.popen(r2)\n result = f.read()\n f.close()\n formed = json.loads(result)\n\n s = '{\"deployed\" : [\\n'\n for i in formed['jobs']:\n if i['deploymentInfo'] == None:\n continue\n if i['deploymentInfo']['deploymentName'] == deployment:\n r3 = 'curl -s -k -H \"Accept:application/json\" -H \"Content-Type:application/json\" -u ' + cliqr_user + ':' + cliqr_key + ' -X GET ' + i['resource']\n f2 = os.popen(r3)\n result = f2.read()\n f2.close()\n formed = json.loads(result)\n #print json.dumps(formed, indent=4)\n for j in formed['jobs']:\n for v in j['virtualMachines']:\n #print json.dumps(v, indent=4)\n hn = v['hostName']\n nis = \"\"\n for ni in v['nodeNetworkInterfaces']:\n nis = nis + ',' + ni['publicIPAddress']\n res = hn + nis\n print (res) \n\n\n #print i\n #print \"=====================================\"\n #print i['deploymentInfo']\n #s += ' {\"Name\": \"' + i['deploymentInfo']['deploymentName'] + '\", \"App\": \"' + i['appName'] + '\", \"Status\": \"' + i['deploymentInfo']['deploymentStatus'] + '\"},\\n'\n #s = s[:-2] + '\\n'\n #s += \"]}\\n\"\n\n #return s \n\n#######################\n\nif __name__ == '__main__':\n get_deployed_app_ips(deployment)\n","repo_name":"loyevans/scratch","sub_path":"get_c3_ips.py","file_name":"get_c3_ips.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"17583628250","text":"'''A module for working with float and string representations\n of angles'''\n\ndef dms(angle):\n '''Convert a float representing an angle to a string representing\n the degrees, minutes, and seconds of that angle'''\n degrees = str(int(angle))\n rem = angle % 1\n minutes = rem * 60\n rem = minutes % 1\n seconds = rem * 60\n minutes = str(int(minutes)).zfill(2)\n seconds = str(int(seconds)).zfill(2)\n\n return f\"{degrees}°{minutes}'{seconds}\\\"\"\n","repo_name":"T-monius/python-small-problems","sub_path":"easy-6/cute-angles/cute_angles.py","file_name":"cute_angles.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22180824977","text":"import pandas as pd\nimport numpy as np\nimport cPickle as pickle\nfrom flask import Flask, request, render_template, jsonify\nfrom scipy.spatial.distance import euclidean\nfrom flask_bootstrap import Bootstrap\nimport re\nfrom summa import textrank\nfrom nltk.tokenize.punkt import PunktSentenceTokenizer\nimport essay_analysis as ea\nimport visualize as v\nimport seaborn as sns\n\napp = Flask(__name__)\nBootstrap(app)\n\nwith open('../data/app/vectorizer.pkl') as f:\n vectorizer = pickle.load(f)\nwith open('../data/app/tfidf_mat.pkl') as f:\n tfidf_mat = pickle.load(f)\nwith open('../data/app/nmf.pkl') as f:\n nmf = pickle.load(f)\n# with open('../data/app/model.pkl') as f:\n# my_model = 
pickle.load(f)\n\n# home page\n@app.route('/')\ndef index():\n\treturn render_template('index.html', title = 'Welcome')\n\n@app.route('/model', methods = ['GET', 'POST'])\ndef model():\n male = request.form.get('male')\n sat = request.form.get('sat')\n sat_times_taken = request.form.get('sat_times_taken')\n gpa = request.form.get('gpa')\n asian = request.form.get('asian')\n black = request.form.get('black')\n hispanic = request.form.get('hispanic')\n white = request.form.get('white')\n pacific = request.form.get('pacific')\n nativeam = request.form.get('nativeam')\n ecc = request.form.get('ecc')\n return render_template('model.html', sat = sat, sat_times_taken = sat_times_taken, gpa = gpa, male=male, asian=asian, black=black, hispanic=hispanic, white=white, pacific=pacific, nativeam=nativeam, ecc=ecc)\n\n@app.route('/model_results', methods = ['POST'])\ndef model_results():\n sat = request.form.get('sat')\n sat_times_taken = request.form.get('sat_times_taken')\n gpa = request.form.get('gpa')\n male = request.form.get('male')\n asian = request.form.get('asian')\n black = request.form.get('black')\n hispanic = request.form.get('hispanic')\n white = request.form.get('white')\n pacific = request.form.get('pacific')\n nativeam = request.form.get('nativeam')\n ecc = request.form.get('ecc')\n\n with open('../data/app/model.pkl') as f:\n my_model = pickle.load(f)\n df = pd.read_csv('../data/master.csv')\n master_cols = df.columns.values\n\n # Gender\n if male == '1':\n gender = 'Male'\n else:\n gender = 'Female'\n\n # Ethnicity\n ethnicity_vals = [asian, black, hispanic, white, pacific, nativeam]\n ethnicity_words = ['asian', 'black / african american', 'hispanic', 'white non-hispanic', 'native hawaiian / pacific islander', 'native american']\n ethnicity = ''\n for val,word in zip(ethnicity_vals, ethnicity_words):\n if val=='1':\n ethnicity += word + '|'\n\n X = pd.DataFrame([[np.nan for i in xrange(len(master_cols))]], columns=master_cols)\n X['Gender'] = gender\n X['Ethnicity'] = ethnicity\n X['Highest Composite SAT Score'] = float(sat)\n X['How many times did you take the official SAT?'] = sat_times_taken\n X['High School GPA'] = float(gpa)\n X['High School Extracurricular Activities'] = ecc\n\n prediction = (my_model.predict_proba(X)[0][1]*100).round(1)\n\n return render_template('model_results.html', prediction = prediction, sat_times_taken = sat_times_taken)\n\n@app.route('/analyzer', methods = ['GET', 'POST'])\ndef analyzer():\n essay = request.form.get('essay')\n similarity_tfidf = request.form.get('similarity_tfidf')\n similarity_nmf = request.form.get('similarity_nmf')\n return render_template('analyzer.html', essay = essay, similarity_tfidf = similarity_tfidf, similarity_nmf = similarity_nmf)\n\n@app.route('/analyzer_results', methods = ['GET', 'POST'])\ndef analyzer_results():\n essay = request.form.get('essay')\n similarity_nmf = request.form.get('similarity_nmf')\n similarity_tfidf = request.form.get('similarity_tfidf')\n\n # linebreak_idx = [m.start() for m in re.finditer('\\n', essay)]\n s_tokenizer = PunktSentenceTokenizer()\n sentences = s_tokenizer.tokenize(essay)\n top_sentences = textrank.summarize(essay).split('\\n')\n top_idx = []\n for i,sentence in enumerate(sentences):\n if sentence in top_sentences:\n top_idx.append(i)\n # Retokenize to get punctuation marks back\n sentences = s_tokenizer.tokenize(essay)\n sentences = list(enumerate(sentences))\n\n topics,similar_essays = processEssay(essay, similarity_nmf, similarity_tfidf, json_output=False)\n essay1 = 
similar_essays[0]\n essay2 = similar_essays[1]\n essay3 = similar_essays[2]\n topic1, topic2, topic3, topic4, topic5, topic6, topic7 = topics\n topic_names = ['Family', 'Music', 'Culture', 'Sport', 'Personal/Story', 'Science', 'Career']\n topic_tuples = zip(topic_names, topics)\n\n # Load interactive plot\n interactive_plot = interactivePlot()\n\n return render_template('analyzer_results.html', essay1 = essay1, essay2 = essay2, essay3 = essay3, topic_tuples = topic_tuples, sentences=sentences, top_idx=top_idx, interactive_plot=interactive_plot)\n\n@app.route('/analyzer/api/v1.0/similar_essays', methods = ['GET', 'POST'])\ndef json_results():\n essay = request.form.get('essay')\n similarity_nmf = request.form.get('similarity_nmf')\n similarity_tfidf = request.form.get('similarity_tfidf')\n similar_essays = processEssay(essay, similarity_nmf, similarity_tfidf, json_output=True)\n\n json_output = [{'id':userid, 'essay':content} for userid,content in similar_essays]\n\n return jsonify({'similar_essays':json_output})\n\n@app.route('/interactive_plot', methods = ['GET', 'POST'])\ndef interactive_plot():\n # Load interactive plot\n interactive_plot = interactivePlot()\n return interactive_plot\n\n####################\n## Helper functions\n\ndef processEssay(essay, similarity_nmf, similarity_tfidf, json_output):\n '''\n This method actually analyzes the essay and outputs a tuple of 3 values:\n 1) list of indices for the most 'representative' sentences\n 2) list of most similar essays (stored as tuples of (id,text))\n 3) list of topics\n\n Can be used by analyzer_results() or json_results()\n '''\n mat = vectorizer.transform([essay])\n mat_nmf = nmf.transform(mat)\n mat_nmf_wt = mat_nmf[0] / mat_nmf[0].sum() * 100\n topic1 = mat_nmf_wt[0].round(1)\n topic2 = mat_nmf_wt[1].round(1)\n topic3 = mat_nmf_wt[2].round(1)\n topic4 = mat_nmf_wt[3].round(1)\n topic5 = mat_nmf_wt[4].round(1)\n topic6 = mat_nmf_wt[5].round(1)\n topic7 = mat_nmf_wt[6].round(1)\n topics = (topic1, topic2, topic3, topic4, topic5, topic6, topic7)\n\n # Load in database of essays and topics\n df_essay = pd.read_csv('../data/app/essays_and_topics.csv')\n essays = df_essay['content'].values\n essays_id = df_essay['id'].values\n essays_dict = {essay:i for essay,i in zip(essays, essays_id)}\n topic_names = ['family', 'music', 'culture', 'sport', 'personal', 'science', 'career']\n topic_mat = df_essay.loc[:,topic_names].values\n\n # Get essays based on NMF euclidean distance or TFIDF cosine similarity\n tm = ea.TopicModeling()\n if similarity_nmf == '1':\n similar_essays = tm.similarEssaysNMF(essay, essays, topic_mat, vectorizer, nmf)\n essay1, essay2, essay3 = similar_essays\n elif similarity_tfidf == '1':\n similar_essays = tm.similarEssaysTfidf(essay, essays, tfidf_mat, vectorizer)\n essay1, essay2, essay3 = similar_essays\n\n # Get most 'representative' sentences in their own essay\n # UPDATE: this has been abandoned for the superior TextRank method\n # top_idx = tm.getBestSentence(essay, vectorizer, nmf)\n\n if json_output:\n similar_ids = [essays_dict[essay] for essay in similar_essays]\n return zip(similar_ids, similar_essays)\n else:\n return (topics,similar_essays)\n\ndef interactivePlot():\n # Load in database of essays and topics\n df_essay = pd.read_csv('../data/app/essays_and_topics.csv')\n topics = ['Personal/Story', 'Culture', 'Family', 'Music', 'Career', 'Science', 'Sport']\n summaries = df_essay['summary'].values\n pca1 = df_essay['pca1'].values\n pca2 = df_essay['pca2'].values\n labels = df_essay['topic_cluster_num']\n 
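# pca1/pca2 are assumed to be precomputed 2-D PCA coordinates for each essay;\n    # they are read straight from the CSV, so no projection happens at request time.\n    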
titles = df_essay['topic_cluster_text']\n    cluster_names = {i:topic for i,topic in enumerate(topics)}\n\n    html = v.plotEssays(pca1, pca2, labels, titles, cluster_names, ms=5, output='app')\n\n    return html\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=8080, debug=True)\n","repo_name":"yungmsh/mining_the_common_app","sub_path":"code/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8350,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"}
{"seq_id":"17494242243","text":"from datetime import date\npessoa = {'Name': str(input('Name: ')).strip()}\nnasc = int(input('Year of birth: '))\npessoa['Age'] = date.today().year - nasc\npessoa['CTPS'] = int(input('Work permit (CTPS) number: (0 if none) '))\nif pessoa['CTPS'] != 0:\n    pessoa['Hired'] = int(input('Year of hiring: '))\n    pessoa['Salary'] = float(input('Salary: R$'))\n    pessoa['Retirement'] = (35 + pessoa['Hired']) - nasc\nprint(\"-=\" * 30)\nfor k, v in pessoa.items():\n    print(f' - {k} has the value {v}')\n","repo_name":"Julio-Cezar-Candido/CursoemVideo-Python-Exerc","sub_path":"Exercícios/ex092.py","file_name":"ex092.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"12629839928","text":"import math\n\n\ndef is_prime_number(num: int) -> bool:\n    if num == 1:\n        return False\n    sqrt = int(math.sqrt(num))\n    for i in range(2, sqrt + 1):\n        if num % i == 0:\n            return False\n    return True\n\n\nif __name__ == '__main__':\n    N = int(input())\n    input_number_list = list(map(int, input().split()))\n\n    ans = 0\n\n    for input_number in input_number_list:\n        if is_prime_number(input_number):\n            ans += 1\n\n    print(ans)\n","repo_name":"jh9875/PS","sub_path":"Python/BOJ/[1978] 소수 찾기/main_1.py","file_name":"main_1.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"32210312260","text":"import numpy as np\nimport scipy.stats as stats\nfrom .matrix_normal_inverse_wishart import sample_matrix_normal_inverse_wishart_omit_b, sample_matrix_normal_inverse_wishart\n\n\ndef set_dimension_parameters(num_states=3, dim_y=2, T=1000, num_lags=2):\n    '''\n    Returns a dictionary with the specified parameters.\n\n    num_states is the number of hmm states\n    num_lags is the number of lags in the AR process\n    T is the total length of the signal (in number of time bins)\n    dim_y is the number of channels in the signal (e.g. 
6 if we have 3 acc channels and 3 gyro channels)\n    '''\n    return {\"num_states\": num_states, \"num_lags\": num_lags, \"T\": T, \"dim_y\": dim_y}\n\n\ndef set_hyperparameters_default(dimensions, include_bias_term=False):\n    '''\n    Returns a dictionary with some default hyperparameter values\n\n    '''\n\n    num_lags, dim_y = dimensions[\"num_lags\"], dimensions[\"dim_y\"]\n    # set kappa (stickiness), and note that it will not be learned\n    kappa = 100\n\n    # Autoregressive parameters\n    # set nu0 (scalar) as in demo of pyhsmm_autoregressive)\n    nu_0 = 5 #\n    # set S0 (dim_y x dim_y)\n    S_0 = 1 / 100 * np.eye(dim_y)\n    # set M0\n    M_0 = 0.25 * np.ones((dim_y, dim_y * num_lags))\n    if include_bias_term:\n        M_0 = np.hstack((M_0, np.zeros((dim_y, 1)))) # mean zero for the bias (b) term\n\n    # set K0 (nlags*dim_y+1 x nlags*dim_y+1) the ARD prior, a diagonal matrix with entries drawn from invGamma(1/25, 1/25)\n    if include_bias_term:\n        k = stats.invgamma.rvs(a=1, scale=25, size=num_lags * dim_y + 1)\n    else:\n        k = stats.invgamma.rvs(a=1, scale=25, size=num_lags * dim_y)\n    # k[num_lags*dim_y+1] = 0 # set last element to zero, so as not to shrink the affine contribution of b\n    K_0 = np.diag(k)\n    K_0_inv = np.linalg.inv(K_0)\n\n    # Parameters that probably won't be learned\n    # set gamma (scalar), or sample it from Gamma(1, 1/100). (Should this be learned?)\n    gamma = stats.gamma.rvs(a=1, scale=100)\n    # set alpha (scalar), or sample it from Gamma(1, 1/100). (Should this be learned?)\n    alpha = stats.gamma.rvs(a=1, scale=100)\n\n    true_hyperparameters = {\n        \"kappa\": kappa,\n        \"gamma\": gamma,\n        \"alpha\": alpha,\n        \"S_0\": S_0,\n        \"M_0\": M_0,\n        \"K_0\": K_0,\n        \"K_0_inv\": K_0_inv,\n        \"nu_0\": nu_0,\n    }\n\n    return true_hyperparameters\n\n\ndef set_parameters_from_model(dimensions, hyperparameters, include_bias_term=False):\n    '''\n    Draw samples\n    '''\n\n\n    num_states, dim_y, num_lags = (\n        dimensions[\"num_states\"],\n        dimensions[\"dim_y\"],\n        dimensions[\"num_lags\"],\n    )\n    alpha, gamma, kappa = (\n        hyperparameters[\"alpha\"],\n        hyperparameters[\"gamma\"],\n        hyperparameters[\"kappa\"],\n    )\n    nu_0, S_0, M_0, K_0 = (\n        hyperparameters[\"nu_0\"],\n        hyperparameters[\"S_0\"],\n        hyperparameters[\"M_0\"],\n        hyperparameters[\"K_0\"],\n    )\n\n    # set beta (num_states x 1) , needed for the mean of the transition matrix (creates sharing\n    # between the transition probabilities from different states)\n    beta = np.random.dirichlet(alpha=np.ones(num_states) * gamma / num_states)\n\n    # set pi (num_states x num_states) , the transition matrix for states\n    pi = np.zeros((num_states, num_states))\n    for i in range(num_states):\n        kappa_i = np.zeros((num_states,))\n        kappa_i[i] = kappa\n        pi[i] = np.random.dirichlet(alpha=alpha * beta + kappa_i)\n\n    # Autoregressive parameters\n    # set A, b, Sigma\n    #print(\"calculating A, b, Sigma\")\n    A = np.zeros((dim_y, dim_y * num_lags, num_states))\n    if include_bias_term:\n        b=[]\n    Sigma = np.zeros((dim_y, dim_y, num_states))\n    # A's taken from pyhsmm-autoregressive example\n    As = [\n        0.99 * np.hstack((-np.eye(2), 2 * np.eye(2))),\n        np.array(\n            [\n                [np.cos(np.pi / 6), -np.sin(np.pi / 6)],\n                [np.sin(np.pi / 6), np.cos(np.pi / 6)],\n            ]\n        ).dot(np.hstack((-np.eye(2), np.eye(2))))\n        + np.hstack((np.zeros((2, 2)), np.eye(2))),\n        np.array(\n            [\n                [np.cos(-np.pi / 6), -np.sin(-np.pi / 6)],\n                [np.sin(-np.pi / 6), np.cos(-np.pi / 6)],\n            ]\n        ).dot(np.hstack((-np.eye(2), np.eye(2))))\n        + np.hstack((np.zeros((2, 2)), np.eye(2))),\n    ]\n\n    for i in range(num_states):\n        if include_bias_term:\n            A0, b0, Sigma0 = 
sample_matrix_normal_inverse_wishart(nu_0, S_0, M_0, K_0)\n            b.append(b0)\n        else:\n            A0, Sigma0 = sample_matrix_normal_inverse_wishart_omit_b(nu_0, S_0, M_0, K_0)\n        #A[:, :, i] = As[i]\n        A[:,:,i] = A0\n        Sigma[:, :, i] = Sigma0\n\n    parameters = {\"beta\": beta, \"pi\": pi, \"A\": A, \"Sigma\": Sigma}\n    if include_bias_term:\n        parameters[\"b\"] = b\n    return parameters\n\n\ndef simulate_states(pi, T, initial_state=0):\n    # returns a vector of states, and counts of each transition\n    x = [initial_state]\n    num_states = pi.shape[0]\n    transition_counts = np.zeros(\n        (num_states, num_states)\n    ) # transition_counts(k,j) represents counts of transitions from state k to j\n    for t in range(1, T):\n        current_state = x[t - 1]\n        next_state = np.random.choice(\n            num_states, 1, replace=False, p=pi[current_state].flatten()\n        )[0]\n        x.append(next_state)\n        transition_counts[current_state, next_state] += 1\n\n    return x, transition_counts.astype(int)\n\n\ndef simulate_from_generative_model(\n    dimensions, hyperparameters, parameters, initial_state=0\n):\n    '''\n    Simulate hmm states and AR signals, given the autoregressive parameters and markov transition matrix pi\n\n    Returns\n    -------\n    x:\n        state sequence\n    y: \n        AR-based emissions\n\n    Notes\n    -----\n    If parameters has a key 'b', then it will be used as a bias term. (As in, y_t+1 = Ay_t + b).\n    If parameters has no key 'b', no bias term is used (or b=0 is used)\n    '''\n\n    pi, A, Sigma = parameters[\"pi\"], parameters[\"A\"], parameters[\"Sigma\"]\n    dim_y, num_lags, T, num_states = dimensions[\"dim_y\"], dimensions[\"num_lags\"], dimensions[\"T\"], dimensions[\"num_states\"]\n    \n    # if the bias term is included, get it, otherwise set it to all zeros\n    b = parameters.get(\"b\", [np.zeros((1, dim_y))] * num_states)\n\n    # sample x, the vector of hidden states (length T), each one an integer in 0,..,L-1\n    x, transition_counts = simulate_states(pi, T, initial_state=initial_state)\n\n    # Observations\n    # initialize first few y's\n    y = np.empty((dim_y, T))\n    for t in range(num_lags):\n        y[:, t] = stats.norm.rvs(size=dim_y)\n\n    for t in range(num_lags, T):\n        # TODO: memoize\n        current_state = x[t]\n        mean_y = np.matmul(\n            A[:, :, current_state],\n            (y[:, t - num_lags:t]).reshape(dim_y * num_lags, 1, order=\"F\"),\n        ).flatten() + b[current_state].flatten() # bias term; all zeros when no 'b' was passed, matching the docstring above\n        ynew = stats.multivariate_normal.rvs(\n            mean=mean_y, cov=Sigma[:, :, current_state]\n        )\n        y[:, t] = ynew\n\n    return x, y, transition_counts\n\n\ndef show_samples_from_dynamics(\n    A, Sigma, dimensions, hyperparameters, b=None, length_of_trace=10, num_traces=5, state=0\n):\n    num_states = dimensions[\"num_states\"]\n    dimensions[\"T\"] = length_of_trace\n    parameters = {\"pi\": np.identity(num_states), \"A\": A, \"Sigma\": Sigma}\n    if b is not None:\n        parameters[\"b\"] = b\n    x, y, _ = simulate_from_generative_model(\n        dimensions, hyperparameters, parameters, initial_state=state\n    )\n    return x, y\n","repo_name":"karink520/sensor_util_ataxia_public","sub_path":"sensor_util/generative_model.py","file_name":"generative_model.py","file_ext":"py","file_size_in_byte":7570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"74672290185","text":"import sys\nimport logging\nimport pprint\nimport datetime\nimport uuid\nimport os\nimport string\nimport requests\nimport requests_cache\nfrom BeautifulSoup import BeautifulSoup\n\n\"\"\" A class to search for British Airways/Oneworld award availability.\n\"\"\"\nclass BA2:\n    \n    debug_dir = u\"debug\"\n\n    classAKA 
= {\n \"economy\": \"economy\",\n \"business\": \"business\",\n \"premium\": \"premium\",\n \"first\": \"first\",\n \"M\": \"economy\",\n \"S\": \"economy\",\n \"N\": \"economy\",\n \"O\": \"economy\",\n \"B\": \"economy\",\n \"H\": \"economy\",\n \"Y\": \"economy\",\n \"W\": \"premium\",\n \"T\": \"premium\",\n \"E\": \"premium\",\n \"C\": \"business\",\n \"J\": \"business\",\n \"R\": \"business\",\n \"F\": \"first\",\n \"A\": \"first\",\n }\n\n classesReal = {\n \"economy\": \"Economy\",\n \"premium\": \"Premium Economy\",\n \"business\": \"Business Class\",\n \"first\": \"First Class\",\n \"0\": \"Unknown Class\", # Our code used internally here\n }\n\n def __init__(self, debug=False, info=False):\n self.debug = debug\n self.logged_in = False\n self.b = None\n self.logger = logging.getLogger(\"ba\")\n\n requests_cache.install_cache(\"ba_cache\", backend=\"sqlite\", expire_after=60*14) # cache for 14 mins\n\n # ensure mechanize debug logging is on\n if self.debug:\n loggers = [\"mechanize\", \"mechanize.forms\", \"ROOT\"]\n for loggername in loggers:\n logger = logging.getLogger(loggername)\n logger.addHandler(logging.StreamHandler(sys.stdout))\n logger.setLevel(logging.DEBUG)\n\n if info:\n self.logger.addHandler(logging.StreamHandler(sys.stdout))\n self.logger.setLevel(logging.INFO)\n elif debug:\n self.logger.addHandler(logging.StreamHandler(sys.stdout))\n self.logger.setLevel(logging.DEBUG)\n\n def notify(self, notify):\n try:\n from pync import Notifier\n Notifier.notify(notify, title=\"Award Availability Finder\")\n except Exception as e:\n pass\n\n def write_html(self, html):\n \"\"\" Save HTML to the debug directory, ordered by time (UUID). \"\"\"\n if self.debug:\n if not os.path.exists(self.debug_dir):\n os.mkdir(self.debug_dir)\n f = open(self.debug_dir + os.sep + unicode(uuid.uuid1()) + u\".html\", \"w\")\n f.write(html)\n f.close\n self.cj.save()\n\n def lookup_one(self, src, dst, pax, cls, start, end):\n t = string.Template(\"http://www.baredemptionfinder.com/search?source=$src&destination=$dst&number_of_passengers=$pax&class=$cls\")\n url = t.safe_substitute(src=src,dst=dst,pax=pax,cls=cls)\n self.logger.info(\"Getting URL: {0}\".format(url))\n r = requests.get(url)\n html = r.text\n soup = BeautifulSoup(html)\n results = {}\n for div in soup.findAll(\"div\", {\"class\": \"outbound\"}):\n self.logger.info(\"Outbound\")\n for li in div.findAll(\"li\"):\n self.logger.info(\"Li {0}\".format(li.string))\n day, month, year = li.string.split(\" \")[0].split(\"/\")\n if (len(day) == 1):\n day = \"0\" + day\n if (len(month) == 1):\n month = \"0\" + month \n if (len(year) == 2): \n year = \"20\" + year\n date = year + month + day\n if (int(date) >= int(start) and int(date) <= int(end)):\n nicedate = year + \"/\" + month + \"/\" + day\n if (nicedate not in results.keys()):\n results[nicedate] = []\n results[nicedate].append({\"route\": [src, dst], \"class\": self.classesReal[cls]})\n return results\n\n def lookup_dates(self, from_code, to_code, dates, travel_class, adults, directonly):\n \"\"\" Lookup award availability for a date range. 
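Accepts comma-separated\n        lists of departure airports, destinations and classes, and either a single\n        DD/MM/YYYY date or a DD/MM/YYYY-DD/MM/YYYY range (see the parsing below).\n        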
\"\"\"\n\n if \"-\" in dates:\n # multiple dates\n (start_date, end_date) = dates.split(\"-\")\n else:\n # single date\n start_date = dates\n end_date = dates\n\n # multiple departure airports\n from_codes = from_code.split(\",\")\n\n # multiple destinations\n to_codes = to_code.split(\",\")\n\n # multiple classes\n travel_classes = travel_class.split(\",\")\n\n # parse the strings to YYYYMMDD\n start_date = datetime.datetime.strptime(start_date, \"%d/%m/%Y\").strftime(\"%Y%m%d\")\n end_date = datetime.datetime.strptime(end_date, \"%d/%m/%Y\").strftime(\"%Y%m%d\")\n\n results = {}\n sofar = 0\n for current_from_code in from_codes:\n for current_to_code in to_codes:\n for current_travel_class in travel_classes:\n code = self.classAKA[current_travel_class]\n self.logger.info(\"Checking {0}-{1}, {2} ({3} seats)\".format(current_from_code, current_to_code, code, adults))\n result = self.lookup_one(current_from_code, current_to_code, adults, code, start_date, end_date)\n count = len(result)\n sofar += count\n self.logger.info(\"... {0} flights ({1} total)\".format(count, sofar))\n if count > 0:\n self.notify(\"Found {0} flight(s) {1}-{2}\".format(count,current_from_code,current_to_code))\n for day in result.keys():\n if day not in results:\n results[day] = []\n results[day].extend(result[day])\n return results\n\n\n def format_results(self, results):\n \"\"\" Format a structured dict of flight optons (from parse_flights) into a human-readable list. \"\"\"\n if self.debug:\n self.logger.debug(\"Formatting these results: \")\n simple = pprint.pformat(results, indent=2)\n self.logger.debug(simple)\n\n lines = \"\"\n dates = sorted(results) # order the dates\n for date in dates:\n for result in results[date]:\n lines += date +\" \"\n lines += \"-\".join(result['route']) + \" \"\n lines += \"(\" + result['class'] + \")\"\n lines += \"\\n\" # flight newline\n return lines\n\n","repo_name":"danielsmith-eu/britishairways-awards-tool","sub_path":"awards/ba2.py","file_name":"ba2.py","file_ext":"py","file_size_in_byte":6349,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"22660745010","text":"import discord\nfrom key.key import credientals\n\n\nclass Myclient(discord.Client):\n async def on_ready(self):\n print('Logged on as {0}!'.format(self.user))\n\n async def on_message(self, message):\n print('Message from {0.author}: {0.content}'.format(message))\n\n\nif __name__==\"__main__\":\n client = Myclient()\n client.run(credientals['key'])","repo_name":"omkumar01/discord-gugi","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"71981284105","text":"#Program to check whether a given date is valid. 
Date must match this format: DD/MM/YYYY\n\nimport re\n\ndate_regex = re.compile(r'(0[1-9]|[12]\\d|30|31)/(0[1-9]|1[0-2])/((1|2)\\d{3})')\nmonths30 = ('04', '06', '09', '11')\n\ndef isleap(year):\n    if year % 400 == 0 or (year % 4 == 0 and year % 100 != 0):\n        return True\n    else:\n        return False\n    \ndef isvalid_date(date):\n    mo = date_regex.search(date)\n    if mo is None:\n        print('Invalid: Date format must be DD/MM/YYYY')\n        return False\n    day, month, year, _ = mo.groups()\n    if int(day) > 29 and month == '02' and isleap(int(year)):\n        print('Invalid: February has 29 days in leap years')\n        return False\n    elif int(day) > 28 and month == '02' and not isleap(int(year)):\n        print('Invalid: February has 28 days in non-leap years')\n        return False\n    elif int(day) > 30 and month in months30:\n        print('Invalid: This month has 30 days')\n        return False\n    elif int(day) > 31 and month not in months30:\n        print('Invalid: This month has 31 days')\n        return False\n    else:\n        print('Valid')\n        return True\n\nisvalid_date('29/02/2024')","repo_name":"lucasadelino/Learning-Python","sub_path":"Automate the Boring Stuff Practice Projects/Chapter 7/datedetector.py","file_name":"datedetector.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"19645752752","text":"\n\n\n\nwith open('final_catalog.txt','w') as f:\n    f.write('system'+'\\t'+\n            'Porb'+'\\t'+\n            'e'+'\\t'+\n            'sigma_e'+'\\t'+\n            'primary_mass'+'\\t'+\n            'mass_function'+'\\t'+\n            'P_star'+'\\t'+\n            'Pstar_error'+'\\n')\n\n\n    with open('Table1.txt','r') as f1:\n        next(f1)\n        for lines in f1:\n            x=lines.split()\n            system=x[0]\n            Porb=float(x[4])\n            e=float(x[5])\n            sigma_e=float(x[6])\n            primary_mass=float(x[9])\n            P_star=float(x[7])\n            Pstar_error=float(x[8])\n            with open('rv_fit.txt','r') as f2:\n                next(f2)\n                for lines in f2:\n                    y=lines.split()\n                    if y[0]==system:\n                        K=float(y[1])\n                        ecc=float(y[2])\n                        f_M=Porb*K*K*K*((1-ecc*ecc)**1.5)*1.03e-7\n                        break\n            f.write(system+'\\t'+\n                    repr(Porb)+'\\t'+\n                    repr(e)+'\\t'+\n                    repr(sigma_e)+'\\t'+\n                    repr(primary_mass)+'\\t'+\n                    repr(f_M)+'\\t'+\n                    repr(P_star)+'\\t'+\n                    repr(Pstar_error)+'\\n')\n","repo_name":"ruskin23/QstarFromTidalSynchronization","sub_path":"MMS06_binaries/create_catalog.py","file_name":"create_catalog.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"11138563101","text":"import random\r\nfrom SM3 import SM3,sm3hex\r\nfrom SM2 import SM2\r\nimport json\r\nfrom Crypto.Cipher import AES\r\nimport binascii\r\n\r\nclass PGP:\r\n    def __init__(self):\r\n        self.sm2 = SM2()\r\n        self.sm2.init_keys()\r\n        self.bs = 16\r\n        self.PADDING = lambda s: s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)\r\n        self.sk = self.sm2.sk\r\n        self.pk = self.sm2.pk\r\n\r\n\r\n    def digest(self,msg):\r\n        hash = sm3hex(bin(int(msg.hex(),base=16))[2:])\r\n        return hash\r\n\r\n    def send(self,origin,pk):\r\n        hash = self.digest(origin.encode())\r\n        mac = self.sm2.sign(hash.encode())\r\n        send_msg = origin+'#'+json.dumps(mac, ensure_ascii=True)\r\n        text = send_msg\r\n        key = b'1234567890abcdef'\r\n        aes = AES.new(key, AES.MODE_ECB)\r\n        cipher = aes.encrypt(self.PADDING(text).encode())\r\n        en_key = self.sm2.encrypt(key.hex()[2:],pk)\r\n        return (en_key,cipher)\r\n\r\n    def recv(self,kc,pk):\r\n        en_key,cipher = kc\r\n        key = b'1234567890abcdef'\r\n        aes = AES.new(key, AES.MODE_ECB)\r\n        plain = aes.decrypt(cipher)\r\n        origin_, mac_ = plain.decode().split('#')\r\n        mac_ = 
json.loads(mac_[:-ord(mac_[-1])])\r\n        hash = self.digest(origin_.encode())\r\n        if self.sm2.verify(hash.encode(),mac_[0],mac_[1],pk) == False:\r\n            return False\r\n        return origin_\r\n\r\nif __name__ == \"__main__\":\r\n    A = PGP()\r\n    B = PGP()\r\n    bPk = '%064x%064x' % (int(B.pk.x), int(B.pk.y))\r\n    aPk = '%064x%064x' % (int(A.pk.x), int(A.pk.y))\r\n    email = \"ToDifficult\" #the simulated email to send\r\n    t = A.send(email, bPk)# t is the data stream sent by A\r\n    print(t) #print the data stream\r\n    print(B.recv(t, aPk))# B parses and decrypts the data","repo_name":"Zuni-W/course-record","sub_path":"nsp_SM2_imple/PGP.py","file_name":"PGP.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"1148626885","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nfrom places.models import Airline, Country, Park, CountryIndex, Animal, Activity\nfrom photos.models import Photo, Comment\nfrom operators.models import TourOperator\nfrom django.conf import settings\nfrom analytics.models import Action\nfrom django.core.files import File\nfrom photos.models import Tag\nimport MySQLdb\nfrom analytics.models import Analytic\nfrom django.contrib.auth.models import User\nfrom django.utils.timezone import make_aware\nimport os.path\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom shutil import copyfile\nimport sys\nimport hashlib\nfrom datetime import datetime\nfrom django.utils import timezone\nimport pytz\n\n\nclass Command(BaseCommand):\n    help = 'Import photos'\n\n    def add_arguments(self, parser):\n        parser.add_argument('-db_host', required=True, type=str)\n        parser.add_argument('-db_name', required=True, type=str)\n        parser.add_argument('-db_user', required=True, type=str)\n        parser.add_argument('-db_pass', required=True, type=str)\n        parser.add_argument('-base_location', required=True, type=str)\n\n    def handle(self, *args, **options):\n        # not allowed in production\n        users = {}\n        if not settings.DEBUG:\n            self.stdout.write(self.style.ERROR(\"DEBUG is off\"))\n            return\n        db = MySQLdb.connect(\n            host=options['db_host'], db=options['db_name'], user=options['db_user'], password=options['db_pass'])\n        cursor = db.cursor(MySQLdb.cursors.DictCursor)\n        created, updated = 0, 0\n        cursor.execute(\"select COUNT(*) as count FROM photo\")\n        r = cursor.fetchone()\n        count = r['count']\n        Photo.objects.all().delete()\n        # comments hang off a generic FK, so they are not cascaded by the delete\n        # above; clear stale photo comments once here instead of inside the loops\n        Comment.objects.filter(content_type__model='photo').delete()\n        cursor.execute(\"\"\"select \n            photo.*,\n            touroperator.name as tour_operator,\n            countryindex.country_name as countryindex,\n            park.park_name as park,\n            activity.activity_name as activity,\n            email_address\n            from \n            photo \n            LEFT JOIN touroperator ON photo.touroperator_id = touroperator.id\n            LEFT JOIN user ON user.id = photo.user_id\n            LEFT JOIN countryindex ON photo.countryindex_id = countryindex.id\n            LEFT JOIN park ON photo.park_id = park.id\n            LEFT JOIN activity ON photo.activity_id = activity.id\n            \"\"\")\n        result = cursor.fetchall()\n        skipped = 0\n        smaller = 0\n        hashes = {}\n        for c in result:\n            tour_operator_name = c['tour_operator']\n            tour_operator_obj = False\n            if tour_operator_name:\n                try:\n                    tour_operator_obj = TourOperator.objects.get(\n                        name=tour_operator_name)\n                except ObjectDoesNotExist:\n                    print('Tour ' + str(c['tour_operator']) + 'doesnotexists')\n                    continue\n            username = c.pop('email_address')\n            try:\n                user = User.objects.get(username=username)\n            except ObjectDoesNotExist:\n                user = None\n                pass\n            newdict = {}\n            newdict['uuid_value'] = c.pop('uuid')\n            if not newdict['uuid_value']:\n                print(\"UUID is null\", c.pop(\"id\"))\n                continue\n            
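# Map the remaining legacy columns onto Photo model fields, popping each\n            # consumed key out of the cursor dict as it is used.\n            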
newdict['caption'] = c.pop('caption')\n            newdict['date_created'] = make_aware(c.pop('date_created'))\n            newdict['user'] = user\n            date_modified = c.pop('date_modified')\n            if date_modified:\n                date_modified = make_aware(date_modified)\n            newdict['date_modified'] = date_modified\n            if tour_operator_obj:\n                newdict['tour_operator'] = tour_operator_obj\n            if c['countryindex']:\n                newdict['country_index'] = CountryIndex.objects.get(\n                    name=c.pop('countryindex'))\n            if c['park']:\n                newdict['park'] = Park.objects.get(\n                    name=c.pop('park'))\n            if c['activity']:\n                newdict['activity'] = Activity.objects.get(\n                    name_old=c.pop('activity'))\n            uuid = newdict.pop('uuid_value')\n            print(uuid)\n            obj, is_created = Photo.objects.update_or_create(\n                uuid_value=uuid, defaults=newdict\n            )\n            if is_created:\n                created += 1\n            else:\n                # obj.image.delete(save=True)\n                print('updated')\n                updated += 1\n            #idk why uuid is changed\n            obj.uuid_value = uuid\n            tags = c['tags']\n            if tags != None and tags != '':\n                tags = tags.split(',')\n                for tag in tags:\n                    if tag != '':\n                        o_tag, _ = Tag.objects.update_or_create(name=tag[:999])\n                        obj.tags.add(o_tag)\n\n            if c['gallery_path']:\n                cur_image_path = '%sroot%s' % (\n                    options['base_location'], c['gallery_path'],)\n                \n                if os.path.exists(cur_image_path):\n                    filename, file_extension = os.path.splitext(cur_image_path)\n                    openedFile = open(cur_image_path, 'rb')\n                    readFile = openedFile.read()\n                    md5Hash = hashlib.md5(readFile)\n                    md5Hashed = md5Hash.hexdigest()\n                    if md5Hashed in hashes:\n                        skipped += 1\n                        print('uuid duplicated file ' + str(obj.uuid_value))\n                        obj.date_deleted = timezone.now()\n                    hashes[md5Hashed] = 1\n\n                    month = newdict['date_created'].month\n                    year = newdict['date_created'].year\n                    image_path = \"/images/photos/%s/%s/\" % (year, month)\n                    os.makedirs(settings.MEDIA_ROOT +\n                                image_path, exist_ok=True)\n                    image_path += (\"image_%d%s\" %\n                                   (obj.id, file_extension.lower()))\n                    dst_image_path = settings.MEDIA_ROOT + image_path\n                    copyfile(cur_image_path, dst_image_path)\n                    obj.image = image_path[1:]\n                    if obj.image.width < 300 and obj.image.height < 300:\n                        smaller += 1\n                        print(\"image %s is skipped by its size %d ,%d\" %\n                              (obj.image, obj.image.width, obj.image.height))\n                        continue\n            SQL = \"\"\"\n                select animal.animal_name\n                FROM\n                photo_animal, animal\n                WHERE\n                photo_animal.animal_id = animal.id AND\n                photo_animal.photo_id = '%i'\n            \"\"\"\n            cursor.execute(SQL % c['id'])\n            result_ = cursor.fetchall()\n            for c_ in result_:\n                animal = Animal.objects.get(name=c_['animal_name'])\n                obj.animals.add(animal)\n\n\n\n            # comments\n            cursor.execute(\"\"\"\n                SELECT\n                photocomment.*, user.email_address as email_address\n                FROM\n                photocomment\n                LEFT JOIN user ON user.id = photocomment.user_id\n                WHERE\n                photocomment.photo_id = '%i'\n            \"\"\" % c['id'])\n            result_ = cursor.fetchall()\n            for c_ in result_:\n                comment = Comment()\n                comment.comment = c_.pop('photo_comment')\n                email_add = c_.pop('email_address')\n                try:\n                    user = User.objects.get(username=email_add)\n                except ObjectDoesNotExist:\n                    print(\"User\" + email_add + \"dont exists\")\n                    continue\n                comment.user = user\n                comment.date_created = make_aware(c_.pop('timestamp'))\n                comment.content_object = obj\n                comment.save()\n\n            # photo kudus\n            cursor.execute(\"\"\"\n                SELECT \n                photolike.timestamp,\n                user.email_address as email_address\n                FROM\n                photolike\n                LEFT JOIN user ON user.id = photolike.user_id\n                WHERE\n                photolike.photo_id = %s\n            \"\"\" % c['id'])\n            result = cursor.fetchall()\n            for c_ in result:\n                newdict_ = {}\n                
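# one analytics Action (type KUDU) per legacy photolike row, re-pointed at the new Photo\n                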
newdict_['content_object'] = obj\n newdict_['date_created'] = make_aware(c_.pop('timestamp'))\n newdict_['action_type'] = Action.KUDU\n email_add = c_.pop('email_address')\n if email_add != None:\n newdict_['user'] = User.objects.get(username=email_add)\n obj_act = Action(**newdict_)\n obj_act.save()\n obj.save()\n\n # photovisit\n SQL = \"\"\"\n SELECT \n photovisit.*, photo.uuid, user.email_address as email_address,\n photovisit.user_id\n FROM\n photo, photovisit\n LEFT JOIN user ON user.id = photovisit.user_id\n WHERE\n photovisit.photo_id = photo.id\n AND photo.uuid = '%s'\n \"\"\"\n cursor.execute(SQL % uuid)\n result_ = cursor.fetchall()\n for c_ in result_:\n analytic = Analytic()\n analytic.content_object = obj\n analytic.date_created = make_aware(c_.pop('timestamp'))\n analytic.ip_address = c_.pop('ip_address')\n analytic.referer = c_.pop('referer')\n #analytic.country_code = c_.pop('country_code')\n analytic.activity_type = 'VISIT'\n email_add = c_.pop('email_address')\n if email_add != None:\n if not email_add in users:\n try:\n users[email_add] = User.objects.get(\n username=email_add)\n analytic.user = users[email_add]\n except ObjectDoesNotExist:\n print(\"User\" + email_add + \"dont exists\")\n else:\n analytic.user = users[email_add]\n analytic.save()\n obj.save()\n message = 'Imported %i updated %i skipped %i (%i by size) photos' % (\n created, updated, skipped, smaller)\n\n self.stdout.write(self.style.SUCCESS(message))\n","repo_name":"montenegrop/djangotravelportal","sub_path":"extras/management/commands/import_photos.py","file_name":"import_photos.py","file_ext":"py","file_size_in_byte":10588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72071781385","text":"class Solution:\n def maximalSquare(self, matrix: List[List[str]]) -> int:\n m = len(matrix[0])\n n = len(matrix)\n dp = [[0] * (m+1) for _ in range(n+1)]\n ans = 0\n\n for i in range(n):\n for j in range(m):\n if matrix[i][j] == '1':\n dp[i+1][j+1] = 1 + min(dp[i][j+1], dp[i][j], dp[i+1][j])\n ans = max(ans, dp[i+1][j+1])\n return ans**2\n","repo_name":"CastleWhite/LeetCodeProblems","sub_path":"221.py","file_name":"221.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"24881563288","text":"from fastapi import FastAPI\nfrom user import routers\n\n\ntags_metadata = [\n {\n \"name\": \"Users\",\n \"description\": \"Operations with users. 
The **login** logic is also here ✨.\",\n },\n]\n\napp = FastAPI(\n title=\"Kiosk API 👌\",\n description=\"This is the Kiosk's API interactive documentation 😀\",\n version=\"1.0.0\",\n openapi_tags=tags_metadata\n)\n\napp.include_router(routers.router)\n","repo_name":"lewiseman/User_App_-fast-api-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"22977640640","text":"from keras.models import load_model\r\nimport cv2\r\nimport numpy as np \r\n\r\n\r\nclass Task1:\r\n def __init__(self, index):\r\n self.index = index\r\n self.stopped = False \r\n self.is_activated = False\r\n print(\"Init task 1\", index)\r\n\r\n np.set_printoptions(suppress=True)\r\n # Disable scientific notation for clarity\r\n\r\n # Load the model\r\n self.model = load_model(\"keras_model.h5\", compile=False)\r\n\r\n # Load the labels\r\n self.class_names = open(\"labels.txt\", \"r\").readlines()\r\n\r\n # CAMERA can be 0 or 1 based on default camera of your computer\r\n self.camera = cv2.VideoCapture(index)\r\n\r\n # Add a variable to track the number of consecutive failed attemps\r\n self.failed_attempts = 0 \r\n self.max_failed_attempts = 3 \r\n\r\n return\r\n\r\n def Task1_Run(self):\r\n if not self.is_activated:\r\n print(\"2 camera task, camera\", self.index, \"is activated\")\r\n self.is_activated = True\r\n\r\n while not self.stopped:\r\n # Grab the webcamera's image.\r\n ret, image = self.camera.read()\r\n \r\n if not ret:\r\n self.failed_attempts += 1\r\n print(\"Failed to capture image from camera\", self.index)\r\n if self.failed_attempts >= self.max_failed_attempts:\r\n print(\"Maximum number of consecutive failed attempts reached. 
Please try to connect cameras again\")\r\n self.stopped = True\r\n continue\r\n \r\n #Reset failed attempts counter\r\n self.failed_attempts = 0 \r\n \r\n # Resize the raw image into (224-height,224-width) pixels\r\n image = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)\r\n\r\n # Show the image in a window\r\n cv2.imshow(\"Webcam Image\", image)\r\n\r\n # Make the image a numpy array and reshape it to the models input shape.\r\n image = np.asarray(image, dtype=np.float32).reshape(1, 224, 224, 3)\r\n\r\n # Normalize the image array\r\n image = (image / 127.5) - 1\r\n\r\n # Predicts the model\r\n prediction = self.model.predict(image)\r\n index = np.argmax(prediction)\r\n class_name = self.class_names[index]\r\n confidence_score = prediction[0][index]\r\n\r\n # Print prediction and confidence score\r\n print(\"Class:\", class_name[2:], end=\"\")\r\n print(\"Confidence Score:\", str(np.round(confidence_score * 100))[:-2], \"%\")\r\n cv2.waitKey(1)\r\n\r\ntask1 = Task1(1) \r\ntask1.Task1_Run()","repo_name":"Kiera02/RTS_Lab_realtime","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26576295649","text":"import os.path\nimport tempfile\n\nfrom fontTools import merge, ttLib\nfrom fontTools.ttLib.tables import otTables\n\n\ndef make_font_name(script):\n if script:\n return \"Noto Sans %s\" % script\n else:\n return \"Noto Sans\"\n\n\ndef make_puncless_font_name(script):\n return make_font_name(script).replace(\" \", \"\").replace(\"-\", \"\")\n\n\ndef make_font_file_name(script, weight, directory=\"individual/unhinted\"):\n filename = \"%s/%s-%s.ttf\" % (directory, make_puncless_font_name(script), weight)\n return filename\n\n\ndef add_ui_alternative(table, target):\n new_target = target + \" UI\"\n sources = table[target]\n new_sources = [source + \" UI\" for source in sources]\n table[new_target] = new_sources\n\n\ndef has_gsub_table(fontfile):\n font = ttLib.TTFont(fontfile)\n return \"GSUB\" in font\n\n\nSCRIPT_TO_OPENTYPE_SCRIPT_TAG = {\n \"CypriotSyllabary\": \"cprt\",\n \"Deseret\": \"dsrt\",\n \"Glagolitic\": \"glag\",\n \"Lisu\": \"lisu\",\n \"Ogham\": \"ogam\",\n \"OldItalic\": \"ital\",\n \"Runic\": \"runr\",\n \"Shavian\": \"shaw\",\n \"Vai\": \"vai \",\n \"Carian\": \"cari\",\n \"EgyptianHieroglyphs\": \"egyp\",\n \"ImperialAramaic\": \"armi\",\n \"LinearB\": \"linb\",\n \"Lycian\": \"lyci\",\n \"Lydian\": \"lydi\",\n \"OldPersian\": \"xpeo\",\n \"OldSouthArabian\": \"sarb\",\n \"OldTurkic\": \"orkh\",\n \"Osmanya\": \"osma\",\n \"Phoenician\": \"phnx\",\n \"SumeroAkkadianCuneiform\": \"xsux\",\n \"Ugaritic\": \"ugar\",\n \"OlChiki\": \"olck\",\n \"TaiLe\": \"tale\",\n # Following keys are added to satisfy the use case in merge_fonts.py\n # Reference:\n # https://www.google.com/get/noto/#sans-xsux\n # https://www.google.com/get/noto/#sans-cprt\n # https://www.google.com/get/noto/#sans-yiii\n # https://www.microsoft.com/typography/otspec/scripttags.htm\n \"Cuneiform\": \"xsux\",\n \"Cypriot\": \"cprt\",\n \"Yi\": \"yi \",\n}\n\n\ndef get_opentype_script_tag(fontfile):\n fontfile = os.path.basename(fontfile)\n if fontfile.startswith(\"NotoSans\"):\n fontfile = fontfile[8:]\n fontfile = fontfile[: fontfile.index(\"-\")]\n return SCRIPT_TO_OPENTYPE_SCRIPT_TAG[fontfile]\n\n\ndef add_gsub_to_font(fontfile):\n \"\"\"Adds an empty GSUB table to a font.\"\"\"\n font = ttLib.TTFont(fontfile)\n gsub_table = 
ttLib.getTableClass(\"GSUB\")(\"GSUB\")\n gsub_table.table = otTables.GSUB()\n gsub_table.table.Version = 1.0\n gsub_table.table.ScriptList = otTables.ScriptList()\n gsub_table.table.ScriptCount = 1\n gsub_table.table.LookupList = otTables.LookupList()\n gsub_table.table.LookupList.LookupCount = 0\n gsub_table.table.LookupList.Lookup = []\n gsub_table.table.FeatureList = otTables.FeatureList()\n gsub_table.table.FeatureList.FeatureCount = 0\n gsub_table.table.LookupList.FeatureRecord = []\n\n script_record = otTables.ScriptRecord()\n script_record.ScriptTag = get_opentype_script_tag(fontfile)\n script_record.Script = otTables.Script()\n script_record.Script.LangSysCount = 0\n script_record.Script.LangSysRecord = []\n\n default_lang_sys = otTables.DefaultLangSys()\n default_lang_sys.FeatureIndex = []\n default_lang_sys.FeatureCount = 0\n default_lang_sys.LookupOrder = None\n default_lang_sys.ReqFeatureIndex = 65535\n script_record.Script.DefaultLangSys = default_lang_sys\n\n gsub_table.table.ScriptList.ScriptRecord = [script_record]\n\n font[\"GSUB\"] = gsub_table\n\n target_file = tempfile.gettempdir() + \"/\" + os.path.basename(fontfile)\n font.save(target_file)\n return target_file\n\n\ndef main():\n merge_table = {\n \"Historic\": [\n \"Avestan\",\n \"Carian\",\n \"Egyptian Hieroglyphs\",\n \"Imperial Aramaic\",\n \"Pahlavi\", # Should be 'Inscriptional Pahlavi',\n \"Parthian\", # Should be 'Inscriptional Parthian',\n \"Linear B\",\n \"Lycian\",\n \"Lydian\",\n \"Mandaic\",\n \"Old Persian\",\n \"Old South Arabian\",\n \"Old Turkic\",\n \"Osmanya\",\n \"Phags-Pa\",\n \"Phoenician\",\n \"Samaritan\",\n \"Sumero-Akkadian Cuneiform\",\n \"Ugaritic\",\n ],\n \"South Asian\": [\n \"Devanagari\",\n \"Bengali\",\n \"Gurmukhi\",\n \"Gujarati\",\n \"Oriya\",\n \"Tamil\",\n \"Telugu\",\n \"Kannada\",\n \"Malayalam\",\n \"Sinhala\",\n \"Thaana\",\n \"Brahmi\",\n \"Kaithi\",\n \"Kharoshthi\", # Move to Historic?\n \"Lepcha\",\n \"Limbu\",\n \"Meetei Mayek\",\n \"Ol Chiki\",\n \"Saurashtra\",\n \"Syloti Nagri\",\n ],\n \"Southeast Asian\": [\n \"Thai\",\n \"Lao\",\n \"Khmer\",\n \"Batak\",\n \"Buginese\",\n \"Buhid\",\n \"Cham\",\n \"Hanunoo\",\n \"Javanese\",\n \"Kayah Li\",\n \"New Tai Lue\",\n \"Rejang\",\n \"Sundanese\",\n \"Tagalog\",\n \"Tagbanwa\",\n \"Tai Le\",\n \"Tai Tham\",\n \"Tai Viet\",\n ],\n \"\": [ # LGC,\n \"Armenian\",\n \"Bamum\",\n \"Canadian Aboriginal\",\n \"Cherokee\",\n \"Coptic\",\n \"Cypriot Syllabary\",\n \"Deseret\",\n \"Ethiopic\",\n \"Georgian\",\n \"Glagolitic\",\n \"Gothic\",\n \"Hebrew\",\n \"Lisu\",\n \"NKo\",\n \"Ogham\",\n \"Old Italic\",\n \"Runic\",\n \"Shavian\",\n \"Tifinagh\",\n \"Vai\",\n ],\n }\n\n add_ui_alternative(merge_table, \"South Asian\")\n add_ui_alternative(merge_table, \"Southeast Asian\")\n\n for merge_target in sorted(merge_table):\n for weight in [\"Regular\", \"Bold\"]:\n merger = merge.Merger()\n source_fonts = merge_table[merge_target]\n if \"\" not in source_fonts:\n source_fonts = [\"\"] + source_fonts # The LGC font\n regular_sources = [\n make_font_file_name(script, weight) for script in source_fonts\n ]\n regular_sources = [font for font in regular_sources if os.path.isfile(font)]\n\n if len(regular_sources) <= 1:\n continue\n\n print(\"Merging Noto Sans %s %s\" % (merge_target, weight))\n\n for index, fontfile in enumerate(regular_sources):\n if not has_gsub_table(fontfile):\n regular_sources[index] = add_gsub_to_font(fontfile)\n\n font = merger.merge(regular_sources)\n\n first_font = source_fonts[0]\n if first_font != 
merge_target:\n for name_record in font[\"name\"].names:\n name = name_record.string.decode(\"UTF-16BE\")\n name = name.replace(\n make_font_name(first_font), make_font_name(merge_target)\n )\n name = name.replace(\n make_puncless_font_name(first_font),\n make_puncless_font_name(merge_target),\n )\n name_record.string = name.encode(\"UTF-16BE\")\n\n font.save(\n make_font_file_name(merge_target, weight, directory=\"combined/unhinted\")\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"agateau/pixelwheels","sub_path":"tools/fonts/nototools/merge_noto.py","file_name":"merge_noto.py","file_ext":"py","file_size_in_byte":7322,"program_lang":"python","lang":"en","doc_type":"code","stars":366,"dataset":"github-code","pt":"81"} +{"seq_id":"15318812347","text":"from time import time\nfrom multiprocessing import Process, Queue\n\nimport tqdm\nimport torch.multiprocessing as mp\n\nfrom workers import Throughput\nfrom scripts.make_envs import make_crafter\nfrom nn.actors import ActorCrafter\n\n\ndef work(throughput, frames_counter, stop_connection, n_frames):\n env_to_model_process = Process(target=throughput.env_to_model)\n model_to_env_process = Process(target=throughput.model_to_env)\n env_to_model_process.start()\n model_to_env_process.start()\n\n start_time = time()\n\n frames_collected = 0\n p_bar = tqdm.tqdm(desc=f'frames_collected = {frames_collected}, fps = {frames_collected / 1}')\n try:\n while True:\n while not frames_counter.empty():\n frames_collected += frames_counter.get()\n p_bar.set_description(\n desc=f'frames_collected = {frames_collected},'\n f'fps = {frames_collected / (time() - start_time)}'\n )\n if frames_collected >= n_frames:\n elapsed_time = time() - start_time\n print(\n f'done, {n_frames} frames collected in {elapsed_time}, '\n f'fps = {n_frames / elapsed_time}'\n )\n\n # put two stops - one for model worker and one for env worker\n stop_connection.put('stop')\n stop_connection.put('stop')\n break\n\n except KeyboardInterrupt:\n print('throughput interrupted')\n\n env_to_model_process.join()\n model_to_env_process.join()\n\n\ndef main():\n n_env_total = 64\n n_env_workers = 8\n n_env_per_worker = n_env_total // n_env_workers\n n_model_workers = 1\n n_frames = 100_000\n\n frames_counter, stop_connection = Queue(), Queue()\n throughput = Throughput(\n make_crafter, ActorCrafter,\n n_env_workers, n_model_workers, n_env_per_worker,\n frames_counter, stop_connection\n )\n work(throughput, frames_counter, stop_connection, n_frames)\n\n\nif __name__ == '__main__':\n mp.set_start_method('spawn')\n main()\n","repo_name":"CherryPieSexy/fast_reinforcement_learning","sub_path":"workers/throughput_test.py","file_name":"throughput_test.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"26703651100","text":"# Identity matrix is if the diagonal elements are 1 and other elements are zero.\nA = [[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]]\n\nif len(A) == len(A[0]):\n print(\"Identity matrix: \")\n error = False\n for row in range(0, len(A)):\n for col in range(0, len(A[row])):\n print(\"\\t\", A[row][col], end='')\n if (row != col and A[row][col] != 0) or (row == col and A[row][col] != 1): error = True\n print()\n\n if error: print(\"=> Invalid Identity Matrix.\")\nelse:\n print(\"Error! This isn't an Identity matrix. 
Number of rows and columns must be the same\")","repo_name":"naiemofficial/Matrix-Multi-dimensional-Vector-and-Array","sub_path":"Python/Types of Matrix/Identity matrix.py","file_name":"Identity matrix.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9847435907","text":"continuar = \"S\"\r\nproduto = 1\r\nnumProd = 1\r\ntotal = 0\r\npagoCliente = 0\r\n\r\nwhile continuar == \"S\":\r\n    while produto != 0:\r\n        produto = float(input(f\"Produto {numProd}: R$ \"))\r\n        total += produto\r\n        numProd += 1\r\n\r\n    print(\"--------\")\r\n    print(\"Total: R$ {:.2f}\".format(total))\r\n\r\n    while pagoCliente < total:\r\n        pagoCliente = float(input(\"Dinheiro: R$ \"))\r\n        if pagoCliente < total:\r\n            print(\"Valor insuficiente.\")\r\n\r\n    print(\"Troco: R$ {}\".format(pagoCliente - total))\r\n    print(\"===FIM DA COMPRA===\")\r\n\r\n    continuar = input(\"Novo cliente [S/N]? \").upper().strip()\r\n\r\n    produto = 1\r\n    numProd = 1\r\n    total = 0\r\n    pagoCliente = 0  # reset the amount paid so the next client's change is computed correctly\r\n","repo_name":"Luizcarlosqueiroz/PythonExercises","sub_path":"03_EstruturaDeRepeticao/31.py","file_name":"31.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8651297729","text":"#!/usr/bin/python3\n\"\"\"Visualizing K-NN Boundaries\"\"\"\nimport matplotlib\nmatplotlib.use('Agg')\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.neighbors import KNeighborsClassifier as KNN\n\ndf = pd.read_csv('iris-data.csv')\nprint(df.head())\nlabelled_species = [\n    'Iris-setosa',\n    'Iris-versicolor',\n    'Iris-virginica'\n]\n# cool use of enumerate\nfor idx, label in enumerate(labelled_species):\n    df.Species = df.Species.replace(label, idx)\n\nprint(df.head())\nmodel = KNN(n_neighbors=3)\nprint(model.fit(X=df[['Sepal Length', 'Petal Width']], y=df.Species))\nspacing = 0.1\npetal_range = np.arange(df['Petal Width'].min() - 1, df['Petal Width'].max() + 1, spacing)\nsepal_range = np.arange(df['Sepal Length'].min() - 1, df['Sepal Length'].max() + 1, spacing)\nxx, yy = np.meshgrid(sepal_range, petal_range) # Create the mesh\nprint(xx)\nprint(yy)\npred_x = np.c_[xx.ravel(), yy.ravel()] # Concatenate the results\nprint(pred_x)\npred_y = model.predict(pred_x).reshape(xx.shape)\n# create color maps\ncmap_light = ListedColormap(['#F6A56F', '#6FF6A5', '#A56FF6'])\ncmap_bold = ListedColormap(['#E6640E', '#0EE664', '#640EE6'])\nmarkers = {\n    'Iris-setosa': {'marker': 'x', 'facecolor': 'k', 'edgecolor': 'k'},\n    'Iris-versicolor': {'marker': '*', 'facecolor': 'none', 'edgecolor':'k'},\n    'Iris-virginica': {'marker': 'o', 'facecolor': 'none', 'edgecolor':'k'},\n}\nplt.figure(figsize=(10, 7))\nfor name, group in df.groupby('Species'):\n    species = labelled_species[name]\n    plt.scatter(group['Sepal Length'], group['Petal Width'],\n                c=cmap_bold.colors[name],\n                label=labelled_species[name],\n                marker=markers[species]['marker']\n                )\nplt.title('Species Classification Sepal Length vs Petal Width')\nplt.xlabel('Sepal Length (mm)')\nplt.ylabel('Petal Width (mm)')\nplt.legend()\nplt.savefig('visualized_boundaries.png')\n\n# prediction mesh data\nplt.figure(figsize=(10, 7))\nplt.pcolormesh(xx, yy, pred_y, cmap=cmap_light)\nplt.scatter(df['Sepal Length'], df['Petal Width'], c=df.Species,\n            cmap=cmap_bold, edgecolor='k', s=20)\nplt.title('Species Decision Boundaries Sepal Length vs Petal Width')\nplt.xlabel('Sepal Length (mm)')\nplt.ylabel('Petal Width 
(mm)')\nplt.savefig('decision_boundaries.png')\n","repo_name":"DennisWanjeri/machine_learning","sub_path":"supervised_learning/0x04-classification/6-visualizing_knn_boundaries.py","file_name":"6-visualizing_knn_boundaries.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74462232583","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\ndescription:\nauthor: HuangLei\ndate: 2016-06-06 10:48 PM\n\"\"\"\n\nfrom flask import request\nfrom app import app\nimport hashlib\nfrom xml.etree import ElementTree\nimport sys # sys.setdefaultencoding is cancelled by site.py\n\n\n\n# Blog home page\n@app.route('/weixin', methods=['GET'])\ndef verify_weixin():\n    # Get the input parameters\n    signature = request.args.get('signature')\n    timestamp = request.args.get('timestamp')\n    nonce = request.args.get('nonce')\n    echostr = request.args.get('echostr')\n    # Your own token\n    token = \"iamhuanglei123\" # replace this with the token you entered on the WeChat official account platform\n    # Sort in lexicographic order\n    list = [token, timestamp, nonce]\n    list.sort()\n    sha1 = hashlib.sha1()\n    # map() is lazy in Python 3 and would never run; feed the hash explicitly with bytes\n    for item in list:\n        sha1.update(item.encode('utf-8'))\n    hashcode = sha1.hexdigest()\n    # SHA-1 hashing\n\n    # If the request really comes from WeChat, reply with echostr\n    if hashcode == signature:\n        return echostr\n    return '' # a bool is not a valid Flask response\n\n\n@app.route('/weixin', methods=['POST'])\ndef weixin_post():\n    print(request)\n    str_xml = request.data # the POSTed data\n    xml = ElementTree.fromstring(str_xml) # parse the XML\n    content = xml.find(\"Content\").text # the content the user entered\n    msgType = xml.find(\"MsgType\").text\n    fromUser = xml.find(\"FromUserName\").text\n    toUser = xml.find(\"ToUserName\").text\n    print(\"content:\", content, \" msg:\", msgType, \" from_user:\", fromUser, \" to_user:\", toUser)\n    return content\n","repo_name":"SpeedMe/myblog","sub_path":"app/views/weixin.py","file_name":"weixin.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6392192692","text":"# Word Count PySpark Example\n\nimport os\nfrom pyspark import SparkContext\n\nsc = SparkContext()\n\nlines = sc.textFile(os.environ[\"SPARK_HOME\"]+\"/README.md\")\n\nreadme_line_count=lines.count()\n\nprint(\"Total number of lines in README: \",readme_line_count)\n\npython_lines = lines.filter(lambda line: \"Python\" in line)\n\npython_lines_in_readme = python_lines.count()\n\nprint(\"Total number of python lines: \",python_lines_in_readme)\n\nsc.stop()\n","repo_name":"sharattadimalla/technology-notes","sub_path":"spark/word_count/word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13174844770","text":"import unittest\n\nfrom azure.functions.decorators.constants import EVENT_GRID_TRIGGER, EVENT_GRID\nfrom azure.functions.decorators.core import BindingDirection, \\\n    DataType\nfrom azure.functions.decorators.eventgrid import EventGridTrigger,\\\n    EventGridOutput\n\n\nclass TestEventGrid(unittest.TestCase):\n    def test_event_grid_trigger_valid_creation(self):\n        trigger = EventGridTrigger(name=\"req\",\n                                   data_type=DataType.UNDEFINED,\n                                   dummy_field=\"dummy\")\n\n        self.assertEqual(trigger.get_binding_name(), \"eventGridTrigger\")\n        self.assertEqual(trigger.get_dict_repr(),\n                         {'name': 'req',\n                          \"dataType\": DataType.UNDEFINED,\n                          \"direction\": BindingDirection.IN,\n                          'dummyField': 'dummy',\n                          \"type\": EVENT_GRID_TRIGGER})\n\n    def test_event_grid_output_valid_creation(self):\n        output = EventGridOutput(name=\"res\",\n
                                 topic_endpoint_uri=\"dummy_topic_endpoint_uri\",\n                                 topic_key_setting=\"dummy_topic_key_setting\",\n                                 connection=\"dummy_connection\",\n                                 data_type=DataType.UNDEFINED,\n                                 dummy_field=\"dummy\")\n\n        self.assertEqual(output.get_binding_name(), \"eventGrid\")\n        self.assertEqual(output.get_dict_repr(),\n                         {'connection': 'dummy_connection',\n                          'dataType': DataType.UNDEFINED,\n                          'direction': BindingDirection.OUT,\n                          'dummyField': 'dummy',\n                          'topicEndpointUri': 'dummy_topic_endpoint_uri',\n                          'topicKeySetting': 'dummy_topic_key_setting',\n                          'name': 'res',\n                          'type': EVENT_GRID})\n","repo_name":"Azure/azure-functions-python-library","sub_path":"tests/decorators/test_eventgrid.py","file_name":"test_eventgrid.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":127,"dataset":"github-code","pt":"81"} +{"seq_id":"26563526131","text":"from TensorflowToolbox.utility import file_io\nfrom TensorflowToolbox.utility import image_utility_func\nimport numpy as np\nimport cv2\nimport sys\n\n#dsize = (256, 256)\n\nif __name__ == \"__main__\":\n    if not len(sys.argv) == 3:\n        print(\"Usage: msk_resize.py data_dir size_len\")\n        exit(1)\n    data_dir = sys.argv[1]\n    size_len = int(sys.argv[2])\n    dsize = (size_len, size_len)\n\n    #if len(sys.argv) > 1:\n    #    data_dir = sys.argv[1]\n    #else:\n    #    data_dir = \"../data\"\n\n    mask_dir_list = file_io.get_dir_list(data_dir)\n    for mask_dir in mask_dir_list:\n        mask_list = file_io.get_listfile(mask_dir, \".msk\")\n        for mask in mask_list:\n            mask_img_name = mask.replace(\"msk\", \"png\")\n            mask_img = cv2.imread(mask_img_name, 0)\n            bbox = image_utility_func.get_bbox(mask_img, 127)\n            mask_img = mask_img[bbox[1]: bbox[1] + bbox[3],\n                        bbox[0]: bbox[0] + bbox[2]]\n            mask_img = cv2.resize(mask_img, dsize)\n            mask_img = mask_img / 255\n            mask_img = mask_img.astype(np.float32)\n            mask_img[mask_img > 0] = 1.0\n            np_name = mask_img_name.replace(\".png\", \"_msk_%d.npy\"%size_len)\n\n            mask_img.tofile(np_name)\n","repo_name":"polltooh/video_analysis","sub_path":"data_process_script/msk_resize.py","file_name":"msk_resize.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"70222638026","text":"import sys\nfrom PyQt6.QtWidgets import (\n    QApplication, \n    QWidget, \n    QLineEdit,\n    QPushButton,\n    QVBoxLayout)\n\nclass ventantaPrincipal(QWidget):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        # Set the window title\n        self.setWindowTitle('Qt Signals & Slots - Señales y Ranuras')\n\n        # Create a button widget and connect it to the clicked signal\n        button = QPushButton('¡Click Me!')\n        button.clicked.connect(self.boton_cliqueo)\n\n        layout = QVBoxLayout()\n        self.setLayout(layout)\n\n        layout.addWidget(button)\n\n        # Show the window\n        self.show()\n\n\n    def boton_cliqueo(self):\n        print('Clicked')\n    \n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n\n    # Instantiate the main window class\n    ventana = ventantaPrincipal()\n\n    # Start the event loop\n    sys.exit(app.exec())","repo_name":"CiberNefty/Python_dv","sub_path":"Proyectos/2023/PyQt6-Inicios/Señales-y-Ranuras.py","file_name":"Señales-y-Ranuras.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"11304837557","text":"from django.urls import path\n\nfrom . import views\n\nfrom . 
views import home, start, quiz1, quiz2, quiz3, final, first\n\nurlpatterns = [\n path('',home,name='home'),\n path('start/',views.start,name='start' ),\n path('quiz1/',views.quiz1,name='quiz1' ),\n path('quiz2/',views.quiz2,name='quiz2' ),\n path('quiz3/',views.quiz3,name='quiz3' ),\n path('quiz2/final/',views.final,name='final' ),\n path('quiz2/first/',views.first, name=\"first\"),\n]","repo_name":"Khagesh000/Online-quiz","sub_path":"online/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32813921445","text":"# Approximation\n# f(w, b) = wx + b\nimport numpy as np\nimport pandas as pd\nfrom .regression import LogisticRegression\n\n\ndef run (x: np.ndarray, y: np.ndarray) -> list:\n from sklearn.model_selection import train_test_split\n \n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=1234)\n \n regressor = LogisticRegression(learning_rate=0.01, iterations=1000)\n \n regressor.fit(x_train, y_train)\n\n predictions = regressor.predict(x_test)\n\n regressor.save('model2')\n \n def accuracy (y_test, y_pred):\n accuracy = np.sum(y_test == y_pred) / len(y_test)\n return accuracy\n \n print(\"LR classification accuracy:\", accuracy(y_test, predictions))\n return predictions\n\ndef start(path: str):\n training_data = pd.read_csv(path)\n x = training_data['X'].apply(lambda x: eval(x), 0).tolist()\n X = pd.DataFrame(\n x,\n columns=['a', 'b', 'c', 'd']\n ).to_numpy()\n y = training_data['Y'].astype('int').to_numpy()\n run(X, y)","repo_name":"tkeyext/main","sub_path":"model/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29554609938","text":"import itertools\nimport os\nfrom collections import OrderedDict\nfrom collections.abc import Iterable\n\nimport numpy as np\nimport oneflow as flow\nimport oneflow.typing as oft\n\nimport test_global_storage\n\n\ndef GenCartesianProduct(sets):\n assert isinstance(sets, Iterable)\n for set in sets:\n assert isinstance(set, Iterable)\n if os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"):\n if \"gpu\" in set:\n set.remove(\"gpu\")\n return itertools.product(*sets)\n\n\ndef GenArgList(arg_dict):\n assert isinstance(arg_dict, OrderedDict)\n assert all([isinstance(x, list) for x in arg_dict.values()])\n sets = [arg_set for _, arg_set in arg_dict.items()]\n return GenCartesianProduct(sets)\n\n\ndef GenArgDict(arg_dict):\n return [dict(zip(arg_dict.keys(), x)) for x in GenArgList(arg_dict)]\n\n\nclass Args:\n def __init__(self, flow_args, tf_args=None):\n super().__init__()\n if tf_args is None:\n tf_args = flow_args\n self.flow_args = flow_args\n self.tf_args = tf_args\n\n def __str__(self):\n return \"flow_args={} tf_args={}\".format(self.flow_args, self.tf_args)\n\n def __repr__(self):\n return self.__str__()\n\n\ndef RunOneflowOp(device_type, flow_op, x, flow_args):\n flow.clear_default_session()\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def FlowJob(x: oft.Numpy.Placeholder(x.shape)):\n with flow.scope.placement(device_type, \"0:0\"):\n x += flow.get_variable(\n name=\"v1\",\n shape=(1,),\n dtype=flow.float,\n initializer=flow.zeros_initializer(),\n )\n loss = flow_op(x, *flow_args)\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [0]), 
momentum=0\n ).minimize(loss)\n\n flow.watch_diff(x, test_global_storage.Setter(\"x_diff\"))\n\n return loss\n\n # OneFlow\n y = FlowJob(x).get().numpy()\n x_diff = test_global_storage.Get(\"x_diff\")\n return y, x_diff\n\n\ndef RunTensorFlowOp(tf_op, x, tf_args):\n import tensorflow as tf\n\n gpus = tf.config.experimental.list_physical_devices(\"GPU\")\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n with tf.GradientTape(persistent=True) as tape:\n x = tf.Variable(x)\n y = tf_op(x, *tf_args)\n x_diff = tape.gradient(y, x)\n return y.numpy(), x_diff.numpy()\n\n\ndef CompareOpWithTensorFlow(\n device_type,\n flow_op,\n tf_op,\n input_shape,\n op_args=None,\n input_minval=-10,\n input_maxval=10,\n y_rtol=1e-5,\n y_atol=1e-5,\n x_diff_rtol=1e-5,\n x_diff_atol=1e-5,\n):\n assert device_type in [\"gpu\", \"cpu\"]\n if op_args is None:\n flow_args, tf_args = [], []\n else:\n flow_args, tf_args = op_args.flow_args, op_args.tf_args\n\n x = np.random.uniform(low=input_minval, high=input_maxval, size=input_shape).astype(\n np.float32\n )\n of_y, of_x_diff, = RunOneflowOp(device_type, flow_op, x, flow_args)\n tf_y, tf_x_diff = RunTensorFlowOp(tf_op, x, tf_args)\n\n assert np.allclose(of_y, tf_y, rtol=y_rtol, atol=y_atol)\n assert np.allclose(of_x_diff, tf_x_diff, rtol=x_diff_rtol, atol=x_diff_atol)\n\n\ntype_name_to_flow_type = {\n \"float16\": flow.float16,\n \"float32\": flow.float32,\n \"double\": flow.double,\n \"int8\": flow.int8,\n \"int32\": flow.int32,\n \"int64\": flow.int64,\n \"char\": flow.char,\n \"uint8\": flow.uint8,\n}\n\ntype_name_to_np_type = {\n \"float16\": np.float16,\n \"float32\": np.float32,\n \"double\": np.float64,\n \"int8\": np.int8,\n \"int32\": np.int32,\n \"int64\": np.int64,\n \"char\": np.byte,\n \"uint8\": np.uint8,\n}\n\n\ndef FlattenArray(input_array):\n output_array = list()\n for x in np.nditer(input_array):\n output_array.append(x.tolist())\n return output_array\n\n\ndef Array2Numpy(input_array, target_shape):\n return np.array(input_array).reshape(target_shape, order=\"C\")\n\n\ndef Index2Coordinate(idx, tensor_shape):\n coordinate = []\n tmp = idx\n for i in range(len(tensor_shape) - 1, -1, -1):\n axis_size = tensor_shape[i]\n coor = tmp % axis_size\n coordinate.insert(0, int(coor))\n tmp = (tmp - coor) / axis_size\n return coordinate\n\n\ndef Coordinate2Index(coordinate, tensor_shape):\n if len(coordinate) != len(tensor_shape):\n raise \"wrong coordinate or shape\"\n idx = 0\n for i, coor in enumerate(coordinate):\n size_at_axis = coor\n for j in range(i + 1, len(tensor_shape)):\n size_at_axis *= tensor_shape[j]\n\n idx += size_at_axis\n return idx\n","repo_name":"wanghongsheng01/framework_enflame","sub_path":"oneflow/python/test/ops/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":4691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6519946172","text":"import os\nfrom unittest import TestCase\n\nfrom days.day19 import part1, part2\nfrom test.utils import read_lines\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n\nclass Day19Tests(TestCase):\n def test_part1_test1(self):\n data = read_lines('inputs/day19_test1.txt', '\\n')\n scanners = Day19Tests.preprocess(data)\n self.assertEqual(79, part1(scanners))\n\n def test_part1(self):\n data = read_lines('inputs/day19.txt', '\\n')\n scanners = Day19Tests.preprocess(data)\n self.assertEqual(403, part1(scanners))\n\n def test_part1_test2(self):\n data = 
read_lines('inputs/day19_test1.txt', '\\n')\n scanners = Day19Tests.preprocess(data)\n self.assertEqual(3621, part2(scanners))\n\n def test_part2(self):\n data = read_lines('inputs/day19.txt', '\\n')\n scanners = Day19Tests.preprocess(data)\n self.assertEqual(10569, part2(scanners))\n\n @staticmethod\n def preprocess(data):\n scanners = []\n scanner = []\n for line in data:\n if not line:\n scanners.append(scanner)\n scanner = []\n else:\n if not line.startswith('---'):\n scanner.append(tuple([int(part) for part in line.split(',')]))\n scanners.append(scanner)\n return scanners\n","repo_name":"Ernyoke/advent-of-code-2021","sub_path":"test/test_day19.py","file_name":"test_day19.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"44011899095","text":"import datetime, os, copy, json, zlib, logging\n\nfrom base64 import b64decode\nfrom slack_sdk import WebClient\nfrom slack_constants import SLACK_BLOCK_TYPE\n\n__all__ = [\"AICO_SLACK\", \"AWS_LOG\"]\n\nclass AWS_LOG:\n\n\tdef __init__(self, pEvent: dict):\n\t\tself.log_data = pEvent\n\t\tself.product_cd = \"상품 코드\" + \" / \" + \"상태 코드\"\n\t\tself.slack_channel_nm = \"슬렉 체널명\"\n\t\tself.default_log_link = \"https://{region}.console.aws.amazon.com/cloudwatch/home?region={region}#logsV2:log-groups\".format(\n\t\t\tregion=os.environ.get('DEFAULT_REGION', 'ap-northeast-2'))\n\t\tself.service_info = None\n\n\tdef __aws_decode(self, pLogData: str) -> dict:\n\t\t''' aws log decode '''\n\t\tcompressed_payload = b64decode(pLogData)\n\t\tjson_payload = zlib.decompress(compressed_payload, 16+zlib.MAX_WBITS)\n\t\treturn json.loads(json_payload)\n\n\tdef __revert_url_cd(self, pUrlCd: str) -> str:\n\t\treturn pUrlCd.replace('$','$2524').replace('/','$252F').replace('[','$255B').replace(']','$255D')\n\n\tdef __make_cloudwatch_url(self, pLogGroup: str,pLogEvent: str) -> str:\n\t\t'''cloud watch pretty log'''\n\t\tencoded_log_group=self.__revert_url_cd(pLogGroup)\n\t\tencoded_log_event=self.__revert_url_cd(pLogEvent)\n\n\t\treturn self.default_log_link+\"/log-group/{log_name}/log-events/{events}\".format( \n\t\t\tlog_name=encoded_log_group,\n\t\t\tevents=encoded_log_event)\n\n\tdef __utc_to_kst(self, pDt: datetime):\n\t\t# +9 (kst)\n\t\thours_added = datetime.timedelta(hours = 9)\n\n\t\treturn (pDt + hours_added).strftime('%Y-%m-%d %H:%M:%S KST')\n\n\tdef __revert_subscription_filter_log(self, pLogData: str):\n\t\t\"\"\"error event에서 원하는 파라미터만 추출\"\"\"\n\t\tlog_data = self.__aws_decode(pLogData)\n\n\t\tlog_group = log_data['logGroup']\n\t\tlog_stream = log_data['logStream']\n\t\tlog_dt = self.__utc_to_kst(datetime.datetime.fromtimestamp(log_data['logEvents'][0]['timestamp']/1000)) \n\t\terror_message= '' #logEvents['message'].split('\\n')[0]\n\t\tfor logEvent in log_data['logEvents']:\n\t\t\terror_message += logEvent['message'] #+'\\n'\n\n\t\t\ttry:\n\t\t\t\tlog_message = logEvent['message'].replace('\\\\', '')\n\t\t\t\tlogging.debug(\"log_message : \"+log_message)\n\t\t\t\tif log_message.find('slack_channel_nm') > 0 \\\n\t\t\t\t\tand log_message.find('{\"product_cd\"') > 0:\n\t\t\t\t\tfrom_idx = log_message.find('{\"product_cd\"')\n\t\t\t\t\tend_idx = -1\n\t\t\t\t\tif log_message.find('\",\"errorType\"') > 0:\n\t\t\t\t\t\tend_idx = log_message.find('\",\"errorType\"')\n\t\t\t\t\telif log_message.find('Traceback') > 0:\n\t\t\t\t\t\tend_idx = log_message.find('Traceback')\n\t\t\t\t\taico_base_error_msg = 
json.loads(log_message[from_idx:end_idx])\n\t\t\t\t\t\n\t\t\t\t\tself.product_cd = aico_base_error_msg['product_cd']\n\t\t\t\t\tself.slack_channel_nm = aico_base_error_msg['slack_channel_nm']\n\t\t\t\t\tif 'service_info' in aico_base_error_msg.keys():\n\t\t\t\t\t\tself.service_info = aico_base_error_msg['service_info']\n\t\t\texcept ValueError as e:\n\t\t\t\tcontinue\n\n\t\tlogs_link = self.__make_cloudwatch_url(log_group,log_stream)\n\t\t\n\t\treturn log_group,error_message,log_dt,logs_link\n\n\tdef __revert_alarm_log(self, pLogDict: dict):\n\t\tservice_nm = pLogDict['Subject']\n\t\tlog_dt = self.__utc_to_kst(datetime.datetime.strptime(pLogDict['Timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ')) \n\t\terror_msg = pLogDict['Message']\n\n\t\treturn service_nm, log_dt, error_msg\n\n\tdef get_slack_msgs(self):\n\t\tslack_msgs = []\n\t\t\n\t\tif 'awslogs' in self.log_data and 'data' in self.log_data['awslogs']:\n\t\t\tslack_msg = {}\n\t\t\tlog_group,error_message,log_dt,logs_link = self.__revert_subscription_filter_log(self.log_data[\"awslogs\"][\"data\"])\n\t\t\t\n\t\t\tslack_msg['ERROR_MSG'] = error_message #json.dumps(event, ensure_ascii=False)\n\t\t\tslack_msg['MSG_LINK'] = logs_link\n\t\t\tslack_msg['ERROR_TIME'] = log_dt\n\t\t\tslack_msg['PRODUCT_CODE'] = self.product_cd\n\n\t\t\tif os.environ.get('SERVICE_NM', None):\n\t\t\t\tslack_msg['SERVICE_NM'] = os.environ['SERVICE_NM']\n\t\t\telse:\n\t\t\t\tslack_msg['SERVICE_NM'] = log_group\n\t\t\t\t\n\t\t\tif self.service_info:\n\t\t\t\tslack_msg['SERVICE_INFO'] = self.service_info\n\t\t\t\t#초기화\n\t\t\t\tself.service_info = None\n\n\t\t\tslack_msgs.append(slack_msg)\n\n\t\telif 'Records' in self.log_data and isinstance(self.log_data['Records'], type([])):\n\t\t\tfor record in self.log_data['Records']:\n\t\t\t\tif 'Sns' in record:\n\t\t\t\t\tslack_msg = {}\n\t\t\t\t\tservice_nm, log_dt, error_msg = self.__revert_alarm_log(record['Sns'])\n\t\t\t\t\t\n\t\t\t\t\tslack_msg['ERROR_MSG'] = error_msg #json.dumps(event, ensure_ascii=False)\n\t\t\t\t\tslack_msg['MSG_LINK'] = self.default_log_link\n\t\t\t\t\tslack_msg['ERROR_TIME'] = log_dt\n\t\t\t\t\tslack_msg['PRODUCT_CODE'] = self.product_cd\n\t\t\t\t\tslack_msg['SERVICE_NM'] = service_nm\n\n\t\t\t\t\tslack_msgs.append(slack_msg)\n\t\telse:\n\t\t\tlogging.error('[AWS_LOG][get_slack_msgs] No Log Data')\n\t\t\n\t\treturn slack_msgs\n\nclass AICO_SLACK:\n\tdef __init__(self, pSlackChannelNm: str):\n\t\tself.slack_channel = pSlackChannelNm\n\t\tself.client = WebClient(token=os.environ.get('SLACK_BOT_TOKEN', '토큰 값')) \n\t\tself.format_msg = {\n\t\t\t\"ERROR_MSG\": None,\n\t\t\t\"ERROR_TIME\": None,\n\t\t\t\"MSG_LINK\": None,\n\t\t\t\"PRODUCT_CODE\": None,\n\t\t\t\"SERVICE_NM\": None,\n\t\t}\n\t\n\tdef __init_blocks(self, pMsg: dict) -> list:\n\t\t#Header > Occurred at\n\t\tp_header = copy.deepcopy(SLACK_BLOCK_TYPE.HEADER.value[2])\n\t\tp_header['text']['text'] = p_header['text']['text'].replace(\"{SERVICE_NM}\", pMsg[\"SERVICE_NM\"])\n\t\treturn [\n\t\t\tcopy.deepcopy(SLACK_BLOCK_TYPE.DIVIDER.value[2]),\n\t\t\tp_header\n\t\t]\n\n\tdef __post_message(self, pBlocks: list, pThreadTs: str = None) -> dict:\n\t\tresult = None\n\t\tif pThreadTs is None:\n\t\t\tresult = self.client.chat_postMessage(\n\t\t\t\tchannel=self.slack_channel.value[2], blocks=pBlocks\n\t\t\t)\n\t\telse:\n\t\t\tresult = self.client.chat_postMessage(\n\t\t\t\tchannel=self.slack_channel.value[2], thread_ts=pThreadTs, blocks=pBlocks\n\t\t\t)\n\n\t\treturn result\n\n\tdef __check_pre_message_by_today(self, pMsg: dict) -> str:\n\n\t\tthread_ts = 
None\n\t\thistory = self.client.conversations_history(channel=self.slack_channel.value[2])[\"messages\"]\n\t\ttoday_dt = datetime.date.today().strftime(\"%Y%m%d\")\n\t\tis_breake = False\n\n\t\tfor msg in history:\n\t\t\tif is_breake:\n\t\t\t\tbreak\n\t\t\t\n\t\t\ttry:\n\t\t\t\tmsg_dt = datetime.datetime.fromtimestamp(float(msg[\"latest_reply\"])).strftime(\"%Y%m%d\")\n\t\t\t\tif msg_dt == today_dt:\n\t\t\t\t\tfor block in msg[\"blocks\"]:\n\t\t\t\t\t\ttry: \n\t\t\t\t\t\t\tif \"header\" == block[\"type\"] and pMsg[\"SERVICE_NM\"] in block[\"text\"][\"text\"]:\n\t\t\t\t\t\t\t\tthread_ts = msg[\"ts\"]\n\t\t\t\t\t\t\t\tis_breake = True\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\texcept KeyError as e:\n\t\t\t\t\t\t\tlogging.error(e)\n\t\t\t\t\t\t\tcontinue\n\t\t\texcept KeyError as e:\n\t\t\t\tlogging.error(e)\n\t\t\t\tcontinue\n\t\t\n\t\tif thread_ts is None:\n\t\t\tblocks = self.__init_blocks(pMsg)\n\t\t\tthread_ts = self.__post_message(pBlocks=blocks)[\"ts\"]\n\n\t\treturn thread_ts\n\n\tdef get_format_msg(self) -> dict:\n\t\treturn self.format_msg\n\n\tdef do_message(self, pMsg: dict, pSendThred: bool=True) -> bool:\n\t\tblocks = self.__init_blocks(pMsg)\n\t\t# 쓰레드 메세지 사용유무\n\t\tthread_ts = None\n\t\tif self.slack_channel.value[3] and pSendThred:\n\t\t\tthread_ts = self.__check_pre_message_by_today(pMsg)\n\t\t\tblocks = [\tblocks[0]\t]\n\t\telif self.slack_channel.value[3]:\n\t\t\tself.__check_pre_message_by_today(pMsg)\n\t\t\treturn None\n\n\t\t#Section > Product Code, Occurred time\n\t\tp_st = copy.deepcopy(SLACK_BLOCK_TYPE.SECTION.value[2])\n\t\tif 'PRODUCT_CODE' in pMsg.keys():\n\t\t\tp_st['fields'][0]['text'] = p_st['fields'][0]['text'].replace(\"{PRODUCT_CODE}\", pMsg[\"PRODUCT_CODE\"])\n\t\telse:\n\t\t\tp_st['fields'][0]['text'] = p_st['fields'][0]['text'].replace(\"{PRODUCT_CODE}\", PRODUCT_CODE.none.name)\n\n\t\tif 'MSG_LINK' in pMsg.keys():\n\t\t\tp_st['fields'][1]['text'] = p_st['fields'][1]['text'].replace(\"{ERROR_TIME}\", pMsg[\"ERROR_TIME\"])\n\t\telse:\n\t\t\tdel p_st['fields'][1]\n\t\tblocks.append(p_st)\n\t\t#Button > Link to details\n\t\tif 'MSG_LINK' in pMsg.keys():\n\t\t\tp_bt = copy.deepcopy(SLACK_BLOCK_TYPE.BUTTON.value[2])\n\t\t\tp_bt['accessory']['url'] = p_bt['accessory']['url'].replace(\"{MSG_LINK}\", pMsg[\"MSG_LINK\"])\n\t\t\tblocks.append(p_bt)\n\t\t#Message > service info\n\t\tif 'SERVICE_INFO' in pMsg.keys():\n\t\t\tp_msg = copy.deepcopy(SLACK_BLOCK_TYPE.MESSAGE.value[2])\n\t\t\tp_msg['text']['text'] = p_msg['text']['text'].replace(\"{MESSAGE}\", \"*SERVICE_INFO:*\\n\"+json.dumps(pMsg[\"SERVICE_INFO\"], ensure_ascii=False))\n\t\t\tblocks.append(p_msg)\n\t\t#Message > Error Message\n\t\tif 'ERROR_MSG' in pMsg.keys():\n\t\t\tp_msg = copy.deepcopy(SLACK_BLOCK_TYPE.MESSAGE.value[2])\n\t\t\tp_msg['text']['text'] = p_msg['text']['text'].replace(\"{MESSAGE}\", \"*Reason:*\")\n\t\t\tblocks.append(p_msg)\n\t\t\tp_error_msg = copy.deepcopy(SLACK_BLOCK_TYPE.ERROR_MESSAGE.value[2])\n\t\t\tp_error_msg['elements'][0]['text'] = p_error_msg['elements'][0]['text'].replace(\"{ERROR_MSG}\", pMsg[\"ERROR_MSG\"])\n\t\t\tblocks.append(p_error_msg)\n\n\t\treturn self.__post_message(pBlocks=blocks, pThreadTs=thread_ts)[\"ok\"]\n\n\n","repo_name":"good593/aws-cloudformation","sub_path":"template/sns/slack/slack_sns/main_slack.py","file_name":"main_slack.py","file_ext":"py","file_size_in_byte":8509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"2131213860","text":"import sys\nimport pandas as pd\n\n# read data\nwith 
open('data.txt') as f:\n data = f.readlines()\n\n# process data\npredicate = {'C' : 'is part of', 'P' : 'is involved in', 'F': 'has'}\ntriples = []\n\nfor line in data:\n line = line[:-1]\n element = line.split('\\t')\n gene = element[2]\n terms = element[5].split('|')\n for term in terms:\n triples.append([gene, predicate[element[9]], term])\n\n# save data\npd_data = pd.DataFrame(triples, columns=['Subject', 'Predicate', 'Object'])\npd_data = pd_data.drop_duplicates()\npd_data.to_csv('GO.txt', sep='\\t', index=False)\n","repo_name":"IBPA/KIDS","sub_path":"kg_constructor/data/dataset/GO/create_data.py","file_name":"create_data.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"29833570834","text":"import pandas as pd\nfrom modules.preprocessing import *\n\ndef extract_rains(path, start_date, end_date):\n filename = path\n df = pd.read_csv(filename, index_col = 'timestamp')\n df = df.dropna()\n df.index = pd.DatetimeIndex(df.index)\n if start_date is None:\n start_date = df.index.min()\n if end_date is None:\n end_date = df.index.max()\n df = filter_dates(df, start_date, end_date)\n\n scaler = MinMaxScaler()\n df_scaled = pd.DataFrame(scaler.fit_transform(df), columns=df.columns, index=df.index)\n\n if df.precipitation.iloc[0]>0:\n precipitation = pd.concat([pd.Series({min(df.index)-pd.Timedelta('1s'): 0}),df.precipitation])\n else:\n precipitation = df.precipitation\n\n precipitation.index = pd.to_datetime(precipitation.index)\n df_dates = pd.DataFrame(index = precipitation.index)\n df_dates[\"rain_start\"] = precipitation[(precipitation.shift(-1) > 0) & (precipitation == 0)] # compare current to next\n df_dates[\"rain_stop\"] = precipitation[(precipitation.shift(1) > 0) & (precipitation == 0)] # compare current to prev\n dates_rain_start = pd.Series(df_dates.rain_start.index[df_dates.rain_start.notna()])\n dates_rain_stop = pd.Series(df_dates.rain_stop.index[df_dates.rain_stop.notna()])\n\n # filter light rains\n x = 0.1\n ids = []\n if dates_rain_stop.size < dates_rain_start.size:\n dates_rain_start = dates_rain_start[:-1] # drop last starting date for lists to match in size\n for idx in range(dates_rain_start.size):\n d1 = dates_rain_start[idx]\n d2 = dates_rain_stop[idx]\n if np.max(precipitation.loc[d1:d2]) >= x:\n ids.append(idx)\n dates_rain_start_filtered = dates_rain_start[ids]\n dates_rain_stop_filtered = dates_rain_stop[ids]\n df_res = pd.concat([dates_rain_start_filtered, dates_rain_stop_filtered], axis=1)\n df_res.columns = [\"start\", \"stop\"]\n return df_res\n","repo_name":"MORE-EU/more_api","sub_path":"rains.py","file_name":"rains.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"32165817057","text":"\"\"\"Ansible Galaxy dependencies for lists of collections.\"\"\"\nimport os\n\nfrom molecule import logger\nfrom molecule import util\nfrom molecule.dependency.ansible_galaxy.base import AnsibleGalaxyBase\n\n\nLOG = logger.get_logger(__name__)\n\n\nclass Collections(AnsibleGalaxyBase):\n \"\"\"Collection-specific Ansible Galaxy dependency handling.\"\"\"\n\n FILTER_OPTS = (\"role-file\", \"roles-path\") # type: ignore\n COMMANDS = (\"collection\", \"install\")\n\n @property\n def default_options(self):\n general = super(Collections, self).default_options\n specific = util.merge_dicts(\n general,\n {\n \"requirements-file\": os.path.join(\n 
self._config.scenario.directory, \"collections.yml\"\n ),\n \"collections-path\": os.path.join(\n self._config.scenario.ephemeral_directory, \"collections\"\n ),\n },\n )\n\n return specific\n\n @property\n def default_env(self):\n general = super(Collections, self).default_env\n return util.merge_dicts(\n general, {\"ANSIBLE_COLLECTIONS_PATHS\": self.install_path}\n )\n\n @property\n def install_path(self):\n return self.options[\"collections-path\"]\n\n @property\n def requirements_file(self):\n return self.options[\"requirements-file\"]\n","repo_name":"scorepyo/molecule-yml","sub_path":"molecule/dependency/ansible_galaxy/collections.py","file_name":"collections.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"35304431044","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nHOST = \"localhost\"\nPORT = 4223\nUID = \"XYZ\" # Change XYZ to the UID of your Rotary Encoder Bricklet 2.0\n\nfrom tinkerforge.ip_connection import IPConnection\nfrom tinkerforge.bricklet_rotary_encoder_v2 import BrickletRotaryEncoderV2\n\n# Callback function for count callback\ndef cb_count(count):\n print(\"Count: \" + str(count))\n\nif __name__ == \"__main__\":\n ipcon = IPConnection() # Create IP connection\n re = BrickletRotaryEncoderV2(UID, ipcon) # Create device object\n\n ipcon.connect(HOST, PORT) # Connect to brickd\n # Don't use device before ipcon is connected\n\n # Register count callback to function cb_count\n re.register_callback(re.CALLBACK_COUNT, cb_count)\n\n # Set period for count callback to 1s (1000ms) without a threshold\n re.set_count_callback_configuration(1000, False, \"x\", 0, 0)\n\n input(\"Press key to exit\\n\") # Use raw_input() in Python 2\n ipcon.disconnect()\n","repo_name":"JustinThlk/serverueberwachung","sub_path":"serverueberwachung/source/examples/bricklet/rotary_encoder_v2/example_callback.py","file_name":"example_callback.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"36468660361","text":"from abc import abstractproperty\nfrom tkinter import Tk, Button, Canvas, Label, Menu\nimport random\n\nmw = Tk()\nmw.geometry('700x500+200+50')\nmw.title(\"Invasionneurs de l'espace\")\n\n#Fonctions\n\ndef fNewGame():\n Canevas.delete('all')\n \n\ndef fQuitter():\n Canevas.delete('all')\n mw.destroy()\n \ndef fAPropos():\n print(\"A Propos\")\n\ndef fAffScore():\n print(\"Coucou le score\")\n\ndef fAide():\n print(\"Aide\")\n\n#Fin Fonctions\n\n#Menu\nmenubar = Menu(mw)\nmenufichier = Menu(menubar,tearoff=0)\nmenufichier.add_command(label = \"A propos\",command = fAPropos)\nmenufichier.add_command(label = \"Scores\",command = fAffScore)\nmenubar.add_cascade(label = \"Menu\",menu = menufichier)\nmenuaide = Menu(menubar,tearoff=0)\nmenuaide.add_command(label = \"Aide\",command = fAide)\nmenubar.add_cascade(label = \"Aide\",menu = menuaide)\n\n#Affichage Menu\nmw.config(menu = menubar)\n\n\nLargeur = 600\nHauteur = 500\nCanevas = Canvas(mw, width = Largeur, height = Hauteur, bg='white')\nCanevas.pack(side='bottom',padx = 0, pady = 5)\n\nlabel_score = Label(mw,text=\"Score : \")\nlabel_score.pack(side='left',padx=5,pady=5)\n\nlabel_vies = Label(mw,text=\"Vies : \")\nlabel_vies.pack(side='right',padx=5,pady=5)\n\nbutton_nouveau = Button(mw,text=\"New game\",command=fNewGame)\nbutton_nouveau.place(x=650,y=550)\n\nbutton_quit = 
Button(mw,text=\"Quit\",command=fQuitter)\nbutton_quit.place(x=650,y=550)\n\ndef Cercle(r=10):\n if r= height[stack[-1]]:\n stack.append(i)\n else:\n #如果栈不为空并且当前柱比栈顶柱要低,出栈,更新结果。\n while len(stack) != 0 and cur <= height[stack[-1]]:\n h = height[stack.pop()]\n left = stack[-1] if len(stack)!=0 else -1\n right = i \n ans = max(ans,h*(right-left-1))\n stack.append(i)\n return ans","repo_name":"shiyutang/DL-Prep","sub_path":"04_Algorithms/Leetcode/Monotonic stack.py","file_name":"Monotonic stack.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"zh","doc_type":"code","stars":22,"dataset":"github-code","pt":"81"} +{"seq_id":"32949935917","text":"from __future__ import print_function\nimport pandas as pd\nimport os\nimport logging\nimport argparse\n\n'''\nThis file reads in data related E. coli levels\nin Chicago beaches. It is based on the files\nanalysis.R and split_sheets.R, and is written\nsuch that the dataframe loaded here will match\nthe R dataframe code exactly.\n'''\n\n# This is an adaptation of previous read_data.py so that it runs on Python3\n# Some variable names changed. Notably, Client.ID is now Beach\n# Added day of week and month variables\n# Also adds columns to dataframe:\n# YesterdayEcoli : prior days reading\n# DayBeforeYesterdayEcoli : two days prior reading\n# actual_elevated : where Escherichia_coli >=235\n# predicted_elevated : where Drek_Prediction >=235\n# \n\n\n# TODO: verbose\n# TODO: use multi-level index on date/beach ?\n# TODO: standardize on inplace=True or not inplace\n# TODO: how much consistency do we want between python columns\n# and the R columns?\n# TODO: create better docstrings\n# TODO: remove print statements and the import\n# TODO: loyola/leone the same?\n# TODO: repeats on 2015-06-16 ?\n# and some of 2012?\n# Just check for these everywhere, why is it happening?\n\n\ndef split_sheets(file_name, year, verbose=False):\n '''\n Reads in all sheets of an excel workbook, concatenating\n all of the information into a single dataframe.\n\n The excel files were unfortunately structured such that\n each day had its own sheet.\n '''\n xls = pd.ExcelFile(file_name)\n dfs = []\n standardized_col_names = [\n 'Date', 'Laboratory_ID', 'Beach', 'Reading1',\n 'Reading2', 'Escherichia_coli', 'Units', 'Sample_Collection_Time'\n ]\n\n for i, sheet_name in enumerate(xls.sheet_names):\n if not xls.book.sheet_by_name(sheet_name).nrows:\n # Older versions of ExcelFile.parse threw an error if the sheet\n # was empty, explicitly check for this condition.\n logging.debug('sheet \"{0}\" from {1} is empty'.format(sheet_name,\n year))\n continue\n df = xls.parse(sheet_name)\n\n if i == 0 and len(df.columns) > 30:\n # This is the master/summary sheet\n logging.debug('ignoring sheet \"{0}\" from {1}'.format(sheet_name,\n year))\n continue\n\n if df.index.dtype == 'object':\n # If the first column does not have a label, then the excel\n # parsing engine will helpfully use the first column as\n # the index. This is *usually* helpful, but there are two\n # days when the first column is missing the typical label\n # of 'Laboratory ID'. 
In this case, peel that index off\n # and set its name.\n msg = '1st column in sheet \"{0}\" from {1} is missing title'.format(\n sheet_name, year)\n logging.debug(msg)\n df.reset_index(inplace=True)\n df.columns = ['Laboratory ID'] + df.columns.tolist()[1:]\n\n # Insert name of sheet as first column, the sheet name is the date\n df.insert(0, u'Date', sheet_name)\n\n for c in df.columns.tolist():\n if 'Reading' in c:\n # There are about 10 days that have >2 readings for some reason\n if int(c[8:]) > 2:\n logging.info('sheet \"{0}\" from {1} has >2 readings'.format(\n sheet_name, year)\n )\n df.drop(c, 1, inplace=True)\n\n # Only take the first 8 columns, some sheets erroneously have >8 cols\n df = df.ix[:,0:8]\n\n # Standardize the column names\n df.columns = standardized_col_names\n\n dfs.append(df)\n\n df = pd.concat(dfs)\n\n df.insert(0, u'Year', str(year))\n\n logging.info('Removing data with missing Client ID')\n df.dropna(subset=['Beach'], inplace=True)\n\n return df\n\n\ndef read_holiday_data(file_name, verbose=False):\n df = pd.read_csv(file_name)\n df['Date'] = pd.to_datetime(df['Date'])\n return df\n\n\ndef read_water_sensor_data(verbose=False):\n '''\n Downloads and reads water sensor data from the Chicago data\n portal. Downsamples the readings into the min, mean, and max\n for each day and for each sensor. Each day only has one row,\n with many columns (one column each per sensor per reading per\n type of down-sampling process)\n '''\n url = 'https://data.cityofchicago.org/api/views/qmqz-2xku/rows.csv?accessType=DOWNLOAD'\n water_sensors = pd.read_csv(url)\n url = 'https://data.cityofchicago.org/api/views/g3ip-u8rb/rows.csv?accessType=DOWNLOAD'\n sensor_locations = pd.read_csv(url)\n\n df = pd.merge(water_sensors, sensor_locations,\n left_on='Beach Name', right_on='Sensor Name')\n\n df.drop(['Sensor Type', 'Location'], 1, inplace=True)\n\n # TODO: map sensor to beach ???\n\n df['Beach Name'] = df['Beach Name'].apply(lambda x: x[0:-6])\n\n df['Measurement Timestamp'] = pd.to_datetime(df['Measurement Timestamp'])\n df['Date'] = pd.DatetimeIndex(df['Measurement Timestamp']).normalize()\n df.drop(['Battery Life', 'Measurement Timestamp', 'Measurement Timestamp Label',\n 'Measurement ID', 'Sensor Name'], axis=1, inplace=True)\n\n df_mins = df.groupby(['Beach Name', 'Date'], as_index=False).min()\n df_means = df.groupby(['Beach Name', 'Date'], as_index=False).mean()\n df_maxes = df.groupby(['Beach Name', 'Date'], as_index=False).max()\n df_mins.drop(['Latitude','Longitude'],1,inplace=True)\n df_means.drop(['Latitude','Longitude'],1,inplace=True)\n df_maxes.drop(['Latitude','Longitude'],1,inplace=True)\n\n cols = df_mins.columns.tolist()\n\n def rename_columns(cols, aggregation_type):\n cols = list(map(lambda x: x.replace(' ', '_'), cols))\n for i in range(2,7):\n cols[i] = cols[i] + '_' + aggregation_type\n return cols\n\n df_mins.columns = rename_columns(cols, 'Min')\n df_means.columns = rename_columns(cols, 'Mean')\n df_maxes.columns = rename_columns(cols, 'Max')\n\n df = pd.merge(df_mins, df_means, on=['Beach_Name', 'Date'])\n df = pd.merge(df, df_maxes, on=['Beach_Name', 'Date'])\n\n df = df.pivot(index='Date', columns='Beach_Name')\n df.columns = ['.'.join(col[::-1]).strip() for col in df.columns.values]\n df.reset_index(inplace=True)\n df.columns = ['Full_date'] + list( map(lambda x: x.replace(' ', '_'), df.columns.tolist()[1:]))\n c = df.columns.tolist()\n c[c.index('Full_date')] = 'Date'\n df.columns = c\n\n return df\n\n\ndef read_weather_station_data(verbose=False):\n 
'''\n Downloads and reads weather sensor data from the Chicago data\n portal. Downsamples the readings into the min, mean, and max\n for each day and for each sensor. Each day only has one row,\n with many columns (one column each per sensor per reading per\n type of down-sampling process)\n '''\n url = 'https://data.cityofchicago.org/api/views/k7hf-8y75/rows.csv?accessType=DOWNLOAD'\n weather_sensors = pd.read_csv(url)\n url = 'https://data.cityofchicago.org/api/views/g3ip-u8rb/rows.csv?accessType=DOWNLOAD'\n sensor_locations = pd.read_csv(url)\n\n weather_sensors.columns = map(lambda x: x.replace(' ', '_'),\n weather_sensors.columns.tolist())\n sensor_locations.columns = map(lambda x: x.replace(' ', '_'),\n sensor_locations.columns.tolist())\n sensor_locations.columns = ['Station_Name'] + sensor_locations.columns.tolist()[1:]\n\n df = pd.merge(weather_sensors, sensor_locations, on='Station_Name')\n\n df['Beach'] = df['Station_Name']\n\n df['Date'] = pd.DatetimeIndex(df['Measurement_Timestamp']).normalize()\n\n df.drop(['Measurement_Timestamp_Label', 'Measurement_Timestamp',\n 'Sensor_Type', 'Location', 'Measurement_ID', 'Battery_Life','Station_Name'],\n axis=1, inplace=True)\n\n df_mins = df.groupby(['Beach', 'Date'], as_index=False).min()\n df_means = df.groupby(['Beach', 'Date'], as_index=False).mean()\n df_maxes = df.groupby(['Beach', 'Date'], as_index=False).max()\n\n cols = df_mins.columns.tolist()\n\n def rename_columns(cols, aggregation_type):\n cols = list(map(lambda x: x.replace(' ', '_'), cols))\n for i in range(2,15):\n cols[i] = cols[i] + '_' + aggregation_type\n return cols\n\n df_mins.columns = rename_columns(cols, 'Min')\n df_means.columns = rename_columns(cols, 'Mean')\n df_maxes.columns = rename_columns(cols, 'Max')\n\n\n df = pd.merge(df_mins, df_means, on=['Beach', 'Date'])\n df = pd.merge(df, df_maxes, on=['Beach', 'Date'])\n df.drop(['Latitude_x', 'Latitude_y', 'Longitude_x', 'Longitude_y'], axis=1, inplace=True)\n\n df = df.pivot(index='Date', columns='Beach')\n df.columns = ['.'.join(col[::-1]).strip() for col in df.columns.values]\n df.reset_index(inplace=True)\n df.columns = ['Full_date'] + list( map(lambda x: x.replace(' ', '_'), df.columns.tolist()[1:]))\n c = df.columns.tolist()\n c[c.index('Full_date')] = 'Date'\n df.columns = c\n\n return df\n\n\ndef read_locations(file_name, verbose=False):\n locations = pd.read_csv(file_name)\n return locations\n\n\ndef print_full(x):\n '''\n Helper function to plot the *full* dataframe.\n '''\n pd.set_option('display.max_rows', len(x))\n print(x)\n pd.reset_option('display.max_rows')\n\n\ndef date_lookup(s, verbose=False):\n '''\n This is an extremely fast approach to datetime parsing.\n For large data, the same dates are often repeated. 
Rather than\n re-parse these, we store all unique dates, parse them, and\n use a lookup to convert all dates.\n\n Thanks to fixxxer, found at\n http://stackoverflow.com/questions/29882573\n '''\n dates = {date:pd.to_datetime(date, errors='ignore') for date in s.unique()}\n for date, parsed in dates.items():\n if type(parsed) is not pd.tslib.Timestamp:\n logging.debug('Non-regular date format \"{0}\"'.format(date))\n fmt = '%B %d (%p) %Y'\n dates[date] = pd.to_datetime(date,format=fmt)\n return s.apply(lambda v: dates[v])\n\n\ndef read_data(verbose=False):\n '''\n Read in the excel files for years 2006-2015 found in\n 'data/ChicagoParkDistrict/raw/Standard 18 hr Testing'\n along with drekbeach data.\n\n Also reformats columns in accordance with the transformations\n found in analysis.R\n '''\n\n cpd_data_path = './data/ChicagoParkDistrict/raw/Standard 18 hr Testing/'\n #cpd_data_path = os.path.join(os.path.dirname(__file__), cpd_data_path)\n\n dfs = []\n\n for yr in range(2006,2015):\n dfs.append(split_sheets(cpd_data_path + str(yr) + ' Lab Results.xls', yr))\n dfs.append(split_sheets(cpd_data_path + '2015 Lab Results.xlsx', 2015))\n\n df = pd.concat(dfs)\n\n # Need to reset the index to deal with the repeated concatenations\n df.index = range(0, len(df.index))\n\n # Some records are of the form <1 or >2440\n # Remove the operator and treat the remaining string as the value.\n # Also convert string to float, if possible\n for col in ['Reading1', 'Reading2', 'Escherichia_coli']:\n for i, val in enumerate(df[col].tolist()):\n if isinstance(val, (str,bytes)):\n val = val.replace('<', '').replace('>', '')\n try:\n df.ix[i, col] = float(val)\n except ValueError:\n # Sometimes strings are things like 'Sample Not Received'\n if 'sample' in df.ix[i, col].lower():\n logging.debug('Trying to cast \"{0}\" to numeric'.format(\n df.ix[i, col]\n ))\n else:\n logging.info('Trying to cast \"{0}\" to numeric'.format(\n df.ix[i, col]\n ))\n df.ix[i, col] = float('nan')\n df[col] = df[col].astype('float64')\n # Massage dates, create weekday column\n df.insert(0, 'Full_date', df[['Date', 'Year']].apply(lambda x: ' '.join(x), axis=1).apply(lambda x: x.replace(' (PM)', '') ))\n df['Full_date'] = date_lookup(df['Full_date'])\n df.insert(0, 'Timestamp', pd.to_datetime(df['Full_date'], errors='coerce') )\n months=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']\n df.insert(0, 'Month', df['Timestamp'].dt.month.apply(lambda x: months[int(x)-1]) )\n days=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']\n df.insert(0, 'Weekday', df['Timestamp'].dt.dayofweek.apply(lambda x: days[int(x)]) )\n df.drop(['Date','Timestamp'], axis=1,inplace=True )\n\n # Some header rows were duplicated\n df = df[df['Laboratory_ID'] != u'Laboratory ID']\n # Normalize the beach names\n df['Beach'] = df['Beach'].map(lambda x: x.strip())\n cleanbeachnames = pd.read_csv(cpd_data_path + 'cleanbeachnames.csv')\n cleanbeachnames = dict(zip(cleanbeachnames['Old'], cleanbeachnames['New']))\n # There is one observation that does not have a beach name in the\n # Beach column, remove it.\n df = df[df['Beach'].map(lambda x: x in cleanbeachnames)]\n df['Beach'] = df['Beach'].map(lambda x: cleanbeachnames[x])\n\n # Read in drek beach data\n drek_data_path = './data/DrekBeach/'\n drekdata = pd.read_csv(drek_data_path + 'daily_summaries_drekb.csv')\n drekdata.columns = ['Beach', 'Full_date', 'Drek_Reading','Drek_Prediction', 'Drek_Worst_Swim_Status']\n drekdata['Full_date'] = 
date_lookup(drekdata['Full_date'])\n drekdata['Beach'] = drekdata['Beach'].map(lambda x: x.strip())\n drekdata['Beach'] = drekdata['Beach'].map(lambda x: cleanbeachnames[x])\n\n df = pd.merge(df, drekdata, how='outer', on= ['Beach', 'Full_date'])\n c = df.columns.tolist()\n c[c.index('Full_date')] = 'Date'\n df.columns = c\n\n \n # get rid of some useless columns\n df.drop(['Laboratory_ID','Units','Sample_Collection_Time','Drek_Worst_Swim_Status'], axis=1,inplace=True )\n \n # There was an anamolous reading, the max possible value from the test\n # is around 2420, but one reading was 6488.\n # We need to do the ~(reading 1 > 2500 | reading 2 > 2500) instead of\n # (reading 1 < 2500 & reading 2 < 2500) since the latter returns\n # False if there is a NaN.\n df = df[~((df['Reading1'] > 2500) | (df['Reading2'] > 2500))]\n\n # R code creates a calculated geometric mean column b/c it didn't\n # import the column correctly (it truncated the value). Pandas did\n # import correctly, so no need to create that.\n\n external_data_path = './data/ExternalData/'\n #external_data_path = os.path.join(os.path.dirname(__file__),\n # external_data_path)\n\n holidaydata = read_holiday_data(external_data_path + 'Holidays.csv', verbose)\n # TODO: merge holiday data\n\n watersensordata = read_water_sensor_data(verbose)\n df = pd.merge(df, watersensordata, on='Date', how='outer')\n\n weatherstationdata = read_weather_station_data(verbose)\n df = pd.merge(df, weatherstationdata, on='Date', how='outer')\n\n # TODO: discuss this\n #df.set_index('Date', drop=True, inplace=True)\n\n df['actual_elevated'] = (df['Escherichia_coli']>=235).astype(int)\n df['predicted_elevated'] = (df['Drek_Prediction']>=235).astype(int)\n\n df = df.ix[pd.notnull(df['Beach'])].reset_index()\n df.drop(['index'], axis=1, inplace=True)\n\n # get levels of ecoli from yesterday and day before yesterday\n import datetime as dt\n temp = df.ix[:,['Date','Beach','Escherichia_coli']].reset_index()\n temp['DateTomorrow']= temp['Date'] + dt.timedelta(days=1)\n temp['YesterdayEcoli'] = temp['Escherichia_coli']\n temp.drop(['index','Date','Escherichia_coli'], axis=1, inplace=True)\n df = pd.merge(df, temp, left_on=['Beach', 'Date'], right_on=['Beach', 'DateTomorrow'], how='left')\n df.drop(['DateTomorrow'], 1, inplace=True)\n temp = df.ix[:,['Date','Beach','Escherichia_coli']].reset_index()\n temp['DateTwoDaysAhead']= temp['Date'] + dt.timedelta(days=2)\n temp['DayBeforeYesterdayEcoli'] = temp['Escherichia_coli']\n temp.drop(['index','Date','Escherichia_coli'], axis=1, inplace=True)\n df = pd.merge(df, temp, left_on=['Beach', 'Date'], right_on=['Beach', 'DateTwoDaysAhead'], how='left')\n df.drop(['DateTwoDaysAhead'], 1, inplace=True)\n\n\n return df\n\n\n\n# if __name__ == '__main__':\n # parser = argparse.ArgumentParser(description='Process beach data.')\n # parser.add_argument('-o', '--outfile', nargs=1, type=str,\n # metavar='outfile', help='output CSV filename')\n # parser.add_argument('-v', '--verbose', action='count')\n\n # args = parser.parse_args()\n # print(args)\n\n # #if int(args.verbose) >= 2:\n # # logging.basicConfig(level=logging.DEBUG)\n # #elif int(args.verbose) == 1:\n # # logging.basicConfig(level=logging.INFO)\n # #else:\n # # logging.basicConfig(level=logging.WARNING)\n\n # df = read_data(args.verbose)\n\n # if args.outfile is not None:\n # df.to_csv(args.outfile[0], 
index=False)\n","repo_name":"meineke/bootcamp-messy-data","sub_path":"read_data3.py","file_name":"read_data3.py","file_ext":"py","file_size_in_byte":17105,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"42187285632","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nfrom __future__ import annotations\n\nimport typing\nfrom typing import AsyncIterator\n\nimport grpc\n\nfrom aiokubemq.enums import SubscribeType\nfrom aiokubemq.proto import kubemq_pb2, kubemq_pb2_grpc\nfrom aiokubemq.requests import (\n Event,\n EventStream,\n QueueMessageStream,\n Subscription,\n AckAllQueueMessages,\n QueueMessage,\n QueueMessageBatch,\n ReceiveQueueMessages,\n RPCRequest,\n RPCResponse,\n Request,\n StreamQueueMessagesRequest,\n)\n\n\nclass KubeMQClient:\n def __init__(\n self,\n client_id: str,\n url: str,\n authentication: str = None,\n metadata: dict = None,\n credentials: grpc.ChannelCredentials = None,\n ):\n self.client_id = client_id\n if credentials is None:\n self._channel = grpc.aio.insecure_channel(url)\n else:\n self._channel = grpc.aio.secure_channel(url, credentials=credentials)\n self._stub = kubemq_pb2_grpc.kubemqStub(self._channel)\n self._metadata = {}\n\n if metadata is not None:\n self._metadata.update(metadata)\n\n if authentication is not None:\n self._metadata[\"authorization\"] = authentication\n\n @property\n def metadata(self) -> list[tuple]:\n return [(key, val) for key, val in self._metadata.items()]\n\n async def __aenter__(self):\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self.close()\n\n async def close(self):\n await self._channel.close()\n\n @typing.overload\n async def send(self, request: Event) -> kubemq_pb2.Result:\n ...\n\n @typing.overload\n async def send(\n self, request: Subscription\n ) -> AsyncIterator[typing.Union[kubemq_pb2.Request, kubemq_pb2.EventReceive]]:\n ...\n\n @typing.overload\n async def send(\n self, request: QueueMessageStream\n ) -> AsyncIterator[kubemq_pb2.StreamQueueMessagesResponse]:\n ...\n\n @typing.overload\n async def send(self, request: EventStream) -> AsyncIterator[kubemq_pb2.Result]:\n ...\n\n @typing.overload\n async def send(\n self, request: AckAllQueueMessages\n ) -> kubemq_pb2.AckAllQueueMessagesResponse:\n ...\n\n @typing.overload\n async def send(self, request: QueueMessage) -> kubemq_pb2.SendQueueMessageResult:\n ...\n\n @typing.overload\n async def send(\n self, request: QueueMessageBatch\n ) -> kubemq_pb2.QueueMessagesBatchResponse:\n ...\n\n @typing.overload\n async def send(\n self, request: ReceiveQueueMessages\n ) -> kubemq_pb2.ReceiveQueueMessagesResponse:\n ...\n\n @typing.overload\n async def send(self, request: RPCRequest) -> kubemq_pb2.Response:\n ...\n\n @typing.overload\n async def send(self, request: RPCResponse) -> None:\n ...\n\n async def send(\n self, request: Request\n ) -> typing.Union[\n kubemq_pb2.Result,\n kubemq_pb2.Response,\n kubemq_pb2.ReceiveQueueMessagesResponse,\n kubemq_pb2.QueueMessagesBatchResponse,\n kubemq_pb2.SendQueueMessageResult,\n kubemq_pb2.AckAllQueueMessagesResponse,\n AsyncIterator[\n typing.Union[\n kubemq_pb2.Request,\n kubemq_pb2.EventReceive,\n kubemq_pb2.StreamQueueMessagesResponse,\n kubemq_pb2.Result,\n ]\n ],\n None,\n ]:\n if isinstance(request, EventStream):\n\n async def request_converter(stream):\n async for req in stream:\n req: Event\n yield req.to_lowlevel_object(client_id=self.client_id)\n\n streamer = self._stub.SendEventsStream(\n request_converter(request.stream), 
metadata=self.metadata\n            )\n            return streamer\n        elif isinstance(request, Event):\n            return await self._stub.SendEvent(\n                request.to_lowlevel_object(client_id=self.client_id),\n                metadata=self.metadata,\n            )\n        elif isinstance(request, QueueMessage):\n            return await self._stub.SendQueueMessage(\n                request.to_lowlevel_object(client_id=self.client_id),\n                metadata=self.metadata,\n            )\n        elif isinstance(request, QueueMessageBatch):\n            return await self._stub.SendQueueMessagesBatch(\n                request.to_lowlevel_object(client_id=self.client_id),\n                metadata=self.metadata,\n            )\n        elif isinstance(request, QueueMessageStream):\n\n            async def request_converter(stream):\n                async for req in stream:\n                    req: StreamQueueMessagesRequest\n                    yield req.to_lowlevel_object(client_id=self.client_id)\n\n            streamer = self._stub.StreamQueueMessage(\n                request_converter(request.stream), metadata=self.metadata\n            )\n            return streamer\n        elif isinstance(request, ReceiveQueueMessages):\n            return await self._stub.ReceiveQueueMessages(\n                request.to_lowlevel_object(client_id=self.client_id),\n                metadata=self.metadata,\n            )\n        elif isinstance(request, AckAllQueueMessages):\n            return await self._stub.AckAllQueueMessages(\n                request.to_lowlevel_object(client_id=self.client_id),\n                metadata=self.metadata,\n            )\n        elif isinstance(request, RPCRequest):\n            return await self._stub.SendRequest(\n                request.to_lowlevel_object(client_id=self.client_id),\n                metadata=self.metadata,\n            )\n        elif isinstance(request, RPCResponse):\n            return await self._stub.SendResponse(\n                request.to_lowlevel_object(client_id=self.client_id),\n                metadata=self.metadata,\n            )\n        elif isinstance(request, Subscription):\n            if (\n                request.typ == SubscribeType.Events\n                or request.typ == SubscribeType.EventsStore\n            ):\n                return self._stub.SubscribeToEvents(\n                    request.to_lowlevel_object(client_id=self.client_id),\n                    metadata=self.metadata,\n                )\n            elif (\n                request.typ == SubscribeType.Queries\n                or request.typ == SubscribeType.Commands\n            ):\n                return self._stub.SubscribeToRequests(\n                    request.to_lowlevel_object(client_id=self.client_id),\n                    metadata=self.metadata,\n                )\n            else:\n                raise ValueError(\"Unknown SubscribeType\")\n        else:\n            raise ValueError(\"Unknown request\")\n\n    async def ping(self) -> kubemq_pb2.PingResult:\n        return await self._stub.Ping(kubemq_pb2.Empty())\n","repo_name":"TECHNOFAB11/aiokubemq","sub_path":"aiokubemq/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18454916563","text":"\"\"\"\nDebugging with PDB\nPDB -> Python Debugger\n\nBasic commands for PDB\n\nl lists where we are in the code\nn next line\np prints a variable\nc continues execution - ends the debugging\n\n\n\"\"\"\nimport pdb\n\nnome = 'Angelina'\nsobrenome = 'Jolie'\nnome_completo = nome + ' ' + sobrenome\npdb.set_trace()\ncurso = 'Programacao em Python: Essencial'\nfinal = nome_completo + 'faz curso ' + curso","repo_name":"danipereira261/guppe","sub_path":"debugando_com_pdb.py","file_name":"debugando_com_pdb.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14952493698","text":"from Web.models import *\nfrom django.http import HttpResponse\nfrom django.template import RequestContext, loader\nfrom django.template.loader import render_to_string\nfrom django import template\nfrom django.contrib import auth\nfrom django.contrib.auth import logout\nimport time\nimport os\nfrom Web.constanst 
import APP_GESTION\nfrom django.shortcuts import redirect\nfrom django.contrib import messages\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef MenuUsuario(request):\n    # import pdb; pdb.set_trace()\n    try:\n        usuario = Usuario.objects.get(user=request.user)\n        menus=usuario.perfil.all()[0].menu.all()\n        urls=[id.pk for id in menus] \n        template = 'web/menu/tree_cats.html' \n        folders = Menu.objects.filter(parent = None, aplication=APP_GESTION).order_by('orden')\n        context = {\n            'folders': folders,\n            'urls': urls,\n            'perfil': request.session.get('perfil_id')\n        }\n        rendered = render_to_string(template, context)\n        return rendered\n    except Exception as e:\n        print(e)\n        messages.error(request, str(e))\n        #return HttpResponse('')\n@register.filter(name=\"multiply\")\ndef multiply(value, arg):\n    value_as_int = int(value)\n    return int(value_as_int * arg)\n@register.filter(name=\"has_group\")\ndef has_group(usuario,grupo):\n    return usuario.groups.filter(name__exact=grupo).exists()\n\n@register.filter\ndef listar(n):\n    return [i+1 for i in range(0,n)]\n\n\n@register.simple_tag\ndef UsuarioPerfil(request):\n    try:\n        idUsuario = request.user.id\n        idPerfil = Usuario.objects.get(usuario_id=idUsuario)\n        #idPerfil = request.session['id_perfil']\n        #perfil = Perfil.objects.get(id_perfil = idPerfil.pk)\n        return idPerfil.perfil\n    except:\n        return redirect('Web/logout/')\n\n\n@register.filter()\n@register.simple_tag\ndef InMenu(perfil, id):\n    #import ipdb; ipdb.set_trace()\n    p = perfil.filter(id_perfil=id)\n    return p\n\n\n@register.simple_tag\ndef relative_url(urlencode):\n    url = ''\n    for d in urlencode.items():\n        if d[0] != 'page':\n            url=url + u'&{}={}'.format(d[0],d[1])\n    return url\n\n\n@register.simple_tag\ndef number_page(number, page, pages):\n    return number + ((page - 1) * pages)","repo_name":"huacre1997/Integracion","sub_path":"Web/templatetags/base_extras.py","file_name":"base_extras.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32716064632","text":"from sre_parse import State\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nimport time\r\nimport copy\r\n\r\n\r\n# receives user input and outputs materials needed\r\ndef honing_calc(values, armor_1302, armor_1340, weapon_1302, weapon_1340, items):\r\n    # putting user input into variables\r\n    item = values[0]\r\n    slvl = values[1]\r\n    elvl = values[2]\r\n    num = values[3]\r\n    \r\n    # armor 1302\r\n    if item == 1:\r\n        # getting materials needed for each level from json file into a list for each material type\r\n        armor_silver_1302 = armor_1302[0]\r\n        armor_gold_1302 = armor_1302[1]\r\n        armor_shards_1302 = armor_1302[2]\r\n        armor_fusion_1302 = armor_1302[3]\r\n        armor_crystals_1302 = armor_1302[4]\r\n        armor_leapstones_1302 = armor_1302[5]\r\n\r\n        # creating item_total objects\r\n        for i in range(slvl, elvl):\r\n            name = 'armor_1302 +{} ({})'.format(i+1, num)\r\n            item_mats = item_totals(name, armor_silver_1302[i] * num, armor_gold_1302[i] * num, armor_shards_1302[i] * num, armor_fusion_1302[i] * num, \r\n                                    0, armor_crystals_1302[i] * num, 0, armor_leapstones_1302[i] * num, 0)\r\n            items.append(item_mats)\r\n        \r\n        # calculating total materials\r\n        items[0].silver += sum(armor_silver_1302[slvl:elvl]) * num\r\n        items[0].gold += sum(armor_gold_1302[slvl:elvl]) * num\r\n        items[0].shards += sum(armor_shards_1302[slvl:elvl]) * num\r\n        items[0].sfusion += sum(armor_fusion_1302[slvl:elvl]) * num\r\n        items[0].gcrystals += sum(armor_crystals_1302[slvl:elvl]) * 
num\r\n        items[0].hleapstones += sum(armor_leapstones_1302[slvl:elvl]) * num\r\n\r\n\r\n    # armor 1340\r\n    elif item == 2:\r\n        armor_silver_1340 = armor_1340[0]\r\n        armor_gold_1340 = armor_1340[1]\r\n        armor_shards_1340 = armor_1340[2]\r\n        armor_fusion_1340 = armor_1340[3]\r\n        armor_crystals_1340 = armor_1340[4]\r\n        armor_leapstones_1340 = armor_1340[5]\r\n\r\n        for i in range(slvl, elvl):\r\n            name = 'armor_1340 +{} ({})'.format(i+1, num)\r\n            item_mats = item_totals(name, armor_silver_1340[i] * num, armor_gold_1340[i] * num, armor_shards_1340[i] * num, 0, armor_fusion_1340[i] * num, \r\n                                    armor_crystals_1340[i] * num, 0, 0, armor_leapstones_1340[i] * num)\r\n            items.append(item_mats)\r\n\r\n        items[0].silver += sum(armor_silver_1340[slvl:elvl]) * num\r\n        items[0].gold += sum(armor_gold_1340[slvl:elvl]) * num\r\n        items[0].shards += sum(armor_shards_1340[slvl:elvl]) * num\r\n        items[0].bfusion += sum(armor_fusion_1340[slvl:elvl]) * num\r\n        items[0].gcrystals += sum(armor_crystals_1340[slvl:elvl]) * num\r\n        items[0].ghleapstones += sum(armor_leapstones_1340[slvl:elvl]) * num\r\n\r\n    # weapon 1302\r\n    elif item == 3:\r\n        weapon_silver_1302 = weapon_1302[0]\r\n        weapon_gold_1302 = weapon_1302[1]\r\n        weapon_shards_1302 = weapon_1302[2]\r\n        weapon_fusion_1302 = weapon_1302[3]\r\n        weapon_crystals_1302 = weapon_1302[4]\r\n        weapon_leapstones_1302 = weapon_1302[5]\r\n\r\n        for i in range(slvl, elvl):\r\n            name = 'weapon_1302 +{} ({})'.format(i+1, num)\r\n            item_mats = item_totals(name, weapon_silver_1302[i] * num, weapon_gold_1302[i] * num, weapon_shards_1302[i] * num, weapon_fusion_1302[i] * num, \r\n                                    0, 0, weapon_crystals_1302[i] * num, weapon_leapstones_1302[i] * num, 0)\r\n            items.append(item_mats)\r\n\r\n        items[0].silver += sum(weapon_silver_1302[slvl:elvl]) * num\r\n        items[0].gold += sum(weapon_gold_1302[slvl:elvl]) * num\r\n        items[0].shards += sum(weapon_shards_1302[slvl:elvl]) * num\r\n        items[0].sfusion += sum(weapon_fusion_1302[slvl:elvl]) * num\r\n        items[0].dcrystals += sum(weapon_crystals_1302[slvl:elvl]) * num\r\n        items[0].hleapstones += sum(weapon_leapstones_1302[slvl:elvl]) * num\r\n\r\n    # weapon 1340\r\n    else:\r\n        weapon_silver_1340 = weapon_1340[0]\r\n        weapon_gold_1340 = weapon_1340[1]\r\n        weapon_shards_1340 = weapon_1340[2]\r\n        weapon_fusion_1340 = weapon_1340[3]\r\n        weapon_crystals_1340 = weapon_1340[4]\r\n        weapon_leapstones_1340 = weapon_1340[5]\r\n\r\n        for i in range(slvl, elvl):\r\n            name = 'weapon_1340 +{} ({})'.format(i+1, num)\r\n            item_mats = item_totals(name, weapon_silver_1340[i] * num, weapon_gold_1340[i] * num, weapon_shards_1340[i] * num, 0, weapon_fusion_1340[i] * num, \r\n                                    0, weapon_crystals_1340[i] * num, 0, weapon_leapstones_1340[i] * num)\r\n            items.append(item_mats)\r\n        \r\n        items[0].silver += sum(weapon_silver_1340[slvl:elvl]) * num\r\n        items[0].gold += sum(weapon_gold_1340[slvl:elvl]) * num\r\n        items[0].shards += sum(weapon_shards_1340[slvl:elvl]) * num\r\n        items[0].bfusion += sum(weapon_fusion_1340[slvl:elvl]) * num\r\n        items[0].dcrystals += sum(weapon_crystals_1340[slvl:elvl]) * num\r\n        items[0].ghleapstones += sum(weapon_leapstones_1340[slvl:elvl]) * num\r\n\r\n\r\n# get mats from microservice\r\ndef get_mats(item, mat):\r\n    f = open(\"./text_files/list.txt\", \"w\")\r\n    f.write(\"1\\n\")\r\n    f.write(\"./text_files/materials.json\\n\")\r\n    f.write(item + \"\\n\")\r\n    f.write(mat)\r\n    f.close()\r\n    time.sleep(2)\r\n    f = open(\"./text_files/list.txt\", \"r\")\r\n    mats = f.readline()\r\n    # if microservice is not running fill arrays with 0\r\n    if mats == \"1\\n\":\r\n        values = [0] * 20\r\n    
else:\r\n print(\"Grabbing \" + mat)\r\n temp = mats.split(\",\")\r\n values = [int(i) for i in temp]\r\n f.close()\r\n return values\r\n\r\n\r\n# get mats into arrays\r\ndef get_mats_r():\r\n print(\"Grabbing mats...\")\r\n print(\"Armor 1302...\")\r\n # get mats into lists\r\n armor_1302 = []\r\n armor_1302.append(get_mats('armor1302', 'silver'))\r\n armor_1302.append(get_mats('armor1302', 'gold'))\r\n armor_1302.append(get_mats('armor1302', 'shards'))\r\n armor_1302.append(get_mats('armor1302', 'fusion'))\r\n armor_1302.append(get_mats('armor1302', 'crystals'))\r\n armor_1302.append(get_mats('armor1302', 'leapstones'))\r\n\r\n print(\"Armor 1340...\")\r\n # get mats into lists\r\n armor_1340 = []\r\n armor_1340.append(get_mats('armor1340', 'silver'))\r\n armor_1340.append(get_mats('armor1340', 'gold'))\r\n armor_1340.append(get_mats('armor1340', 'shards'))\r\n armor_1340.append(get_mats('armor1340', 'fusion'))\r\n armor_1340.append(get_mats('armor1340', 'crystals'))\r\n armor_1340.append(get_mats('armor1340', 'leapstones'))\r\n\r\n print(\"Weapon 1302...\")\r\n # get mats into lists\r\n weapon_1302 = []\r\n weapon_1302.append(get_mats('weapon1302', 'silver'))\r\n weapon_1302.append(get_mats('weapon1302', 'gold'))\r\n weapon_1302.append(get_mats('weapon1302', 'shards'))\r\n weapon_1302.append(get_mats('weapon1302', 'fusion'))\r\n weapon_1302.append(get_mats('weapon1302', 'crystals'))\r\n weapon_1302.append(get_mats('weapon1302', 'leapstones'))\r\n\r\n print(\"Weapon 1340...\")\r\n # get mats into lists\r\n weapon_1340 = []\r\n weapon_1340.append(get_mats('weapon1340', 'silver'))\r\n weapon_1340.append(get_mats('weapon1340', 'gold'))\r\n weapon_1340.append(get_mats('weapon1340', 'shards'))\r\n weapon_1340.append(get_mats('weapon1340', 'fusion'))\r\n weapon_1340.append(get_mats('weapon1340', 'crystals'))\r\n weapon_1340.append(get_mats('weapon1340', 'leapstones'))\r\n\r\n return [armor_1302, armor_1340, weapon_1302, weapon_1340]\r\n\r\n\r\n\r\nclass item_totals:\r\n name = ''\r\n silver = 0\r\n gold = 0\r\n shards = 0\r\n sfusion = 0\r\n bfusion = 0\r\n gcrystals = 0\r\n dcrystals = 0\r\n hleapstones = 0\r\n ghleapstones = 0\r\n \r\n def __init__(self, name, silver, gold, shards, sfusion, bfusion, gcrystals, dcrystals, hleapstones, ghleapstones):\r\n self.name = name\r\n self.silver = silver\r\n self.gold = gold\r\n self.shards = shards\r\n self.sfusion = sfusion\r\n self.bfusion = bfusion\r\n self.gcrystals = gcrystals\r\n self.dcrystals = dcrystals\r\n self.hleapstones = hleapstones\r\n self.ghleapstones = ghleapstones\r\n\r\n\r\nif __name__ == '__main__': \r\n item_mats_init = get_mats_r()\r\n armor_1302 = item_mats_init[0]\r\n armor_1340 = item_mats_init[1]\r\n weapon_1302 = item_mats_init[2]\r\n weapon_1340 = item_mats_init[3]\r\n\r\n # current state of materials\r\n item_list = []\r\n u_item_list = []\r\n totals = item_totals(\"Total\", 0, 0, 0, 0, 0, 0, 0, 0, 0)\r\n u_totals = item_totals(\"Total\", 0, 0, 0, 0, 0, 0, 0, 0, 0)\r\n item_list.append(totals)\r\n u_item_list.append(u_totals)\r\n\r\n def reset():\r\n for i in my_game.get_children():\r\n my_game.delete(i)\r\n\r\n global item_list\r\n global u_item_list\r\n\r\n u_item_list = copy.deepcopy(item_list)\r\n\r\n item_list = []\r\n totals = item_totals(\"Total\", 0, 0, 0, 0, 0, 0, 0, 0, 0)\r\n item_list.append(totals)\r\n\r\n undo_button['state'] = ACTIVE\r\n\r\n my_game.grid(row=3, column=5)\r\n \r\n item = \"\"\r\n def add():\r\n for i in my_game.get_children():\r\n my_game.delete(i)\r\n\r\n global item_list\r\n global 
u_item_list\r\n\r\n u_item_list = copy.deepcopy(item_list)\r\n\r\n values=[item, int(slvl), int(elvl_entry.get()), int(count_entry.get())]\r\n honing_calc(values, armor_1302, armor_1340, weapon_1302, weapon_1340, item_list)\r\n\r\n undo_button['state'] = ACTIVE\r\n\r\n for i in range(len(item_list)):\r\n my_game.insert(parent='',index='end',iid=i,text='',\r\n values=(item_list[i].name, item_list[i].silver, item_list[i].gold, item_list[i].shards, \r\n item_list[i].sfusion, item_list[i].bfusion, item_list[i].gcrystals, \r\n item_list[i].dcrystals, item_list[i].hleapstones, item_list[i].ghleapstones))\r\n\r\n my_game.grid(row=3, column=5)\r\n\r\n\r\n def undo():\r\n def yes():\r\n warning.destroy()\r\n for i in my_game.get_children():\r\n my_game.delete(i)\r\n\r\n global item_list\r\n global u_item_list\r\n\r\n item_list = copy.deepcopy(u_item_list)\r\n\r\n undo_button['state'] = DISABLED\r\n\r\n for i in range(len(item_list)):\r\n my_game.insert(parent='',index='end',iid=i,text='',\r\n values=(item_list[i].name, item_list[i].silver, item_list[i].gold, item_list[i].shards, \r\n item_list[i].sfusion, item_list[i].bfusion, item_list[i].gcrystals, \r\n item_list[i].dcrystals, item_list[i].hleapstones, item_list[i].ghleapstones))\r\n \r\n warning = Toplevel(ws)\r\n warning.geometry(\"360x100\")\r\n warning.title(\"Warning!\")\r\n Label(warning, text=\"There is no redo option. Would you like to continue with undo?\").grid(row=0, column=0)\r\n Button(warning, text=\"Undo\", command=yes).grid(row=1, column=0)\r\n Button(warning, text=\"Cancel\", command=warning.destroy).grid(row=2, column=0)\r\n\r\n my_game.grid(row=3, column=5)\r\n warning.mainloop()\r\n\r\n\r\n def delete():\r\n global item_list\r\n global u_item_list\r\n\r\n u_item_list = copy.deepcopy(item_list)\r\n\r\n selected_item = my_game.focus()\r\n select = my_game.item(selected_item, \"values\")\r\n del_name = select[0]\r\n\r\n for i in range(1, len(item_list)):\r\n if item_list[i].name == del_name:\r\n item_list[0].silver -= item_list[i].silver\r\n item_list[0].gold -= item_list[i].gold\r\n item_list[0].shards -= item_list[i].shards\r\n item_list[0].sfusion -= item_list[i].sfusion\r\n item_list[0].bfusion -= item_list[i].bfusion\r\n item_list[0].gcrystals -= item_list[i].gcrystals\r\n item_list[0].dcrystals -= item_list[i].dcrystals\r\n item_list[0].hleapstones -= item_list[i].hleapstones\r\n item_list[0].ghleapstones -= item_list[i].ghleapstones\r\n item_list.pop(i)\r\n break \r\n \r\n undo_button['state'] = ACTIVE\r\n\r\n for i in my_game.get_children():\r\n my_game.delete(i)\r\n\r\n for i in range(len(item_list)):\r\n my_game.insert(parent='',index='end',iid=i,text='',\r\n values=(item_list[i].name, item_list[i].silver, item_list[i].gold, item_list[i].shards, \r\n item_list[i].sfusion, item_list[i].bfusion, item_list[i].gcrystals, \r\n item_list[i].dcrystals, item_list[i].hleapstones, item_list[i].ghleapstones))\r\n\r\n my_game.grid(row=3, column=5)\r\n\r\n\r\n def item_select(e):\r\n global item\r\n options_1302 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]\r\n options_1340 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]\r\n options_armor = [1, 2, 3, 4, 5]\r\n options_weapons = [1]\r\n eoptions_1302 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\r\n eoptions_1340 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]\r\n\r\n item_option = item_entry.get()\r\n if item_option == \"Armor 1302\":\r\n item = 1\r\n if item_option == \"Armor 1340\":\r\n item = 2\r\n if item_option 
== \"Weapon 1302\":\r\n            item = 3\r\n        if item_option == \"Weapon 1340\":\r\n            item = 4\r\n        \r\n        if item_option == \"Armor 1302\" or item_option == \"Weapon 1302\":\r\n            slvl_entry.config(value=options_1302)\r\n            elvl_entry.config(value=eoptions_1302)\r\n\r\n        else:\r\n            slvl_entry.config(value=options_1340)\r\n            elvl_entry.config(value=eoptions_1340)\r\n\r\n        if item_option == \"Armor 1302\" or item_option == \"Armor 1340\":\r\n            count_entry.config(value=options_armor)\r\n\r\n        else:\r\n            count_entry.config(value=options_weapons)\r\n\r\n        slvl_entry.current(0)\r\n        elvl_entry.current(0)\r\n        count_entry.current(0)\r\n    \r\n    slvl = 0\r\n    def slvl_select(e):\r\n        global slvl\r\n        elvl = []\r\n        item = item_entry.get()\r\n        if item == \"Armor 1302\" or item == \"Weapon 1302\":\r\n            limit = 16\r\n        else:\r\n            limit = 21\r\n        slvl = slvl_entry.get()\r\n        for i in range(int(slvl) + 1, limit):\r\n            elvl.append(i)\r\n        elvl_entry.config(value=elvl)\r\n        elvl_entry.current(0)\r\n\r\n\r\n    # initialize gui\r\n    ws = Tk()\r\n    ws.title('Honing Calculator')\r\n    ws.geometry('933x879')\r\n    ws['bg'] = '#AC99F2'\r\n\r\n    game_frame = Frame(ws)\r\n    game_frame.grid(row=3, column=0, columnspan=10)\r\n\r\n    item_options = [\"Armor 1302\", \"Armor 1340\", \"Weapon 1302\", \"Weapon 1340\"]\r\n\r\n    item_entry_label = Label(ws, text=\"Item\")\r\n    item_entry_label.grid(row=0, column=0, sticky=NSEW)\r\n    item_entry = ttk.Combobox(ws, value=item_options)\r\n    item_entry.current(0)\r\n    item_entry.grid(row=0, column=1, sticky=NSEW)\r\n    item_entry.bind(\"<<ComboboxSelected>>\", item_select)\r\n\r\n    slvl_entry_label = Label(ws, text=\"Starting Level\")\r\n    slvl_entry_label.grid(row=0, column=2, sticky=NSEW)\r\n    slvl_entry = ttk.Combobox(ws, value=[\" \"])\r\n    slvl_entry.current(0)\r\n    slvl_entry.grid(row=0, column =3, sticky=NSEW)\r\n    slvl_entry.bind(\"<<ComboboxSelected>>\", slvl_select)\r\n\r\n    elvl_entry_label = Label(ws, text=\"End Level\")\r\n    elvl_entry_label.grid(row=0, column=4, sticky=NSEW)\r\n    elvl_entry = ttk.Combobox(ws, value=[\" \"])\r\n    elvl_entry.current(0)\r\n    elvl_entry.grid(row=0, column =5, sticky=NSEW)\r\n\r\n    count_entry_label = Label(ws, text=\"Count\")\r\n    count_entry_label.grid(row=0, column=6, sticky=NSEW)\r\n    count_entry = ttk.Combobox(ws, value=[\" \"])\r\n    count_entry.current(0)\r\n    count_entry.grid(row=0, column =7, sticky=NSEW)\r\n\r\n    add_button = Button(ws, text=\"Add\", command=add)\r\n    add_button.grid(row=0, column=8, sticky=NSEW)\r\n\r\n    undo_button = Button(ws, text=\"Undo\", command=undo, state=DISABLED)\r\n    undo_button.grid(row=1, column=0, sticky=NSEW)\r\n\r\n    reset_button = Button(ws, text=\"Reset\", command=reset)\r\n    reset_button.grid(row=1, column=1, sticky=NSEW)\r\n\r\n    delete_button = Button(ws,text=\"Delete\", command=delete)\r\n    delete_button.grid(row=1, column=2, sticky=NSEW)\r\n\r\n    my_game = ttk.Treeview(game_frame, selectmode=\"browse\", height=40)\r\n\r\n    my_game['columns'] = ('Item', 'Silver', 'Gold', 'Honing Shards', 'Simple Oreha Fusion Materials', 'Basic Oreha Fusion Materials',\r\n                          'Guardian Stone Crystals', 'Destruction Stone Crystals', 'Honor Leapstones', 'Great Honor Leapstones')\r\n\r\n    my_game.column(\"#0\", width=0, stretch=NO)\r\n    my_game.column(\"Item\",anchor=CENTER, width=200)\r\n    my_game.column(\"Silver\",anchor=CENTER,width=80)\r\n    my_game.column(\"Gold\",anchor=CENTER,width=80)\r\n    my_game.column(\"Honing Shards\",anchor=CENTER,width=80)\r\n    my_game.column(\"Simple Oreha Fusion Materials\",anchor=CENTER,width=80)\r\n    my_game.column(\"Basic Oreha Fusion Materials\",anchor=CENTER, width=80)\r\n    my_game.column(\"Guardian 
Stone Crystals\",anchor=CENTER,width=80)\r\n my_game.column(\"Destruction Stone Crystals\",anchor=CENTER,width=80)\r\n my_game.column(\"Honor Leapstones\",anchor=CENTER,width=80)\r\n my_game.column(\"Great Honor Leapstones\",anchor=CENTER,width=90)\r\n\r\n my_game.heading(\"#0\",text=\"\",anchor=CENTER)\r\n my_game.heading(\"Item\",text=\"Item\",anchor=CENTER)\r\n my_game.heading(\"Silver\",text=\"Silver\",anchor=CENTER)\r\n my_game.heading(\"Gold\",text=\"Gold\",anchor=CENTER)\r\n my_game.heading(\"Honing Shards\",text=\"Shards\",anchor=CENTER)\r\n my_game.heading(\"Simple Oreha Fusion Materials\",text=\"Simple Oreha\",anchor=CENTER)\r\n my_game.heading(\"Basic Oreha Fusion Materials\",text=\"Basic Oreha\",anchor=CENTER)\r\n my_game.heading(\"Guardian Stone Crystals\",text=\"Guardian\",anchor=CENTER)\r\n my_game.heading(\"Destruction Stone Crystals\",text=\"Destruction\",anchor=CENTER)\r\n my_game.heading(\"Honor Leapstones\",text=\"HLeapstones\",anchor=CENTER)\r\n my_game.heading(\"Great Honor Leapstones\",text=\"GHLeapstones\",anchor=CENTER)\r\n\r\n my_game.grid(row=3, column=5)\r\n\r\n ws.mainloop()\r\n","repo_name":"SolimanJoey/Lost-Ark-Honing-Calculator","sub_path":"honing_calc_v7.py","file_name":"honing_calc_v7.py","file_ext":"py","file_size_in_byte":18045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15263441556","text":"import argparse\nimport tensorflow as tf\n\nfrom train import train\nfrom bayes_opt import BayesianOptimization\n\nparam_ranges = {\n 'lr_0': (1, 10),\n 'lr_1': (-6, -3),\n 'T_0': (1, 10),\n 'T_1': (-3, 2),\n 'alpha': (0., 1.),\n 'rho': (0., 1.)\n}\n\ndef training_wrapper(meta_args):\n def inner_training_wrapper(lr_0, lr_1, T_0, T_1, alpha, rho) -> float:\n lr = lr_0 * 10**int(lr_1)\n T = T_0 * 10**int(T_1)\n print('arguments LR', lr, 'T', T, 'alpha', alpha, 'rho', rho)\n setattr(meta_args, 'layers', 3)\n setattr(meta_args, 'nodes', 256)\n setattr(meta_args, 'lr', lr)\n setattr(meta_args, 'T', T)\n setattr(meta_args, 'alpha', alpha)\n setattr(meta_args, 'rho', rho)\n\n loss = train(meta_args)\n tf.keras.backend.clear_session()\n return -loss\n\n xgbBO = BayesianOptimization(inner_training_wrapper, param_ranges)\n xgbBO.maximize(n_iter=80, init_points=15, acq='ei')\n print('BEST PARAMS', xgbBO.max['params'])\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--path', default='experiments', type=str, help='path where to store the results')\n\nparser.add_argument('--network', default='fc', type=str, help='type of network')\n\nparser.add_argument('--optimizer', default='adam', type=str, help='type of optimizer')\nparser.add_argument('--patience', default=3, type=int, help='how many evaluations without improvement to wait before reducing learning rate')\nparser.add_argument('--factor', default=.1, type=float, help='multiplicative factor by which to reduce the learning rate')\n\nparser.add_argument('--task', default='helmholtz', type=str, help='type of task to fit')\nparser.add_argument('--update_rule', default='relobalo', type=str, help='type of balancing')\nparser.add_argument('--aggregate_boundaries', action='store_true', help='aggregate all boundary terms into one before balancing')\n\nparser.add_argument('--epochs', default=100000, type=int, help='number of epochs')\nparser.add_argument('--resample', action='store_true', help='resample datapoints or keep them fixed')\nparser.add_argument('--batch_size', default=1024, type=int, help='number of sampled points in a 
batch')\nparser.add_argument('--verbose', action='store_true', help='print progress to terminal')\n\nargs = parser.parse_args()\ntraining_wrapper(args)\n","repo_name":"rbischof/relative_balancing","sub_path":"src/bayesian_optimization.py","file_name":"bayesian_optimization.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"81"} +{"seq_id":"34479811466","text":"\n\n\ngrossaries= [[\"banana\",\"oranges\",\"mangoes\"], [\"bread\",\"egg\",\"tomatoes\"], [\"rice\", \"beans\", \"yams\" ]]\nfor i in range(0, len(grossaries)):\n    i-=1\n    for j in range(0, len(grossaries)):\n        j-=1\n\n        for ingredients in grossaries:\n            # for ingredients1 in grossaries:\n            print(grossaries[j][i])\n\n        break\n","repo_name":"everybees/python_with_cohorts","sub_path":"nine/maryjane/grossary.py","file_name":"grossary.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"14584297255","text":"import numpy as np\nimport torch as th\nimport gymnasium as gym\nimport pytorch_kinematics as pk\n\n\nclass BaseSensor():\n    \"\"\"\n    The base class for Sensor: y = g(x,w)\n    \"\"\"\n    def __init__(self, input_dim:int) -> None:\n        self.input_dim = input_dim\n        self.output_dim = self.__call__(x=th.zeros((1,input_dim))).shape[2]\n        pass\n    \n    def __call__(self, x:th.Tensor) -> th.Tensor:\n        \"\"\" Note that x.shape = [n, input_dim] \"\"\"\n        return self.sensor_sim(x, sim_num=1) \n\n    def sensor_sim(self, x:th.Tensor, sim_num:int=int(10e4)) -> th.Tensor:\n        \"\"\" Return output (th.Tensor): The output of the (noisy) sensor.\n        Note that x.shape = [n, input_dim], y.shape = [n, sim_num, output_dim]\n        :param x (th.Tensor): The true state.\n        :param sim_num (int): repeat the sensing sim_num times.\n        \"\"\"\n        output_dim = 1\n        return th.zeros((len(x),sim_num,output_dim))\n\nclass LinearGaussianSensor(BaseSensor):\n    '''\n    The sensor model:\n        y = Cx + Gw, w ~ N(0,1)\n    ''' \n    def __init__(self, C:np.ndarray, G:np.ndarray):\n        self.G = th.tensor(G, dtype=th.float32)\n        self.C = th.tensor(C, dtype=th.float32)\n        self.n_w = self.G.shape[0]\n        super().__init__(input_dim=self.C.shape[1]) \n\n    def sensor_sim(self, x:th.Tensor, sim_num:int=int(10e4))->th.Tensor:\n        '''\n        As we know the model of the sensor and the distribution of noise\n        w, then we can obtain y = Cx + G*w, w ~ N(0,1) by simulation\n\n        :param sim_num : how many times to repeat the simulation.\n        '''\n        x = x.unsqueeze(dim=1).transpose(dim0=1,dim1=2)\n        w = th.randn(size=(sim_num,len(x),self.n_w,1),dtype=th.float32)\n        return (self.C.to(x.device) @ x + self.G.to(x.device) @ w.to(x.device)).transpose(dim0=0,dim1=1)\n\n\nclass ForwardKinematicsGaussianSensor(BaseSensor):\n    '''\n    The sensor model:\n        y = FK(x) + G*w, w ~ N(0,1)\n    '''\n    def __init__(self, IsRot=False):\n        self.IsRot = IsRot\n        if IsRot:\n            self.G = th.tensor([0.1,0.1,0.1,0.,0.,0.,0.], dtype=th.float32)\n        else:\n            self.G = th.ones((3,), dtype=th.float32)*0.05 \n        self.robot = pk.build_serial_chain_from_urdf(\n            open('./myenv/envs/mecharm/mecharm_pi.urdf').read(),end_link_name='link6')\n        self.n_w = self.G.shape[0]\n        input_dim = len(self.robot.get_joint_parameter_names())\n        super().__init__(input_dim=input_dim)\n\n    def sensor_sim(self, x:th.Tensor, sim_num:int=int(10e4))->th.Tensor:\n        '''\n        As we know the model of the sensor and the distribution of noise\n        w, then we can obtain y = FK(x) + G*w, w ~ N(0,1) by simulation\n        y.shape = (len(x),sim_num,output_dim)\n\n        :param sim_num : how many 
times to repeat the simulation.\n        '''\n        self.robot.to(dtype=x.dtype,device=x.device)\n        fk = self.robot.forward_kinematics(x)\n\n        m = fk.get_matrix()\n        pos = m[:, :3, 3]\n        w = th.randn(size=(sim_num,len(x),self.n_w),dtype=th.float32)\n        if self.IsRot:\n            rot = pk.matrix_to_quaternion(m[:, :3, :3])\n            return (th.hstack([pos,rot]) + self.G.to(x.device) * w.to(x.device)).transpose(dim0=0,dim1=1)\n        else:\n            return (pos + self.G.to(x.device) * w.to(x.device)).transpose(dim0=0,dim1=1)\n","repo_name":"RanKyoto/DE-SAC","sub_path":"utils/sensors.py","file_name":"sensors.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72426210185","text":"import os\nimport glob\nimport re\nfrom pathlib import Path\n\nlgatePatch = False\ntargets = [\n\t# \"\"\"is_machine_empire = yes\"\"\",\n\t# #\"\"\"has_ethic = ethic_gestalt_consciousness\"\"\",\n\t# \"\"\"has_authority = auth_machine_intelligence\"\"\",\n\t# \"\"\"authority = { value = auth_machine_intelligence }\"\"\",\n\t# \"\"\"has_authority = \\\"auth_machine_intelligence\\\"\"\"\",\n\t# \"\"\"has_ascension_perk = ap_synthetic_evolution\"\"\"\n]\ntargets2 = {\n\t#r\"power = \\d*\" : [\"power = \"]\n\t#r\"_hull_add = \\d*\" : [\"_hull_add = \"],\n\t# r\"_species_trait_points_add = \\d\" : [\"_species_trait_points_add = \"],\n\t# r\"job_replicator_add = \\d\":[\"if = {limit = {has_authority = auth_machine_intelligence} job_replicator_add = \", \"} if = {limit = {has_country_flag = synthetic_empire} job_roboticist_add = \",\"}\"]\n\t}\n\ntargets3 = {\n\n\t# \"tile_resource_engineering_research_mult\" : \"planet_jobs_engineering_research_produces_mult\",\n\t# \"tile_resource_physics_research_mult\" : \"planet_jobs_physics_research_produces_mult\",\n\t# \"tile_resource_society_research_mult\" : \"planet_jobs_society_research_produces_mult\",\n\t# \"pop_consumer_goods_mult\" : \"planet_pops_consumer_goods_upkeep_mult\",\n\t# \"pop_food_req_mult\" : \"planet_pops_food_upkeep_mult\",\n\t# \"tile_resource_energy_mult\" : \"planet_jobs_energy_produces_mult\",\n\t# \"tile_resource_minerals_mult\" : \"planet_jobs_minerals_produces_mult\",\n\t# \"tile_resource_food_mult\" : \"planet_jobs_food_produces_mult\",\n\t# \"tile_resource_unity_mult\" : \"planet_jobs_unity_produces_mult\",\n\t# \"pop_robot_build_speed_mult\" : \"pop_assembly_speed\",\n\t# \"pop_robot_upkeep_mult\" : \"planet_pops_robotics_upkeep_mult\",\n\t# \"pop_robot_build_cost_mult\" : \"planet_pop_assemblers_upkeep_mult\",\n\t# \"country_resource_influence_add\" : \"country_base_influence_produces_add\",\n\t# \"country_resource_unity_mult\" : \"country_base_unity_produces_mult\",\n\t# \"pop_eff_wo_slaves\" : \"pop_cat_slave_happiness\"\n\n\t# \"has_starbase_size >= starbase_starfortress\" : \"has_starbase_size >= starbase_outpost\"\n\t#\"leader_trait = yes\" : \"leader_trait = { admiral }\",\n\t#\"trait_robot_domestic_protocols\" : \"trait_just-more-traits_robot_robosexuals\"\n\t#\"levels = 10\" : \"levels = 5\"\n\t#\"levels = -1\" : \"levels = 5\"\n\t#\"_species_trait_points_add = 1\" : \"_species_trait_points_add = 2\"\n\t# \"is_megastructure_type = lgate_base\" : \"OR = { is_megastructure_type = lgate_base is_megastructure_type = lgate_disabled}\"\n\t#\"has_ascension_perk = ap_machine_worlds has_ascension_perk = ap_synth_artificial_worlds\" : \"OR ={ has_ascension_perk = ap_machine_worlds has_ascension_perk = ap_synth_artificial_worlds }\"\n\t# \"default_robot\" : \"2dsynth_01\",\n\t# \"sd_mam_robot\" : 
\"mammaliansynth\",\n\t# \"sd_rep_robot\" : \"reptiliansynth\",\n\t# \"sd_avi_robot\" : \"aviansynth\",\n\t# \"sd_art_robot\" : \"arthropoidsynth\",\n\t# \"sd_mol_robot\" : \"synthetic_robot_01\",\n\t# \"sd_fun_robot\" : \"dragon_cyber2\",\n\t# \"sd_hum_robot\" : \"2dsynth_01\",\n\t# \"lith_machine\" : \"dragon_robot\",\n\t#\"has_ascension_perk = ap_synthetic_evolution\" : \"has_country_flag = synthetic_empire\",\n #\"create_built_robot_species\" : \"create_built_override_robot_species\"\n\t\n}\n\ndef replacer1(line):\n\tfor target in targets:\n\t\t\n\t\t#has_ascension_perk = ap_synthetic_evolution \n\t\treplacer = \"OR = {has_country_flag = synthetic_empire \"+target+\"}\"\n\t\tif target in line and replacer not in line: \n\t\t\t#print(target)\n\t\t\tshouldReplace = True\n\t\t\tfor j in range(i,0,-1):\n\t\t\t\tif \"{\" in fileContents[j]:\n\t\t\t\t\tif \"NOR\" in fileContents[j] or \"NOT\" in fileContents[j] :\n\t\t\t\t\t\tshouldReplace = False\n\t\t\t\t\t#print (fileContents[j])\n\t\t\t\t\tbreak\n\t\t\t#print(line)\n\t\t\tif shouldReplace:\n\t\t\t\tline = line.replace(target,replacer)\n\treturn line\n\ndef replacer2(line,i):\n\tfor t,r in targets2.items():\n\t\ttargets = re.findall(t,fileContents[i])\n\t\tif len(targets) > 0:\n\t\t\tfor target in targets:\n\t\t\t\t\n\t\t\t\tvalue = int(target.split(\"=\")[1])\n\t\t\t\treplacer = \"\"\n\t\t\t\tfor j in range(len(r)):\n\t\t\t\t\treplacer += r[j]\n\t\t\t\t\t#if i < len(r) -1:\n\t\t\t\t\treplacer += str(int(value * 2))\n\t\t\t\tprint(replacer)\n\t\t\t\tif target in line and replacer not in line: \n\t\t\t\t\tline = line.replace(target,replacer)\n\treturn line\n\ndef replacer3(line):\n\tfor t,r in targets3.items():\n\t\tif t in fileContents[i]:\n\t\t\tline = line.replace(t,r)\n\n\treturn line\n\nfileList = glob.glob('mod/! Modpack/common/**',recursive=True)\n#fileList = [\"mod/! 
Modpack/common/component_templates/auxmodpack_cores.txt\"]\n#print(targets)\nfor _file in fileList: \n\tif os.path.isfile(_file) and \".txt\" in _file:\n\t\t\n\t\tfileContents = \"\"\n\t\treadFile = open(_file,\"r\")\n\t\t\n\t\ttry:\n\t\t\tfileContents = readFile.readlines()\n\t\t\ttext = \"\\n\".join(fileContents)\n\t\t\treadFile.close()\n\t\t\tout = \"\"\n\t\t\thasGate= False\n\t\t\tif lgatePatch:\n\t\t\t#print(text)\n\t\t\t\tif \"is_megastructure_type = lgate_disabled\" not in text:\n\t\t\t\t\tif \"is_megastructure_type = lgate_base\" in text:\n\t\t\t\t\t\thasGate = True\n\t\t\tif not lgatePatch or hasGate:\n\t\t\t\tfor i in range(0,len(fileContents)):\n\t\t\t\t\tline = fileContents[i]\n\t\t\t\t\t#out += replacer1(line)\n\t\t\t\t\tout += replacer3(line)\n\t\t\t\t\t#out += replacer2(line,i)\n\t\t\t\t#print(line)\n\t\t\t\treadFile = open(_file,\"w\")\n\t\t\t\treadFile.write(out)\n\n\t\texcept Exception as e: \n\t\t\tprint(e)\n\t\t\tprint(\"Unable to open\",_file)\n\t\treadFile.close()\nprint(\"Done!\")\ninput()\n\n","repo_name":"D4rkstalker/StellarisModpackUtility","sub_path":"modfixes.py","file_name":"modfixes.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"29169245917","text":"import hashlib\n\n\ndef checkSoln(soln):\n return soln[:6] == \"000000\"\n\n\ninput = \"ckczppom\"\n# input = \"pqrstuv\"\n\ni = 0\nwhile True:\n result = hashlib.md5((input + str(i)).encode())\n if checkSoln(result.hexdigest()):\n print(i)\n break\n else:\n i += 1\n","repo_name":"jzfraser/aoc2015","sub_path":"Python/day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21641375067","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.utils.timezone\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('clients', '0014_blacklist_bl_user'),\n ('setup', '0004_apdrosinataji'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('subscriptions', '0024_abonementi_user'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Abonementu_Apmaksa',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date', models.DateTimeField(default=django.utils.timezone.now)),\n ('full_price', models.DecimalField(max_digits=5, decimal_places=2)),\n ('discount_price', models.DecimalField(max_digits=5, decimal_places=2)),\n ('deposit', models.BooleanField(default=False)),\n ('from_deposit', models.DecimalField(max_digits=5, decimal_places=2)),\n ('gift_card', models.BooleanField(default=False)),\n ('from_gift_card', models.DecimalField(max_digits=5, decimal_places=2)),\n ('insurance_cash', models.DecimalField(max_digits=5, decimal_places=2)),\n ('cash', models.BooleanField(default=False)),\n ('card', models.BooleanField(default=False)),\n ('transfer', models.BooleanField(default=False)),\n ('addiitonal_discount', models.BooleanField(default=False)),\n ('total_ammount', models.DecimalField(max_digits=5, decimal_places=2)),\n ('client', models.ForeignKey(to='clients.Klienti')),\n ('insurance', models.ForeignKey(blank=True, to='setup.Apdrosinataji', null=True)),\n ('subscr', models.ForeignKey(to='subscriptions.Abonementi')),\n ('user', models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL)),\n ],\n 
options={\n 'db_table': 'abonementi_pirkums',\n },\n ),\n ]\n","repo_name":"svabis/db","sub_path":"subscriptions/migrations/0025_abonementu_apmaksa.py","file_name":"0025_abonementu_apmaksa.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29148608233","text":"import torch\nfrom torch import nn\nfrom torch.nn.utils import rnn\nimport torch.nn.functional as F\nfrom transformers import BertConfig, BertModel\nimport numpy as np\n\n\nclass Casrel(nn.Module):\n def __init__(self, config_path, model_path, num_rela, bert_freeze, threshold=0.5):\n super(Casrel, self).__init__()\n self.threshold = threshold\n\n config = BertConfig.from_json_file(config_path)\n self.embedding = BertModel.from_pretrained(model_path, config=config)\n self.subject_head_linear = nn.Linear(config.hidden_size, 1)\n self.subject_tail_linear = nn.Linear(config.hidden_size, 1)\n self.object_head_linear = nn.Linear(config.hidden_size, num_rela)\n self.object_tail_linear = nn.Linear(config.hidden_size, num_rela)\n\n if bert_freeze:\n for param in self.embedding.parameters():\n param.requires_grad = False\n\n def get_objects(self, context, subject_head, subject_tail):\n subject_head = subject_head.unsqueeze(-2)\n subject_tail = subject_tail.unsqueeze(-2)\n\n subject_head = torch.matmul(subject_head, context)\n subject_tail = torch.matmul(subject_tail, context)\n subject = (subject_head + subject_tail) / 2\n context_subject = context + subject\n\n pred_object_heads = self.object_head_linear(context_subject)\n pred_object_tails = self.object_tail_linear(context_subject)\n\n return pred_object_heads, pred_object_tails\n\n def get_loss(self, gold, pred, mask):\n loss = F.binary_cross_entropy_with_logits(pred, gold, reduction='none')\n if loss.shape != mask.shape:\n mask = mask.unsqueeze(-1)\n loss = torch.sum(loss * mask) / torch.sum(mask)\n return loss\n\n def forward(self, tokens, segments, masks, decode=True, all_subject_heads=None, all_subject_tails=None,\n subject_head=None, subject_tail=None, object_heads=None, object_tails=None):\n tokens = rnn.pad_sequence(tokens, batch_first=True)\n segments = rnn.pad_sequence(segments, batch_first=True)\n masks = rnn.pad_sequence(masks, batch_first=True)\n\n if not decode:\n all_subject_heads = rnn.pad_sequence(all_subject_heads, batch_first=True).float()\n all_subject_tails = rnn.pad_sequence(all_subject_tails, batch_first=True).float()\n subject_head = rnn.pad_sequence(subject_head, batch_first=True).float()\n subject_tail = rnn.pad_sequence(subject_tail, batch_first=True).float()\n object_heads = rnn.pad_sequence(object_heads, batch_first=True).float()\n object_tails = rnn.pad_sequence(object_tails, batch_first=True).float()\n\n context = self.embedding(input_ids=tokens, token_type_ids=segments, attention_mask=masks)\n context = context.last_hidden_state\n\n pred_subject_heads = self.subject_head_linear(context).squeeze(2)\n pred_subject_tails = self.subject_tail_linear(context).squeeze(2)\n\n if decode:\n pred_subject_heads = torch.sigmoid(pred_subject_heads) * masks\n pred_subject_tails = torch.sigmoid(pred_subject_tails) * masks\n\n triples = []\n for ex_idx in range(len(pred_subject_heads)):\n single_context = context[ex_idx]\n subject_heads = pred_subject_heads[ex_idx]\n subject_tails = pred_subject_tails[ex_idx]\n\n subject_heads = np.where(subject_heads.cpu() > self.threshold)[0]\n subject_tails = np.where(subject_tails.cpu() > self.threshold)[0]\n\n s2o = {}\n if 
(len(subject_heads) == 0) or (len(subject_tails) == 0):\n triples.append(s2o)\n continue\n\n for subject_head in subject_heads:\n subject_tail = subject_tails[subject_tails >= subject_head]\n\n if len(subject_tail) <= 0:\n continue\n\n subject_tail = subject_tail[0]\n subject = (subject_head, subject_tail)\n\n subject_head_inp = torch.zeros(single_context.shape[0]).to(single_context.device)\n subject_tail_inp = torch.zeros(single_context.shape[0]).to(single_context.device)\n subject_head_inp[subject_head] = 1\n subject_tail_inp[subject_tail] = 1\n\n object_heads, object_tails = self.get_objects(single_context, subject_head_inp, subject_tail_inp)\n object_heads = torch.sigmoid(object_heads)\n object_tails = torch.sigmoid(object_tails)\n\n object_heads = np.where(object_heads.cpu() > self.threshold)\n object_tails = np.where(object_tails.cpu() > self.threshold)\n\n for object_head, rela_head in zip(*object_heads):\n for object_tail, rela_tail in zip(*object_tails):\n if rela_head == rela_tail and object_head <= object_tail:\n if subject not in s2o:\n s2o[subject] = []\n s2o[subject].append((object_head, object_tail, rela_head))\n break\n\n triples.append(s2o)\n return triples\n else:\n pred_object_heads, pred_object_tails = self.get_objects(context, subject_head, subject_tail)\n\n subject_heads_loss = self.get_loss(all_subject_heads, pred_subject_heads, masks)\n subject_tails_loss = self.get_loss(all_subject_tails, pred_subject_tails, masks)\n object_heads_loss = self.get_loss(object_heads, pred_object_heads, masks)\n object_tails_loss = self.get_loss(object_tails, pred_object_tails, masks)\n\n loss = subject_heads_loss + subject_tails_loss + object_heads_loss + object_tails_loss\n return loss\n","repo_name":"zerozzl/nlp_re","sub_path":"joint_casrel/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"43484191117","text":"from flask import Flask\nfrom flask_restful import Api, Resource, reqparse\nimport DataMassager\nimport DatabaseConnector\n\napp = Flask(__name__)\napi = Api(app)\n\n# DATABASE CREDENTIALS\nhostname = '127.0.0.1'\nuser = 'root'\npassword = 'carlsagan42'\ndb = 'find_my_oscar'\n\nmy_connection = DatabaseConnector.create_connection(hostname, user, password, db)\nselect_all_query = \"SELECT * FROM our_data\"\n\nmovies_tuple_list = DatabaseConnector.execute_read_query(my_connection, select_all_query)\nmovies = []\nfields = ('year', 'category', 'winner', 'entity', 'released', 'plot', 'poster')\nfor movie_tuple in movies_tuple_list:\n movie_list_entry = DataMassager.tuple_to_dict(movie_tuple, fields)\n movies.append(movie_list_entry)\n\n\nclass Default(Resource):\n def get(self):\n if movies is None:\n return \"Movie database is empty.\", 404\n return movies, 200\n\n\nclass Entity(Resource):\n def get(self, entity):\n for movie in movies:\n if entity == movie[\"entity\"]:\n return movie, 200\n return \"Movie not found\", 404\n\n def post(self, entity):\n parser = reqparse.RequestParser()\n parser.add_argument(\"year\")\n parser.add_argument(\"category\")\n parser.add_argument(\"winner\")\n parser.add_argument(\"released\")\n parser.add_argument(\"plot\")\n parser.add_argument(\"poster\")\n args = parser.parse_args()\n\n for movie in movies:\n if entity == movie[\"entity\"]:\n return \"Movie with title {} already exists\".format(entity), 400\n\n movie = {\n \"year\": args[\"year\"],\n \"category\": args[\"category\"],\n \"winner\": args[\"winner\"],\n 
\"entity\": entity,\n            \"released\": args[\"released\"],\n            \"plot\": args[\"plot\"],\n            \"poster\": args[\"poster\"]\n        }\n        movies.append(movie)\n        return movie, 201\n\n    def put(self, entity):\n        parser = reqparse.RequestParser()\n        parser.add_argument(\"year\")\n        parser.add_argument(\"category\")\n        parser.add_argument(\"winner\")\n        parser.add_argument(\"released\")\n        parser.add_argument(\"plot\")\n        parser.add_argument(\"poster\")\n        args = parser.parse_args()\n\n        for i, movie in enumerate(movies):\n            if entity == movie[\"entity\"]:\n                movies[i] = {\n                    \"year\": args[\"year\"],\n                    \"category\": args[\"category\"],\n                    \"winner\": args[\"winner\"],\n                    \"entity\": entity,\n                    \"released\": args[\"released\"],\n                    \"plot\": args[\"plot\"],\n                    \"poster\": args[\"poster\"]\n                }\n                return movies[i], 200\n\n        movie = {\n            \"year\": args[\"year\"],\n            \"category\": args[\"category\"],\n            \"winner\": args[\"winner\"],\n            \"entity\": entity,\n            \"released\": args[\"released\"],\n            \"plot\": args[\"plot\"],\n            \"poster\": args[\"poster\"]\n        }\n        movies.append(movie)\n        return movie, 201\n\n    def delete(self, entity):\n        global movies\n        movies = [movie for movie in movies if movie[\"entity\"] != entity]\n        return \"{} is deleted.\".format(entity), 200\n\n\nclass Category(Resource):\n\n    def get(self, category):\n        parser = reqparse.RequestParser()\n        parser.add_argument(\"year\")\n        parser.add_argument(\"winner\")\n        parser.add_argument(\"entity\")\n        parser.add_argument(\"released\")\n        parser.add_argument(\"plot\")\n        parser.add_argument(\"poster\")\n        args = parser.parse_args()\n\n        select_category_query = \"SELECT * FROM our_data WHERE `category`='\" + category + \"'\"\n\n        if args[\"year\"] is not None:\n            select_category_query += \" AND `year`='\" + args[\"year\"] + \"'\"\n        if args[\"winner\"] is not None:\n            select_category_query += \" AND `winner`='\" + args[\"winner\"] + \"'\"\n\n        category_tuple_list = DatabaseConnector.execute_read_query(my_connection, select_category_query)\n        category_list = []\n\n        if not category_tuple_list:\n            return \"No movies under category {}.\".format(category), 404\n\n        for movie in category_tuple_list:\n            category_list.append(DataMassager.tuple_to_dict(movie, fields))\n\n        return category_list, 200\n\n    def post(self, category):\n        pass\n\n    def put(self, category):\n        pass\n\n    def delete(self, category):\n        pass\n\n\nif __name__ == \"__main__\":\n    api.add_resource(Entity, \"/movies/title/<string:entity>\")\n    api.add_resource(Category, \"/movies/category/<string:category>\")\n    api.add_resource(Default, \"/movies\")\n\n    app.run(debug=True)\n","repo_name":"Team-10-cs131/FindMyOscar","sub_path":"FindMyOscarDataBase/Api_Server.py","file_name":"Api_Server.py","file_ext":"py","file_size_in_byte":4931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"13809445358","text":"import os\n\nfrom cutekit import args, builder, cmds, shell\n\ndef bootCmd(args: args.Args) -> None:\n    kernel = builder.build('p5k-core', 'riscv32-kernel')\n\n    qemu = [\n        \"qemu-system-riscv32\",\n        \"-machine\", \"virt\",\n        \"-bios\", \"default\",\n        \"-nographic\",\n        \"-serial\", \"mon:stdio\",\n        \"--no-reboot\",\n        \"-kernel\", kernel.outfile()\n    ]\n\n    shell.exec(*qemu)\n\n\ncmds.append(cmds.Cmd('B', 'boot', 'Boot the kernel', bootCmd))","repo_name":"sleepy-monax/p5k","sub_path":"meta/plugins/boot.py","file_name":"boot.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"3332831476","text":"import os\nimport time\nimport matplotlib\nimport numpy as 
np\nimport matplotlib.pyplot as plt\nfrom typing import List\n\ndef add_custom_legend(ax, colors, labels, linewidth=2, loc='upper left', fancybox=True, bbox_to_anchor=(1.0, 1.00), shadow=True,\n                      fontsize=10):\n    \"\"\"\n    Adds a custom legend, with specified colors and the corresponding labels, to the passed axis. \n\n    Parameters\n    ----------\n    ax : matplotlib axis \n    colors : colors used for plot\n    labels : labels for each color\n    linewidth : linewidth of color on the legend\n    loc : str, optional\n        location anchor of the legend. The default is 'upper left'.\n    fancybox : bool, optional\n        draw the legend box with rounded corners. The default is True.\n    bbox_to_anchor : tuple, optional\n        where to anchor legend. The default is (1.0, 1.00), which is the upper right portion\n        of the plot. When loc is 'upper left', this defaults to the legend being placed\n        outside of the plot on the upper left.\n    shadow : bool, optional\n        show legend box with a shadow? The default is True.\n    fontsize : int, optional\n        Legend fontsize. The default is 10.\n\n    Returns\n    -------\n    None. Adds legend to axis\n\n    \"\"\"\n    custom_lines = calc_handles_for_custom_legend(colors, labels=labels, linewidth=linewidth)\n    if ax is None: plt.legend(handles=custom_lines, loc=loc, fancybox=fancybox, bbox_to_anchor=bbox_to_anchor, shadow=shadow, fontsize=fontsize)\n    else: ax.legend(handles=custom_lines, loc=loc, fancybox=fancybox, bbox_to_anchor=bbox_to_anchor, shadow=shadow,fontsize=fontsize)\n\ndef save_fig(title:str=None, tight_layout:bool=True, rect:List[int]=None,save_type:List[str]=None, \n             save_svg:bool=True, save_png:bool=False, save_folder:str=None):\n    \"\"\"\n    Saves the current matplotlib figure \n\n    Parameters\n    ----------\n    title : str, optional\n        title of plot, will be saved under this name. The default is to save it as the current time.\n    tight_layout : bool, optional\n        Will call plt.tight_layout() to fix formatting. The default is True.\n    rect : List[int], optional\n        List of 4 ints denoting the rectangle to save. Defaults to a rectangle that doesn't\n        crop the figure.\n    save_type : List[str], optional\n        unused; the saved formats are determined by save_svg and save_png. The default is None.\n    save_svg : bool, optional\n        Save fig as an svg file. The default is True.\n    save_png : bool, optional\n        Save fig as a png file. The default is False.\n    save_folder : str, optional\n        Where to save the file. The default is './figs'.\n    \"\"\"\n    if (not save_png) and (not save_svg): save_png = True\n    if save_folder is None: save_folder = './figs'\n    os.makedirs(save_folder, exist_ok=True)\n    if rect is None: rect = [0, 0, 1, 1]\n    if title is None: title = time.time()\n    if tight_layout: plt.tight_layout(rect=rect)\n    save_type = []\n    if save_png: save_type.append('png')\n    if save_svg: save_type.append('svg')\n    for st in save_type:\n        save_path = os.path.join(save_folder, f'{title}.{st}')\n        # bbox_inches='tight' crops the plot down based on the extents of the artists in the plot.\n        # https://stackoverflow.com/questions/44642082/text-or-legend-cut-from-matplotlib-figure-on-savefig\n        plt.gcf().savefig(save_path, dpi=plt.gcf().dpi, bbox_inches='tight')\n    plt.ion()\n    return save_path\n\n###############################################\n# Fonts, Colors, Plotting #####################\n###############################################\ndef set_default_fonts():\n    \"\"\" Sets some default parameters for matplotlib. 
\"\"\"\n    matplotlib.rc('font', family='sans-serif') \n    matplotlib.rc('font', serif='Arial') \n    matplotlib.rcParams.update({'font.size': 12})\n    matplotlib.rcParams['svg.fonttype'] = 'none'\n    matplotlib.rc('lines', solid_capstyle='butt')\n    \n\ndef set_standard_fonts(ax=None, axis_label_fontsize=7, axis_label_fontnames='Arial',\n                       tick_fontsize=5):\n    \"\"\" Quick way to set the tick label and axis label font sizes and font names.\"\"\"\n    axes = _to_ax_iterable(ax)\n    for ax in axes:\n        ax.xaxis.get_label().set_fontname('Arial')\n        ax.yaxis.get_label().set_fontname('Arial')\n        set_tick_label_fontnames(both='Arial', ax=None)\n        ax.xaxis.get_label().set_fontsize(axis_label_fontsize)\n        ax.yaxis.get_label().set_fontsize(axis_label_fontsize)\n        for tick in ax.get_xticklabels(): tick.set_fontsize(tick_fontsize)\n        for tick in ax.get_yticklabels(): tick.set_fontsize(tick_fontsize)\n    \ndef set_tick_label_fontnames(both='Arial', ax=None):\n    axes = _to_ax_iterable(ax)\n    for ax in axes:\n        for tick in ax.get_xticklabels():\n            tick.set_fontname(both)\n        for tick in ax.get_yticklabels():\n            tick.set_fontname(both)\n\ndef hide_spine(tblr='tr', ax=None):\n    \"\"\" \n    Hides the ax spine. tblr is 'top' 'bottom' 'left' 'right' list \"\"\"\n    for top_bttm_left_right in tblr:\n        if top_bttm_left_right.startswith('l'): ax.spines['left'].set_visible(False)\n        elif top_bttm_left_right.startswith('r'): ax.spines['right'].set_visible(False)\n        elif top_bttm_left_right.startswith('t'): ax.spines['top'].set_visible(False)\n        elif top_bttm_left_right.startswith('b'): ax.spines['bottom'].set_visible(False)\n        else: raise ValueError(tblr)\n\ndef _is_iterable(arr):\n    try:\n        iter(arr)\n        return True\n    except TypeError: return False\n\ndef _to_ax_iterable(axes):\n    if axes is None: axes = plt.gca()\n    if _is_iterable(axes): return axes\n    else: return [axes]\n\ndef hide_tick_marks(xy, ax=None):\n    \"\"\" hides tick marks of ax. \n    xy = 'x', 'y', or 'xy' denoting which axis to hide tick marks of\n    \"\"\"\n    if ax is None: ax = plt.gca()\n    axes = _to_ax_iterable(ax)\n    for _, ax in enumerate(axes):\n        if xy == 'x': ax.tick_params(axis='x', which='both',length=0)\n        elif xy == 'y': ax.tick_params(axis='y', which='both',length=0)\n        elif (xy == 'xy') or (xy == 'both'): ax.tick_params(axis='both', which='both',length=0)\n\n    \ndef set_tick_locations(locations, xy, ax=None):\n    \"\"\" sets tick locations for ax \"\"\"\n    axes = _to_ax_iterable(ax)\n    for _, ax in enumerate(axes):\n        if 'x' in xy: ax.set_xticks(locations)\n        if 'y' in xy: ax.set_yticks(locations)\n\ndef calc_zero_centered_hist_bins(y, bin_width):\n    \"\"\" creates bins for a histogram that are centered at zero.\n    y is data that needs to be binned. 
\"\"\"\n bins = list(np.arange( (np.floor(np.min(y)) // bin_width) * bin_width , 0, bin_width)) + list(np.arange(0, (np.ceil(np.max(y)) // bin_width) * bin_width + 2 * bin_width, bin_width))\n assert np.min(y) > bins[0]\n assert np.max(y) < bins[-1], (np.max(y), bins[-1])\n return bins\n\n# Quick conversions\ndef mm_to_inch(mm): return mm / 25.4\ndef cm_to_inch(cm): return cm / 2.54\ndef inch_to_cm(inch): return inch * 2.54\n\n\ndef calc_handles_for_custom_legend(colors, labels, linewidth=2):\n if type(colors) is str:\n colors = [colors]\n labels = [labels]\n assert len(colors) == len(labels)\n custom_lines = []\n for l, c in zip(labels, colors):\n custom_lines.append(matplotlib.lines.Line2D([0], [0], color=c, lw=linewidth, label=l))\n return custom_lines\n\ndef hex_to_rgb(hex_str):\n \"\"\" https://stackoverflow.com/questions/29643352/converting-hex-to-rgb-value-in-python \"\"\"\n h = hex_str.lstrip('#')\n return tuple(int(h[i:i+2], 16) / 256 for i in (0, 2, 4))\n\ndef hex_to_rgba(hex_str, alpha):\n \"\"\" https://stackoverflow.com/questions/29643352/converting-hex-to-rgb-value-in-python \"\"\"\n h = hex_str.lstrip('#')\n return list((int(h[i:i+2], 16) / 256 for i in (0, 2, 4))) + [alpha]\n\n###################################\n## Plotting: Tick Marks & Labels ##\n###################################\ndef hide_tick_marks_and_labels(xy='xy', ax=None):\n \"\"\" hides tick marks and labels by setting them to be an empty list\"\"\"\n if ax is None: ax = plt.gca()\n if xy == 'xy':\n ax.get_xaxis().set_ticks([])\n ax.get_yaxis().set_ticks([])\n elif xy == 'x': ax.get_xaxis().set_ticks([])\n elif xy == 'y': ax.get_yaxis().set_ticks([])\n elif xy == 'z': ax.get_zaxis().set_ticks([])\n elif xy == 'xy' or xy == 'yx':\n ax.get_xaxis().set_ticks([])\n ax.get_yaxis().set_ticks([])\n elif xy == 'xyz':\n ax.get_xaxis().set_ticks([])\n ax.get_yaxis().set_ticks([])\n ax.get_zaxis().set_ticks([])\n else: raise ValueError(xy)\n\ndef hide_tick_marks(xy, ax=None):\n if ax is None: ax = plt.gca()\n axes = _to_ax_iterable(ax)\n for _, ax in enumerate(axes):\n if xy == 'x': ax.tick_params(axis='x', which='both',length=0)\n elif xy == 'y': ax.tick_params(axis='y', which='both',length=0)\n elif (xy == 'xy') or (xy == 'both'): ax.tick_params(axis='both', which='both',length=0)\n\ndef hide_tick_labels(xy, axes=None, set_invisible=False):\n \"\"\" Same as hide_tick_marks but just sets them to be invisible instead of removing them.\n Use when sharing axes and want to show on only 1. 
\"\"\"\n assert 'x' in xy or 'y' in xy, xy\n axes = _to_ax_iterable(axes)\n\n for ax in axes:\n if set_invisible:\n if 'x' in xy: plt.setp(ax.get_xticklabels(), visible=False)\n if 'y' in xy: plt.setp(ax.get_yticklabels(), visible=False)\n else: raise ValueError(xy)\n else:\n if 'x' in xy: ax.set_xticklabels([])\n elif 'y' in xy: ax.set_yticklabels([])\n else: raise ValueError(xy)\n\ndef show_tick_labels(xy='xy', ax=None):\n assert 'x' in xy or 'y' in xy, xy\n axes = _to_ax_iterable(ax)\n for ax in axes:\n if 'x' in xy: ax.xaxis.set_tick_params(labelbottom=True)\n if 'y' in xy: ax.yaxis.set_tick_params(labelbottom=True)\n\ndef hide_tick_labels_invis(xy, axes=None):\n \"\"\" https://stackoverflow.com/questions/4209467/matplotlib-share-x-axis-but-dont-show-x-axis-tick-labels-for-both-just-one\n Same as hide_tick_marks but just sets them to be invisible instead of removing them.\n Use when sharing axes, as this won't effect the other shared axis\n \"\"\"\n assert 'x' in xy or 'y' in xy, xy\n if axes is None: axes = plt.gca()\n axes = _to_ax_iterable(axes)\n\ndef set_tick_label_fontsize(both=None, x_tick_label_fontsize=None, y_tick_label_fontsize=None, ax=None):\n \"\"\" Sets the size of the tick marks for each subplot \"\"\"\n axes = _to_ax_iterable(ax)\n if both is not None:\n if type(both) is bool:\n raise ValueError(\"Both is supposed to be an int, not bool\")\n x_tick_label_fontsize = both\n y_tick_label_fontsize = both\n try: len(axes)\n except TypeError: raise\n for ax in axes:\n for tick in ax.xaxis.get_major_ticks():\n if x_tick_label_fontsize is not None: tick.label1.set_fontsize(x_tick_label_fontsize)\n for tick in ax.yaxis.get_major_ticks():\n if y_tick_label_fontsize is not None: tick.label1.set_fontsize(y_tick_label_fontsize)\n\ndef plot_bar_with_stars(mean, top_data_lim, pval, left_dist_mean=0, ax=None, color=None, star_fontsize=10, linewidth=2):\n \"\"\" plots line with corresponding p-value stars.\"\"\"\n if ax is None: ax = plt.gca()\n if color is None: color = 'k'\n if pval > 0.05: star_fontsize = 5\n stars = pval_to_stars(pval) \n add_text( (mean + left_dist_mean) / 2, top_data_lim, stars, ax=ax, fontsize=star_fontsize, coord_type='data', va='bottom', \n ha='center', fontname='Arial', color=color)\n ax.plot( [left_dist_mean, mean], [top_data_lim, top_data_lim], color=color, linewidth=linewidth, solid_capstyle='butt')\n\ndef pval_to_stars(pval):\n if pval < 0.001: stars = '***'\n elif pval < 0.01: stars = '**'\n elif pval < 0.05: stars = '*'\n else: stars = 'n.s.'\n return stars\n\ndef add_text(x, y, text, ha='auto', va='auto', ax=None, fontsize=10, coord_type='axis', fontname='Arial', **kwargs):\n \"\"\" \n Adds text to ax, but automated so the text is less likely to run over the edge by changing the vertical and horizontal\n alighment based on the passed x and y location.\n \n ha in 'left', 'center', right\n va in 'top', 'bottom \"\"\"\n if ax is None: ax = plt.gca()\n if ha == 'auto':\n if x < 0.5: ha = 'left'\n elif x > 0.5: ha = 'right'\n elif x == 0.5: ha = 'center'\n if va == 'auto':\n if y < 0.5: va = 'bottom'\n elif y > 0.5: va = 'top'\n elif y == 0.5: va = 'center'\n\n if coord_type in {'axis', 'ax'}:\n ax.text(x, y, text, verticalalignment=va, horizontalalignment=ha, fontsize=fontsize, transform=ax.transAxes, fontname=fontname, **kwargs)\n elif coord_type == 'data':\n ax.text(x, y, text,\n verticalalignment=va, horizontalalignment=ha, fontsize=fontsize, fontname=fontname, **kwargs)\n else: raise ValueError(coord_type)\n\ndef add_lead_zeros(integer, num_places=4): 
\n \"\"\" Adds leading zeros to an integer, returning a string \"\"\"\n return f\"%0{num_places}d\" % (integer,)\n\ndef set_tick_labels(labels, xy, ax=None):\n \"\"\" sets tick label locations for the specified axis on the the passed ax \"\"\"\n axes = _to_ax_iterable(ax)\n for _, ax in enumerate(axes):\n if 'x' in xy: ax.set_xticklabels(labels)\n if 'y' in xy: ax.set_yticklabels(labels)\n","repo_name":"loseydm/core_lib","sub_path":"vis/extra_matplotlib_fns.py","file_name":"extra_matplotlib_fns.py","file_ext":"py","file_size_in_byte":13317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73233094346","text":"import subprocess\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n \"\"\"\n 필요한 패키지는 전역에 있어야 합니다\n 이 스크립트는 가상환경을 실행시킬 수 없습니다\n --OS 에러 발생--\n \"\"\"\n\n help = \"모든 데이터를 생성합니다. [!! 슈퍼유저를 생성한 후 실행하세요 슈퍼유저는 수동으로 만들어야 합니다.]\"\n\n def handle(self, *args, **options):\n\n prefix = \"python manage.py seed_\"\n\n command_list = [\n prefix + \"users --total 100\",\n prefix + \"constituents --total 50\",\n prefix + \"flavor_tags\",\n prefix + \"postings --total 200\",\n prefix + \"custom_lists --total 100\",\n ]\n\n for command in command_list:\n # Popen쓰지 마라 여기서 멀티 프로세싱은 위험하다\n subprocess.run(command)\n","repo_name":"clomia/Aribi","sub_path":"core/management/commands/seed_all.py","file_name":"seed_all.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"30202326881","text":"def dispense(item):\n print(f\"Dispensing {item}...\")\n\ndef select_item():\n print(\"Please select an item:\")\n\n print(\"1. Crisps\")\n print(\"2. Sweete\")\n print(\"3. Drink\")\n\n selection = int(input(\"Enter the number of the item you want to dispense: \"))\n\n if selection == 1:\n dispense(\"chips\")\n elif selection == 2:\n dispense(\"sweets\")\n elif selection == 3:\n dispense(\"drink\")\n else:\n print(\"Invalid selection. 
def select_item():\n print(\"Please select an item:\")\n\n print(\"1. Crisps\")\n print(\"2. Sweets\")\n print(\"3. Drink\")\n\n selection = int(input(\"Enter the number of the item you want to dispense: \"))\n\n if selection == 1:\n dispense(\"crisps\")\n elif selection == 2:\n dispense(\"sweets\")\n elif selection == 3:\n dispense(\"drink\")\n else:\n print(\"Invalid selection. Please try again.\")\nselect_item()\n","repo_name":"work1245/work","sub_path":"dispense.py","file_name":"dispense.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"18697584152","text":"import os\nimport random\nimport unittest\n\nfrom suds.client import Client\nfrom suds.sax.attribute import Attribute\nfrom suds.sax.element import Element\nfrom suds.wsse import UsernameToken\n\nCWD = os.getcwd()\nXML_PATH = os.path.join(CWD, os.path.sep.join(__file__.split('/')[:-1]))\nWSDL_URL = 'https://staging.payu.co.za/service/PayUAPI?wsdl'\n\nclass TestsetTransaction(unittest.TestCase):\n \n def test_advanced_RPP_setTransaction(self):\n file_path = os.path.join(XML_PATH, 'xml', 'set_transaction_advanced_redirect_page.xml')\n xml_file = open(file_path, 'rb')\n xml_template = xml_file.read()\n xml_file.close()\n \n details = dict(\n safekey = '{45D5C765-16D2-45A4-8C41-8D6F84042F8C}',\n transaction_type = 'PAYMENT',\n stage = 'true',\n cancel_url = 'http://www.example.com/cancel',\n demo_mode = 'true',\n merchant_ref = random.randrange(1,10+1),\n notification_url = 'http://www.example.com/notify',\n return_url = 'http://www.example.com/return',\n secure3d = 'false',\n supported_payments = 'CREDITCARD',\n show_budget = 'true',\n merchant_user_id = '7',\n customer_email = 'jane@example.com',\n customer_firstname = 'Jane',\n customer_lastname = 'Doe',\n customer_mobile = '2700000000',\n customer_regional_id = '1234567890',\n customer_country_code = '27',\n basket_amount = '1000',\n basket_currency_code = 'ZAR',\n basket_description = 'Basket description',\n loyalty_amount = '999',\n loyalty_information = 'Loyalty info',\n loyalty_membership_number = '12345',\n merhant_id = '11111111',\n )\n\n xml = xml_template.format(**details)\n\n client = Client(WSDL_URL)\n\n def test_setTransaction(self):\n file_path = os.path.join(XML_PATH, 'xml', 'set_transaction_with_fraud_check.xml')\n xml_file = open(file_path, 'rb')\n xml_template = xml_file.read()\n xml_file.close()\n\n xml = xml_template.format(\n username = 'Staging Integration Store 1',\n password = '78cXrW1W',\n safekey = '{45D5C765-16D2-45A4-8C41-8D6F84042F8C}',\n merchant_ref = random.randrange(1,10+1),\n cancel_url = 'http://example.com/cancel',\n notification_url = '',\n return_url = 'http://example.com/return',\n customer_email = 'jane@example.com',\n customer_firstname = 'Jane',\n customer_lastname = 'Doe',\n customer_mobile = '27840000000',\n basket_amount = '10000',\n basket_currency_code = 'ZAR',\n basket_description = 'Test basket',\n product_lineitem_amount = '10000',\n product_lineitem_cost = '10000',\n product_lineitem_description = 'Test item',\n product_lineitem_giftmessage = 'Test gift message',\n product_lineitem_productcode = '999',\n product_lineitem_quantity = '1',\n product_lineitem_address = '1 First Aven.',\n product_lineitem_city = 'First city',\n product_lineitem_country_code = '27',\n product_lineitem_postal_code = '0000',\n product_lineitem_firstname = 'Jane',\n product_lineitem_lastname = 'Doe',\n shipping_details_address = '1 First Aven.',\n shipping_details_city = 'First city',\n shipping_details_email = 'jane@example.com',\n shipping_details_country_code = '27',\n shipping_details_method = 'P',\n shipping_details_firstname = 'Jane',\n shipping_details_lastname = 'Doe',\n shipping_details_fax = '27000000000',\n shipping_details_phone = '27000000000',\n check_fraud_override = 'false',\n
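 # The check_fraud_* values below are placeholder test inputs for PayU's fraud-screening block.\n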
check_fraud_merchant_website = 'www.example.com',\n check_fraud_pc_fingerprint = 'asdfasdfsdfasdfasdf',\n )\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"rijkstofberg/paymentintegrations","sub_path":"paymentintegrations/tests/test_setTransaction.py","file_name":"test_setTransaction.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30150775805","text":"import os\nimport warnings\nimport glob\nimport sys\nimport tqdm\nimport json\nimport time\nimport csv\nimport datetime\nimport numpy as np\n\nimport keras.backend as K\nimport tensorflow as tf\nimport kerastuner as kt\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport tensorflow_addons as tfa\n\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Input, Conv3D, BatchNormalization, MaxPooling3D, Flatten, Dense, Dropout\nfrom tensorflow.keras.optimizers import Adam as Adam\nfrom tensorflow.keras.losses import binary_crossentropy as BC\nimport matplotlib.pyplot as plt\n\n\nK.set_image_data_format('channels_last')\n# Ignore FutureWarning from numpy\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\";\n# The GPU id to use, usually either \"0\" or \"1\";\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0,1\";\n# Allow growth of GPU memory, otherwise it will always look like all the memory is being used\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\ntf.config.experimental.set_memory_growth(physical_devices[1], True)\n\nglobal nChannels\n\ndef parse_example(serialized):\n '''Decode examples stored in TFRecords'''\n features = {\n 'x_dim': tf.io.FixedLenFeature([], tf.int64),\n 'y_dim': tf.io.FixedLenFeature([], tf.int64),\n 'z_dim': tf.io.FixedLenFeature([], tf.int64),\n 'channels': tf.io.FixedLenFeature([], tf.int64),\n 'image': tf.io.FixedLenFeature([], tf.string),\n 'label': tf.io.FixedLenFeature([], tf.int64)}\n # Parse the serialized data so we get a dict with our data.\n parsed_example = tf.io.parse_single_example(serialized=serialized, features=features)\n\n x_dim = parsed_example['x_dim']\n y_dim = parsed_example['y_dim']\n z_dim = parsed_example['z_dim']\n channels = parsed_example['channels']\n\n im_shape = [x_dim, y_dim, z_dim, channels]\n\n\n label = parsed_example['label']\n image_raw = parsed_example['image']\n\n image = tf.cast(tf.io.decode_raw(image_raw, tf.float32), tf.float32)\n image = tf.reshape(image, [x_dim, y_dim, z_dim, channels])\n\n label = tf.cast(label, tf.int64)\n label = tf.reshape(label, [1])\n\n return image, label\n\ndef input_fn(filenames, subset, batch_size, buffer_size=512):\n # Args:\n # filenames: Filenames for the TFRecords files.\n # subset: Subset to make either train, valid, test.\n # batch_size: Return batches of this size.\n # buffer_size: Read buffers of this size. 
The random shuffling\n # is done on the buffer, so it must be big enough.\n\n # Create a TensorFlow Dataset-object which has functionality\n # for reading and shuffling data from TFRecords files.\n dataset = tf.data.TFRecordDataset(filenames=filenames)\n\n # Parse the serialized data in the TFRecords files.\n # This returns TensorFlow tensors for the image and labels.\n dataset = dataset.map(parse_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n if subset == 'train' or subset =='valid':\n # Allow infinite reading of the data.\n dataset = dataset.repeat()\n else :\n dataset = dataset.repeat(1)\n\n if subset == 'train':\n dataset = dataset.shuffle(buffer_size=buffer_size)\n\n # Get a batch of data with the given size.\n dataset = dataset.batch(batch_size)\n\n if subset == 'train':\n dataset = dataset.prefetch(64)\n\n return dataset\n\ndef train_input_fn():\n return input_fn(filenames=path_tfrecords_train, subset='train', batch_size=batch_size)\n\ndef valid_input_fn():\n return input_fn(filenames=path_tfrecords_valid, subset='valid', batch_size=batch_size)\n\ndef test_input_fn():\n return input_fn(filenames=path_tfrecords_test, subset='test', batch_size=batch_size)\n\n\n\nCURRENT_DIR = os.getcwd()\nos.system(f'rm -rf {CURRENT_DIR}/results.csv')\n# train params\nn_GPUs = 2\nbatch_size = 4 #64 * n_GPUs\n# CNN params\nnFilts = [32, 64, 128]\ndropoutRates = [0.5, 0.6]\ndenseNodes = [25, 50, 100, 200]\ndoubleFirst = True\nconvLayers = [4, 5, 6]\nlearning_rate = 0.0001\n\nwith open(f'{CURRENT_DIR}/metadata.txt') as f:\n json_data = json.load(f)\n\n\nn_im_train = int(json_data[\"TfRecords\"][0][\"train\"]) # 724\n\nn_im_valid = int(json_data[\"TfRecords\"][0][\"valid\"]) # 206\n\nn_im_test = int(json_data[\"TfRecords\"][0][\"test\"]) # 105\n\nnChannels = int(json_data['nChannels'])\n\n# Tensorbord\nos.system(f'rm -rf {CURRENT_DIR}/logs/')\nlogdir = f'{CURRENT_DIR}/logs/fit_{datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")}'\n\n# Read TfRecords\ntfrecord_files_train = sorted(glob.glob(f'{CURRENT_DIR}/train*.tfrecord'))\ntfrecord_files_valid = sorted(glob.glob(f'{CURRENT_DIR}/valid*.tfrecord'))\ntfrecord_files_test = sorted(glob.glob(f'{CURRENT_DIR}/test*.tfrecord'))\n\n# Loop over settings\n\nfile = open('results.csv', 'w', newline='')\nwith file :\n header = ['convLayers', 'nFilts', 'dropoutRate', 'denseNodes', 'loss', 'accuracy']\n writer = csv.DictWriter(file, fieldnames=header)\n writer.writeheader()\n\n for convLayer in convLayers:\n for nFilt in nFilts:\n for dropoutRate in dropoutRates:\n for dense in denseNodes:\n strategy = tf.distribute.MirroredStrategy()\n with strategy.scope():\n NAME = f'{convLayer}-conv-{nFilt}-filtrs-{dropoutRate}-dropout-{dense}-dense'\n\n input = Input(shape=[61,73,61,nChannels])\n x = input\n\n x = Conv3D(nFilt, kernel_size=(3,3,3), activation='relu', padding='same',\n strides = (1, 1, 1), kernel_regularizer = tf.keras.regularizers.l2(l=0.01))(x)\n x = BatchNormalization()(x)\n\n\n if doubleFirst:\n x = Conv3D(nFilt, kernel_size=(3,3,3), activation='relu', padding='same',\n strides = (1, 1, 1), kernel_regularizer = tf.keras.regularizers.l2(l=0.01))(x)\n x = BatchNormalization()(x)\n\n x = MaxPooling3D(pool_size=(2,2,2))(x)\n\n for i in range(convLayer):\n # x = Conv3D(nFilt*(2**(i+1)), kernel_size=(3,3,3), activation='relu', padding='same')(x)\n x = Conv3D(nFilt*(2**(i+1)), kernel_size=(3,3,3), activation='relu', padding='same',\n strides = (1, 1, 1), kernel_regularizer = tf.keras.regularizers.l2(l=0.01))(x)\n x = BatchNormalization()(x)\n x = 
MaxPooling3D(pool_size=(2,2,2))(x)\n\n x = Flatten()(x)\n\n for _ in range(1):\n x = Dense(dense, activation='relu')(x)\n x = BatchNormalization()(x)\n x = Dropout(dropoutRate)(x)\n\n output = Dense(1, activation='sigmoid')(x)\n\n model = Model(inputs=input, outputs=output)\n model.compile(loss=BC, optimizer=Adam(lr=learning_rate), metrics=['accuracy'])\n\n\n print(f'Training model with parameters {NAME}')\n model.summary()\n\n dataset_train = input_fn(tfrecord_files_train, 'train', batch_size)\n dataset_valid = input_fn(tfrecord_files_valid, 'valid', batch_size)\n dataset_test = input_fn(tfrecord_files_test, 'test', batch_size)\n\n ## Train\n start_time = time.time()\n early_stopping_callback = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=10, restore_best_weights=True, mode='max')\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=f'{logdir}/{NAME}', histogram_freq=1)\n\n try:\n\n history = model.fit(dataset_train, epochs=30,\n steps_per_epoch=n_im_train//batch_size,\n validation_data=dataset_valid,\n validation_steps=n_im_valid//batch_size,\n callbacks=[early_stopping_callback, tensorboard_callback])\n\n elapsed_time = time.time() - start_time\n elapsed_time_string = str(datetime.timedelta(seconds=round(elapsed_time)))\n print('Training time: ', elapsed_time_string)\n\n score = model.evaluate(dataset_test, steps=n_im_test//batch_size)\n print('Test loss: %.4f' % score[0])\n print('Test accuracy: %.4f' % score[1])\n\n # Save results to text file\n # accuracy = np.zeros((1,1))\n writer.writerow({'convLayers': convLayer ,\n 'nFilts': nFilt, 'dropoutRate': dropoutRate,\n 'denseNodes': dense, 'loss': score[0],\n 'accuracy':score[1]})\n\n except:\n\n print('Training failed reporting zero accuracy/loss')\n # accuracy = np.zeros((1,1))\n accuracy = 0.0\n writer.writerow({'convLayers': convLayer ,\n 'nFilts': nFilt, 'dropoutRate': dropoutRate,\n 'denseNodes': dense, 'loss': 0.0,\n 'accuracy':0.0})\n finally:\n\n del x\n del model\n del strategy\n del dataset_train\n del dataset_valid\n del dataset_test\n","repo_name":"quartermaine/Master-Thesis","sub_path":"thesis code/Other_code_experiments/train_keras.py","file_name":"train_keras.py","file_ext":"py","file_size_in_byte":10110,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"33726181067","text":"#!/usr/bin/env python3\n\nfrom typing import Callable, Optional, Tuple, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom ..utils.errors import NotPSDError\nfrom ..utils.memoize import cached\nfrom ._linear_operator import LinearOperator\nfrom .batch_repeat_linear_operator import BatchRepeatLinearOperator\nfrom .dense_linear_operator import DenseLinearOperator\n\nAllsor = Union[Tensor, LinearOperator]\n\n\nclass _TriangularLinearOperatorBase:\n \"\"\"Base class that all triangular lazy tensors are derived from.\"\"\"\n\n pass\n\n\nclass TriangularLinearOperator(LinearOperator, _TriangularLinearOperatorBase):\n r\"\"\"\n A wrapper for LinearOperators when we have additional knowledge that it\n represents a lower- or upper-triangular matrix (or batch of matrices).\n\n :param tensor: A `... 
x N x N` Tensor, representing a (batch of)\n `N x N` triangular matrix.\n :param upper: If True, the tensor is considered to be upper-triangular, otherwise lower-triangular.\n \"\"\"\n\n def __init__(self, tensor: Allsor, upper: bool = False) -> None:\n if isinstance(tensor, TriangularLinearOperator):\n # this is a null-op, we can just use underlying tensor directly.\n tensor = tensor._tensor\n elif isinstance(tensor, BatchRepeatLinearOperator):\n # things get kind of messy when interleaving repeats and triangualrisms\n if not isinstance(tensor.base_linear_op, TriangularLinearOperator):\n tensor = tensor.__class__(\n TriangularLinearOperator(tensor.base_linear_op, upper=upper),\n batch_repeat=tensor.batch_repeat,\n )\n if torch.is_tensor(tensor):\n tensor = DenseLinearOperator(tensor)\n super().__init__(tensor, upper=upper)\n self.upper = upper\n self._tensor = tensor\n\n def __add__(self, other: Allsor) -> LinearOperator:\n from .diag_linear_operator import DiagLinearOperator\n\n if isinstance(other, DiagLinearOperator):\n from .added_diag_linear_operator import AddedDiagLinearOperator\n\n return self.__class__(AddedDiagLinearOperator(self._tensor, other), upper=self.upper)\n if isinstance(other, TriangularLinearOperator) and not self.upper ^ other.upper:\n return self.__class__(self._tensor + other._tensor, upper=self.upper)\n return self._tensor + other\n\n def _cholesky(self, upper=False) -> LinearOperator:\n raise NotPSDError(\"TriangularLinearOperator does not allow a Cholesky decomposition\")\n\n def _cholesky_solve(self, rhs: Tensor, upper: bool = False) -> Tensor:\n # use custom method if implemented\n try:\n res = self._tensor._cholesky_solve(rhs=rhs, upper=upper)\n except NotImplementedError:\n if upper:\n # res = (U.T @ U)^-1 @ v = U^-1 @ U^-T @ v\n w = self._transpose_nonbatch().solve(rhs)\n res = self.solve(w)\n else:\n # res = (L @ L.T)^-1 @ v = L^-T @ L^-1 @ v\n w = self.solve(rhs)\n res = self._transpose_nonbatch().solve(w)\n return res\n\n def _diagonal(self) -> Tensor:\n return self._tensor._diagonal()\n\n def _expand_batch(self, batch_shape: torch.Size) -> \"TriangularLinearOperator\":\n if len(batch_shape) == 0:\n return self\n return self.__class__(tensor=self._tensor._expand_batch(batch_shape), upper=self.upper)\n\n def _get_indices(\n self, row_index: torch.LongTensor, col_index: torch.LongTensor, *batch_indices: Tuple[torch.LongTensor, ...]\n ) -> Tensor:\n return self._tensor._get_indices(row_index, col_index, *batch_indices)\n\n def _matmul(self, rhs: Tensor) -> Tensor:\n return self._tensor.matmul(rhs)\n\n def _mul_constant(self, constant: Tensor) -> \"TriangularLinearOperator\":\n return self.__class__(self._tensor * constant.unsqueeze(-1), upper=self.upper)\n\n def _root_decomposition(self) -> Allsor:\n raise NotPSDError(\"TriangularLinearOperator does not allow a root decomposition\")\n\n def _root_inv_decomposition(self, initial_vectors: Optional[Tensor] = None) -> Allsor:\n raise NotPSDError(\"TriangularLinearOperator does not allow an inverse root decomposition\")\n\n def _size(self) -> torch.Size:\n return self._tensor.shape\n\n def _solve(\n self,\n rhs: Tensor,\n preconditioner: Callable[[Tensor], Tensor],\n num_tridiag: int = 0,\n ) -> Tensor:\n # already triangular, can just call solve for the solve\n return self.solve(rhs)\n\n def _sum_batch(self, dim: int) -> \"TriangularLinearOperator\":\n return self.__class__(self._tensor._sum_batch(dim), upper=self.upper)\n\n def _transpose_nonbatch(self) -> \"TriangularLinearOperator\":\n return 
self.__class__(self._tensor._transpose_nonbatch(), upper=not self.upper)\n\n def abs(self) -> \"TriangularLinearOperator\":\n \"\"\"\n Returns a TriangularLinearOperator with the absolute value of all diagonal entries.\n \"\"\"\n return self.__class__(self._tensor.abs(), upper=self.upper)\n\n def add_diagonal(self, added_diag: Tensor) -> \"TriangularLinearOperator\":\n added_diag_lt = self._tensor.add_diagonal(added_diag)\n return self.__class__(added_diag_lt, upper=self.upper)\n\n @cached\n def to_dense(self) -> Tensor:\n return self._tensor.to_dense()\n\n def exp(self) -> \"TriangularLinearOperator\":\n \"\"\"\n Returns a TriangularLinearOperator with all diagonal entries exponentiated.\n \"\"\"\n return self.__class__(self._tensor.exp(), upper=self.upper)\n\n def inv_quad_logdet(\n self,\n inv_quad_rhs: Optional[Tensor] = None,\n logdet: bool = False,\n reduce_inv_quad: bool = True,\n ) -> Tuple[Tensor, Tensor]:\n if inv_quad_rhs is None:\n inv_quad_term = torch.empty(0, dtype=self.dtype, device=self.device)\n else:\n # triangular, solve is cheap\n inv_quad_term = (inv_quad_rhs * self.solve(inv_quad_rhs)).sum(dim=-2)\n if logdet:\n diag = self._diagonal()\n logdet_term = self._diagonal().abs().log().sum(-1)\n if torch.sign(diag).prod(-1) < 0:\n logdet_term = torch.full_like(logdet_term, float(\"nan\"))\n else:\n logdet_term = torch.empty(0, dtype=self.dtype, device=self.device)\n if inv_quad_term.numel() and reduce_inv_quad:\n inv_quad_term = inv_quad_term.sum(-1)\n return inv_quad_term, logdet_term\n\n @cached\n def inverse(self) -> \"TriangularLinearOperator\":\n \"\"\"\n Returns the inverse of the TriangularLinearOperator.\n \"\"\"\n eye = torch.eye(self._tensor.size(-1), device=self._tensor.device, dtype=self._tensor.dtype)\n inv = self.solve(eye)\n return self.__class__(inv, upper=self.upper)\n\n def solve(self, right_tensor: Tensor, left_tensor: Optional[Tensor] = None) -> Tensor:\n squeeze = False\n if right_tensor.dim() == 1:\n right_tensor = right_tensor.unsqueeze(-1)\n squeeze = True\n\n if isinstance(self._tensor, DenseLinearOperator):\n res = torch.linalg.solve_triangular(self.to_dense(), right_tensor, upper=self.upper)\n elif isinstance(self._tensor, BatchRepeatLinearOperator):\n res = self._tensor.base_linear_op.solve(right_tensor, left_tensor)\n # TODO: Proper broadcasting\n res = res.expand(self._tensor.batch_repeat + res.shape[-2:])\n else:\n # TODO: Can we be smarter here?\n res = self._tensor.solve(right_tensor=right_tensor, left_tensor=left_tensor)\n\n if squeeze:\n res = res.squeeze(-1)\n\n if left_tensor is not None:\n res = left_tensor @ res\n return res\n","repo_name":"PythonPortable/3.10.8.0","sub_path":"python-3.10.8.amd64/Lib/site-packages/linear_operator/operators/triangular_linear_operator.py","file_name":"triangular_linear_operator.py","file_ext":"py","file_size_in_byte":7805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"11303844773","text":"import requests\nfrom pprint import pprint, pformat\nfrom oauth_pile import oauth_wrapper\nimport pendulum, time, json\n\n\nclass Chat:\n\n def __init__(self):\n self.name = 'youtube_stream'\n self.generate_auth()\n self.init_stream()\n self.chat = {}\n\n def generate_auth(self):\n storage_name = \"%s-oauth2.json\" % self.name\n self.credentials = oauth_wrapper(storage_name)\n self.auth = self.credentials.access_token\n pass\n\n def init_stream(self):\n params = {\n 'broadcastStatus': 'active',\n 'part': 'id,snippet',\n }\n headers = {\n 'Authorization': f\"Bearer 
{self.auth}\",\n\n }\n yt_response = requests.get('https://www.googleapis.com/youtube/v3/liveBroadcasts', params=params,\n headers=headers)\n if yt_response.status_code != 200:\n error = f\"\"\"\n Youtube returns an error\n {pformat(yt_response.json())}\n \"\"\"\n raise IOError(error)\n self.id = yt_response.json()['items'][0]['id']\n self.name = yt_response.json()['items'][0]['snippet']['title']\n self.chat_id = yt_response.json()['items'][0]['snippet']['liveChatId']\n self.offset = 0\n stream_url = f\"https://www.youtube.com/watch?v={self.id}\"\n chat_send([(\"SYS\", stream_url)])\n\n def list_all(self):\n params = {\n 'liveChatId': self.chat_id,\n 'part': 'authorDetails,snippet',\n }\n headers = {\n 'Authorization': f\"Bearer {self.auth}\",\n\n }\n chat_response = requests.get('https://www.googleapis.com/youtube/v3/liveChat/messages', params=params,\n headers=headers)\n return chat_response.json()['items']\n\n # TODO refactor\n def chat_update(self, raw_chat):\n cleaned_chat = [{'name': entry['authorDetails']['displayName'],\n 'text': entry['snippet']['displayMessage'],\n 'timestamp': pendulum.parse(entry['snippet']['publishedAt'])}\n for entry in raw_chat]\n for entry in cleaned_chat:\n if entry['timestamp'] not in self.chat:\n self.chat[entry['timestamp']] = {'sent': False,\n 'entry': (entry['name'], entry['text'])}\n unsent = [val['entry'] for val in self.chat.values() if not val['sent']]\n if len(unsent) > 5:\n self.chat = {k: {\"sent\": True, 'entry': v['entry']} for k, v in self.chat.items()}\n return None\n chat_send(unsent)\n self.chat = {k: {\"sent\": True, 'entry': v['entry']} for k, v in self.chat.items()}\n pass\n\n def update_offset(self, timestamp, chat):\n pass\n\n\ndef chat_send(cleaned_chat):\n bot_token = '571655900:AAEeOOOw5Uhr7MEmyUr1t0O1tmf_uZpYbI8'\n url = 'https://api.telegram.org/bot' + bot_token + '/sendMessage'\n headers = {'Content-Type': 'application/json'}\n global_chat_id = '41535069'\n for name, message in cleaned_chat:\n fname = name.split(\" \")[0]\n text = f\"{fname}: {message}\"\n print(text)\n message = {\n 'chat_id': global_chat_id,\n 'text': text,\n }\n requests.post(url, headers=headers, json=message)\n pass\n\n\nif __name__ == '__main__':\n yt_chat = Chat()\n chat = yt_chat.list_all()\n\n with open('chat.json', 'w') as chat_file:\n json.dump(chat, chat_file, indent=2)\n cleaned_chat = [{'name': message['authorDetails']['displayName'],\n 'text': message['snippet']['displayMessage'], }\n for message in chat]\n pprint(cleaned_chat)\n while True:\n chat = yt_chat.list_all()\n yt_chat.chat_update(chat)\n time.sleep(2)\n","repo_name":"ariksu/pytube_chat","sub_path":"Chat.py","file_name":"Chat.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"27963205765","text":"# python3\n\n\"\"\"\nPart 1 Solution: 247 x 25 = 6175\n\n\"\"\"\n\ndef read_data(fname):\n\tlines = []\n\twith open(fname) as fp:\n\t\tline = fp.readline().rstrip(\"\\n\")\n\t\twhile line:\n\t\t\tlines.append(line)\n\t\t\tline = fp.readline().rstrip(\"\\n\")\n\n\treturn lines\n\n\ndef checkId(id):\n\tletter_count = {}\n\n\tfor letter in id:\n\t\t#print(letter)\n\t\tif letter not in letter_count:\n\t\t\tletter_count[letter] = 1\n\t\telse:\n\t\t\tletter_count[letter] += 1\n\t\n\thasExactly2 = False\n\thasExactly3 = False\n\t\t\t\n\tfor k, v in letter_count.items():\n\t\t#print(\"{} = {}\".format(k,v))\n\t\t\n\t\tif v == 2:\n\t\t\thasExactly2 = True\n\t\telif v == 3:\n\t\t\thasExactly3 = 
True\n\t\t\t\n\treturn hasExactly2, hasExactly3\t\n\t\n\ndef solve_day2_part1(box_ids):\n\tnum2 = 0\n\tnum3 = 0\n\t\t\n\tfor id in box_ids:\n\t\texactly2, exactly3 = checkId(id)\n\t\t#print(\"id={}, Match={}\".format(id, exactly2))\n\t\tif exactly2:\n\t\t\tnum2 += 1\n\t\tif exactly3:\n\t\t\tnum3 += 1\n\n\tprint(\"{} x {} = {}\".format(num2, num3, num2*num3))\n\t\n\ndef cmp_strings(str1, str2):\n\tdifferences = 0\n\n\tfor i in range(len(str1)):\n\t\tif str1[i] != str2[i]:\n\t\t\tdifferences += 1\n\n\treturn differences\n\n\ndef solve_day2_part2(box_ids):\n\tnum2 = 0\n\tnum3 = 0\n\tn_box_ids = len(box_ids) - 1\n\tprint(\"Number boxes: {}\".format(n_box_ids))\n\n\tfor i in range(0, n_box_ids):\n\t\tstr1 = box_ids[i]\n\n\t\t# letters1 = \n\t\tfor j in range(i, n_box_ids+1):\n\t\t\tstr2 = box_ids[j]\n\t\t\t# print(\"{}, {} Comparing={}, {}\".format(i, j, str1, str2))\n\t\t\tn = cmp_strings(str1, str2)\n\t\t\tif n == 1:\n\t\t\t\tprint(\"{}, {} Comparing={}, {}\".format(i, j, str1, str2))\n\n\t# print(\"{} x {} = {}\".format(num2, num3, num2*num3))\n\t\n# solve_day2([\"bababc\", \"abbcde\", \"abcccd\", \"aabcdd\", \"abcdee\", \"ababab\"])\ndata_day2 = read_data(\"data_day2.txt\")\nsolve_day2_part1(data_day2)\n\nsolve_day2_part2(data_day2)\nn = cmp_strings(\"abc\", \"abc\")\nn = cmp_strings(\"abc\", \"abe\")\nn = cmp_strings(\"xbc\", \"abc\")\nn = cmp_strings(\"xbc\", \"adc\")\n\n# print(\"differences={}\".format(n))\n# print(data)\n","repo_name":"melling/AdventOfCode2018","sub_path":"02/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12836929747","text":"#!/usr/local/bin/python3\n\n'''\nThis program provides examples of string formatting\nin Python 3.\n\nFirst Written by Justin Lynn Reid 9/8/2013\n'''\n\ndata = ((1,1), (2, 2), (12, 13), (4, 4), (99, 98))\n\nfor num1, num2 in data:\n product = num1 * num2\n \n print(\"{0:>4d} = {1:>2d} x {2:>2d}\".format(product, num1, num2))","repo_name":"jlynnr28/OST_Python","sub_path":"Python1/multuple.py","file_name":"multuple.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18487643278","text":"\"\"\"\nSensor for monitoring the size of a file.\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/sensor.filesize/\n\"\"\"\nimport datetime\nimport logging\nimport os\n\nimport voluptuous as vol\n\nfrom homeassistant.helpers.entity import Entity\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.components.sensor import PLATFORM_SCHEMA\n\n_LOGGER = logging.getLogger(__name__)\n\nCONF_DIR_PATHS = 'dir_paths'\nICON = 'mdi:file-multiple'\n\n#PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n# vol.Required(CONF_DIR_PATHS):\n# vol.All(cv.ensure_list, [cv.isdir]),\n#})\n\ndef get_number_files(path):\n num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])\n return num_files\n \ndef del_old_files(path):\n for dirpath, dirnames, filenames in os.walk(path):\n for file in filenames:\n curpath = os.path.join(dirpath, file)\n file_modified = datetime.datetime.fromtimestamp(os.path.getmtime(curpath))\n if datetime.datetime.now() - file_modified > datetime.timedelta(hours=24):\n os.remove(curpath)\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the file size sensor.\"\"\"\n sensors = []\n for 
path in config.get(CONF_DIR_PATHS):\n if not hass.config.is_allowed_path(path):\n _LOGGER.error(\n \"Filepath %s is not valid or allowed\", path)\n return\n else:\n sensors.append(dir_files(path))\n\n if sensors:\n add_devices(sensors, True)\n\n\nclass dir_files(Entity):\n \"\"\"Encapsulates file size information.\"\"\"\n\n def __init__(self, path):\n \"\"\"Initialize the data object.\"\"\"\n self._path = path # Need to check its a valid path\n self._size = None\n self._number_files = 0\n self._name = path\n self._unit_of_measurement = 'files'\n\n def update(self):\n \"\"\"Update the sensor.\"\"\"\n self._number_files = get_number_files(self._path)\n #self._last_updated = get_last_updated(self._path)\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def state(self):\n \"\"\"Return the number of files.\"\"\"\n state_nf = self._number_files\n return state_nf\n\n @property\n def icon(self):\n \"\"\"Icon to use in the frontend, if any.\"\"\"\n return ICON\n\n @property\n def device_state_attributes(self):\n \"\"\"Return other details about the sensor state.\"\"\"\n attr = {\n 'path': self._path,\n 'number_files': self._number_files,\n }\n return attr\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit of measurement of this entity, if any.\"\"\"\n return self._unit_of_measurement","repo_name":"robsonschmidt/homeassistant","sub_path":"custom_components/sensor/dir_files.py","file_name":"dir_files.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"9587799625","text":"import argparse\n\nimport json\nimport os\n\nfrom io import StringIO\nimport requests\nfrom api import EmaillabsAPI\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--appkey\",\n default=os.environ.get(\"EMAILLABS_APPKEY\"),\n help=\"App key (default from environment EMAILLABS_APPKEY)\",\n )\n parser.add_argument(\n \"--secret\",\n default=os.environ.get(\"EMAILLABS_SECRET\"),\n help=\"App key (default from environment EMAILLABS_SECRET)\",\n )\n parser.add_argument(\n \"--data-file\", required=True, type=str, help=\"Path to data file\"\n )\n\n args = parser.parse_args()\n if not args.appkey or not args.secret:\n raise Exception(\"Appkey and Secret are required!\")\n\n client = EmaillabsAPI(args.appkey, args.secret)\n\n data = []\n for email in client.get_emails_iter():\n data.append(email)\n with open(args.data_file, \"w\") as fp:\n json.dump(data, fp, indent=2)\n","repo_name":"watchdogpolska/docker-images","sub_path":"emaillabs-report-tool/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28629362829","text":"from collections import deque\nimport sys\n\ndef input():\n return sys.stdin.readline().rstrip()\n\nn, m = map(int, input().split())\ngraph = [[] for _ in range(n + 1)]\ntable = [0] * (n + 1)\nanswer = []\nq = deque()\nfor _ in range(m):\n a, b = map(int, input().split())\n table[b] += 1\n graph[a].append(b)\n\nfor i, x in enumerate(table):\n if i and x == 0:\n q.append(i)\n\nwhile q:\n now = q.popleft()\n answer.append(now)\n for s in graph[now]:\n table[s] -= 1\n if table[s] == 0:\n q.append(s)\n\nprint(*answer)","repo_name":"mangbaam/CodingTest","sub_path":"백준/Gold/2252. 
줄 세우기/줄 세우기.py","file_name":"줄 세우기.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3862696838","text":"import serial\nfrom p1_decode import p1_decode\n\n\nclass SmartMeter(serial.Serial):\n def read_p1_packet(self):\n try:\n packet = ''\n line = ''\n\n while '!' not in line:\n line = self.readline().decode()\n line = line.replace('\\r', '')\n packet += line\n\n decoded_packet = p1_decode(packet)\n return decoded_packet\n except:\n return None\n","repo_name":"WouterGritter/p1-smart-meter-decoder","sub_path":"smart_meter.py","file_name":"smart_meter.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16458377130","text":"import jcs_math\nimport vector2\n\n\nclass Physics(object):\n\n \"\"\"All physic relate function will be put in this file.\"\"\"\n\n # --------------------------------------------\n # Public Variables\n # --------------------------------------------\n GRAVITY_SCALE = 15.0\n GRAVITY = vector2.Vector2(0, 9.81 * GRAVITY_SCALE)\n EPSILON = 0.0001\n\n # --------------------------------------------\n # Private Variables\n # --------------------------------------------\n\n # --------------------------------------------\n # Protected Variables\n # --------------------------------------------\n\n # --------------------------------------------\n # Constructor\n # --------------------------------------------\n\n # --------------------------------------------\n # Public Methods\n # --------------------------------------------\n @staticmethod\n def unit_vector(vec2):\n \"\"\"Return the unit vector base on two point.\n\n @param vec2: Vector2 you want to target.\n \"\"\"\n tmpLen = Physics.get_mgnitude(vec2)\n\n if tmpLen > Physics.EPSILON:\n invLen = 1.0 / tmpLen\n vec2.x *= invLen\n vec2.y *= invLen\n\n @staticmethod\n def get_magnitude(vec2):\n \"\"\"Return magnitude base on two coordinate.\n\n @param vec2: Vector2 you want to target.\n @return magnitude / absolute value of vector length\n \"\"\"\n return jcs_math.pythagorean_theorem(vec2.x, vec2.y, \"hyp\")\n\n @staticmethod\n def get_normalize(vec2):\n \"\"\"Modified normalize value.\n\n @param vec2: Vector2 you want to target.\n \"\"\"\n Physics.unit_vector(vec2)\n\n @staticmethod\n def get_perpendicular(vec2):\n \"\"\"Return perpendicular value.\n\n @param vec2: Vector2 you want to target.\n @return perpendicular value.\n \"\"\"\n return vector2.Vector2(vec2.y, -vec2.x)\n\n @staticmethod\n def point_distance(pointA, pointB):\n \"\"\"Return the distance between two points.\n\n @param { Vector2 } pointA : first point.\n @param { Vector2 } pointB : second point.\n @return { float } : distance between two points.\n \"\"\"\n\n vDistance = jcs_math.absolute_value(pointA.get_y() - pointB.get_y())\n hDistance = jcs_math.absolute_value(pointA.get_x() - pointB.get_x())\n\n return jcs_math.pythagorean_theorem(vDistance, hDistance, \"hyp\")\n\n @staticmethod\n def dist_sqr(vecA, vecB):\n \"\"\"Distance angle.\n @param { Vector2 } vecA : vector A.\n @param { Vector2 } vecB : vector B.\n @return { float } : angle value.\n \"\"\"\n vecC = vecA - vecB\n return jcs_math.dot_product(vecC, vecC)\n\n @staticmethod\n def integrate_forces(shape, deltaTime):\n \"\"\"SEE: http://www.niksula.hut.fi/~hkankaan/Homepages/gravity.html\"\"\"\n\n tmpBody = shape.get_rigidbody()\n\n if tmpBody.get_inverse_mass() == 0.0:\n return\n\n halfDeltaTime = deltaTime 
* 0.5\n\n tmpBody.velocity += (tmpBody.get_force() * tmpBody.get_inverse_mass() + Physics.GRAVITY) * halfDeltaTime\n tmpBody.angular_velocity += tmpBody.torque * tmpBody.get_inverse_inertia() * halfDeltaTime\n\n @staticmethod\n def integrate_velocity(shape, deltaTime):\n \"\"\"Start the velocity in physics world.\"\"\"\n tmpBody = shape.get_rigidbody()\n\n if tmpBody.get_inverse_mass() == 0.0:\n return\n\n tmpBody.position += tmpBody.velocity * deltaTime\n # STUDY(jenchieh): angular_velocity is too small\n # and cannot be add up.\n tmpBody.orientation += (tmpBody.angular_velocity * deltaTime)\n\n tmpBody.set_orientation(tmpBody.orientation)\n Physics.integrate_forces(shape, deltaTime)\n\n\n # --------------------------------------------\n # Protected Methods\n # --------------------------------------------\n\n # --------------------------------------------\n # Private Methods\n # --------------------------------------------\n\n # --------------------------------------------\n # setter / getter\n # --------------------------------------------\n","repo_name":"jcs090218/ImpulseEngine","sub_path":"jcspygm_physics/physics.py","file_name":"physics.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"34646446289","text":"from math import sqrt\r\n\r\ndef nodesDict(nodes_list):\r\n nodes = {}\r\n for dict in nodes_list:\r\n node_id = str(dict['id'])\r\n x, y, _ = dict['pos'].split(',')\r\n pos = (float(x), float(y))\r\n nodes[node_id] = pos\r\n return nodes\r\n\r\n\r\ndef edgesDict(edges_list):\r\n edges = {}\r\n for dict in edges_list:\r\n src = str(dict['src'])\r\n dest = dict['dest']\r\n w = dict['w']\r\n if src not in edges.keys():\r\n edges[src] = []\r\n edges[src].append((dest, w))\r\n return edges\r\n\r\n\r\nclass Graph:\r\n def __init__(self, nodes_list, edges_list):\r\n self.nodes = nodesDict(nodes_list)\r\n self.edges = edgesDict(edges_list)\r\n\r\n def dis(self, a, b): # a, b are lists\r\n x1 = a[0]\r\n x2 = b[0]\r\n y1 = a[1]\r\n y2 = b[1]\r\n return abs(sqrt(pow((x1 - x2), 2) + pow((y1 - y2), 2)))\r\n\r\n def findPokemon(self, pos, type):\r\n x, y, _ = pos.split(',')\r\n pos = (float(x)), (float(y))\r\n for src_id in self.edges.keys():\r\n src = self.nodes[src_id]\r\n for dest_node in self.edges[src_id]:\r\n dest_id = dest_node[0]\r\n if int(src_id) < int(dest_id) and type >= 0 or int(src_id) > int(dest_id) and type <= 0:\r\n dest = self.nodes[str(dest_id)]\r\n dis_srcToDest = self.dis(src, dest)\r\n dis_pokemonToNodes = self.dis(src, pos) + self.dis(pos, dest)\r\n if (abs(dis_srcToDest - dis_pokemonToNodes)) < 0.00001:\r\n return src_id, dest_id\r\n\r\n return None\r\n\r\n def updatePokemons(self, agent_nodesListKeys, pokemons, isTarget_pokemon):\r\n for dict in pokemons:\r\n false_flag = False\r\n agent_flag = False\r\n pos = dict['Pokemon']['pos']\r\n type = dict['Pokemon']['type']\r\n\r\n for list in isTarget_pokemon['false']:\r\n if str(pos) == str(list[0]):\r\n false_flag = True\r\n break\r\n\r\n for list in agent_nodesListKeys:\r\n if str(list[0]) == str(pos):\r\n agent_flag = True\r\n break\r\n\r\n if false_flag == False and agent_flag == False:\r\n pokemon = Graph.findPokemon(self, pos, type)\r\n l = (pos, pokemon)\r\n isTarget_pokemon['false'].insert(0, l)\r\n\r\n return isTarget_pokemon\r\n\r\n def shortestPath(self, A, B): # A - src of agent, B - edge(src_node, dest_node)\r\n src_node = int(B[0])\r\n if src_node == A:\r\n return [B[1]]\r\n dest_node = B[1]\r\n path = 
[src_node, dest_node]\r\n # initialize distance from A (src)\r\n d = [1000000] * len(self.nodes)\r\n d[A] = 0\r\n\r\n phi = [0] * len(self.nodes) # list of pointers to the previous node in the path (to reconstruct the path)\r\n visited = [False] * len(self.nodes) # control the visited nodes\r\n allVisited = False\r\n\r\n while not allVisited:\r\n # find the unvisited node with the minimum distance from src\r\n id_min = 0\r\n for i in range(len(self.nodes)):\r\n if not visited[i]:\r\n id_min = i\r\n break\r\n for j in range(len(self.nodes)):\r\n if d[j] < d[id_min] and not visited[j]:\r\n id_min = j\r\n\r\n # update all its neighbors if necessary\r\n for node_dest in self.edges[str(id_min)]:\r\n dest = node_dest[0]\r\n w = node_dest[1]\r\n if d[id_min] + w < d[dest]:\r\n d[dest] = d[id_min] + w\r\n phi[dest] = id_min\r\n visited[id_min] = True\r\n\r\n # check if all visited\r\n allVisited = True\r\n for i in range(len(visited)):\r\n if not visited[i]:\r\n allVisited = False\r\n break\r\n\r\n i = src_node # src node\r\n while i != A:\r\n path.insert(0, phi[i])\r\n i = phi[i]\r\n\r\n return path\r\n","repo_name":"ShaniVahav/Pokemon-Catcher","sub_path":"Algo.py","file_name":"Algo.py","file_ext":"py","file_size_in_byte":4108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"5179363151","text":"# Write a program that reads six integers and shows the sum of only those that are even. If the value entered is odd, ignore it.\n\nsoma = 0\ncont = 0\n\nfor n in range(1,7):\n num = int(input(f'Enter number {n}: '))\n if num % 2 == 0:\n soma += num\n cont += 1\n \nprint(f'{cont} even numbers were used; sum result: {soma} ')\n","repo_name":"Nandabdev/Python3-Mundo2","sub_path":"for.py/somandopares.py","file_name":"somandopares.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
{"seq_id":"36176343350","text":"from numpy import argsort\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np \nfrom my_utils.chem_utils import ATOM_FDIM,BOND_FDIM\nfrom my_utils.data_utils import G2SBatch\nfrom my_models.graphfeat import GraphFeatEncoder \nfrom my_models.attention_xl import AttnEncoderXL\n\nfrom torch_geometric.nn import Set2Set\n\nclass MLP(nn.Module):\n def __init__(self, in_dim, out_dim):\n super(MLP, self).__init__()\n self.fcs = nn.Sequential(\n nn.Linear(in_dim, out_dim),\n nn.BatchNorm1d(out_dim),\n nn.PReLU(),\n nn.Linear(out_dim, out_dim),\n nn.BatchNorm1d(out_dim),\n nn.PReLU(),\n nn.Linear(out_dim, out_dim),\n nn.PReLU()\n )\n self.linear_shortcut = nn.Linear(in_dim, out_dim)\n\n def forward(self, x):\n return self.fcs(x) + self.linear_shortcut(x)\n\nclass Gsimclr(nn.Module):\n def __init__(self,args,feature_dim=64):\n super(Gsimclr,self).__init__()\n self.args=args\n\n # encoder\n self.encoder = GraphFeatEncoder(\n args,\n n_atom_feat=sum(ATOM_FDIM),\n n_bond_feat=BOND_FDIM\n )\n \n if args.attn_enc_num_layers > 0:\n self.attention_encoder = AttnEncoderXL(args)\n else:\n self.attention_encoder = None\n \n #used for producing a global-embedding\n # dim reduced from 512 -> 256\n #self.set2set=Set2Set(256,processing_steps=3) \n\n # projection head \n self.head=MLP(128,feature_dim)\n \n self.g = nn.Sequential(\n nn.Linear(128,128,bias=True),\n nn.BatchNorm1d(128),\n nn.ReLU(),\n nn.Linear(128, feature_dim, bias=True))\n
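 # self.g is a SimCLR-style projection head: it maps the pooled 128-d graph embedding to feature_dim for the contrastive loss.\n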
 \n \"\"\"\n self.g = nn.Sequential(\n nn.Linear(128,feature_dim),\n nn.BatchNorm1d(feature_dim),\n nn.PReLU(),\n nn.Linear(feature_dim, feature_dim),\n nn.BatchNorm1d(feature_dim),\n nn.PReLU(),\n nn.Linear(feature_dim, feature_dim),\n nn.PReLU()\n )\n self.linear_shortcut = nn.Linear(128, feature_dim)\n \"\"\"\n\n def f(self,reaction_batch:G2SBatch):\n # encode a batch of reaction graphs into a padded per-atom memory bank plus per-molecule lengths\n #print('reaction_batch.size',reaction_batch.size) #b\n hatom,_ = self.encoder(reaction_batch)\n\n #print('hatom.size()',hatom.size()) # size varies (e.g. 512 or 632)\n atom_scope=reaction_batch.atom_scope\n memory_lengths=[scope[-1][0]+scope[-1][1]-scope[0][0] for scope in atom_scope]\n\n assert 1+sum(memory_lengths) == hatom.size(0), \\\n f\"Memory lengths calculation error, encoder output: {hatom.size(0)},memory_lengths:{memory_lengths}\"\n\n memory_bank=torch.split(hatom,[1]+memory_lengths,dim=0)\n padded_memory_bank=[]\n max_length=max(memory_lengths)\n\n for length,h in zip(memory_lengths,memory_bank[1:]):\n m=nn.ZeroPad2d((0,0,0,max_length-length))\n padded_memory_bank.append(m(h))\n \n padded_memory_bank=torch.stack(padded_memory_bank,dim=1) #[max_t,b,h]\n memory_lengths = torch.tensor(memory_lengths,\n dtype=torch.long,\n device=padded_memory_bank.device)\n \n \n if self.attention_encoder is not None:\n \n padded_memory_bank = self.attention_encoder(\n padded_memory_bank,\n memory_lengths,\n reaction_batch.distances\n )\n \n return padded_memory_bank,memory_lengths\n\n def forward(self, x):\n \"\"\"\n local_emb,memory_lengths= self.f(x) #[max_t,b,h]\n local_emb=local_emb.transpose(0,1) #[b,max_t,h]\n global_emb=torch.sum(local_emb,dim=1)\n b=local_emb.shape[0]\n max_t=local_emb.shape[1]\n hid=local_emb.shape[2]\n local_emb=local_emb.reshape(-1,hid) #[b*max_t,h]\n\n local_enc=self.head(local_emb)\n global_enc=self.head(global_emb)\n \n local_enc=self.g(local_emb)\n global_enc=self.g(global_emb)\n\n #local_enc=local_enc.transpose(1,2) #[b,max_t, h] \n local_enc=local_enc.reshape(b,max_t,-1)\n mlen=len(memory_lengths)\n #return local_enc,global_enc,mlen\n \"\"\"\n local_emb,_= self.f(x) #[max_t,b,h]\n local_emb=local_emb.transpose(0,1) #[b,max_t,h]\n global_emb=torch.sum(local_emb,dim=1) #[b,h]\n global_enc=self.head(global_emb)\n return global_enc","repo_name":"Alice01010101/Gsimclr2SMILES","sub_path":"pretrained/my_models/Gsimclr.py","file_name":"Gsimclr.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"27963205765","text":"# Revision.get(hash)\n# Revision.delete(hash)\n# /v1/revisions/<hash>\n# /v1/revisions/<hash>/content\nimport hashlib\nfrom synchrony import app, db\nimport flask_restful as restful\nfrom flask import request, session\nfrom synchrony.models import Revision\nfrom sqlalchemy import and_, or_, desc\nfrom synchrony.controllers.auth import auth\nfrom synchrony.controllers.utils import Pagination, make_response\n\nclass RevisionCollection(restful.Resource):\n def get(self):\n user = auth(session, required=True)\n\n parser = restful.reqparse.RequestParser()\n parser.add_argument(\"page\", type=int, default=1)\n parser.add_argument(\"per_page\", type=int, default=10)\n args = parser.parse_args() \n\n if user.can(\"see_all\"):\n query = Revision.query.order_by(desc(Revision.created)).paginate(args.page, args.per_page)\n else:\n query = Revision.query.filter(or_(Revision.public == True, Revision.user == user))\\\n .order_by(desc(Revision.created)).paginate(args.page, args.per_page)\n\n return make_response(request.url, query)\n\nclass RevisionResource(restful.Resource):\n def get(self, hash):\n \"\"\"\n Return a specific revision by hash.\n
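 Illustrative request: a GET to /v1/revisions/<hash> returns the revision's JSON for the current user, or 404 if absent.\n 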
\"\"\"\n user = auth(session)\n rev = Revision.query.filter(and_(Revision.hash == hash, Revision.user == user)).first()\n if rev:\n return rev.jsonify()\n return {}, 404\n\n def put(self, hash):\n \"\"\"\n Create an edit of a revision.\n \"\"\"\n user = auth(session, required=True)\n parser = restful.reqparse.RequestParser()\n parser.add_argument(\"document\",type=str, required=True)\n args = parser.parse_args() \n\n parent = Revision.query.filter(and_(Revision.hash == hash, Revision.user == user)).first()\n if parent != None:\n revision = Revision()\n revision.user = user\n revision.parent = parent\n revision.content = args.document\n revision.size = len(revision.content)\n revision.hash = hashlib.sha1(args.document).hexdigest()\n revision.get_mimetype()\n db.session.add(revision)\n db.session.add(parent)\n db.session.commit()\n return revision.jsonify(), 201\n return {}, 404\n\n def post(self, hash):\n \"\"\"\n Modify attributes of an existing revision.\n \"\"\"\n user = auth(session, required=True)\n\n parser = restful.reqparse.RequestParser()\n parser.add_argument(\"public\", type=bool, help=\"\", required=True, default=None)\n args = parser.parse_args() \n\n rev = Revision.query.filter(and_(Revision.hash == hash, Revision.user == user)).first()\n # Tell peers we're storing data for this revision if it exists and isn't an edit.\n if rev and rev.parent == None and args.public != None:\n rev.public = args.public\n if args.public == True:\n for router in app.routes:\n app.routes[router][rev] = rev\n\n db.session.add(rev)\n db.session.commit()\n return rev.jsonify()\n\n return {}, 404\n\n def delete(self, hash):\n \"\"\"\n Delete a revision object by content hash.\n \"\"\"\n user = auth(session, required=True)\n\n if user.can(\"delete_at_will\"):\n rev = Revision.query.filter(Revision.hash == hash).first()\n else:\n rev = Revision.query.filter(\n and_(Revision.hash == hash, Revision.user == user)\n ).first()\n \n if not rev:\n return {}, 404\n\n db.session.delete(rev)\n db.session.commit()\n return {}, 204\n\nclass RevisionContentResource(restful.Resource):\n def get(self, hash):\n user = auth(session)\n rev = Revision.query.filter(and_(Revision.hash == hash, Revision.user == user)).first()\n if rev:\n return rev.as_response\n return {}, 404\n\nclass RevisionDownloadsCollection(restful.Resource):\n def get(self):\n \"\"\"\n Return all DHT downloads across all networks.\n \"\"\"\n user = auth(session, required=True)\n\n parser = restful.reqparse.RequestParser()\n parser.add_argument(\"page\", type=int, default=1)\n parser.add_argument(\"per_page\", type=int, default=10)\n args = parser.parse_args() \n\n if not user.can(\"see_all\") and not user.can(\"review_downloads\"):\n return {}, 403\n\n response = []\n for routes in app.routes.values():\n r = {'network': routes.network}\n r['downloads'] = [{f: routes.protocol.downloads[f]} for \\\n f in routes.protocol.downloads]\n response.append(r)\n \n pages = Pagination(response, args.page, args.per_page)\n return make_response(request.url, pages, jsonify=False)\n\nclass RevisionDownloadsResource(restful.Resource):\n def get(self, network):\n \"\"\"\n Provides an overview of revisions fetched via overlay network.\n\n Would be part of RevisionFeedbackResource except this has no need\n for a \"hash\" parameter.\n \"\"\"\n user = auth(session, required=True)\n\n if not user.can(\"see_all\") and not user.can(\"review_downloads\"):\n return {}, 403\n\n if not network:\n routes = app.routes._default\n else:\n routes = app.routes.get(network, None)\n if 
not routes:\n return {}, 404\n\n hashes = routes.protocol.downloads.get(args.url)\n if not hashes:\n return {}, 404\n\n addr = hashes.get(args.hash, None)\n if not addr:\n return \"Peer not found.\", 404\n\n # Delete the revision in question by hash\n revision = Revision.query.filter(Revision.hash == args.hash).first()\n if not revision:\n return \"Revision not found.\", 404\n\n db.session.delete(revision)\n db.session.commit()\n\n success = routes.protocol.decrement_trust(addr)\n # Peer is 410 Gone\n if not success:\n return \"Peer had already left.\", 410\n\n return {}, 200\n\nclass RevisionSearchResource(restful.Resource):\n def get(self, query):\n user = auth(session, required=True)\n parser = restful.reqparse.RequestParser()\n parser.add_argument(\"page\", type=int, default=1)\n parser.add_argument(\"per_page\", type=int, default=10)\n args = parser.parse_args()\n\n revisions = [_ for _ in Revision.query.all() if query in _.url and \\\n (_.user == user or _.public)]\n pages = Pagination(revisions, args.page, args.per_page)\n response = make_response(request.url, pages)\n return response\n","repo_name":"Psybernetics/Synchrony","sub_path":"synchrony/resources/revisions.py","file_name":"revisions.py","file_ext":"py","file_size_in_byte":7437,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"81"}
{"seq_id":"74794836425","text":"class Solution:\n def find132pattern(self, nums: List[int]) -> bool:\n '''\n dp[i] => the minimum value to the left of index i\n monotonic stack => find the increasing values (scanning from tail to head)\n '''\n n = len(nums)\n dp = [float('inf')] * n\n \n # dp[i] => the minimum value to the left of index i\n cur_min = float('inf')\n for i, num in enumerate(nums):\n dp[i] = cur_min\n cur_min = min(cur_min, num)\n \n \n \n # monotonic stack\n right_stack = [-float('inf')]\n for i in range(n-1, -1, -1):\n left = dp[i]\n mid = nums[i]\n right = right_stack[-1]\n \n while mid > right:\n if left < right:\n return True\n right_stack.pop()\n if not right_stack:\n break\n right = right_stack[-1]\n right_stack.append(mid)\n \n return False\n","repo_name":"novayo/LeetCode","sub_path":"0456_132_Pattern/try_3.py","file_name":"try_3.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
{"seq_id":"10380346254","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n# @Time : 2020/10/1 11:29\n# @Author : strawsyz\n# @File : signate1.py\n# @desc:\n\nimport os\nimport matplotlib.pylab as plt\nimport torch\nimport xgboost as xgb\nfrom sklearn import metrics\nfrom sklearn.svm import SVR, LinearSVR, LinearSVC\nfrom torch import optim\nfrom torch.autograd import Variable\nfrom torch.nn import BCEWithLogitsLoss\nfrom xgboost.sklearn import XGBClassifier\n\nfrom utils.machine_learning_utils import *\n\n\ndef year_2_int(year: str):\n year = year.replace(\"years\", \"\").replace(\"year\", \"\").strip()\n return int(year)\n\n\ndef str_2_int_4_application_type(application_type: str):\n if application_type == \"Joint App\":\n
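 # Hypothetical label encoding kept from the data: \"Joint App\" -> 1, \"Individual\" -> 2, anything else -> 0.\n 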
return 1\n elif application_type == \"Individual\":\n return 2\n else:\n return 0\n\n\ndef str_2_int_4_purpose(purpose: str):\n choices = ['small_business', 'house', 'medical', 'home_improvement', 'car', 'debt_consolidation', 'other',\n 'credit_card', 'major_purchase']\n for idx, choice in enumerate(choices, start=1):\n if choice == purpose:\n return idx\n return 0\n\n\ndef str_2_int_4_loan_status(loan_status: str):\n if loan_status == \"ChargedOff\":\n return 1\n elif loan_status == \"FullyPaid\":\n return 0\n else:\n return None\n\n\ndef str_2_int_4_grade(grade: str):\n grade_level_ = grade[0]\n grade_no_ = int(grade[1])\n grade_levels = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n for idx, grade_level in enumerate(grade_levels):\n if grade_level_ == grade_level:\n return idx * 10 + grade_no_\n return None\n\n\ndef preprocess(train_data_path: str = r\"\",\n test_data_path: str = r\"\"):\n df = pd.read_csv(train_data_path)\n df[\"purpose\"] = df[\"purpose\"].map(str_2_int_4_purpose)\n df[\"application_type\"] = df[\"application_type\"].map(str_2_int_4_application_type)\n df[\"term\"] = df[\"term\"].map(year_2_int)\n df[\"employment_length\"] = df[\"employment_length\"].map(year_2_int)\n df[\"grade\"] = df[\"grade\"].map(str_2_int_4_grade)\n df[\"loan_status\"] = df[\"loan_status\"].map(str_2_int_4_loan_status)\n\n # standard data\n # one hot\n df.to_csv(r\"\")\n\n df = pd.read_csv(test_data_path)\n df[\"purpose\"] = df[\"purpose\"].map(str_2_int_4_purpose)\n df[\"application_type\"] = df[\"application_type\"].map(str_2_int_4_application_type)\n df[\"term\"] = df[\"term\"].map(year_2_int)\n df[\"employment_length\"] = df[\"employment_length\"].map(year_2_int)\n df[\"grade\"] = df[\"grade\"].map(str_2_int_4_grade)\n df.to_csv(r\"\")\n\n\ndef read_train_data(file_path: str = r\"\", num: int = None):\n df = pd.read_csv(file_path)\n df[\"purpose\"] = df[\"purpose\"].map(str_2_int_4_purpose)\n df[\"application_type\"] = df[\"application_type\"].map(str_2_int_4_application_type)\n df[\"term\"] = df[\"term\"].map(year_2_int)\n df[\"employment_length\"] = df[\"employment_length\"].map(year_2_int)\n df[\"grade\"] = df[\"grade\"].map(str_2_int_4_grade)\n df[\"loan_status\"] = df[\"loan_status\"].map(str_2_int_4_loan_status)\n # print(df.info())\n # print(df.describe())\n # print(df.corr())\n # print(df.corr(\"kendall\"))\n # print(df.corr(\"spearman\"))\n\n train_data = df.loc[:, ['loan_amnt', 'term', 'interest_rate', 'grade',\n 'employment_length', 'purpose', 'credit_score', 'application_type']]\n # all_data = df.loc[:, ['term', 'interest_rate', 'grade', \"loan_status\"]]\n # all_data = np.asarray(all_data)\n # import random\n # random.shuffle(all_data)\n # train_data = all_data[:, :3]\n # train_gt = all_data[:, 3]\n\n # train_data = df.loc[:, ['term', 'interest_rate', 'grade', 'credit_score']]\n train_data = np.array(train_data)\n if num is not None:\n train_data = train_data[:num]\n train_data, range_, min_val = z_score(train_data)\n # df.loc[:, ['term', 'interest_rate', 'grade', 'credit_score']] = train_data\n df.loc[:, ['loan_amnt', 'term', 'interest_rate', 'grade',\n 'employment_length', 'purpose', 'credit_score', 'application_type']] = train_data\n train_gt = df.loc[:, \"loan_status\"]\n train_gt = np.array(train_gt)\n if num is not None:\n train_gt = train_gt[:num]\n return train_data, train_gt, range_, min_val\n\n\ndef read_test_data(range_, min_val, file_path=r\"\"):\n df = pd.read_csv(file_path)\n df[\"purpose\"] = df[\"purpose\"].map(str_2_int_4_purpose)\n df[\"application_type\"] = 
df[\"application_type\"].map(str_2_int_4_application_type)\n    df[\"term\"] = df[\"term\"].map(year_2_int)\n    df[\"employment_length\"] = df[\"employment_length\"].map(year_2_int)\n    df[\"grade\"] = df[\"grade\"].map(str_2_int_4_grade)\n\n    test_data = df.loc[:, ['loan_amnt', 'term', 'interest_rate', 'grade',\n                           'employment_length', 'purpose', 'credit_score', 'application_type']]\n    # test_data = df.loc[:, ['term', 'interest_rate', 'grade',\n    #                        'credit_score']]\n    test_data = np.array(test_data)\n    test_data, _, _ = z_score(test_data, range_, min_val)\n    return test_data\n\n\ndef create_model(gpu=True):\n    from net_structures.FNN import AdaptiveFNN\n    model = AdaptiveFNN(num_in=8, num_out=2, num_units=[8, 16, 32, 16, 8, 4, 1], activate_func=\"sigmoid\")\n    if gpu:\n        model.cuda()\n    return model\n\n\ndef save_predict_data(data, save_path, file_path=r\"\"):\n    df = pd.read_csv(file_path, header=None, index_col=0)\n    save_path = os.path.join(\"C:\\\(lab\\datasets\\signate01\", save_path)\n    length = len(data)\n    df.iloc[:length, 0] = data\n    print(\"save predict data at {}\".format(save_path))\n    df.to_csv(save_path, header=None)\n\n\ndef ML_01(X, Y):\n    GridSearch(Lasso()).use_grid_search(X, Y,\n                                        {'alpha': [0.0000001, 0.000002, 0.0000003, 0.0000004, 0.000005],\n                                         'max_iter': [10000]})\n    GridSearch(Ridge()).use_grid_search(X, Y, {'alpha': [0.1, 0.2, 0.3, 0.4, 0.5]})\n    # GridSearch(SVR()).grid_get(X, Y, {'C': [11], 'kernel': ['rbf'], 'gamma': [0.0003, 0.0004],\n    #                                   'epsilon': [0.008, 0.009]})\n    # params = {'alpha': [0.2, 0.3, 0.4, 0.5], 'kernel': ['polynomial'], 'degree': [3],\n    #           'coef0': [0.8, 1, 1.2]}\n    # GridSearch(KernelRidge()).grid_get(X, Y, params)\n    GridSearch(ElasticNet()).use_grid_search(X, Y,\n                                             {'alpha': [0.0005, 0.0008, 0.004, 0.005],\n                                              'l1_ratio': [0.08, 0.1, 0.3, 0.5, 0.7],\n                                              'max_iter': [10000]})\n\n\ndef create_data_loader(data, batch_size):\n    batch_data = []\n    for idx, item in enumerate(data, start=1):\n        batch_data.append(item)\n        if idx % batch_size == 0:\n            yield np.asarray(batch_data)\n            batch_data = []\n\n\ndef normalization(data_set, range_=None, min_val=None, axis=0):\n    if min_val is None or range_ is None:\n        min_val = np.min(data_set, axis=axis)\n        max_val = np.max(data_set, axis=axis)\n        range_ = max_val - min_val\n    norm_data_set = (data_set - min_val) / range_\n    return norm_data_set, range_, min_val\n\n\ndef z_score(x, mean=None, std=None, axis=0):\n    \"\"\" This code may have a problem (it standardizes x in place) \"\"\"\n    if mean is None or std is None:\n        mean = np.mean(x, axis=axis)\n        std = np.std(x, axis=axis)\n    xr = np.rollaxis(x, axis=axis)\n    xr -= mean\n    xr /= std\n    return x, mean, std\n\n\ndef ML_02(X, Y, test_X):\n    lasso = Lasso(alpha=0.0005, max_iter=10000)\n    ridge = Ridge(alpha=60)\n    svr = SVR(gamma=0.0004, kernel='rbf', C=13, epsilon=0.009)\n    ker = KernelRidge(alpha=0.2, kernel='polynomial', degree=3, coef0=0.8)\n    ela = ElasticNet(alpha=0.005, l1_ratio=0.08, max_iter=10000)\n    bay = BayesianRidge()\n    stack_model = stacking(models=[lasso, ridge, svr, ker, ela, bay], meta_model=ker)\n    result = rmse_cv(stack_model, X, Y)\n\n    print(result)\n    print(result.mean())\n\n    X_train_stack, X_test_stack = stack_model.get_oof(X, Y, test_X)\n    x_train_add = np.hstack((X, X_train_stack))\n\n    print(rmse_cv(stack_model, x_train_add, Y))\n    print(rmse_cv(stack_model, x_train_add, Y).mean())\n\n\ndef ML_03(X, Y, test_data=None, test_gt=None, cv=5):\n    \"\"\"compare different models to train\"\"\"\n    # models = [LinearRegression(), Ridge(), Lasso(alpha=0.01, max_iter=10000), RandomForestRegressor(),\n    #           GradientBoostingRegressor(), SVR(), LinearSVR(),\n    #           
ElasticNet(alpha=0.001, max_iter=10000), SGDRegressor(max_iter=1000, tol=1e-3), BayesianRidge(),\n # KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5),\n # ExtraTreesRegressor(), XGBRegressor()]\n # model_names = [\"LR\", \"Ridge\", \"Lasso\", \"RF\", \"GBR\", \"SVR\", \"LinearSVR\", \"Ela\", \"SGD\", \"Bay\", \"Ker\", \"Extra\", \"Xgb\"]\n # for name, model in zip(model_names, models):\n # score = rmse_cv(model, X, Y)\n model_dict = [\n (\"LinearSVC\", LinearSVC()),\n (\"RF\", RandomForestRegressor()),\n (\"LinearSVR\", LinearSVR(max_iter=10000)),\n (\"Extra\", ExtraTreesRegressor())]\n\n # model_dict = [(\"LinearSVC\", LinearSVC()), (\"LR\", LinearRegression()), (\"Ridge\", Ridge()),\n # (\"Lasso\", Lasso(alpha=0.01, max_iter=10000)),\n # (\"RF\", RandomForestRegressor()), (\"GBR\", GradientBoostingRegressor()),\n # (\"LinearSVR\", LinearSVR(max_iter=10000)), (\"Ela\", ElasticNet(alpha=0.001, max_iter=10000)),\n # (\"SGD\", SGDRegressor(max_iter=1000, tol=1e-3)), (\"Bay\", BayesianRidge()),\n # (\"Extra\", ExtraTreesRegressor()), (\"Xgb\", XGBRegressor())]\n for name, model in model_dict:\n score = rmse_cv(model, X, Y, scoring=\"f1\", cv=cv)\n print(\"{} 's score is {}\".format(name, score))\n\n # model.fit(X, Y)\n # # for test_data_item in test_data:\n # predict_data = model.predict(test_data)\n # predict_data = np.where(predict_data < 0.5, 0, 1)\n # # save_predict_data(predict_data, \"{}.csv\".format(name))\n # if test_gt is not None:\n # # print(predict_data)\n # # print(test_gt)\n # print(\"num of correct data is {}\".format(np.sum(predict_data == test_gt)))\n # print(\"num of valid data is {}\".format(len(test_gt)))\n # print(np.sum(predict_data == test_gt) / len(test_gt))\n # save_model(model, \"{}.pkl\".format(name))\n # print(predict_data)\n # print(\"{}: {:6f}, {:6f}\".format(name, score.mean(), score.std()))\n\n\ndef ML_04(X, Y, test_X):\n linear_svc = LinearSVC()\n rf = RandomForestRegressor()\n linear_svr = LinearSVR(max_iter=10000)\n extra = ExtraTreesRegressor()\n ker = KernelRidge(alpha=0.2, kernel='polynomial', degree=3, coef0=0.8)\n stack_model = stacking(models=[linear_svc, rf, linear_svr, extra], meta_model=ker)\n result = rmse_cv(stack_model, X, Y)\n print(result)\n print(result.mean())\n\n X_train_stack, X_test_stack = stack_model.get_oof(X, Y, test_X)\n x_train_add = np.hstack((X, X_train_stack))\n\n print(rmse_cv(stack_model, x_train_add, Y))\n print(rmse_cv(stack_model, x_train_add, Y).mean())\n\n\ndef FNN(X, Y, valid_data=None, valid_gt=None, display_iter=10000):\n model = create_model()\n EPOCH = 50\n batch_size = 8\n lr = 0.3\n\n loss_function = BCEWithLogitsLoss()\n num_train = len(X)\n optimizer = optim.Adam(model.parameters(), lr=lr)\n print(model)\n for epoch in range(EPOCH):\n model.train()\n x_dataloader = create_data_loader(X, batch_size=batch_size)\n y_dataloader = create_data_loader(Y, batch_size=batch_size)\n sum_loss = 0\n for idx, (x_item, y_item) in enumerate(zip(x_dataloader, y_dataloader), start=1):\n print(x_item)\n print(y_item)\n optimizer.zero_grad()\n x_item = Variable(torch.from_numpy(x_item)).float().cuda()\n y_item = Variable(torch.from_numpy(y_item)).float().cuda()\n # y_item = torch.squeeze(y_item)\n y_item = torch.unsqueeze(y_item, dim=1)\n predict = model(x_item)\n loss = loss_function(predict, y_item)\n loss.backward()\n optimizer.step()\n if idx % display_iter == 0:\n print(\"predic is {},out is {},loss is {}\".format(predict, y_item, loss.data))\n sum_loss += loss\n ave_loss = sum_loss / (int(num_train / 
batch_size))\n        print(\"epoch is {},sum_loss is {},ave_loss is {}\".format(epoch, sum_loss, ave_loss))\n\n        # valid\n        if valid_data is not None:\n            model.eval()\n            num_valid = len(valid_data)\n            sum_loss = 0\n            x_dataloader = create_data_loader(valid_data, batch_size=batch_size)\n            y_dataloader = create_data_loader(valid_gt, batch_size=batch_size)\n            for idx, (x_item, y_item) in enumerate(zip(x_dataloader, y_dataloader), start=1):\n                x_item = Variable(torch.from_numpy(x_item)).float().cuda()\n                y_item = Variable(torch.from_numpy(y_item)).float().cuda()\n                y_item = torch.unsqueeze(y_item, dim=1)\n                predict = model(x_item)\n                loss = loss_function(predict, y_item)\n                sum_loss += loss\n            ave_loss = sum_loss / (int(num_valid / batch_size))\n            print(\"Valid sum_loss is {}, ave_loss is {}\".format(sum_loss, ave_loss))\n\n\nimport operator\n\n\ndef kNN(test_x, data_set, labels, k):\n    num_data_set = data_set.shape[0]\n    diff_mat = np.tile(test_x, (num_data_set, 1)) - data_set\n    distances = (diff_mat ** 2).sum(axis=1) ** 0.5\n    indices_sorted_distance = distances.argsort()\n    label_counter = {}\n    for i in range(k):\n        voted_label = labels[indices_sorted_distance[i]]\n        label_counter[voted_label] = label_counter.get(voted_label, 0) + 1\n    return sorted(label_counter.items(),\n                  key=operator.itemgetter(1), reverse=True)[0][0]\n\n\ndef classify(train_data, train_gt_data, valid_data, valid_gt_data=None, k=5):\n    if valid_gt_data is None:\n        predict_data = []\n        for idx, data in enumerate(valid_data):\n            predict = kNN(data, train_data, train_gt_data, k)\n            predict_data.append(predict)\n            if idx % 1000 == 0:\n                print(predict)\n        return predict_data\n    else:\n        correct = 0\n\n        for data, gt_data in zip(valid_data, valid_gt_data):\n            predict = kNN(data, train_data, train_gt_data, k)\n            if gt_data == predict:\n                correct += 1\n                print(\"correct\")\n            else:\n                print(\"error\")\n        print(correct / len(valid_data))\n\n\ndef split_train_valid(data, gt_data, valid_rate=0.2):\n    num_train = len(data)\n    num_valid = int(num_train * valid_rate)\n    num_train = num_train - num_valid\n    valid_data = data[num_train:]\n    valid_gt_data = gt_data[num_train:]\n    data = data[:num_train]\n    gt_data = gt_data[:num_train]\n    return data, gt_data, valid_data, valid_gt_data\n\n\ndef use_ML_03():\n    valid_rate = 0.99\n    train_data, gt_data, range_, min_val = read_train_data()\n    # test_data = read_test_data(range_, min_val)\n    # ML_03(train_data, gt_data, test_data)\n\n    train_data, gt_data, valid_data, valid_gt_data = split_train_valid(train_data, gt_data, valid_rate)\n    ML_03(train_data, gt_data, valid_data, valid_gt_data)\n\n\ndef use_ML_04():\n    valid_rate = 0.2\n    train_data, gt_data, range_, min_val = read_train_data()\n    train_data, gt_data, valid_data, valid_gt_data = split_train_valid(train_data, gt_data, valid_rate)\n    ML_04(train_data, gt_data, valid_data)\n\n\ndef use_KNN(test=False):\n    k = 11\n    train_data, gt_data, range_, min_val = read_train_data()\n    if test:\n        valid_rate = 0.2\n        train_data, gt_data, valid_data, valid_gt_data = split_train_valid(train_data, gt_data, valid_rate)\n        predict = classify(train_data, gt_data, valid_data, valid_gt_data, k=k)\n    else:\n        test_data = read_test_data(range_, min_val)\n        predict = classify(train_data, gt_data, test_data, valid_gt_data=None, k=k)\n    save_predict_data(predict, save_path=\"k={}.csv\".format(k))\n    print(k)\n\n\ndef XGB_CV(train_data, train_gt, params: dict = None, scoring: str = \"f1\", cv=5):\n    
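# baseline hyperparameters; any single-valued entry in params is pinned on the estimator and dropped from the grid below\n    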
estimator = XGBClassifier(learning_rate=0.1, n_estimators=20, max_depth=5,\n                              min_child_weight=1, gamma=0, subsample=0.8,\n                              colsample_bytree=0.8,\n                              objective='binary:logistic', nthread=8, scale_pos_weight=1,\n                              seed=27)\n    for key in list(params):\n        if len(params[key]) == 1:\n            setattr(estimator, key, params[key][0])\n            del params[key]\n    grid_search = GridSearchCV(estimator=estimator, param_grid=params, scoring=scoring, n_jobs=8, iid=False, cv=cv)\n    grid_search.fit(train_data, train_gt)\n    print_cv_result(grid_search)\n    # get the best estimator\n    best_estimator = grid_search.best_estimator_\n    return best_estimator\n\n\ndef print_cv_result(grid_search):\n    print(\"Best Estimator:\\n{}\".format(grid_search.best_estimator_))\n    print(\"Best Score :{}\".format(grid_search.best_score_))\n    print(\"CV Result : \\n{}\".format(pd.DataFrame(grid_search.cv_results_)[\n        ['params', 'mean_test_score', 'std_test_score', 'mean_fit_time', 'std_fit_time', 'mean_score_time',\n         'std_score_time']].sort_values(\n        'mean_test_score', ascending=False)))\n\n\ndef model_fit(model, train_df, predictors, targets, useTrainCV=False, cv_folds=5, early_stopping_rounds=50):\n    if useTrainCV:\n        xgb_param = model.get_xgb_params()\n        xgtrain = xgb.DMatrix(train_df[predictors].values, label=train_df[targets].values)\n        # xgtest = xgb.DMatrix(test_df[predictors].values)\n        cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=model.get_params()['n_estimators'], nfold=cv_folds,\n                          early_stopping_rounds=early_stopping_rounds, show_progress=False)\n        model.set_params(n_estimators=cvresult.shape[0])\n\n    model.fit(train_df[predictors], train_df[targets], eval_metric='auc')\n\n    dtrain_predictions = model.predict(train_df[predictors])\n    dtrain_predprob = model.predict_proba(train_df[predictors])[:, 1]\n    print(\"accuracy : %.4g\" % metrics.accuracy_score(train_df[targets].values, dtrain_predictions))\n    print(\"f1 : %.4g\" % metrics.f1_score(train_df[targets].values, dtrain_predictions))\n    print(\"AUC score (training set): %f\" % metrics.roc_auc_score(train_df[targets], dtrain_predprob))\n\n    feat_imp = pd.Series(model.get_booster().get_fscore()).sort_values(ascending=False)\n    feat_imp.plot(kind='bar', title='Feature Importances')\n    plt.ylabel('Feature Importance Score')\n    plt.show()\n\n\ndef use_xg(data_path=r\"\",\n           predict=['term', 'interest_rate', 'grade', 'credit_score'],\n           target=[\"loan_status\"]):\n    train = pd.read_csv(data_path)\n    xgb = XGBClassifier(\n        learning_rate=0.1,\n        n_estimators=140,\n        max_depth=5,\n        min_child_weight=1,\n        gamma=0,\n        subsample=0.8,\n        colsample_bytree=0.8,\n        objective='binary:logistic',\n        nthread=4,\n        scale_pos_weight=1,\n        seed=27)\n    model_fit(xgb, train, predict, target)\n\n\ndef use_xgb_cv():\n    train = pd.read_csv(r\"\")\n    # feature_names = ['term', 'interest_rate', 'grade', 'credit_score', 'purpose', 'loan_amnt', 'application_type',\n    #                  'employment_length']\n    feature_names = ['loan_amnt', 'term', 'interest_rate', 'employment_length', 'credit_score', 'grade_A1', 'grade_A2',\n                     'grade_A3', 'grade_A4', 'grade_A5',\n                     'grade_B1', 'grade_B2', 'grade_B3', 'grade_B4', 'grade_B5', 'grade_C1', 'grade_C2', 'grade_C3',\n                     'grade_C4', 'grade_C5', 'grade_D1', 'grade_D2', 'grade_D3', 'grade_D4',\n                     'grade_D5', 'grade_E1', 'grade_E2', 'grade_E3', 'grade_E4', 'grade_E5', 'grade_F3', 'grade_F5',\n                     'purpose_car', 'purpose_credit_card', 'purpose_debt_consolidation',\n                     'purpose_home_improvement', 'purpose_house', 'purpose_major_purchase', 'purpose_medical',\n                     'purpose_moving', 'purpose_other', 'purpose_small_business',\n                     'application_type_Individual', 'application_type_Joint App']\n    train_data = train[feature_names]\n    train_gt = train[[\"loan_status\"]]\n    
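# note (added): feature_names assume grade/purpose/application_type were one-hot expanded when this CSV was prepared\n    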
print(\"num of all data : {}\".format(len(train_data)))\n    # read_train_data()\n    valid_rate = 0.5\n    num_valid = int(len(train_data) * valid_rate)\n    valid_data = train_data.loc[:num_valid]\n    valid_gt = train_gt.loc[:num_valid]\n\n    train_data = train_data.loc[num_valid:]\n    train_gt = train_gt.loc[num_valid:]\n    print(\"num of train dataset : {}\".format(len(train_data)))\n    print(\"num of validation dataset : {}\".format(num_valid))\n\n    # find best model\n    params = {'max_depth': [22],\n              'min_child_weight': [2],\n              'gamma': [0],\n              'subsample': [0.85],\n              'colsample_bytree': [0.8],\n              'learning_rate': [0.0003],\n              'n_estimators': [125],\n              \"scale_pos_weight\": [6.5, 7, 7.5]\n              }\n\n    best_model = XGB_CV(train_data, train_gt, scoring=\"f1\", cv=3, params=params)\n    # validation\n    print(\"=\" * 20 + \"validation\" + \"=\" * 20)\n    predict = best_model.predict(valid_data)\n    print(\"num of 1 in validation: {}\".format(np.sum(predict)))\n    print(\"f1 Score in validation: {}\".format(metrics.f1_score(valid_gt.values, predict)))\n\n    # test\n    test = pd.read_csv(r\"\")\n    test_data = test[feature_names]\n    print(\"num of test dataset : {}\".format(len(test_data)))\n    predict = best_model.predict(test_data)\n    print(\"num of 1 : {} in test dataset\".format(np.sum(predict)))\n\n    # save predict data\n    save_predict_data(predict, save_path=r\"\")\n\n\nif __name__ == '__main__':\n    pd.set_option('display.max_columns', None)\n    pd.set_option('display.width', 180)\n    pd.set_option('max_colwidth', 100)\n    use_xgb_cv()\n","repo_name":"strawsyz/straw","sub_path":"small_projects/contest/signate/signate1.py","file_name":"signate1.py","file_ext":"py","file_size_in_byte":21946,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"28131069837","text":"import multiprocessing as mp\nimport os\nimport time\n\nimport cv2\nimport serial\nfrom pypylon import pylon as py\nfrom vidgear.gears import WriteGear\n\nCAMERA_PARAMS = {\n    \"Gain\": 0,\n    \"ExposureTime\": 5000,\n    \"TriggerMode\": \"On\",\n    \"TriggerSelector\": \"FrameStart\",\n    \"TriggerSource\": \"Line1\",\n    \"TriggerActivation\": \"RisingEdge\",\n    \"SensorReadoutMode\": \"Fast\",\n}\n\nVIDEO_PARAMS = {\n    \"-input_framerate\": 25,\n    \"-vcodec\": \"h264_nvenc\",\n    \"-preset\": \"fast\",\n    \"-rc\": \"cbr_ld_hq\",\n    \"-disable_force_termination\": True,\n}\n\nCAMERA_SERIALS = [\"23047980\", \"23096298\", \"23088879\", \"23088882\"]\nKILL_SWITCH = mp.Event()\nBARRIER = mp.Barrier(len(CAMERA_SERIALS) + 1)\n\n\ndef camera(serial: str, show_video: bool = True):\n    info = py.DeviceInfo()\n    info.SetSerialNumber(serial)\n    cam = py.InstantCamera(py.TlFactory.GetInstance().CreateFirstDevice(info))\n    # Set parameters\n    cam.Open()\n    for k, v in CAMERA_PARAMS.items():\n        setattr(cam, k, v)\n\n    # Create BGR converter\n    converter = py.ImageFormatConverter()\n    converter.OutputPixelFormat = py.PixelType_BGR8packed\n    converter.OutputBitAlignment = py.OutputBitAlignment_MsbAligned\n\n    # Create video writer\n    filename = os.path.join(\n        \"/home/benyishay_la/Videos/20230814_charuco_calibration/\",\n        f\"{serial}_100fps.mp4\",\n    )\n    video_writer = WriteGear(output=filename, logging=False, **VIDEO_PARAMS)\n\n    # Wait for all cameras to be ready\n    BARRIER.wait()\n\n    # Start grabbing\n    cam.StartGrabbing(py.GrabStrategy_OneByOne)\n    while not KILL_SWITCH.is_set():\n        with cam.RetrieveResult(\n            CAMERA_PARAMS[\"ExposureTime\"], py.TimeoutHandling_ThrowException\n        ) as grab_result:\n            if grab_result.GrabSucceeded():\n                
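# convert the packed frame to a BGR numpy array before encoding\n                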
image = converter.Convert(grab_result)\n                image = image.GetArray()\n                video_writer.write(image)\n                if show_video:\n                    cv2.imshow(f\"{serial}\", image)\n                    cv2.waitKey(1)\n\n    # Stop grabbing and close video writer\n    cv2.destroyAllWindows()\n    cam.StopGrabbing()\n    video_writer.close()\n\n\nif __name__ == \"__main__\":\n    board = serial.Serial(\"/dev/camera_trigger\", 9600)\n    ps = []\n    for cam_serial in CAMERA_SERIALS:\n        p = mp.Process(target=camera, args=(cam_serial,), daemon=False)\n        p.start()\n        ps.append(p)\n        time.sleep(1)\n\n    BARRIER.wait()\n    board.write(b\"100\")\n    kill = input(\"Kill? (y): \")\n    KILL_SWITCH.set()\n    board.write(b\"0\")\n\n    for p in ps:\n        p.join()\n","repo_name":"elhananby/BraidTrigger","sub_path":"utils/grab_synchronized_video.py","file_name":"grab_synchronized_video.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35582176153","text":"from heapq import heappush,heappop\nfrom random import randint\n\nN = 4\nNN = N*N\nSHUFFLE_MAGNITUDE = 40\nALFA = '_ABCDEFGHIJKLMNO'\nMOVES = [-N,1,N,-1] # Up Right Down Left\n\nclass Fifteen:\n\tdef __init__(self,key0,key1,searched,nr):\n\t\tself.target = key1\n\t\tself.searched = searched\n\t\tself.nr = nr\n\t\tnode = Board(key0,key1)\n\t\tnode.cachedValue = node.value()\n\t\tself.queue = [node] # Priority Queue\n\n\tdef step(self):\n\t\tdef push(obj): heappush(self.queue, obj)\n\t\tdef pop(): return heappop(self.queue)\n\t\tdef empty(): return len(self.queue) == 0\n\t\tdef successors(node):\n\t\t\tresult = []\n\t\t\tfor m in range(N):\n\t\t\t\tif node.inside(m):\n\t\t\t\t\tchild = node.copy()\n\t\t\t\t\tif child.move(m): result.append(child)\n\t\t\treturn result\n\t\tnode = pop()\n\t\tif node.key0 == self.target: return node.key0\n\t\tself.searched[node.key0] = self.nr\n\t\tfor child in successors(node):\n\t\t\tif child.key0 not in self.searched: push(child)\n\t\t\telif self.searched[child.key0] != self.nr: return child.key0\n\t\treturn \"\"\n\n\tdef shuffle(self):\n\t\tlast = 99\n\t\tself.key0 = ALFA\n\t\tself.path = ''\n\t\tfor i in range(SHUFFLE_MAGNITUDE):\n\t\t\tcands = [m for m in range(N) if self.inside(m) and abs(m - last) != 2]\n\t\t\ti = randint(0, len(cands) - 1)\n\t\t\tlast = cands[i]\n\t\t\tself.move(last)\n\nclass Board:\n\tdef __init__(self, key0, key1, path=''):\n\t\tself.key0 = key0\n\t\tself.key1 = key1\n\t\tself.path = path\n\t\tself.cachedValue = self.value()\n\n\tdef copy(self): return Board(self.key0, self.key1, self.path)\n\tdef __gt__(self, other): return self.cachedValue > other.cachedValue\n\n\tdef display(self):\n\t\tresult = ''\n\t\tfor i in range(NN):\n\t\t\tif i%N == 0: result += \"\\n\"\n\t\t\tresult += ' ' + self.key0[i]\n\t\tresult += ' ' + self.key0 + ' ' + str(len(self.path)) + ' ' + self.path + ' value=' + str(self.cachedValue)\n\t\tif self.key0 == self.key1: result += \" Solved!\"\n\t\treturn result\n\n\tdef value(self):\n\t\tdef manhattan(i, j): return abs(i // N - j // N) + abs(i % N - j % N)\n\t\treturn len(self.path) + sum([manhattan(i, self.key1.index(self.key0[i])) for i in range(NN) if self.key0[i] != '_'])\n\n\tdef move(self, m):\n\t\tif not self.inside(m): return False\n\t\tloc1 = self.key0.index('_')\n\t\tloc2 = loc1 + MOVES[m]\n\n\t\tsk = list(self.key0)\n\t\tsk[loc1], sk[loc2] = sk[loc2], sk[loc1]\n\t\tself.key0 = ''.join(sk)\n\n\t\tself.path += \"URDL\"[m]\n\t\tself.cachedValue = self.value()\n\t\treturn True\n\n\tdef inside(self,m):\n\t\ti = MOVES[m]\n\t\tloc = self.key0.index('_')\n\t\t
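# bounds check: reject moves that would slide the blank tile off the 4x4 board\n\t\t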
if i == -N and loc // N == 0: return False\n\t\tif i == +1 and loc % N == N-1: return False\n\t\tif i == +N and loc // N == N-1: return False\n\t\tif i == -1 and loc % N == 0: return False\n\t\treturn True\n\n","repo_name":"ChristerNilsson/Lab","sub_path":"2019/015B-PuzzleCS50/Board.py","file_name":"Board.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"81"} +{"seq_id":"11388943006","text":"import time as t\n\nclass Player:\n    def __init__(self):\n        pass\n\n    def Play(bpm, lyrics, beats):\n        bpm_v = int(bpm) #beats per minute\n        tpb = 1 / (bpm_v / 60)\n\n        #time per one beat (black note), d at the end means \"dot\" (multiplied by 1.5)\n        b = 1\n        bd = b * 1.5\n\n        #double beats (white note)\n        b2 = b * 2 \n        b2d = b2 * 1.5\n\n        #quadruple beats (whole note)\n        b4 = b * 4\n        b4d = b4 * 1.5\n\n        #half note\n        b12 = b / 2\n        b12d = b12 * 1.5\n\n        #quarter note\n        b14 = b / 4\n        b14d = b14 * 1.5\n\n        #one-eighth note\n        b18 = b / 8\n        b18d = b18 * 1.5\n\n        dur = {'b':b, 'bd':bd, 'b2':b2, 'b2d':b2d, 'b4':b4, 'b4d':b4d, \n               'b12':b12, 'b12d':b12d, 'b14':b14, 'b14d':b14d, 'b18':b18, 'b18d':b18d}\n\n        for i, e in enumerate(lyrics):\n            if e != '*':\n                print(e)\n            t.sleep(dur[beats[i]] * tpb)","repo_name":"Nukoumi139/Programming-Playground","sub_path":"Python/Programs/LyricsMapper.py","file_name":"LyricsMapper.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39864519139","text":"import os\n#from posix import listdir\nimport shutil\n\nqianzhui = \"\"\nlistStr = ['python Augment_script.py --rotate=','',' --root=./Data'] \n# Copy each file in src_file into each directory in dest_file\ndef move(src_file, dest_file):\n    for src in src_file:\n        for dest in dest_file:\n            shutil.copy(src, dest)\n\ndef createRotate(min,max):\n    listStr = ['python Augment_script.py --rotate=','',' --root=./Data'] \n    for i in range(min,max):\n        listStr[1] = str(i)\n        cmd = ''.join(listStr)\n        print(os.system(cmd))\n        list = os.listdir('./Data/change_JPEGImages')  # list all directories and files under the folder\n        for j in range(0, len(list)):\n            paths = os.path.join('./Data/change_JPEGImages/', list[j])\n            os.rename(paths,\"./Data/jpg/\"+qianzhui+str(j)+'0'+str(i)+\".jpg\")\n        list = os.listdir('./Data/change_Annotations')  # list all directories and files under the folder\n        for j in range(0, len(list)):\n            paths = os.path.join('./Data/change_Annotations/', list[j])\n            os.rename(paths,\"./Data/xml/\"+qianzhui+str(j)+'0'+str(i)+\".xml\")\n\ndef createNoise():\n    listStr = ['python Augment_script.py --root=./Data --Noise NOISE'] \n    for i in range(0,1):\n        cmd = ''.join(listStr)\n        print(os.system(cmd))\n        list = os.listdir('./Data/change_JPEGImages')  # list all directories and files under the folder\n        for j in range(0, len(list)):\n            paths = os.path.join('./Data/change_JPEGImages/', list[j])\n            os.rename(paths,\"./Data/jpg/\"+qianzhui+str(j)+'0'+str(i)+\".jpg\")\n        list = os.listdir('./Data/change_Annotations')  # list all directories and files under the folder\n        for j in range(0, len(list)):\n            paths = os.path.join('./Data/change_Annotations/', list[j])\n            os.rename(paths,\"./Data/xml/\"+qianzhui+str(j)+'0'+str(i)+\".xml\")\n    \n\nqianzhui = input(\"Enter the filename prefix: \")\nif not os.path.exists('./Data/xml'):\n    os.mkdir('./Data/xml')\nif not os.path.exists('./Data/jpg'):\n    os.mkdir('./Data/jpg')\n\n#createNoise()\ncreateRotate(0,15)\n#createRotate(listStr,80,100)\n#createRotate(listStr,170,190)\n#createRotate(listStr,260,280)\ncreateRotate(345,359)\ninput()\nos.removedirs(\"./Data/change_Annotations\")\nos.removedirs(\"./Data/change_JPEGImages\")\n
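# note (added): os.removedirs only removes empty directory chains; it raises OSError if files are left inside\n 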
","repo_name":"tualatinlz/NUEDC","sub_path":"tools/tools-VOC-master/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"17881075003","text":"#!/usr/bin/python\nimport dataset\nimport bson\nimport io\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\nfrom skimage.data import imread\nfrom src.freezing import inception\nfrom src.common import consts\nfrom src.data_preparation.tf_record_utils import bytes_feature, float_feature, int64_feature\n\n\ndef convert_bson_2_record(input_bson_filename, output_tfrecords_filename, n=None, inception_feature=False):\n one_hot_encoder, _ = dataset.one_hot_label_encoder(csv_path=\"data/category_names.csv\")\n\n # inception_graph = tf.Graph()\n # inception_sess = tf.Session(graph=inception_graph)\n #\n # with inception_graph.as_default(), inception_sess.as_default():\n # inception_model = inception.inception_model()\n\n z = 0\n data = bson.decode_file_iter(open(input_bson_filename, 'rb'))\n opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)\n\n # def get_inception_ouput(img):\n # with inception_graph.as_default():\n # inception_output = inception_model(inception_sess, img).reshape(-1).tolist()\n # return inception_output\n\n with tf.python_io.TFRecordWriter(output_tfrecords_filename, options=opts) as writer:\n for c, d in tqdm(enumerate(data), total=n):\n n_img = len(d['imgs'])\n for index in range(n_img):\n img_raw = d['imgs'][index]['picture']\n # img = np.array(imread(io.BytesIO(img_raw)))\n # height = img.shape[0]\n # width = img.shape[1]\n product_id = d['_id']\n _feature = {\n '_id': int64_feature(product_id),\n consts.IMAGE_RAW_FIELD: bytes_feature(img_raw)\n }\n # if inception_feature:\n # inception_feature_ = get_inception_ouput(img_raw)\n # _feature[consts.INCEPTION_OUTPUT_FIELD] = float_feature(inception_feature_)\n if 'category_id' in d:\n _feature[consts.LABEL_ONE_HOT_FIELD] = int64_feature(int(one_hot_encoder([str(d['category_id'])])[0]))\n example = tf.train.Example(features=tf.train.Features(feature=_feature))\n writer.write(example.SerializeToString())\n\n z = z + 1\n if (n is not None) and (z % n == 0):\n print(z)\n break\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-i', dest=\"bson_filename\", type=str, required=True, help='the input file in bson format')\n parser.add_argument('-o', dest=\"tfrecord_filename\", type=str, required=True, help='the output file in tfrecrods format')\n parser.add_argument('-n', dest=\"total_records\", type=int, required=False, help='number of records to convert.')\n args = parser.parse_args()\n\n convert_bson_2_record(args.bson_filename, args.tfrecord_filename, inception_feature=False, n=args.total_records)\n","repo_name":"Zhenxingzhang/kaggle-cdiscount-classification","sub_path":"src/data_preparation/convert_bson_2_tfrecord.py","file_name":"convert_bson_2_tfrecord.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"73387031306","text":"\"\"\"\nФайл с кнопками для клавиатур.\n\"\"\"\n\nfrom copy import deepcopy\n\nfrom app.tbot.extensions import ButtonTemplate\n\nTEMPLATES = \\\n {\n 'review_form_send_to_boss': ButtonTemplate('form_send_to_boss', 'Отправить руководителю 📨'),\n 'review_form_achievements_list': ButtonTemplate('achievements', 
'Достижения'),\n 'review_form_back_achievements': ButtonTemplate('achievements', '« К достижениям'),\n 'review_form_achievements_add': ButtonTemplate('achievements_add', 'Добавить'),\n 'review_form_achievement_delete': ButtonTemplate('achievement_delete'),\n 'review_form_achievement_edit': ButtonTemplate('achievement_edit'),\n 'review_form_achievements_delete_choose': ButtonTemplate('achievements_delete_choose', 'Удалить'),\n 'review_form_achievements_edit_choose': ButtonTemplate('achievements_edit_choose', 'Изменить'),\n 'review_send_coworkers': ButtonTemplate('boss_accept', 'Отправить коллегам 📨'),\n 'review_form_fails': ButtonTemplate('fails', 'Провалы'),\n 'review_form_back_fails': ButtonTemplate('fails', '« К провалам'),\n 'review_form_fails_add': ButtonTemplate('fails_add', 'Добавить'),\n 'review_form_fail_delete': ButtonTemplate('fail_delete'),\n 'review_form_fail_edit': ButtonTemplate('fail_edit'),\n 'review_form_fails_delete_choose': ButtonTemplate('fails_delete_choose', 'Удалить'),\n 'review_form_fails_edit_choose': ButtonTemplate('fails_edit_choose', 'Изменить'),\n 'review_form': ButtonTemplate('form', '« К анкете'),\n 'review_to_form': ButtonTemplate('form', 'Анкета'),\n 'review_form_duties_list': ButtonTemplate('duties', 'Обязанности'),\n 'review_form_back_duties': ButtonTemplate('duties', '« К обязанностям'),\n 'review_form_duties_add': ButtonTemplate('duties_add', 'Добавить'),\n 'review_form_duty_delete': ButtonTemplate('duty_delete'),\n 'review_form_duty_edit': ButtonTemplate('duty_edit'),\n 'review_form_duties_delete_choose': ButtonTemplate('duties_delete_choose', 'Удалить'),\n 'review_form_duties_edit_choose': ButtonTemplate('duties_edit_choose', 'Изменить'),\n 'review_form_project_delete_contact': ButtonTemplate('project_delete_contact'),\n 'review_form_projects_list': ButtonTemplate('projects', 'Проекты'),\n 'review_form_back_projects': ButtonTemplate('projects', '« К проектам'),\n 'review_form_project_add': ButtonTemplate('project_add', 'Добавить проект'),\n 'review_form_project_contacts_on_create': ButtonTemplate('con_add', 'Оценивающие'),\n 'review_form_project_contacts_on_create_dep': ButtonTemplate('con_dep', 'Другие отделы'),\n 'review_form_project_contacts_on_create_done': ButtonTemplate('projects', 'Следующий шаг'),\n 'review_form_project_delete': ButtonTemplate('project_delete'),\n 'review_form_project_edit': ButtonTemplate('project_edit'),\n 'review_form_project_edit_name': ButtonTemplate('project_edit_name', 'Название'),\n 'review_form_project_edit_description': ButtonTemplate('project_edit_description', 'Роль и результаты'),\n 'review_form_project_delete_choose': ButtonTemplate('project_delete_choose', 'Удалить проект'),\n 'review_form_project_edit_choose': ButtonTemplate('project_edit_choose', 'Изменить проект'),\n 'review_form_back_projects_list': ButtonTemplate('projects', '« К Проектам'),\n 'review_form_projects_examples': ButtonTemplate('projects', 'Пример').add(example=True),\n 'review_form_projects_descriptions': ButtonTemplate('projects', '« К описанию'),\n 'review_form_project_contacts': ButtonTemplate('project_contacts', 'Оценивающие'),\n 'review_form_project_add_contact': ButtonTemplate('project_add_contact', 'Добавить'),\n 'review_form_project_delete_choose_contact': ButtonTemplate('project_delete_choose_contact',\n 'Удалить'),\n 'review_form_project_edit_choose_contact': ButtonTemplate('project_edit_choose_contact',\n 'Изменить'),\n 'review_form_add_contact_in_current_project': ButtonTemplate('add_contact_in_current_project',\n 'Добавить'),\n 
'review_form_project_edit_contact': ButtonTemplate('project_change_contact'),\n 'review_form_project_contacts_back': ButtonTemplate('project_contacts', '« Назад'),\n 'review_form_project_edit_back': ButtonTemplate('project_edit', '« Назад'),\n\n 'boss_review_accept': ButtonTemplate('boss_accept', 'Принять'),\n 'boss_review_decline': ButtonTemplate('boss_decline', 'На доработку'),\n 'boss_review_form': ButtonTemplate('boss_form'),\n 'boss_review_list': ButtonTemplate('boss_review_list', '« К списку'),\n 'boss_review_update_list': ButtonTemplate('boss_review_list', 'Обновить список'),\n 'boss_review_sort_asc': ButtonTemplate('boss_review_list', '🔺').add(asc=True),\n 'boss_review_sort_desc': ButtonTemplate('boss_review_list', '🔻').add(asc=False),\n 'boss_review_to_form': ButtonTemplate('boss_form', 'Посмотреть анкету'),\n 'boss_review_to_list': ButtonTemplate('boss_review_list', 'Список анкет'),\n\n 'coworker_review_form': ButtonTemplate('coworker_form', 'Анкета'),\n 'coworker_review_list': ButtonTemplate('coworker_review_list', '« К анкете'),\n 'coworker_projects': ButtonTemplate('coworker_projects', 'Оценить проекты'),\n 'coworker_project': ButtonTemplate('coworker_project'),\n 'coworker_rate': ButtonTemplate('coworker_rate', 'Оценить'),\n 'coworker_back_form': ButtonTemplate('coworker_form', '« К анкете'),\n 'coworker_back_projects': ButtonTemplate('coworker_projects', '« К проектам'),\n 'coworker_comment': ButtonTemplate('coworker_comment', 'Изменить комментарий'),\n 'coworker_review_todo': ButtonTemplate('coworker_review_todo', 'Что делать'),\n 'coworker_review_not_todo': ButtonTemplate('coworker_review_not_todo', 'Что перестать делать'),\n 'coworker_review_form_send_to_hr': ButtonTemplate('form_send_to_hr', 'Отправить HR'),\n 'coworker_review_sort_asc': ButtonTemplate('coworker_review_list', '🔺').add(asc=True),\n 'coworker_review_sort_desc': ButtonTemplate('coworker_review_list', '🔻').add(asc=False),\n 'coworker_review_update_list': ButtonTemplate('coworker_review_list', 'Обновить список'),\n 'coworkers_review_to_form': ButtonTemplate('coworker_form', 'Анкета коллеги'),\n 'coworkers_review_to_list': ButtonTemplate('coworker_review_list', 'Список анкет на проверку'),\n 'coworker_review_advices': ButtonTemplate('advices', 'Советы'),\n 'coworker_review_advices_todo': ButtonTemplate('advices', 'Что делать?'),\n 'coworker_review_advices_not_todo': ButtonTemplate('advices', 'Что перестать делать?'),\n 'coworker_review_back_advices': ButtonTemplate('advices', '« К советам'),\n 'coworker_review_advices_add': ButtonTemplate('advices_add', 'Добавить'),\n 'coworker_review_advices_delete': ButtonTemplate('advices_delete'),\n 'coworker_review_advices_edit': ButtonTemplate('advices_edit'),\n 'coworker_review_advices_delete_choose': ButtonTemplate('advices_delete_choose', 'Удалить'),\n 'coworker_review_advices_edit_choose': ButtonTemplate('advices_edit_choose', 'Изменить'),\n 'coworker_choose_rate': ButtonTemplate('coworker_choose_rate', 'Изменить оценку'),\n 'coworker_back_project': ButtonTemplate('coworker_project', '« Назад'),\n 'copy_last_form': ButtonTemplate('copy_last_form', 'Скопировать предыдущую форму'),\n 'review_form_duty': ButtonTemplate('duty', 'Обязанности'),\n 'review_form_project_edit_contacts': ButtonTemplate('project_edit_choose_contact', 'Контакты'),\n\n 'hr_review_list': ButtonTemplate('hr_review_list', '« К списку'),\n 'hr_review_form': ButtonTemplate('hr_form'),\n 'hr_review_accept': ButtonTemplate('hr_review_accept', 'Принять'),\n 'hr_review_decline': 
ButtonTemplate('hr_review_decline', 'Отклонить'),\n 'hr_review_todo': ButtonTemplate('hr_todo', 'Что начать делать'),\n 'hr_review_not_todo': ButtonTemplate('hr_todo', 'Что перестать делать'),\n 'hr_review_ratings': ButtonTemplate('hr_ratings', 'Оценки'),\n 'hr_review_back_to_form': ButtonTemplate('hr_form', '« Назад'),\n 'hr_review_back_to_form_name': ButtonTemplate('hr_form', '« К анкете'),\n 'hr_review_comment_rating': ButtonTemplate('hr_comment_rating'),\n 'hr_review_back_to_decline': ButtonTemplate('hr_review_decline', '« Назад'),\n 'hr_review_send_back': ButtonTemplate('hr_send_back', 'Вернуть форму'),\n 'hr_review_update_list': ButtonTemplate('hr_review_list', 'Обновить список'),\n 'hr_review_sort_asc': ButtonTemplate('hr_review_list', '🔺').add(asc=True),\n 'hr_review_sort_desc': ButtonTemplate('hr_review_list', '🔻').add(asc=False),\n 'hr_review_to_form': ButtonTemplate('hr_form', 'Анкета с оценками'),\n 'hr_review_to_list': ButtonTemplate('hr_review_list', 'Список анкет на проверку'),\n 'hr_advices_edit': ButtonTemplate('hr_advices_edit'),\n\n 'get_position': ButtonTemplate('get_position'),\n 'get_department': ButtonTemplate('get_department'),\n\n 'request_view': ButtonTemplate('request_view'),\n 'request_list_view': ButtonTemplate('request_list_view'),\n 'request_delete_view': ButtonTemplate('request_delete_view', 'Удалить'),\n 'request_accept_view': ButtonTemplate('request_accept_view', 'Принять'),\n 'request_view_back': ButtonTemplate('request_view_back', 'Назад'),\n 'request_delete': ButtonTemplate('request_delete', 'Удалить'),\n 'cancel_deletion': ButtonTemplate('cancel_deletion', 'Отмена'),\n 'to_request': ButtonTemplate('to_request', 'К заявке'),\n\n 'user_view': ButtonTemplate('user_view'),\n 'user_list_view': ButtonTemplate('user_list_view'),\n 'user_delete_view': ButtonTemplate('user_delete_view', 'Удалить'),\n 'user_view_back': ButtonTemplate('user_view_back', 'Назад'),\n 'choose_dep': ButtonTemplate('choose_dep', 'Назад'),\n 'user_edit_view': ButtonTemplate('user_edit_view', 'Редактировать'),\n 'user_delete': ButtonTemplate('user_delete', 'Удалить'),\n 'cancel_user_delete': ButtonTemplate('cancel_user_delete', 'Отмена'),\n 'back_to_user': ButtonTemplate('back_to_user', 'Назад'),\n\n 'user_edit_fullname': ButtonTemplate('edit_fullname', 'ФИО'),\n 'user_edit_role': ButtonTemplate('user_edit_role', 'Роль'),\n 'user_edit_position': ButtonTemplate('user_edit_position', 'Должность'),\n 'user_edit_boss': ButtonTemplate('user_edit_boss', 'Руководитель'),\n 'user_edit_department': ButtonTemplate('user_edit_department', 'Отдел'),\n 'edit_position': ButtonTemplate('edit_position'),\n 'edit_department': ButtonTemplate('edit_department'),\n 'edit_role': ButtonTemplate('edit_role'),\n 'back_to_edit': ButtonTemplate('back_to_edit', 'Назад'),\n\n 'review_period_start': ButtonTemplate('review_period_start', 'Запуск'),\n 'review_period_stop': ButtonTemplate('review_period_stop', 'Остановка'),\n\n 'forms_list': ButtonTemplate('forms_list'),\n 'get_old_review': ButtonTemplate('get_old_review'),\n 'old_review_list': ButtonTemplate('old_review_list'),\n 'back_to_old_review_list': ButtonTemplate('back_to_old_review_list', 'Назад'),\n 'get_rapport': ButtonTemplate('get_rapport'),\n 'back_to_rapport': ButtonTemplate('back_to_rapport', 'Назад'),\n 'back_to_form': ButtonTemplate('back_to_form', 'Назад'),\n 'get_hr_rapport': ButtonTemplate('get_hr_rapport', 'Для HR'),\n 'get_boss_rapport': ButtonTemplate('get_boss_rapport', 'Для руководителя'),\n 'send_rapport_to_boss': 
ButtonTemplate('send_rap_to_boss', 'Отправить руководителю'),\n        'employee_review': ButtonTemplate('employee_review'),\n        'input_summary': ButtonTemplate('input_summary', 'Написать отчет'),\n        'change_summary': ButtonTemplate('input_summary', 'Изменить отчет'),\n        'get_current_rapport': ButtonTemplate('get_current_rapport', 'Выгрузить анкету'),\n        'current_forms_list': ButtonTemplate('current_forms_list', 'Список анкет'),\n        'current_forms_list_back': ButtonTemplate('current_forms_list', 'Назад'),\n\n    }\n\n\nclass AdapterTemplates:\n\n    def __getitem__(self, key):\n        return deepcopy(TEMPLATES[key])\n\n\nBUTTONS_TEMPLATES = AdapterTemplates()\nGENERAL_BUTTONS = \\\n    {\n        ''\n    }\n\n__all__ = ['BUTTONS_TEMPLATES']\n","repo_name":"gagpa/PerformanceReview__mark","sub_path":"app/tbot/storages/buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":13903,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"7393994415","text":"from tcutils.types import *\n\n\ndef object_vars(obj: typing.Any) -> typing.List[str]:\n    \"\"\"Get all object variables that do not start with underscore '_'\n    and are not callable.\n    \"\"\"\n    properties = dir(obj)\n    non_callable = [property_name for property_name in properties if not (\n        callable(getattr(obj, property_name)) or property_name.startswith('_'))]\n    return non_callable\n","repo_name":"ktomala/tcutils","sub_path":"tcutils/objects/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"30895206234","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Pradeep Jairamani , github.com/pradeepjairamani\n# Service Scanner (Product and Version Detection)\n\nimport threading\nimport socket\nimport socks\nimport ssl\nimport time\nimport re\n\nfrom lib.socks_resolver.engine import getaddrinfo\n\nresult_dict = {}\nexternal_run_values = []\n\nports_services_and_condition = {\n    \"http\": [\"HTTP\", [\"OK\", \"CREATED\", \"No Content\", \"Moved Permanently\", \"Found\", \"Not Modified\", \"Permanent Redirect\",\n                      \"Bad Request\", \"Unauthorized\", \"Payment Required\", \"Forbidden\", \"Not Found\", \"Method Not Allowed\",\n                      \"Not Acceptable\", \"Request Timeout\", \"Unsupported Media Type\", \"Too Many Requests\",\n                      \"Internal Server Error\", \"Bad Gateway\", \"Service Unavailable\", \"Gateway Timeout\",\n                      \"444 No Response\"],\n             ],\n    \"ftp\": [\"FTP\", [\"214\", \"220\", \"530\", \"230\", \"502\", \"500\"]],\n    \"ssh\": [\"SSH\"],\n    \"telnet\": [\"Telnet\"],\n    \"smtp\": [\"SMTP\", [\"220\", \"554\", \"250\"]],\n    \"imap\": [\"IMAP\"],\n    \"mariadb\": [\"MariaDB\"],\n    \"mysql\": [\"MySQL\"],\n    \"PostgreSQL\": [\"PostgreSQL\"],\n    \"ILC 150 GSM/GPRS|pcworx\": [\"ILC 150 GSM/GPRS\"],\n    \"RTSP\": [\"RTSP\"],\n    \"pptp\": [[\"Firmware:\", \"Hostname:\", \"Vendor:\", \"pptp\"]],\n    \"rsync\": [[\"rsync\", \"RSYNC\"]],\n    \"portmap\": [\"Portmap\"],\n}\n\nports_services_or_condition = {\n    \"http\": [\"400 Bad Request\", \"401 Unauthorized\", \"302 Found\", \"Server: cloudflare\", \"Server: nginx\",\n             \"Content-Length:\", \"Content-Type:\", \"text/html\", \"application/json\", \"multipart/form-data\",\n             \"Access-Control-Request-Headers\", \"Forwarded: for=\", \"Proxy-Authorization:\", \"User-Agent:\",\n             \"X-Forwarded-Host\", \"Content-MD5\", [\"HTTP\", \"Authorization\"], \"Access-Control-Request-Method\",\n             \"Accept-Language\", \"HTTP\", \"404 Not Found\", \"HTML\", \"Apache\"],\n    
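# a single matching banner below is enough to tag the service (OR logic), unlike the all-signatures table above\n    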
\"ftp\": [[\"Pure-FTPd\", \"----------\\r\\n\"], \"\\r\\n220-You are user number\", [\"orks FTP server\", \"VxWorks VxWorks\"],\n \"530 USER and PASS required\", \"Server ready.\\r\\n5\", \"Invalid command: try being more creative\",\n \"220 Hotspot FTP server (MikroTik 6.27) ready\", \"220 SHARP MX-M264N Ver 01.05.00.0n.16.U FTP server.\",\n \"220 Microsoft FTP Service\", \"220 FTP Server ready.\", \"220 Microsoft FTP Service\",\n \"220 Welcome to virtual FTP service.\", \"220 DreamHost FTP Server\",\n \"220 FRITZ!BoxFonWLAN7360SL(UI) FTP server ready.\", \"Directory status\",\n \"Service closing control connection\", \"Requested file action\", \"Connection closed; transfer aborted\",\n \"Requested file action not taken\", \"Directory not empty\"],\n \"ssh\": [\"openssh\", \"-OpenSSH_\", \"\\r\\nProtocol mism\", \"_sshlib GlobalSCAPE\\r\\n\",\n \"\\x00\\x1aversion info line too long\",\"SSH Windows NT Server\", \"WinNT sshd\", \"Secure sshd\",\n \"sshd\", \"SSH Secure Shell\", \"WinSSHD\"],\n \"telnet\": [\"Welcome to Microsoft Telnet Service\", \"no decompiling or reverse-engineering shall be allowed\",\n \"is not a secure protocol\", \"recommended to use Stelnet\", \"Login authentication\",\n \"*WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING*\", \"NetportExpress\",\n \"Closing Telnet connection due to host problems\", \"No more connections are allowed to telnet server\",\n \"Raptor Firewall Secure Gateway\", \"Check Point FireWall-1 authenticated Telnet server running on\"],\n \"smtp\": [\"Server ready\", \"SMTP synchronization error\", \"220-Greetings\", \"ESMTP Arnet Email Security\", \"SMTP 2.0\",\n \"Fidelix Fx2020\", \"ESMTP\"],\n \"imap\": [\"BAD Error in IMAP command received by server\", \"IMAP4rev1 SASL-IR\", \"OK [CAPABILITY IMAP4rev1\",\n \"OK IMAPrev1\", \"LITERAL+ SASL-IR LOGIN-REFERRALS ID ENABLE IDLE NAMESPACE AUTH=PLAIN AUTH=LOGIN]\",\n \"CAPABILITY completed\"\n \"LITERAL+ SASL-IR LOGIN-REFERRALS ID ENABLE IDLE AUTH=PLAIN AUTH=LOGIN AUTH=DIGEST-MD5 AUTH=CRAM-MD5]\",\n \"Internet Mail Server\", \"IMAP4 service\", \"BYE Hi This is the IMAP SSL Redirect\"],\n \"mariadb\": [\"is not allowed to connect to this MariaDB server\"],\n \"mysql\": [\"is not allowed to connect to this MySQL server\"],\n \"PostgreSQL\": [\"fe_sendauth: no password supplied\", \"no pg_hba.conf entry for host\",\n \"received invalid response to SSL negotiation:\", \"unsupported frontend protocol\",\n \"FATAL 1: invalid length of startup packet\",],\n \"ILC 150 GSM/GPRS|pcworx\": [\"PLC Type: ILC 150 GSM/GPRS\", \"Model Number: 2916545\", \"Firmware Version: 3.93\",\n \"Firmware Version: 3.71\", \"Firmware Version: 3.70\", \"Firmware Date:\", \"Firmware Time:\"],\n \"RTSP\": [\"RTSP/1.0 401 Unauthorized\", \"RTSP/1.0 200 OK\", \"WWW-Authenticate:\", 'Basic realm=\"device\"',\n \"Server: Dahua Rtsp Server\", \"Server: Rtsp Server/2.0\", \"RTSP/1.0 404 Not Found\"],\n \"pptp\": [\"Firmware: 1\", \"Hostname: pptp server\", \"Vendor: BRN\", \"Vendor: Fortinet pptp\", \"Vendor: AMIT\"],\n \"rsync\": [\"@RSYNCD: 30.0\", \"@RSYNCD: EXIT\"],\n \"Portmap\": [\"Program\", \"Program\tVersion\tProtocol\tPort\", \"portmapper\", \"status\t1\", \"nfs\t2\",\n \"nlockmgr\t1\"],\n \"antivir\": [\"Symantec AntiVirus Scan Engine\", \"antivirus\", \"NOD32 AntiVirus\", \"NOD32SS\"],\n \"nntp\": [\"NetWare-News-Server\", \"NetWare nntpd\", \"nntp\", \"Leafnode nntpd\", \"InterNetNews NNRP server INN\"],\n \"pop3\": [\"POP3\", \"POP3 gateway ready\", \"POP3 Server\", \"Welcome to mpopd\", \"OK Hello 
there\"],\n}\n\nports_services_regex = {\n \"http\": [\"HTTP\\/[\\d.]+\\s+[\\d]+\", ], # checks for any pattern of type HTTP/1.0 200 OK, etc.\n \"ftp\": [\"FTP\\/[\\d.]+\\s+[\\d]+\"], # similar to above in HTTP\n \"ssh\": [\"SSH-([\\d.]+)-OpenSSH_([\\w._-]+)[ -]{1,2}Debian[ -_](.*ubuntu.*)\", ],\n \"mysql\": [\".\\0\\0\\0\\xff..Host .* is not allowed to connect to this MySQL server$\", ],\n \"mariadb\": [\"[\\d.]+[\\d][\\d]-MariaDB\", ],\n}\n\n\ndef recv_all(s, limit=4196):\n \"\"\"\n receive all data from a socket\n Args:\n s: python socket\n limit: limit size to get response\n Returns:\n response or b\"\"\n \"\"\"\n response = \"\"\n while len(response) < limit:\n try:\n r = s.recv(1)\n if r != b\"\":\n response += r.decode()\n else:\n break\n except Exception as _:\n break\n return response\n\n\ndef discover_by_port(host, port, timeout, send_data, socks_proxy, external_run=False):\n \"\"\"\n request a port to scan and check for existing signatures to discover the service\n Args:\n host: host to scan\n port: port to scan\n timeout: timeout second\n send_data: data to send to port\n socks_proxy: socks proxy\n external_run: if you run this from other module or not calling it from discovery function, you must set\n external_run as True\n Returns:\n discovered services and ports in JSON dict\n \"\"\"\n\n ssl_flag = False\n if socks_proxy is not None:\n socks_version = socks.SOCKS5 if socks_proxy.startswith(\n 'socks5://') else socks.SOCKS4\n socks_proxy = socks_proxy.rsplit('://')[1]\n if '@' in socks_proxy:\n socks_username = socks_proxy.rsplit(':')[0]\n socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]\n socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),\n int(socks_proxy.rsplit(':')[-1]), username=socks_username,\n password=socks_password)\n socket.socket = socks.socksocket\n socket.getaddrinfo = getaddrinfo\n else:\n socks.set_default_proxy(socks_version, str(\n socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))\n socket.socket = socks.socksocket\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(timeout)\n sock.connect((host, port))\n except Exception as _:\n return None\n try:\n sock = ssl.wrap_socket(sock)\n ssl_flag = True\n except Exception as _:\n # No SSL Support for Service\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(timeout)\n sock.connect((host, port))\n except Exception:\n return None\n data1 = recv_all(sock)\n try:\n sock.send(send_data)\n except Exception as _:\n pass\n final_data = recv_all(sock) + data1 # print( \"PORT : \" + str(port) +final_data)\n for service in ports_services_and_condition:\n FLAG = True\n c = 0\n for signature in ports_services_and_condition[service]:\n if isinstance(signature, list):\n OFLAG = True\n for s in ports_services_and_condition[service][c]:\n if s in final_data:\n OFLAG = False\n if OFLAG:\n FLAG = False\n else:\n if signature not in final_data:\n FLAG = False\n if FLAG:\n if ssl_flag:\n result_dict[port] = service + \"/ssl\"\n else:\n result_dict[port] = service\n c += 1\n for service in ports_services_or_condition:\n FLAG = False\n c = 0\n for signature in ports_services_or_condition[service]:\n if isinstance(signature, list):\n OFLAG = True\n for s in ports_services_or_condition[service][c]:\n if s not in final_data:\n OFLAG = False\n if OFLAG:\n FLAG = True\n else:\n if signature in final_data:\n FLAG = True\n if FLAG:\n if ssl_flag:\n result_dict[port] = service + \"/ssl\"\n else:\n result_dict[port] = 
service\n            c += 1\n    for service in ports_services_regex:\n        for signature in ports_services_regex[service]:\n            try:\n                pattern = re.compile(signature)\n                if pattern.match(final_data):\n                    if ssl_flag:\n                        result_dict[port] = service + \"/ssl\"\n                    else:\n                        result_dict[port] = service\n            except Exception as _:\n                pass\n    try:\n        result_dict[port]\n    except Exception as _:\n        result_dict[port] = \"UNKNOWN\"\n    if external_run and port not in external_run_values:\n        external_run_values.append(port)\n    return result_dict[port]\n\n\ndef discovery(target, ports=None, timeout=3, thread_number=1000, send_data=None, time_sleep=0, socks_proxy=None):\n    \"\"\"\n    Discover the service run on the port, it can detect real service names when users change default port number\n    Args:\n        target: target to scan\n        ports: ports in array, or if None it will test 1000 common ports\n        timeout: timeout seconds\n        thread_number: thread numbers\n        send_data: data to send by socket, if None it will send b\"ABC\\x00\\r\\n\" * 10 by default\n        time_sleep: time to sleep between requests\n        socks_proxy: socks proxy\n    Returns:\n        discovered services and ports in JSON dict\n    \"\"\"\n\n    threads = []\n    if not send_data:\n        send_data = b\"ABC\\x00\\r\\n\" * 10\n    if not ports:\n        from lib.scan.port.engine import extra_requirements_dict as port_scanner_default_ports\n        ports = port_scanner_default_ports()[\"port_scan_ports\"]\n    for port in ports:\n        t = threading.Thread(target=discover_by_port,\n                             args=(target, int(port), int(timeout), send_data, socks_proxy))\n        threads.append(t)\n        t.start()\n        time.sleep(time_sleep)\n    while 1:\n        try:\n            if threading.activeCount() <= thread_number:\n                break\n            time.sleep(0.01)\n        except KeyboardInterrupt:\n            break\n    kill_switch = 0\n    while 1:\n        time.sleep(0.01)\n        kill_switch += 1\n        try:\n            if threading.activeCount() == 1 or int(kill_switch) == int(timeout * 5 * 10):\n                break\n        except KeyboardInterrupt:\n            break\n    return result_dict\n","repo_name":"otaboyevsardorbek1/Automated-Penetration-Testing-Framework","sub_path":"lib/payload/scanner/service/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":12259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"74985134024","text":"import math\ndegrees = 30\nx = math.sin(degrees / 360.0 * 2 * math.pi)\nprint(x)\n\ny = math.exp(math.log(x + 1))\nprint(y)\n\ndef print_lyrics():\n    print(\"im a lumberjack\")\n    print(\"and im okay\")\n\ndef repeat_lyrics():\n    print_lyrics()\n    print_lyrics()\n\nrepeat_lyrics()\n\n\n\ndef right_justify(t):\n    s = len(t)\n    z = ' ' * (70 - s) + t\n    print(z)\n    return z\n\n\nright_justify('hello')\nright_justify('hellohello')","repo_name":"srujanjayati/testing123","sub_path":"think-python-oreilly/composition.py","file_name":"composition.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"29556183412","text":"import matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\n\n\nclass DDPM(nn.Module):\n    def __init__(self,\n                 network,\n                 n_steps,\n                 device,\n                 beta_start=1e-4,\n                 beta_end=2e-2):\n        super(DDPM, self).__init__()\n\n        self.network = network.to(device)\n        self.device = device\n\n        self.n_steps = n_steps\n\n        self.betas = torch.linspace(beta_start, beta_end, n_steps).to(device)\n\n        self.alphas = 1 - self.betas\n        self.sqrt_alphas = torch.sqrt(self.alphas)\n\n        self.alphas_cumprod = torch.cumprod(self.alphas, -1)\n        self.sqrt_alphas_cumprod = 
torch.sqrt(self.alphas_cumprod)\n self.sqrt_alphas_cumprod_compl = torch.sqrt(1 - self.alphas_cumprod)\n\n def add_noise(self, input, noise, timesteps):\n a = self.sqrt_alphas_cumprod[timesteps].reshape(-1, 1, 1, 1)\n b = self.sqrt_alphas_cumprod_compl[timesteps].reshape(-1, 1, 1, 1)\n return a * input + b * noise\n\n def reverse(self, noisy, timesteps):\n return self.network(noisy, timesteps)\n\n def sample(self, n_samples, n_channels, h, w, writer=None):\n cur = torch.randn(n_samples, n_channels, h, w).to(self.device)\n\n timesteps = torch.linspace(self.n_steps - 1, 0, self.n_steps).long().to(self.device)\n for t in tqdm(timesteps, total=len(timesteps)):\n time = torch.ones((n_samples, ), device=self.device) * t\n noise_coeff = self.betas[t] / (self.sqrt_alphas_cumprod_compl[t] * self.sqrt_alphas[t])\n cur = (1 / self.sqrt_alphas[t]) * (cur - noise_coeff * self.network(cur, time.long()))\n cur += torch.sqrt(self.betas[t]) * torch.randn(cur.shape, device=self.device)\n\n if writer is not None:\n fig, ax = plt.subplots()\n ax.imshow(cur[0, 0].cpu().detach().numpy(), cmap=\"gray\")\n writer.add_figure(\"generation\", fig, global_step=self.n_steps - t - 1)\n\n return cur\n","repo_name":"DaniloMarinho/Diffusion-models","sub_path":"diff_models/ddpm.py","file_name":"ddpm.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5704252086","text":"def getAverage(die, numRolls, numTrials):\r\n \"\"\"\r\n - die, a Die\r\n - numRolls, numTrials, are positive ints\r\n - Calculates the expected mean value of the longest run of a number\r\n over numTrials runs of numRolls rolls.\r\n - Calls makeHistogram to produce a histogram of the longest runs for all\r\n the trials. 
There should be 10 bins in the histogram\r\n - Choose appropriate labels for the x and y axes.\r\n - Returns the mean calculated\r\n \"\"\"\r\n\r\n fullVals = list() \r\n totalList = list()\r\n storeDict = {}\r\n for i in range(numTrials):\r\n collectVals= list()\r\n for j in range(numRolls):\r\n a = die.roll()\r\n collectVals.append(a)\r\n count = 0\r\n\r\n maxLen = list()\r\n for x in range(numRolls):\r\n try:\r\n if collectVals[x] == collectVals[x+1]:\r\n count +=1 \r\n maxLen.append(count)\r\n else:\r\n maxLen.append(count)\r\n count =1\r\n except IndexError:\r\n count =1\r\n maxLen.append(count) \r\n storeDict[i] = max(maxLen) \r\n fullVals.append(collectVals)\r\n for i in range(numTrials):\r\n for j in range(numRolls):\r\n a = fullVals[i][j]\r\n totalList.append(a)\r\n #\r\n\r\n a = list()\r\n for key in storeDict:\r\n a.append(storeDict.get(key))\r\n \r\n someNum = max(a)\r\n indexList= list()\r\n index = 0\r\n for elem in a:\r\n if elem == someNum:\r\n indexList.append(index)\r\n index +=1\r\n else:\r\n index +=1\r\n histogram = list()\r\n #print indexList\r\n for val in indexList:\r\n histogram.append(fullVals[val])\r\n histogram2 = list()\r\n for elem in histogram:\r\n for val in elem:\r\n histogram2.append(val)\r\n \r\n #print histogram2, storeDict\r\n makeHistogram(histogram2, 10, 'Number', 'numRolls', title=None)\r\n return sum(a)/float(len(storeDict))","repo_name":"noslav/python_projects_1","sub_path":"Statistics /pie_2_max.py","file_name":"pie_2_max.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"20751948001","text":"bienvenida = \"Calculadora de numeros de Mersenne\"\r\nbienvenida = bienvenida.center(50,\"*\")\r\nprint(bienvenida)\r\nprint(\"\"\"Quiere usted calcular el primer numero de Mersenne?\r\n1. Si\r\n2. No\"\"\")\r\nrespuesta = input(\"Ingrese el número correspondiente a su respuesta:\")\r\nbase = 2\r\nexponente = 1\r\nif respuesta == \"1\":\r\n number = base ** exponente - 1\r\n print(f\"El primer numero de Mersenne es {number}.\")\r\n print(\"\"\"Quiere usted calcular el siguiente numero de Mersenne?\r\n1. Si\r\n2. No\"\"\")\r\n respuesta = input(\"Ingrese el número correspondiente a su respuesta:\")\r\n while respuesta == \"1\":\r\n exponente += 1\r\n number = base ** exponente - 1\r\n print(f\"El siguiente numero de Mersenne es {number}.\")\r\n print(\"\"\"Quiere usted calcular el siguiente numero de Mersenne?\r\n1. Si\r\n2. No\"\"\")\r\n respuesta = input(\"Ingrese el número correspondiente a su respuesta:\")\r\n print(\"Gracias por probar la calculadora de Mersenne.\") \r\nelse:\r\n print(\"Para que ingreso en la calculadora? 
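
# --- Editor's aside: a hedged, illustrative sketch; not part of the original record ---
# The calculator above now prints Mersenne numbers M_n = 2**n - 1 (the file as
# scraped computed 2**n). The same sequence without the interactive loop:
def mersenne_numbers(count):
    # First `count` Mersenne numbers: 1, 3, 7, 15, ...
    return [2 ** n - 1 for n in range(1, count + 1)]

assert mersenne_numbers(4) == [1, 3, 7, 15]
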
:-/ \")\n","repo_name":"Luispdiaz/Ejercicios-prepas","sub_path":"Tarea/Tarea3.py","file_name":"Tarea3.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1229375307","text":"# orderbook/okex_orderbook.py\nimport requests\nimport pandas as pd\n\nclass Exchange:\n def __init__(self, name, api_url):\n self.name = name\n self.api_url = api_url\n\n def fetch_orderbook(self):\n raise NotImplementedError()\n\nclass OKEx(Exchange):\n def fetch_orderbook(self):\n response = requests.get(self.api_url + 'api/v5/market/books?instId=BTC-USDT&sz=400') # Add sz parameter here\n data = response.json()\n if data.get('code') == '0':\n orderbook = data['data'][0] # Access the first element of the data array\n bids = pd.DataFrame(orderbook['bids'], columns=['Price', 'Size', '_', '_'])\n bids['Side'] = 'buy'\n asks = pd.DataFrame(orderbook['asks'], columns=['Price', 'Size', '_', '_'])\n asks['Side'] = 'sell'\n return pd.concat([bids, asks], ignore_index=True)\n else:\n print('Error fetching orderbook:', data.get('msg'))\n return None\n\n\n\nif __name__ == '__main__':\n okex = OKEx('OKEx', 'https://www.okex.com/')\n print(okex.fetch_orderbook())\n","repo_name":"suleymanozkeskin/btc_aggregated_orderbook","sub_path":"orderbook/okex_orderbook.py","file_name":"okex_orderbook.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"21088488439","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport tempfile\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom numpy.testing import assert_equal\nimport pytest\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.models import Model\n\nfrom qkeras import QScaleShift\nfrom qkeras.utils import load_qmodel\n\n\ndef create_qmac_model(layer_cls,\n kwargs=None,\n input_data=None,\n weight_data=None):\n \"\"\"Create a QMAC model for test purpose.\"\"\"\n layer = layer_cls(**kwargs)\n x = Input(shape=input_data.shape[1:], dtype=input_data.dtype)\n y = layer(x)\n layer.set_weights(weight_data)\n\n return Model(x, y)\n\n\n@pytest.mark.parametrize(\n 'layer_kwargs, input_data, weight_data, bias_data, expected_output',\n [\n (\n {\n 'weight_quantizer': 'quantized_bits(8,2,alpha=1.0)',\n 'bias_quantizer': 'quantized_bits(8,2,alpha=1.0)',\n 'activation': 'quantized_bits(8,4,alpha=1.0)'\n },\n np.array([[1, 1], [2, 2]], dtype=K.floatx()),\n np.array([[1.0]]),\n np.array([[4.0]]),\n np.array([[5, 5], [6, 6]], dtype=K.floatx())),\n ])\ndef test_qmac(layer_kwargs, input_data, weight_data, bias_data,\n expected_output):\n model = create_qmac_model(\n layer_cls=QScaleShift,\n kwargs=layer_kwargs,\n input_data=input_data,\n weight_data=[weight_data, bias_data])\n\n actual_output = model.predict(input_data)\n assert_allclose(actual_output, expected_output, rtol=1e-4)\n\n # Test model loading and saving.\n fd, fname = tempfile.mkstemp('.h5')\n model.save(fname)\n\n # Load the model.\n loaded_model = load_qmodel(fname)\n\n # Clean the h5 file after loading the model\n os.close(fd)\n os.remove(fname)\n\n # Compare weights of original and loaded models.\n model_weights = model.weights\n loaded_model_weights = loaded_model.weights\n\n assert_equal(len(model_weights), len(loaded_model_weights))\n for i, model_weight in 
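
# --- Editor's aside: a hedged, illustrative sketch; not part of the original record ---
# The test above is a classic save/load round-trip: persist the model, reload it,
# then assert weights and predictions survived. The same pattern, sketched with
# plain numpy arrays standing in for layer weights (no Keras required):
import os
import pickle
import tempfile
import numpy as np

weights = [np.arange(6.0).reshape(2, 3), np.zeros(3)]
fd, path = tempfile.mkstemp('.pkl')
with open(path, 'wb') as f:
    pickle.dump(weights, f)
with open(path, 'rb') as f:
    loaded = pickle.load(f)
os.close(fd)
os.remove(path)
assert len(weights) == len(loaded)
for w, lw in zip(weights, loaded):
    np.testing.assert_array_equal(w, lw)
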
enumerate(model_weights):\n assert_equal(model_weight.numpy(), loaded_model_weights[i].numpy())\n\n # Compare if loaded models have the same prediction as original models.\n loaded_model_output = loaded_model.predict(input_data)\n assert_equal(actual_output, loaded_model_output)\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n","repo_name":"google/qkeras","sub_path":"tests/qmac_test.py","file_name":"qmac_test.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":508,"dataset":"github-code","pt":"81"} +{"seq_id":"41235969821","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, in_planes, planes, norm_fn='group', stride=1):\n super(ResidualBlock, self).__init__()\n \n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)\n self.relu = nn.ReLU(inplace=True)\n\n num_groups = planes // 8\n\n if norm_fn == 'group':\n self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)\n self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)\n if not stride == 1:\n self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)\n \n elif norm_fn == 'batch':\n self.norm1 = nn.BatchNorm2d(planes)\n self.norm2 = nn.BatchNorm2d(planes)\n if not stride == 1:\n self.norm3 = nn.BatchNorm2d(planes)\n \n elif norm_fn == 'instance':\n self.norm1 = nn.InstanceNorm2d(planes)\n self.norm2 = nn.InstanceNorm2d(planes)\n if not stride == 1:\n self.norm3 = nn.InstanceNorm2d(planes)\n\n elif norm_fn == 'none':\n self.norm1 = nn.Sequential()\n self.norm2 = nn.Sequential()\n if not stride == 1:\n self.norm3 = nn.Sequential()\n\n if stride == 1:\n self.downsample = None\n \n else: \n self.downsample = nn.Sequential(\n nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)\n\n def forward(self, x):\n y = x\n y = self.relu(self.norm1(self.conv1(y)))\n y = self.relu(self.norm2(self.conv2(y)))\n\n if self.downsample is not None:\n x = self.downsample(x)\n\n return self.relu(x+y)\n\n\nclass BottleneckBlock(nn.Module):\n def __init__(self, in_planes, planes, norm_fn='group', stride=1):\n super(BottleneckBlock, self).__init__()\n \n self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0)\n self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride)\n self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0)\n self.relu = nn.ReLU(inplace=True)\n\n num_groups = planes // 8\n\n if norm_fn == 'group':\n self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)\n self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)\n self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)\n if not stride == 1:\n self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)\n \n elif norm_fn == 'batch':\n self.norm1 = nn.BatchNorm2d(planes//4)\n self.norm2 = nn.BatchNorm2d(planes//4)\n self.norm3 = nn.BatchNorm2d(planes)\n if not stride == 1:\n self.norm4 = nn.BatchNorm2d(planes)\n \n elif norm_fn == 'instance':\n self.norm1 = nn.InstanceNorm2d(planes//4)\n self.norm2 = nn.InstanceNorm2d(planes//4)\n self.norm3 = nn.InstanceNorm2d(planes)\n if not stride == 1:\n self.norm4 = nn.InstanceNorm2d(planes)\n\n elif norm_fn == 'none':\n self.norm1 = nn.Sequential()\n self.norm2 = nn.Sequential()\n self.norm3 = nn.Sequential()\n if not stride == 1:\n self.norm4 = 
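
# --- Editor's aside: a hedged, illustrative sketch; not part of the original record ---
# ResidualBlock and BottleneckBlock above share one pattern: a convolutional
# branch plus an (optionally downsampled) identity branch, summed before the
# final ReLU. The skip connection in isolation, assuming only torch:
import torch
import torch.nn as nn

class TinyResBlock(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.conv = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Identity shortcut: the output keeps the input plus a learned residual.
        return self.relu(x + self.conv(x))

y = TinyResBlock(8)(torch.randn(1, 8, 16, 16))
assert y.shape == (1, 8, 16, 16)
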
nn.Sequential()\n\n if stride == 1:\n self.downsample = None\n \n else: \n self.downsample = nn.Sequential(\n nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4)\n\n def forward(self, x):\n y = x\n y = self.relu(self.norm1(self.conv1(y)))\n y = self.relu(self.norm2(self.conv2(y)))\n y = self.relu(self.norm3(self.conv3(y)))\n\n if self.downsample is not None:\n x = self.downsample(x)\n\n return self.relu(x+y)\n\nDIM=64\n\nclass ESAC_DROID_Net(nn.Module):\n '''\n FCN architecture for scene coordiante regression.\n The network has two output heads: One predicting a 3d scene coordinate, and a 1d neural guidance weight (if uncertainty is not None).\n The network makes dense predictions, but the output is subsampled by a factor of 8 compared to the input.\n '''\n\n OUTPUT_SUBSAMPLE = 8\n\n def __init__(self, mean,uncertainty, output_dim=128, norm_fn='batch', dropout=0.0, multidim=False):\n '''\n Constructor.\n '''\n super(ESAC_DROID_Net, self).__init__()\n print('ESAC_DROID_Net')\n\n self.un = uncertainty\n\n self.norm_fn = norm_fn\n self.multidim = multidim\n\n if self.norm_fn == 'group':\n self.norm1 = nn.GroupNorm(num_groups=8, num_channels=DIM)\n \n elif self.norm_fn == 'batch':\n self.norm1 = nn.BatchNorm2d(DIM)\n\n elif self.norm_fn == 'instance':\n self.norm1 = nn.InstanceNorm2d(DIM)\n\n elif self.norm_fn == 'none':\n self.norm1 = nn.Sequential()\n\n self.conv1 = nn.Conv2d(3, DIM, kernel_size=7, stride=2, padding=3)\n self.relu1 = nn.ReLU(inplace=True)\n\n self.in_planes = DIM\n self.layer1 = self._make_layer(DIM, stride=1)\n self.layer2 = self._make_layer(2*DIM, stride=2)\n self.layer3 = self._make_layer(4*DIM, stride=2)\n\n # output convolution\n self.conv2 = nn.Conv2d(4*DIM, output_dim, kernel_size=1)\n\n if self.multidim:\n self.layer4 = self._make_layer(256, stride=2)\n self.layer5 = self._make_layer(512, stride=2)\n\n self.in_planes = 256\n self.layer6 = self._make_layer(256, stride=1)\n\n self.in_planes = 128\n self.layer7 = self._make_layer(128, stride=1)\n\n self.up1 = nn.Conv2d(512, 256, 1)\n self.up2 = nn.Conv2d(256, 128, 1)\n self.conv3 = nn.Conv2d(128, output_dim, kernel_size=1)\n\n if dropout > 0:\n self.dropout = nn.Dropout2d(p=dropout)\n else:\n self.dropout = None\n\n self.res1_conv1 = nn.Conv2d(256, 256, 3, 1, 1)\n self.res1_conv2 = nn.Conv2d(256, 256, 1, 1, 0)\n self.res1_conv3 = nn.Conv2d(256, 256, 3, 1, 1)\n\n self.res2_conv1 = nn.Conv2d(256, 512, 3, 1, 1)\n self.res2_conv2 = nn.Conv2d(512, 512, 1, 1, 0)\n self.res2_conv3 = nn.Conv2d(512, 512, 3, 1, 1)\n\n self.res2_skip = nn.Conv2d(256, 512, 1, 1, 0)\n\n self.res3_conv1 = nn.Conv2d(512, 512, 3, 1, 1)\n self.res3_conv2 = nn.Conv2d(512, 512, 1, 1, 0)\n self.res3_conv3 = nn.Conv2d(512, 512, 3, 1, 1)\n\n # output head 1, scene coordinates\n self.fc1 = nn.Conv2d(512, 512, 1, 1, 0)\n self.fc2 = nn.Conv2d(512, 512, 1, 1, 0)\n self.fc3 = nn.Conv2d(512, 3, 1, 1, 0)\n\n # output head 2, neural guidance\n if self.un:\n self.fc1_1 = nn.Conv2d(512, 512, 1, 1, 0)\n self.fc2_1 = nn.Conv2d(512, 512, 1, 1, 0)\n self.fc3_1 = nn.Conv2d(512, 1, 1, 1, 0)\n\n # learned scene coordinates relative to a mean coordinate (e.g. 
center of the scene)\n self.register_buffer('mean', torch.tensor(mean.size()).cuda())\n self.mean = mean.clone()\n\n def _make_layer(self, dim, stride=1):\n layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)\n layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)\n layers = (layer1, layer2)\n \n self.in_planes = dim\n return nn.Sequential(*layers)\n\n def forward(self, inputs):\n '''\n Forward pass.\n inputs -- 4D data tensor (BxCxHxW)\n '''\n batch_size = inputs.size(0)\n\n x = inputs\n\n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu1(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n\n res = self.conv2(x)\n\n # origin\n x = F.relu(self.res1_conv1(res))\n x = F.relu(self.res1_conv2(x))\n x = F.relu(self.res1_conv3(x))\n\n res = res + x\n\n x = F.relu(self.res2_conv1(res))\n x = F.relu(self.res2_conv2(x))\n x = F.relu(self.res2_conv3(x))\n\n res = self.res2_skip(res) + x\n\n x = F.relu(self.res3_conv1(res))\n x = F.relu(self.res3_conv2(x))\n x = F.relu(self.res3_conv3(x))\n\n res = res + x\n\n # output head 1, scene coordinates\n sc = F.relu(self.fc1(res))\n sc = F.relu(self.fc2(sc))\n sc = self.fc3(sc)\n\n sc[:, 0,:,:] += self.mean[0]\n sc[:, 1,:,:] += self.mean[1]\n sc[:, 2,:,:] += self.mean[2]\n\n # output head 2, neural guidance\n if self.un:\n log_ng = F.relu(self.fc1_1(res))\n log_ng = F.relu(self.fc2_1(log_ng))\n log_ng = self.fc3_1(log_ng)\n un = torch.exp(log_ng)\n\n else:\n un = None\n\n return sc,un\n\n def init_weights(self):\n # init_modules = self.modules()\n # for m in init_modules:\n # if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Linear):\n # torch.nn.init.xavier_uniform_(m.weight.data)\n # if m.bias is not None:\n # torch.nn.init.constant_(m.bias.data, 0.0)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):\n if m.weight is not None:\n nn.init.constant_(m.weight, 1)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n","repo_name":"XinWu98/SC-wLS","sub_path":"Dense_Nets/ESAC_DROID.py","file_name":"ESAC_DROID.py","file_ext":"py","file_size_in_byte":9820,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"4206476226","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 8 14:07:26 2019\n\n@author: alepe\n\"\"\"\n\"\"\"Premier exemple avec Tkinter.\n\nOn crée une fenêtre simple qui souhaite la bienvenue à l'utilisateur.\n\n\"\"\"\n\n# On importe Tkinter\nfrom tkinter import *\n\n# On crée une fenêtre, racine de notre interface\nfenetre = Tk()\n\n# On crée un label (ligne de texte) souhaitant la bienvenue\n# Note : le premier paramètre passé au constructeur de Label est notre\n# interface racine\nchamp_label = Label(fenetre, text=\"Salut les Zér0s !\")\n\n# On affiche le label dans la fenêtre\nchamp_label.pack()\n#un bouton\n#bouton_quitter = Button(fenetre, text=\"Quitter\", command=fenetre.quit)\n#bouton_quitter.pack()\n\n\n#écrire dans la fenetre\nvar_texte = StringVar()\nligne_texte = Entry(fenetre, textvariable=var_texte, width=1)\nligne_texte.pack()\n#cocher une case\n\nvar_case = IntVar()\ncase = Checkbutton(fenetre, text=\"Ne plus poser cette question\", variable=var_case)\ncase.pack()\n\nliste = Listbox(fenetre)\nliste.pack()\nliste.insert(END, \"Pierre\")\nliste.insert(END, \"Feuille\")\nliste.insert(END, \"Ciseau\")\n# On démarre la boucle Tkinter qui 
s'interompt quand on ferme la fenêtre\nfenetre.mainloop()\n\nvar_case.get() #1 ou 0 suivant qu'on ait coché la case ou non\n\nprint(liste.curselection())","repo_name":"gheritarish/PAPPL","sub_path":"InterfaceGraphique.py","file_name":"InterfaceGraphique.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32099220814","text":"class ListNode:\n def __init__(self, val, next = None):\n self.val = val\n self.next = next\n #self.prev = prev\n\ndef makeInput(file):\n with open(file) as f:\n lines = f.readlines()\n \n lst = []\n prev = None\n for x in lines:\n current = ListNode(int(x.strip()) * 811589153)\n if current.val == 0:\n zero = current\n #current.prev = prev\n if prev:\n prev.next = current\n lst.append(current)\n prev = current\n\n current.next = lst[0]\n #lst[0].prev = current\n\n return lst, zero\n\nfile = 'day20.txt'\n\norder, zero = makeInput(file)\nllst = order.copy()\n\ni = 0\nn = len(order)\ncurrent = order[0]\n# for j in range(n):\n# print(order[j].val)\n# print(current.val)\n# current = current.next\nfor _ in range(10):\n print(_)\n for i in range(n):\n current = order[i]\n prev = current\n while prev.next != current:\n prev = prev.next\n val = current.val % (n-1)\n #print(val)\n for _ in range(val):\n curNext = current.next\n current.next = curNext.next\n curNext.next = current\n prev.next = curNext\n prev = curNext\n\ncurrent = zero\n# temp = zero\n# for _ in range(n):\n# print(temp.val)\n# temp = temp.next\n\ntotalSum = 0\nfor i in range(3):\n for j in range(1000):\n current = current.next\n print(i,current.val)\n totalSum += current.val\n\nprint(\"totalSum = \", totalSum)\n\n ","repo_name":"Isaac-Somerville/AdventOfCode2022","sub_path":"day20.py","file_name":"day20.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4740957794","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.distributions import Normal\nimport torch.nn.functional as F\n\nimport numpy as np\nfrom utils import *\n\nclass Swish(nn.Module):\n def forward(self, x):\n return x * torch.sigmoid(x)\n\nclass NPG_NETS(nn.Module):\n def __init__(self,\n state_size,\n action_size,\n device,\n policy_hidden,\n value_hidden,\n lr_value,\n reg_value):\n super().__init__()\n self.policy = nn.Sequential(nn.Linear(state_size, policy_hidden),\n nn.Tanh(),\n nn.Linear(policy_hidden, policy_hidden),\n nn.Tanh(),\n # nn.Linear(policy_hidden, policy_hidden),\n # nn.ReLU(),\n nn.Linear(policy_hidden, action_size))\n \n self.min_log_std = torch.ones(action_size, device = device) * -2.5\n \n self.policy_log_std = nn.Parameter(torch.ones(action_size, device = device) * 0.0)\n \n self.policy_params = [self.policy_log_std] + list(self.policy.parameters()) \n \n # self.param_shapes = [p.data.numpy().shape for p in self.policy_params]\n # self.param_sizes = [p.data.numpy().size for p in self.policy_params]\n \n self.value = nn.Sequential(nn.Linear(state_size, value_hidden),\n nn.ReLU(),\n nn.Linear(value_hidden, value_hidden),\n nn.ReLU(),\n # nn.Linear(value_hidden, value_hidden),\n # nn.ReLU(),\n nn.Linear(value_hidden, 1))\n \n self.optim_value = optim.Adam(self.value.parameters(), weight_decay = reg_value)\n \n # for m in self.modules():\n # if type(m) is nn.Linear:\n # nn.init.xavier_normal_(m.weight)\n # nn.init.zeros_(m.bias)\n \n for param in list(self.policy.parameters())[-2:]:\n 
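
# --- Editor's aside: a hedged, illustrative sketch; not part of the original record ---
# The Advent of Code day-20 solver above mixes a circular sequence by moving each
# value `value % (n - 1)` positions. The same mixing with a plain index list
# instead of a hand-rolled linked list:
def mix(values, rounds=1):
    n = len(values)
    order = list(range(n))  # positions hold indices into `values`
    for _ in range(rounds):
        for i in range(n):
            pos = order.index(i)
            order.pop(pos)
            order.insert((pos + values[i]) % (n - 1), i)
    return [values[i] for i in order]

# AoC sample: grove coordinates of [1, 2, -3, 3, -2, 0, 4] sum to 3 after one mix.
mixed = mix([1, 2, -3, 3, -2, 0, 4])
z = mixed.index(0)
assert sum(mixed[(z + k) % len(mixed)] for k in (1000, 2000, 3000)) == 3
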
param.data = 1e-2 * param.data\n \n self.to(device)\n\nclass NPG_Agent():\n def __init__(self,\n state_size,\n action_size,\n max_action,\n min_action,\n device,\n policy_hidden = 64,\n value_hidden = 128,\n lr_value = 1e-3,\n reg_value = 0, #1e-3,\n gamma = 0.995,\n GAE_lambda = 0.97,\n norm_step_size = 0.05):\n \n self.nets = NPG_NETS(state_size, \n action_size, \n device, \n policy_hidden, \n value_hidden, \n lr_value,\n reg_value)\n \n self.state_size = state_size\n self.action_size = action_size\n self.max_action = torch.from_numpy(max_action).float().to(device)\n self.min_action = torch.from_numpy(min_action).float().to(device)\n self.gamma = gamma\n self.GAE_lambda = GAE_lambda\n self.norm_step_size = norm_step_size\n self.device = device\n \n # self.reward_scale = 5.0\n # self.target_entropy = -action_size\n # self.log_alpha = torch.zeros(1, requires_grad = True, device = device)\n # self.optim_alpha = optim.Adam([self.log_alpha], lr = 3e-4)\n \n self.mu_state = nn.Parameter(torch.zeros((1, state_size), device = device), requires_grad = False)\n self.sigma_state = nn.Parameter(torch.ones((1, state_size), device = device), requires_grad = False)\n \n self.mu_state_sim = nn.Parameter(torch.zeros((1, state_size), device = device), requires_grad = False)\n self.sigma_state_sim = nn.Parameter(torch.ones((1, state_size), device = device), requires_grad = False)\n \n def sample_actions(self, states):\n \n # State standardization might be the cause of the observed performance decay (?)\n # Probably not\n states = (states - self.mu_state_sim)/(self.sigma_state_sim + 1e-8)\n \n # states = torch.clamp(states, min = -10.0, max = 10.0)/10.0\n \n mean = self.nets.policy(states)\n \n log_std = self.nets.policy_log_std\n \n dist = Normal(mean, log_std.exp())\n actions_raw = dist.sample()\n \n # Transform actions to correct scale (tanh might cause issues, more likely to improve performance in MBRLAnt)\n actions = torch.tanh(actions_raw)\n \n log_probs = dist.log_prob(actions_raw) #- torch.log(1 - actions**2 + 1e-6)\n log_probs = log_probs.sum(-1)\n entropy = dist.entropy()\n entropy = entropy.sum(-1)\n \n # Clamping does not seem to work as well as Tanh\n # actions = actions_raw\n # actions = torch.max(torch.min(self.max_action, actions), self.min_action)\n # log_probs = dist.log_prob(actions).sum(-1)\n\n return actions, log_probs, entropy\n \n def sample_action(self, state, evaluate):\n state = torch.from_numpy(state).float().to(self.device).unsqueeze(0)\n \n state = (state - self.mu_state)/(self.sigma_state + 1e-8)\n \n # state = torch.clamp(state, min = -10.0, max = 10.0)/10.0\n \n with torch.no_grad():\n mean = self.nets.policy(state)\n \n log_std = self.nets.policy_log_std.detach().clone()\n \n if evaluate:\n # action = torch.max(torch.min(self.max_action, mean), self.min_action)\n return torch.tanh(mean).squeeze(0).cpu().numpy() # action.squeeze(0).cpu().numpy()\n else:\n action = mean + torch.randn_like(mean) * log_std.exp()\n # Constant exploration noise level seems to not work\n # action = mean + torch.randn_like(mean) * 0.1\n # action = torch.max(torch.min(self.max_action, action), self.min_action)\n return torch.tanh(action).squeeze(0).cpu().numpy() # action.squeeze(0).cpu().numpy()\n \n def update_state_stats(self, new_mu, new_sigma, sim):\n if not sim:\n self.mu_state.data = new_mu.data\n self.sigma_state.data = new_sigma.data\n else:\n self.mu_state_sim.data = new_mu.data\n self.sigma_state_sim.data = new_sigma.data\n \n def get_policy_std(self):\n print(f\"Policy std: 
{self.nets.policy_log_std.exp()}\")\n return self.nets.policy_log_std.exp().detach().clone().cpu().numpy()\n \n def GAE(self, states, rewards, log_probs, terminated):\n \n # Inputs are 3-D tensors with 0: Traj ind, 1: Step ind, 2: Dim\n \n TD_resids = []\n \n for state, reward, log_prob, term in zip(states, rewards, log_probs, terminated):\n with torch.no_grad():\n state = torch.clamp(state, min = -10.0, max = 10.0)/10.0\n state_values = self.nets.value(state).squeeze(-1)\n state_values = torch.cat([state_values[:-1], \n torch.tensor([0.0]).to(self.device) if term else state_values[-1, None]])\n TD_resid = reward + self.gamma * state_values[1:] - state_values[:-1]\n # Entropy regularization\n # TD_resid = reward * self.reward_scale - log_prob.detach() + self.gamma * state_values[1:] - state_values[:-1]\n TD_resids.append(TD_resid)\n \n advantages = []\n for TD_resid in TD_resids:\n advantage = torch.zeros_like(TD_resid)\n run_sum = 0.0\n for t in reversed(range(0, TD_resid.shape[0])):\n run_sum = TD_resid[t] + (self.gamma * self.GAE_lambda) * run_sum\n advantage[t] = run_sum\n advantages.append(advantage)\n \n # with torch.no_grad():\n # state_values = self.nets.value(states).squeeze(-1) #* not_dones\n # TD_resid = rewards + state_values[:, 1:] - state_values[:, :-1]\n \n # advantages = torch.zeros(TD_resid.shape[:2], device = self.device)\n # run_sum = 0.0\n # for t in reversed(range(0, TD_resid.shape[1])):\n # run_sum = TD_resid[:, t] + (self.gamma * self.GAE_lambda) * run_sum\n # advantages[:, t] = run_sum\n \n # Whitening (seems to contribute to performance decay (?))\n advantages = torch.cat(advantages, dim = 0)\n advantages = (advantages - advantages.mean())/(advantages.std() + 1e-6)\n \n return advantages\n \n def update_policy(self, states, actions,\n log_probs, entropies, rewards, terminated):\n \n # Compute advantages\n advantages = self.GAE(states, rewards, log_probs, terminated)\n \n # Compute vanilla PG objective (surrogate CPI)\n # old_log_probs = log_probs.detach().clone() \n \n # imp_ratio = (log_probs - old_log_probs).exp()\n # obj_fun = imp_ratio * advantages\n \n log_probs = torch.cat(log_probs, dim = 0)\n entropies = torch.cat(entropies, dim = 0)\n # log_probs_old = log_probs.detach().clone()\n # imp_ratios = (log_probs - log_probs_old).exp()\n \n # 0.001\n obj_fun = log_probs * advantages + 0.001 * entropies\n \n surr_CPI = obj_fun.mean()\n \n # Compute PG\n vpg = torch.autograd.grad(surr_CPI, self.nets.policy_params)\n vpg = flat_grad(vpg)\n \n print(f'VPG: {vpg} nan? : {torch.isnan(vpg).any()} inf? 
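
# --- Editor's aside: a hedged, illustrative sketch; not part of the original record ---
# The natural-gradient step below solves F x = g (F the Fisher matrix, g the
# vanilla policy gradient) by conjugate gradient rather than inverting F.
# Generic CG for a symmetric positive-definite system, numpy only:
import numpy as np

def cg_solve(A, b, iters=10, tol=1e-10):
    x = np.zeros_like(b)
    r = b.copy()  # residual b - A @ x, with x starting at zero
    p = r.copy()
    rs = r @ r
    for _ in range(iters):
        Ap = A @ p
        alpha = rs / (p @ Ap)
        x += alpha * p
        r -= alpha * Ap
        rs_new = r @ r
        if rs_new < tol:
            break
        p = r + (rs_new / rs) * p
        rs = rs_new
    return x

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
assert np.allclose(cg_solve(A, b), np.linalg.solve(A, b))
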
: {torch.isinf(vpg).any()}')\n \n states = torch.cat(states, dim = 0)\n states_cg = (states - self.mu_state_sim)/(self.sigma_state_sim + 1e-8)\n \n # states_cg = torch.clamp(states_cg, min = -10.0, max = 10.0)/10.0\n \n # Compute Natural PG\n npg = conjugate_gradient((self.nets.policy, self.nets.policy_params, self.nets.policy_log_std), states_cg, vpg, vpg.clone(),\n action_size = self.action_size, device = self.device)\n print(f'NPG: {npg}')\n \n # Update policy parameters\n current_params = flat_params(self.nets.policy_params)\n new_params = current_params + (torch.abs(self.norm_step_size / (torch.dot(vpg, npg) + 1e-10))).sqrt() * npg\n \n # set_param_values(self.nets.policy_params, new_params.clone(),\n # self.nets.param_sizes, self.nets.param_shapes, self.nets.min_log_std, self.device)\n # self.nets.policy_params = [self.nets.policy_log_std] + list(self.nets.policy.parameters()) \n \n update_model(self.nets.policy_params, new_params)\n self.nets.policy_log_std.data.copy_(torch.max(self.nets.policy_log_std.data, self.nets.min_log_std))\n \n # Update simulated state stats\n # self.mu_state_sim.data = (states).mean(0).view(1, -1)\n # self.sigma_state_sim.data = torch.mean(torch.abs(states - (states).mean(0)), dim = 0).view(1, -1)\n \n # def update_temperature(self, log_probs):\n # log_probs = torch.cat(log_probs, dim = 0)\n # loss_alpha = -(self.log_alpha.exp() * (log_probs + self.target_entropy).detach()).mean()\n \n # self.optim_alpha.zero_grad()\n # loss_alpha.backward()\n # self.optim_alpha.step()\n \n def update_value(self, states, rewards, log_probs, terminated, epochs = 1, batch_size = 128):\n \n returns = []\n processed_states = []\n for state, reward, log_prob, term in zip(states, rewards, log_probs, terminated):\n returns_traj = []\n run_sum = 0.0\n for t in reversed(range(0, reward.shape[0])):\n run_sum = reward[t, None] + self.gamma * run_sum\n # run_sum = reward[t, None] * self.reward_scale - log_prob[t, None].detach() + self.gamma * run_sum\n returns_traj.append(run_sum)\n returns_traj.reverse()\n if term:\n returns_traj.append(torch.FloatTensor([0]).to(self.device))\n processed_states.append(state)\n else:\n processed_states.append(state[:-1, :])\n \n returns_traj = torch.cat(returns_traj, dim = 0)\n returns.append(returns_traj)\n \n returns = torch.cat(returns, dim = 0)\n \n states_target = torch.cat(processed_states, dim = 0)\n \n states_target = torch.clamp(states_target, min = -10.0, max = 10.0)/10.0\n \n n = states_target.shape[0]\n \n for epoch in range(epochs):\n\n indices = np.random.permutation(n)\n for batch_num in range(int(np.ceil(n / batch_size))):\n batch_inds = indices[batch_num * batch_size:(batch_num + 1) * batch_size]\n \n states_batch = states_target[batch_inds, :]\n returns_batch = returns[batch_inds]\n \n value_est = self.nets.value(states_batch)\n loss = ((value_est - returns_batch) ** 2).mean()\n\n \n self.nets.optim_value.zero_grad()\n loss.backward()\n self.nets.optim_value.step()\n","repo_name":"MKaivola/MBRLAnt","sub_path":"NPG.py","file_name":"NPG.py","file_ext":"py","file_size_in_byte":13571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"18094408144","text":"#Stephanie Bravo\n#February 1, 2019\n#This program prints out each letter, shifted right by 13, in the word the user enteres\n\nmessage = input(\"Enter a word:\")\ncoded = \"\"\nfor ch in message:\n offset = ord(ch) - ord('a') + 13 #tells the computer to add 13\n wrap = offset % 26 #lets the computer know that when at 26 go back 
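
# --- Editor's aside: a hedged, illustrative sketch; not part of the original record ---
# The classroom cipher above is ROT13 on lowercase letters: shift by 13, wrap
# with modulo 26. One-line equivalent, checked against the stdlib codec:
import codecs

def rot13(word):
    return ''.join(chr((ord(c) - ord('a') + 13) % 26 + ord('a')) for c in word)

assert rot13('hello') == 'uryyb'
assert rot13('hello') == codecs.encode('hello', 'rot13')
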
to 0\n newChar = chr(ord('a')+ wrap) \n coded= coded + newChar\n\nprint(\"Your word in code is:\", coded)\n","repo_name":"stephanieb00/CSCI127","sub_path":"Python Work/assignment9.py","file_name":"assignment9.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33508444475","text":"import pickle\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import f1_score\n\nfrom sklearn.model_selection import StratifiedShuffleSplit\nimport pdb\n\nclass OracleTrainer:\n '''\n Class is dedicated for training process.\n '''\n def __init__(self, path):\n '''\n Initial method for initializing instance.\n :param path: path to file in csv format.\n '''\n self.path = path\n\n def train(self):\n '''\n Method implements training process and saves needed models.\n :return: classifier_dict, dictionary contains classifier (sklearn.ensemble.forest.RandomForestClassifier)\n and its accuracy score.\n '''\n data_frame = self._read_data()\n data_frame = self._clean_data(data_frame)\n\n sales_encoder, data_frame.sales = self._encode_data(data_frame.sales)\n salary_encoder, data_frame.salary = self._encode_data(data_frame.salary)\n\n labels, data = self._select_data_and_labels(data_frame)\n train_data, test_data, train_labels, test_labels = self._split_data_on_train_test_set(data, labels)\n\n classifier_dict = self._fit_model(train_data, test_data, train_labels, test_labels)\n\n self._save_file(\"sales_encoder\", sales_encoder)\n self._save_file(\"salary_encoder\", salary_encoder)\n self._save_file(\"classifier_dict\", classifier_dict)\n return classifier_dict\n\n def _read_data(self):\n '''\n Method performs reading data from csv file and converts it to data frame.\n :return: data frame, in the format of pandas.core.frame.DataFrame.\n '''\n data_frame = pd.read_csv(self.path)\n return data_frame\n\n def _clean_data(self, data_frame):\n '''\n Method deletes duplicates and resets index in the data frame.\n :param data_frame: data frame (pandas.core.frame.DataFrame), which contains duplicates.\n :return: cleaned data frame, in the format of pandas.core.frame.DataFrame.\n '''\n data_frame.drop_duplicates(inplace=True)\n new_data_frame = data_frame.reset_index(drop=True)\n return new_data_frame\n\n def _encode_data(self, column):\n '''\n Method converts categorical column into indicator column.\n :param column: column (pandas.core.series.Series), which contains categorical values.\n :return: encode_model, model (sklearn.preprocessing.label.LabelEncoder), model which encodes provided column.\n encoded_column, encoded column in the format of pandas.core.series.Series.\n '''\n encode_model = preprocessing.LabelEncoder()\n encode_model.fit(column)\n encoded_column = encode_model.transform(column)\n return encode_model, encoded_column\n\n def _select_data_and_labels(self, data_frame):\n '''\n Method selects the right columns for data (contains only features) and labels.\n :param data_frame: data frame (pandas.core.frame.DataFrame).\n :return: labels, in the format of pandas.core.series.Series.\n data, in the format of pandas.core.frame.DataFrame.\n '''\n labels = data_frame[\"left\"]\n data = data_frame.drop([\"left\"], axis=1)\n return labels, data\n\n def _split_data_on_train_test_set(self, data, labels):\n '''\n Method splits data, labels in train/test sets.\n :param data: data frame 
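
# --- Editor's aside: a hedged, illustrative sketch; not part of the original record ---
# The trainer below favours StratifiedShuffleSplit so class proportions survive
# the split. Its behaviour on a toy imbalanced label array:
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit

X = np.arange(20).reshape(10, 2)
y = np.array([0] * 8 + [1] * 2)  # 80/20 class imbalance
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
train_idx, test_idx = next(sss.split(X, y))
# Each half keeps the 80/20 ratio: four zeros and one one per split of size 5.
assert sorted(np.bincount(y[train_idx])) == [1, 4]
assert sorted(np.bincount(y[test_idx])) == [1, 4]
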
(pandas.core.frame.DataFrame), which contains only features.\n :param labels: labels for data (pandas.core.series.Series).\n :return: train_data, test_data - contains only features, in the format of pandas.core.frame.DataFrame.\n train_labels, test_labels - contains only labels, in the format of pandas.core.series.Series.\n '''\n # train_data, test_data, train_labels, test_labels = train_test_split(data,\n # labels,\n # test_size=0.2,\n # random_state=13)\n\n sss = StratifiedShuffleSplit(n_splits=3, test_size=0.25, random_state=0)\n for train_index, test_index in sss.split(data, labels):\n train_data, test_data = data.loc[train_index], data.loc[test_index]\n train_labels, test_labels = labels[train_index], labels[test_index]\n\n return train_data, test_data, train_labels, test_labels\n\n def _fit_model(self, train_data, test_data, train_labels, test_labels):\n '''\n Method creates a classifier and fits it using training data and labels.\n Then it calculates accuracy score on the given test data and labels.\n :param train_data: training data (pandas.core.frame.DataFrame).\n :param test_data: test data (pandas.core.frame.DataFrame).\n :param train_labels: training labels (pandas.core.series.Series).\n :param test_labels: test labels (pandas.core.series.Series).\n :return: classifier_dict, dictionary contains classifier (sklearn.ensemble.forest.RandomForestClassifier)\n and its accuracy score.\n '''\n classifier = RandomForestClassifier(n_estimators=33,\n criterion=\"gini\",\n max_depth=21,\n random_state=0,\n class_weight=\"balanced\")\n classifier.fit(train_data, train_labels)\n score = classifier.score(test_data, test_labels)\n predicted_test = classifier.predict(test_data)\n f1 = f1_score(test_labels, predicted_test, average='weighted')\n classifier_dict = {\"classifier\": classifier, \"score\": score, \"f1_score\": f1}\n return classifier_dict\n\n def _save_file(self, file_name, file):\n '''\n Method saves provided file.\n :param file_name: name of file(str).\n :param file: file, which is needed to save.\n :return: -\n '''\n file_path = \"saved_models/\" + file_name + \".pkl\"\n with open(file_path, \"wb\") as savefile:\n pickle.dump(file, savefile, protocol=pickle.HIGHEST_PROTOCOL)\n","repo_name":"IceMeooow/HR-analysis","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":6390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17032791655","text":"from django.db import models\nfrom django.core.mail import send_mail\nimport threading\nimport time\n\n# Create your models here.\nclass MailForSend(models.Model):\n mailer = models.EmailField(help_text=\"Введите Email получателя\")\n subject = models.CharField(max_length=50, help_text=\"Тема письма, не более 50 знаков.\")\n text = models.TextField()\n sec_to_send = models.BigIntegerField()\n is_send = models.BooleanField(default=False)\n \n def save(self, *args, **kwargs):\n if not self.is_send:\n \n super(MailForSend, self).save(*args, **kwargs) # Выполнение настоящего save().\n \n def send_maile_with_delay(mailer, subject, text, sec_to_send, *args, **kwargs):\n #print('Utils. Задача ', subject, ' получена в: ', time.ctime())\n time.sleep(sec_to_send)\n #print('Utils. 
Задача ', subject, ' выполнена в: ', time.ctime())\n send_mail(subject, text, 'umqambi@yandex.by', [mailer], fail_silently=False,)\n self.is_send = True\n #print('тема во вложенной ', self.subject)\n super(MailForSend, self).save(update_fields=['is_send'])\n\n t = threading.Thread(\n target=send_maile_with_delay, \n args=(self.mailer, self.subject, self.text, self.sec_to_send, self)\n )\n t.daemon = True\n t.start()\n else:\n print('тема ', self.subject, 'is_send = ', self.is_send)\n\n\n\n","repo_name":"umqambi/mailsender-e2","sub_path":"sender/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"70786255625","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# pylint: disable=wrong-import-position,C0330\n\n\"\"\"\nFile : dbs_event_count_plot.py\nAuthor : Christian Ariza \nDescription : Create the event count plot used, for example, in the C-RSG report.\n For additional documentation look at the notebook in the CMSSpark/src/notebooks folder.\nNotes : We disabled wrong-import-position because matplotlib needs to setup the backend before pyplot is imported.\n We disabled C0330 because pylint complains following the\n old recommendation. Black follows the new indentation recommendation.\n\"\"\"\n\n# system modules\nimport os\nimport click\nimport json\nimport logging\nfrom datetime import timedelta, date, datetime\nfrom dateutil.relativedelta import relativedelta\n\nimport pandas as pd\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import from_unixtime, concat, year, month, lpad\n\n# Matplotlib needs to set the backend before pyplot is imported. That will\n# cause pylint complain about the imports not being at top of file.\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# CMSSpark modules\nfrom CMSSpark import spark_utils\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.StreamHandler())\n_VALID_DATE_FORMATS = [\"%Y/%m\"]\n_VALID_TYPES = [\"pdf\", \"png\", \"jpg\", \"svg\"]\n\n\ndef plot_tiers_month(data, colors_file=None, attributes=None):\n \"\"\"Create a stacked bar plot of events by data tier/month.\n\n args:\n - data: pandas dataframe with the month, data_tier_name and nevents columns.\n \"\"\"\n data_by_month_tier = data\n data_by_month_tier.month = data.month.astype(int)\n fig, plot_ax = plt.subplots(figsize=(20, 7))\n months = pd.DataFrame(\n {\n \"month\": pd.Series(\n data_by_month_tier.month.unique().astype(int), name=\"month\"\n )\n }\n )\n logger.debug(months.dtypes)\n tiers = data_by_month_tier.data_tier_name.unique()\n totals = (\n data_by_month_tier[[\"data_tier_name\", \"nevents\"]]\n .groupby(\"data_tier_name\")\n .sum()\n .reset_index()\n )\n totals[\"nevents\"] = totals[\"nevents\"].map(\"{:,d}\".format)\n totals[\"new_label\"] = totals[\"data_tier_name\"].str.cat(\n totals[\"nevents\"].values.astype(str), sep=\" nevents: \"\n )\n label_replacements = dict(zip(totals.data_tier_name, totals.new_label))\n data_by_month_tier = data_by_month_tier.replace(\n {\"data_tier_name\": label_replacements}\n )\n pivot_df = data_by_month_tier.pivot(\n index=\"month\", columns=\"data_tier_name\", values=\"nevents\"\n )\n plt.title(\n f\"Event count plot from {data_by_month_tier.month.min()} to {data_by_month_tier.month.max()}\"\n )\n _default_colors = sns.color_palette(\"husl\", len(tiers))\n try:\n colors = _default_colors if not colors_file else 
json.load(colors_file)\n logger.info(\"colors before replacements %s\", colors)\n if isinstance(colors, dict):\n _c_r = {label_replacements[k]: colors.get(k) for k in label_replacements}\n colors = [_c_r[k] for k in pivot_df.columns]\n logger.info(\"colors after replacements %s\", colors)\n except (json.JSONDecodeError, KeyError) as err:\n # If the file is not a valid json,\n # or the keys doesn't correspond to the tiernames,\n # use the random palette.\n logger.error(\n \"There was a problem while reading the colors file. %s, %s\",\n colors_file,\n err,\n )\n colors = _default_colors\n plt.xlabel(\"Month\")\n plt.ylabel(\"Event count\")\n # set matplotlib rcParams based on provided attributes for the plot\n # https://matplotlib.org/stable/api/matplotlib_configuration_api.html#matplotlib.rc\n if attributes:\n for key, kwds in attributes.items():\n matplotlib.rc(key, **kwds)\n pivot_df.plot.bar(stacked=True, color=colors, ax=plot_ax).legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n return fig\n\n\ndef get_events_by_tier_month(spark, start_date, end_date,\n tiers_raw=None, remove_raw=None, skims_raw=None, only_valid_files=False, verbose=False):\n \"\"\"Generate a pandas dataframe containing data_tier_name, month, nevents for the given time period.\n\n It will add virtual tiers based on the skims.\n args:\n - spark: Spark session\n - start_date: String with the date y format yyyy/MM/dd\n - end_date: String with the date y format yyyy/MM/dd\n - tiers_raw: List of tiers\n - remove_raw: List of remove patterns\n - skims_raw: List of skim patterns\n - only_valid_files: True if you want to take into account only the valid files.\n - verbose: True if you want additional output messages, default False.\n \"\"\"\n if tiers_raw is None:\n tiers_raw = [\".*\"]\n if skims_raw is None:\n skims_raw = []\n if remove_raw is None:\n remove_raw = []\n tiers = \"^({})$\".format(\"|\".join([\"{}\".format(tier.strip()) for tier in tiers_raw]))\n skims_rlike = (\n \".*-({})-.*\".format(\"|\".join([elem.strip() for elem in skims_raw]))\n if skims_raw\n else \"^$\"\n )\n remove_rlike = (\n \".*({}).*\".format(\"|\".join([elem.strip().lower() for elem in remove_raw]))\n if remove_raw\n else \"^$\"\n )\n tables = spark_utils.dbs_tables(spark, tables=[\"ddf\", \"bdf\", \"fdf\", \"dtf\"])\n if verbose:\n logger.info(\"remove %s\", remove_rlike)\n logger.info(\"skims %s\", skims_rlike)\n for k in tables:\n # tables[k].cache()\n logger.info(k)\n tables[k].printSchema()\n tables[k].show(5, truncate=False)\n datablocks_file_events_df = spark.sql(\n \"\"\"SELECT sum(fdf.f_event_count) as f_event_count,\n max(ddf.d_data_tier_id) as d_data_tier_id,\n d_dataset,\n b_block_name,\n max(b_creation_date) as b_creation_date,\n max(b_block_size) as size\n FROM ddf JOIN bdf on ddf.d_dataset_id = bdf.b_dataset_id\n JOIN fdf on bdf.b_block_id = fdf.f_block_id\n WHERE d_is_dataset_valid = 1\n {}\n group by d_dataset, b_block_name\n \"\"\".format(\n \"AND f_is_file_valid = 1\" if only_valid_files else \"\"\n )\n )\n fiter_field = \"b_creation_date\"\n datablocks_file_events_df = (\n datablocks_file_events_df.withColumn(fiter_field, from_unixtime(fiter_field))\n .filter(\n fiter_field\n + \" between '{}' AND '{}' \".format(\n start_date.replace(\"/\", \"-\"), end_date.replace(\"/\", \"-\")\n )\n )\n .withColumn(\n \"month\", concat(year(fiter_field), lpad(month(fiter_field), 2, \"0\"))\n )\n )\n\n datablocks_file_events_df.registerTempTable(\"dbfe_df\")\n # Union of two queries:\n # - The first query will get all the selected data 
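
# --- Editor's aside: a hedged, illustrative sketch; not part of the original record ---
# plot_tiers_month above pivots long (month, tier, nevents) rows into a wide
# frame so pandas can draw one stacked bar per month. The reshape in isolation:
import pandas as pd

rows = pd.DataFrame({
    'month': ['202001', '202001', '202002'],
    'data_tier_name': ['AOD', 'RAW', 'AOD'],
    'nevents': [10, 5, 7],
})
wide = rows.pivot(index='month', columns='data_tier_name', values='nevents')
# wide.plot.bar(stacked=True) would now stack AOD and RAW within each month.
assert wide.loc['202001', 'RAW'] == 5
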
tiers,\n # excluding the datasets who match the skims\n # - The second query will get all the selected data tiers,\n # but only the dataset who match the skims.\n grouped = spark.sql(\n \"\"\"\n select month, data_tier_name, sum(f_event_count) as nevents\n from dbfe_df join dtf on data_tier_id = d_data_tier_id\n where\n data_tier_name rlike '{tiers}'\n and lower(d_dataset) not rlike '{remove}'\n and d_dataset not rlike '{skims}'\n group by month, data_tier_name\n UNION\n select month,\n concat(data_tier_name, '/',regexp_extract(d_dataset,'{skims}',1)) AS data_tier_name,\n sum(f_event_count) as nevents\n from dbfe_df join dtf on dtf.data_tier_id = d_data_tier_id\n where\n data_tier_name rlike '{tiers}'\n and lower(d_dataset) not rlike '{remove}'\n and d_dataset rlike '{skims}'\n group by month, concat(data_tier_name, '/',regexp_extract(d_dataset,'{skims}',1))\n \"\"\".format(\n tiers=tiers, remove=remove_rlike, skims=skims_rlike\n )\n )\n return grouped.toPandas()\n\n\ndef event_count_plot(start_date, end_date, output_folder, output_formats, tiers, remove_patterns, skims,\n colors_file=None, generate_csv=False, only_valid_files=False, attributes=None, verbose=False):\n \"\"\"\n args:\n - start_date: String with the start date in format yyyy/MM/dd\n - end_date: String with the end date format yyyy/MM/dd\n - output_folder: Path of the output folder.\n - output_formats: list of formata like [png, pdf]\n - tiers: List of tiers\n - remove_patterns: List of remove patterns\n - skims: List of skim patterns\n - generate_csv: save the pandas dataframe to csv\n - only_valid_files: True if you want to take into account only the valid files.\n - verbose: True if you want additional output messages, default False.\n \"\"\"\n spark = SparkSession.builder.appName(\"cms_dbs_event_count\").getOrCreate()\n event_count_pdf = get_events_by_tier_month(\n spark,\n start_date,\n end_date,\n tiers,\n remove_patterns,\n skims,\n only_valid_files,\n verbose,\n )\n start_date_f = event_count_pdf.month.min()\n end_date_f = event_count_pdf.month.max()\n events_fig = plot_tiers_month(event_count_pdf, colors_file, attributes)\n\n os.makedirs(output_folder, exist_ok=True)\n if generate_csv:\n csv_filename = f\"event_count_{start_date_f}-{end_date_f}.csv\"\n event_count_pdf.to_csv(os.path.join(output_folder, csv_filename))\n\n image_paths = []\n for output_format in output_formats:\n image_filename = f\"event_count_{start_date_f}-{end_date_f}.{output_format}\"\n image_path = os.path.join(output_folder, image_filename)\n events_fig.savefig(image_path, format=output_format, bbox_inches='tight')\n image_paths.append(os.path.abspath(image_path))\n return image_paths\n\n\n@click.command()\n@click.option(\"--start_month\", default=None, type=click.DateTime(_VALID_DATE_FORMATS),\n help=\"Start month in format yyyy/MM, defaults to: end_month - 11 months (i.e. one year period)\")\n@click.option(\"--end_month\", default=None, type=click.DateTime(_VALID_DATE_FORMATS),\n help=\"End month (inclusive) in format yyyy/MM, defaults to previous month\")\n@click.option(\"--output_folder\", default=\"./output\", help=\"Output folder for the plots\")\n@click.option(\"--output_format\", default=\"png\", type=click.Choice(_VALID_TYPES), help=\"Output format for the plots\")\n@click.option(\"--colors_file\", default=None, type=click.File('r'),\n help=\"A json file either with a list of colors (strings), or with a mapping of label and color. 
\"\n \"If the file is not valid, or is not provided, a default palette will be generated.\")\n@click.option(\"--tiers\", type=str,\n default=\"GEN,GEN-SIM,GEN-RAW,GEN-SIM-RECO,AODSIM,MINIAODSIM,\"\n \"RAWAODSIM,NANOAODSIM,GEN-SIM-DIGI-RAW,GEN-SIM-RAW,GEN-SIM-DIGI-RECO\",\n help=\"Comma separated list of tiers to consider. eg: GEN,GEN-SIM,GEN-RAW,GEN-SIM-RECO,AODSIM,MINIAODSIM\")\n@click.option(\"--remove\", default=\"test,backfill,jobrobot,sam,bunnies,penguins\",\n help=\"Comma separated list of case insensitive patterns. \"\n \"The datasets which name match any of the patterns will be ignored.\")\n@click.option(\"--skims\", default=\"\",\n help=\"Comma separated list of skims. The skims are case sensitive. Datasets which match the given skims \"\n \"will not be counted as part of the tier, but in a separated group named /.\")\n@click.option(\"--attributes\", default=None, help=\"matplotlib rc params file (JSON format)\")\n@click.option(\"--generate_csv\", is_flag=True, default=False, help=\"Create also a csv file with the plot data\")\n@click.option(\"--only_valid_files\", is_flag=True, default=False, help=\"Only consider valid files, default False\")\n@click.option(\"--test\", is_flag=True, default=False, help=\"Only consider valid files, default False\")\n@click.option(\"--verbose\", is_flag=True, default=False, help=\"Prints additional logging info\")\ndef main(start_month, end_month, output_folder, output_format, colors_file, tiers, remove, skims, generate_csv,\n only_valid_files, attributes, test, verbose):\n \"\"\"Main function\"\"\"\n # This script create Event Count Plots based on the dbs data. It prints the path of the created image in std output.\n click.echo('--------------------------------------------------------------------------------------------')\n click.echo(f'Input Arguments: start_month:{start_month}, end_month:{end_month}, '\n f'output_folder:{output_folder}, output_format:{output_format}, colors_file:{colors_file}, '\n f'tiers:{tiers}, remove:{remove}, skims:{skims}, generate_csv:{generate_csv}, '\n f'only_valid_files:{only_valid_files}, attributes:{attributes}, verbose:{verbose}')\n click.echo('--------------------------------------------------------------------------------------------')\n if verbose:\n logger.setLevel(logging.INFO)\n if not end_month:\n previous_month = date.today().replace(day=1) - timedelta(days=1)\n end_month = previous_month.strftime(\"%Y/%m\")\n if not start_month:\n _end_date = datetime.strptime(f\"{end_month}/01\", \"%Y/%m/01\")\n _start_date = _end_date - relativedelta(months=11)\n start_month = _start_date.strftime(\"%Y/%m\")\n\n tiers = tiers.split(\",\")\n skims = skims.split(\",\")\n remove = remove.split(\",\")\n start_date = f\"{start_month}/01\"\n if test:\n # If test, give start_month as 1 month ago in bash script, this will handle the rest\n start_date = f\"{start_month}/27\"\n\n # The query to the data exclude the last day,\n # so we will query to the first day of the next month\n _end_date = datetime.strptime(f\"{end_month}/01\", \"%Y/%m/01\") + relativedelta(months=1)\n end_date = _end_date.strftime(\"%Y/%m/%d\")\n # load rc param attributes from a given file\n if attributes:\n with open(attributes, 'r') as istream:\n attributes = json.load(istream)\n # always generate pdf by default in addition to given output format\n output_formats = [\"pdf\"]\n if output_format != \"pdf\":\n output_formats.append(output_format)\n image_file_names = event_count_plot(\n start_date,\n end_date,\n output_folder,\n output_formats,\n tiers,\n 
remove,\n skims,\n colors_file=colors_file,\n generate_csv=generate_csv,\n only_valid_files=only_valid_files,\n attributes=attributes,\n verbose=verbose,\n )\n for filename in image_file_names:\n print(filename)\n # Write filename to some generic file which will be written by bash script to create symbolic link\n with open(os.path.join(output_folder, output_format + \"_output_path_for_ln.txt\"), \"w+\") as f:\n f.write(filename)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dmwm/CMSSpark","sub_path":"src/python/CMSSpark/dbs_event_count_plot.py","file_name":"dbs_event_count_plot.py","file_ext":"py","file_size_in_byte":15333,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"16108946128","text":"\n\"\"\"\nSet up the plot figures, axes, and items to be done for each frame.\n\nThis module is imported by the plotting routines and then the\nfunction setplot is called to set the plot parameters.\n\n\"\"\"\n\n\n#--------------------------\ndef setplot(plotdata):\n#--------------------------\n \"\"\" \n Specify what is to be plotted at each frame.\n Input: plotdata, an instance of pyclaw.data.ClawPlotData.\n Output: a modified version of plotdata.\n \"\"\"\n\n from pyclaw.plotters.frametools import var_limits\n\n\n plimits = [-1., 1.]\n ulimits = [-1., 1.]\n xlimits = 'auto' # choose automatically\n\n\n # Pressure:\n # ---------\n \n plotfigure = plotdata.new_plotfigure(name='Pressure', figno=1)\n plotaxes = plotfigure.new_plotaxes(name='Pressure')\n plotaxes.axescmd = 'subplot(1,1,1)' \n plotaxes.xlimits = xlimits\n plotaxes.ylimits = plimits\n plotitem = plotaxes.new_plotitem(name='Pressure',plot_type='1d')\n plotitem.plot_var = 0 # q[0] is the pressure\n plotitem.plotstyle = '-'\n plotitem.color = 'b'\n\n\n # Velocity:\n # ---------\n plotfigure = plotdata.new_plotfigure(name='Velocity', figno=2)\n plotaxes = plotfigure.new_plotaxes(name='Velocity')\n plotaxes.axescmd = 'subplot(1,1,1)' \n plotaxes.xlimits = xlimits\n plotaxes.ylimits = ulimits\n plotitem = plotaxes.new_plotitem(name='Velocity',plot_type='1d')\n plotitem.plot_var = 1 # q[1] is the velocity\n plotitem.plotstyle = '-'\n plotitem.color = 'b'\n\n\n # Parameters used only when creating html and/or latex hardcopy\n # e.g., via pyclaw.plotters.frametools.printframes:\n\n plotdata.printfigs = True # print figures\n plotdata.print_format = 'png' # file format\n plotdata.print_framenos = 'all' # list of frames to print\n plotdata.print_fignos = 'all' # list of figures to print\n plotdata.html = True # create html files of plots?\n plotdata.latex = True # create latex file of plots?\n plotdata.latex_figsperline = 2 # layout of plots\n plotdata.latex_framesperline = 1 # layout of plots\n plotdata.latex_makepdf = False # also run pdflatex?\n\n return plotdata\n\n","repo_name":"geoflows/D-Claw","sub_path":"book/chap3/acousimple/setplot.py","file_name":"setplot.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"11038236945","text":"from tensorflow.keras import Model\nfrom tensorflow.keras.applications import resnet50\nfrom tensorflow.keras.layers import Input, Dense, GlobalAveragePooling2D, Dropout\n\ndef build_model(input_shape, output_shape):\n pretrained_model = resnet50.ResNet50(\n weights='imagenet',\n include_top=False\n )\n x = pretrained_model.output\n x = GlobalAveragePooling2D()(x)\n x = Dense(2048, activation='relu')(x)\n x = Dropout(0.5)(x)\n outputs = 
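
# --- Editor's aside: a hedged, illustrative sketch; not part of the original record ---
# The assignment being completed here attaches a fresh softmax head to a
# headless pretrained backbone. The same pattern with the backbone frozen first
# (a common extra step that the scraped file does not take):
from tensorflow.keras import Model
from tensorflow.keras.applications import resnet50
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D

backbone = resnet50.ResNet50(weights=None, include_top=False)  # weights=None keeps this offline-friendly
backbone.trainable = False  # train only the new head at first; unfreeze later for fine-tuning
x = GlobalAveragePooling2D()(backbone.output)
head = Dense(10, activation='softmax')(x)
model = Model(inputs=backbone.input, outputs=head)
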
Dense(output_shape, activation='softmax')(x)\n model = Model(inputs=pretrained_model.input, outputs=outputs)\n return model\n","repo_name":"david-riser/33k-images","sub_path":"supervised/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"4800882841","text":"#!/usr/bin/env python\n\n# this is a shameful comment\n# another shameful comment\n# working on workspace beta\n\n#import needed libraries\nimport zipfile\nimport sys\nimport os\nimport logging\n\nlogging.basicConfig(filename=\"file_ex.log\", level = logging.DEBUG)\n\nlogging.info(\"checking to see if the backup.zip file exists\")\n\nif os.path.exists(\"backup.zip\"):\n logging.info(\"it exists!\")\n try:\n # use zipfile to open file instead of os.file\n zip_file = zipfile.ZipFile(\"backup.zip\",'a')\n except:\n err = sys.exc_info()\n logging.error(\"Unable to open backup.zip in append mode\")\n logging.error(\"Error Num:\" + str(err[1].args[0]))\n logging.error(\"Error Msg: \" + err[1].args[1])\n sys.exit()\nelse:\n logging.info(\"creating backup.zip\")\n try:\n zip_file = zipfile.ZipFile(\"backup.zip\", \"w\")\n except:\n err = sys.exc_info()\n logging.error(\"Unable to create backup.zip in write mode\")\n logging.error(\"Error Num:\" + str(err[1].args[0]))\n logging.error(\"Error Msg: \" + err[1].args[1])\n sys.exit()\n\nlogging.info(\"adding test.txt to backup.zip\")\n\ntry:\n zip_file.write('test.txt','test', zipfile.ZIP_DEFLATED)\n\nexcept:\n err = sys.exc_info()\n logging.error(\"Unable to add test to backup.zip\")\n logging.error(\"Error Num:\" + str(err[1].args[0]))\n logging.error(\"Error Msg: \" + err[1].args[1])\n\nzip_file.close()\n","repo_name":"triibal/workspace","sub_path":"archiving.py","file_name":"archiving.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"6731321077","text":"import binascii\nimport hashlib\nimport struct\nfrom functools import reduce\nimport unittest\nimport test_package\n\"\"\"\nreplace bit\n\"\"\"\n\n\nclass CryptoTest(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_addition(self):\n result = 2 + 3\n self.assertEqual(result, 5)\n\n def test_reduce_fun(self):\n ll = [1, 20, 3, 4]\n r_sum = reduce(lambda x, y: x + y, ll)\n print()\n print(r_sum)\n self.assertEqual(r_sum, 28)\n assert r_sum == 28\n r_max = reduce(lambda x, y: x if x > y else y, ll)\n print(r_max)\n self.assertEqual(r_max, 20)\n\n def test_package(self):\n aa = 2\n bb = 3\n ssum = test_package.p_add(aa, bb)\n self.assertEqual(ssum, 5)\n\n\ndef st_lib(self):\n ll = ['qq', 'ww', 'ee']\n p_k = list(map(lambda k: k + '2', ll))\n print(p_k)\n\n d = {'a': 'sx', 'bt': 'hz'}\n ks = d.keys()\n\n r_k = list(map(lambda k: k + d[k], filter(lambda k: k != 'a', ks)))\n print(r_k)\n\n\ndef play_struct():\n with open('xxx.zip', 'rb') as f:\n data = f.read()\n\n start = 0\n for i in range(3): # show the first 3 file headers\n start += 14\n fields = struct.unpack('> 1)\n challenge_bits = challenge_bits >> 1\n print(f'new initial is {bin(initial_value)}')\n print(f'new challenge_bits is {bin(challenge_bits)}')\n\n b24 = (initial_value & 0x800000) >> 23\n b21 = (initial_value & 0x100000) >> 20\n b16 = (initial_value & 0x8000) >> 15\n b13 = (initial_value & 0x1000) >> 12\n b6 = (initial_value & 0x20) >> 5\n b4 = (initial_value & 0x8) >> 3\n\n print(f'b24 is {b24}, b21 is 
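
# --- Editor's aside: a hedged, illustrative sketch; not part of the original record ---
# The security-access routine below keeps rewriting single bits with the idiom
# value & ~(1 << k) | (bit << k). Factored into a named helper:
def set_bit(value, k, bit):
    # Clear bit k, then OR in the requested 0/1 value.
    return value & ~(1 << k) | (bit << k)

assert set_bit(0b1000, 3, 0) == 0b0000
assert set_bit(0b0000, 2, 1) == 0b0100
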
{b21}, b16 is {b16}, b13 is {b13}, b6 is {b6}, b4 is {b4}')\n\n c21 = b24 ^ b21\n c16 = b24 ^ b16\n c13 = b24 ^ b13\n c6 = b24 ^ b6\n c4 = b24 ^ b4\n\n print(f'c21 is {c21}, c16 is {c16}, c13 is {c13}, c6 is {c6}, c4 is {c4}')\n\n initial_value = initial_value & ~(1 << 20) | (c21 << 20)\n initial_value = initial_value & ~(1 << 15) | (c16 << 15)\n\n initial_value = initial_value & ~(1 << 12) | (c13 << 12)\n\n initial_value = initial_value & ~(1 << 5) | (c6 << 5)\n initial_value = initial_value & ~(1 << 3) | (c4 << 3)\n\n print(f'initial_value is {bin(initial_value)}')\n\n print(f'initial_value is {hex(initial_value)}')\n\n r1 = (initial_value & 0xFF0) >> 4\n r2 = ((initial_value & 0xF000) >> 8) + ((initial_value & 0xF00000) >> 20)\n r3 = ((initial_value & 0xF) << 4) + ((initial_value & 0xF0000) >> 16)\n\n return r1, r2, r3\n\n\nif __name__ == '__main__':\n # play_hexlify()\n # play_convert_str2hex()\n # play_convert_str2bin()\n\n # res1, res2, res3 = play_security_access(0x43BB42AA4164F91A) # 0xb6 0xf4 0xcf\n # print(hex(res1), hex(res2), hex(res3))\n\n # res1, res2, res3 = play_security_access(0x43BB42AA418A964E) # 0x81 0x4b 0xa6\n # print(hex(res1), hex(res2), hex(res3))\n\n # res1, res2, res3 = play_security_access(0x5e4d3c2b1a64f91a) # 0xa1 0x1c 0xa8\n # print(hex(res1), hex(res2), hex(res3))\n unittest.main()\n","repo_name":"Horatio123/python-knowledge","sub_path":"crypto/crypto_test.py","file_name":"crypto_test.py","file_ext":"py","file_size_in_byte":6039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74103326346","text":"\"\"\"\nSmarch_opt - parallelized random sampling of propositional formula solutions\n\"\"\"\n\n\nimport random\nfrom subprocess import getoutput\nimport pycosat\nimport os\nimport shutil\nimport time\nimport sys\nimport getopt\nimport math\n\nimport multiprocessing\n\n\nsrcdir = os.path.dirname(os.path.abspath(__file__))\nSHARPSAT = srcdir + '/sharpSAT/build/Release/sharpSAT'\nMARCH = srcdir + '/CnC/march_cu/march_cu'\n\nDEBUG = False\n\n\ndef read_dimacs(dimacsfile_):\n \"\"\"parse variables and clauses from a dimacs file\"\"\"\n\n _features = list()\n _clauses = list()\n _vcount = '-1' # required for variables without names\n\n with open(dimacsfile_) as df:\n for _line in df:\n # read variables in comments\n if _line.startswith(\"c\"):\n _line = _line[0:len(_line) - 1]\n _feature = _line.split(\" \", 4)\n del _feature[0]\n _feature[0] = int(_feature[0])\n _features.append(tuple(_feature))\n\n # read dimacs properties\n elif _line.startswith(\"p\"):\n info = _line.split()\n _vcount = info[2]\n\n # read clauses\n else:\n info = _line.split()\n if len(info) != 0:\n _clauses.append(list(map(int, info[:len(info)-1])))\n\n return _features, _clauses, _vcount\n\n\ndef read_constraints(constfile_, features_):\n \"\"\"read constraint file. 
- means negation\"\"\"\n\n _const = list()\n\n if os.path.exists(constfile_):\n names = [i[1] for i in features_]\n with open(constfile_) as file:\n for _line in file:\n _line = _line.rstrip()\n data = _line.split()\n if len(data) != 0:\n clause = list()\n\n error = False\n for name in data:\n prefix = 1\n if name.startswith('-'):\n name = name[1:len(name)]\n prefix = -1\n\n if name in names:\n i = names.index(name)\n clause.append(features_[i][0] * prefix)\n else:\n error = True\n clause.append(name)\n\n if not error:\n _const.append(clause)\n print(\"Added constraint: \" + _line + \" \" + str(clause))\n else:\n print(\"Feature not found\" + str(clause))\n else:\n print(\"Constraint file not found\")\n\n return _const\n\n\ndef get_var(flist_, features_):\n \"\"\"convert feature names into variables\"\"\"\n\n _const = list()\n names = [i[1] for i in features_]\n\n for feature in flist_:\n prefix = 1\n if feature.startswith('-'):\n feature = feature[1:len(feature)]\n prefix = -1\n\n # filter features that does not exist\n if feature in names:\n i = names.index(feature)\n _const.append(prefix * features_[i][0])\n\n return _const\n\n\ndef gen_dimacs(vars_, clauses_, constraints_, outfile_):\n \"\"\"generate a dimacs file from given clauses and constraints\"\"\"\n\n _df = open(outfile_, 'w')\n _df.write('p cnf ' + vars_ + ' ' + str(len(clauses_) + len(constraints_)) + '\\n')\n\n for cl in clauses_:\n _df.write(\" \".join(str(x) for x in cl) + ' 0 \\n')\n\n for ct in constraints_:\n if isinstance(ct, (list,)):\n _line = \"\"\n for _v in ct:\n _line = _line + str(_v) + \" \"\n _df.write(_line + '0 \\n')\n else:\n _df.write(str(ct) + ' 0 \\n')\n\n _df.close()\n\n\ndef checksat(dimacs_, constraints_):\n \"\"\"check satisfiability of given formula with constraints\"\"\"\n _features, _clauses, _vcount = read_dimacs(dimacs_)\n cnf = _clauses + constraints_\n _sol = pycosat.solve(cnf)\n\n if _sol == 'UNSAT':\n return False\n else:\n return True\n\n\ndef count(dimacs_, constraints_):\n \"\"\"count dimacs solutions with given constraints\"\"\"\n\n _tempdimacs = os.path.dirname(dimacs_) + '/count.dimacs'\n _features, _clauses, _vcount = read_dimacs(dimacs_)\n\n gen_dimacs(_vcount, _clauses, constraints_, _tempdimacs)\n res = int(getoutput(SHARPSAT + ' -q ' + _tempdimacs))\n\n return res\n\n\n# partition space by cubes and count number of solutions for each cube\ndef count_cc(assigned_, vcount_, clauses_, wdir_, processes_):\n \"\"\" count the number of solutions with cube and conquer \"\"\"\n _total = 0\n _counts = list()\n _cubes = list()\n _freevar = list()\n _dimacsfile = wdir_ + '/dimacs.smarch'\n _cubefile = wdir_ + '/cubes.smarch'\n\n def count_mp(q_, cubes_):\n _sol = list()\n\n pid = os.getpid()\n _tempdimacs = wdir_ + '/' + str(pid) + '/count.dimacs'\n if not os.path.exists(wdir_ + '/' + str(pid)):\n os.makedirs(wdir_ + '/' + str(pid))\n\n for cube in cubes_:\n gen_dimacs(vcount_, clauses_, cube + assigned_, _tempdimacs)\n cres = int(getoutput(SHARPSAT + ' -q ' + _tempdimacs))\n\n if DEBUG:\n print(str(pid) + \":\" + str(cres))\n\n _sol.append(cres)\n\n q_.put([cubes_, _sol])\n\n shutil.rmtree(wdir_ + '/' + str(pid))\n\n # create dimacs file regarding constraints\n gen_dimacs(vcount_, clauses_, assigned_, _dimacsfile)\n\n if not checksat(_dimacsfile, []):\n print(\"formula invalid\")\n exit(1)\n\n # execute march to get cubes\n res = getoutput(MARCH + ' ' + _dimacsfile + ' -d 5 -#')\n _out = res.split(\"\\n\")\n\n # print march result (debugging purpose)\n if DEBUG:\n print(_out)\n\n _allfree = 
False\n\n for _line in _out:\n if _line.startswith(\"c free\"):\n _freevar = _line.split(\": \")[1].split()\n elif _line.startswith('c all'):\n _allfree = True\n elif _line.startswith('a'):\n _cube = list(_line.split())\n _cube = _cube[1:len(_cube)-1]\n _cubes.append(_cube)\n\n # double check all variables are free\n if _allfree:\n _check = int(getoutput(SHARPSAT + ' -q ' + _dimacsfile))\n if _check != 2 ** (len(_freevar)):\n _freevar.clear()\n _allfree = False\n else:\n _total = _check\n\n if not _allfree:\n # with open(_cubefile) as cf:\n # for _line in cf:\n # _cube = list(_line.split())\n # if 'a' in _cube:\n # _cube.remove('a')\n # if '0' in _cube:\n # _cube.remove('0')\n #\n # _cubes.append(_cube)\n _freevar.clear()\n\n if len(_cubes) == 0:\n # execute sharpSAT to count solutions\n gen_dimacs(vcount_, clauses_, assigned_, _dimacsfile)\n _total = int(getoutput(SHARPSAT + ' -q ' + _dimacsfile))\n else:\n\n # count in parallel\n if processes_ > 1:\n # partition random numbers for each thread\n chunk = math.ceil(len(_cubes) / processes_)\n pnum = math.ceil(len(_cubes) / chunk)\n\n clist = list()\n for i in range(0, pnum):\n clist.append(_cubes[i * chunk: (i + 1) * chunk])\n\n # run sampling processes\n _samples = list()\n with multiprocessing.Manager() as manager:\n q = manager.Queue()\n plist = list()\n\n # create processes\n for i in range(0, len(clist)):\n plist.append(\n multiprocessing.Process(target=count_mp,\n args=(q, clist[i])))\n\n # start processes\n for p in plist:\n p.start()\n\n # wait until processes are finished\n for p in plist:\n p.join()\n\n # gather samples\n _cubes.clear()\n while not q.empty():\n pres = q.get()\n\n _cubes.extend(pres[0])\n _counts.extend(pres[1])\n\n for c in _counts:\n _total += c\n\n # cont without parallelism\n else:\n for _cube in _cubes:\n gen_dimacs(vcount_, clauses_, assigned_ + _cube, _dimacsfile)\n res = int(getoutput(SHARPSAT + ' -q ' + _dimacsfile))\n\n if DEBUG:\n print(res)\n\n _total += res\n _counts.append(res)\n\n return _freevar, _counts, _cubes, _total, _allfree\n\n\ndef master(vcount_, clauses_, n_, wdir_, const_=(), threads_=1, quiet_=False):\n \"\"\"generate random numbers and manage sampling processes\"\"\"\n\n # generate n random numbers for sampling\n def get_random(rcount_, total_):\n def gen_random():\n while True:\n yield random.randrange(1, total_ + 1, 1)\n\n def gen_n_unique(source, n__):\n seen = set()\n seenadd = seen.add\n for i in (i for i in source() if i not in seen and not seenadd(i)):\n yield i\n if len(seen) == n__:\n break\n\n return [i for i in gen_n_unique(gen_random, min(rcount_, int(total_)))]\n\n clauses_ = clauses_ + const_\n\n if not quiet_:\n print(\"Counting - \", end='', flush=True)\n\n count_time = time.time()\n ccres = count_cc([], vcount_, clauses_, wdir_, threads_)\n\n if not quiet_:\n print(\"Total configurations: \" + str(ccres[3]))\n print(\"Counting time: \" + str(time.time() - count_time))\n\n # prevent oversampling\n if ccres[3] < n_:\n n_ = ccres[3]\n\n # generate random numbers\n rands = get_random(n_, ccres[3])\n rands.sort()\n\n # partition random numbers for each thread\n chunk = math.ceil(n_ / threads_)\n pnum = math.ceil(n_/chunk)\n\n rlist = list()\n for i in range(0, pnum):\n rlist.append(rands[i*chunk: (i+1)*chunk])\n\n # run sampling processes\n _samples = list()\n with multiprocessing.Manager() as manager:\n q = manager.Queue()\n plist = list()\n\n # create processes\n for i in range(0, len(rlist)):\n plist.append(\n multiprocessing.Process(target=sample,\n args=(q, vcount_, 
clauses_, rlist[i], wdir_, ccres, quiet_)))\n\n # start processes\n for p in plist:\n p.start()\n\n # wait until processes are finished\n for p in plist:\n p.join()\n\n # gather samples\n while not q.empty():\n _samples.append(q.get())\n # sset = q.get()\n # for s in sset:\n # samples.append(s)\n\n return _samples\n\n\ndef sample(q, vcount_, clauses_, rands_, wdir_, ccres_, quiet_=False):\n \"\"\"sample configurations\"\"\"\n # create folder for file IO of this process\n pid = os.getpid()\n _wdir = wdir_ + \"/\" + str(pid)\n if not os.path.exists(_wdir):\n os.makedirs(_wdir)\n\n cache_b = dict()\n cache_c = dict()\n\n # select a cube based on given random number\n def select_cube(counts_, cubes_, number_):\n _terminate = False\n _index = -1\n _i = 0\n\n for c in counts_:\n if number_ <= c:\n _index = _i\n if c == 1:\n _terminate = True\n break\n else:\n number_ -= c\n _i += 1\n\n if _index < 0:\n print(\"ERROR: No cube selected\")\n exit(1)\n\n return cubes_[_index], number_, _terminate\n\n # assign free variables without recursion\n def set_freevar(fv_, number_):\n _vars = list()\n\n for _v in fv_:\n if number_ % 2 == 1:\n _vars.append(_v)\n else:\n _vars.append('-'+_v)\n number_ //= 2\n\n return _vars\n\n # partition space by cubes and count number of solutions for each cube\n def traverse(assigned_, r_):\n _cubes = list()\n _freevar = list()\n _selected = list()\n _terminate = False\n _dimacsfile = _wdir + '/dimacs.smarch'\n _cubefile = _wdir + '/cubes.smarch'\n cube_time = 0\n _allfree = False\n\n # get list of cubes\n if tuple(assigned_) in cache_b:\n _cubes = cache_b[tuple(assigned_)][0]\n _freevar = cache_b[tuple(assigned_)][1]\n _allfree = cache_b[tuple(assigned_)][2]\n else:\n # create dimacs file regarding constraints\n gen_dimacs(vcount_, clauses_, assigned_, _dimacsfile)\n _temp = int(getoutput(SHARPSAT + ' -q ' + _dimacsfile))\n\n # execute march to get cubes\n cube_time = time.time()\n res = getoutput(MARCH + ' ' + _dimacsfile + ' -d 2 -#')\n _out = res.split(\"\\n\")\n cube_time = time.time() - cube_time\n\n for _line in _out:\n if _line.startswith(\"c free\"):\n _freevar = _line.split(\": \")[1].split()\n elif _line.startswith('c all'):\n _allfree = True\n elif _line.startswith('a'):\n _cube = list(_line.split())\n _cube = _cube[1:len(_cube) - 1]\n _cubes.append(_cube)\n\n # print march result (debugging purpose)\n if DEBUG:\n print(_out)\n print(_freevar)\n\n # double check all variables are free\n if _allfree:\n check = int((getoutput(SHARPSAT + ' -q ' + _dimacsfile)))\n if check != 2 ** (len(_freevar)):\n _freevar.clear()\n _allfree = False\n\n # select a cube with counting\n if not _allfree:\n # with open(_cubefile) as cf:\n # for _line in cf:\n # _cube = list(_line.split())\n # if 'a' in _cube:\n # _cube.remove('a')\n # if '0' in _cube:\n # _cube.remove('0')\n #\n # _cubes.append(_cube)\n _freevar.clear()\n # print cubes (debugging purpose)\n if DEBUG:\n print(_cubes)\n print(\"r:\" + str(r_))\n\n # execute sharpSAT to count solutions and select partition\n _count = 0\n\n for i in range(0, len(_cubes)):\n # reuse count if cached\n if tuple(assigned_ + _cubes[i]) in cache_c:\n _count = cache_c[tuple(assigned_ + _cubes[i])]\n else:\n # count size of partition\n count_time = time.time()\n gen_dimacs(vcount_, clauses_, assigned_ + _cubes[i], _dimacsfile)\n\n _count = int(getoutput(SHARPSAT + ' -q ' + _dimacsfile))\n\n # print count (debugging purpose)\n if DEBUG:\n print(str(i) + \":\" + str(_count))\n\n # cache count data if sharpSAT runtime exceeds 0.02 seconds\n if 
time.time() - count_time > 0.05:\n cache_c[tuple(assigned_ + _cubes[i])] = _count\n\n # get selected cube\n if r_ <= _count:\n _selected = _cubes[i].copy()\n break\n else:\n if i == (len(_cubes) - 2):\n _selected = _cubes[i+1].copy()\n r_ = r_ - _count\n break\n else:\n r_ = r_ - _count\n\n # 1 solution left: sampling done\n if _count == 1:\n _terminate = True\n\n # cache cube data if sharpSAT runtime exceeds 0.02 seconds\n if cube_time > 0.05:\n cache_b[tuple(assigned_)] = (_cubes, _freevar, _allfree)\n\n # if len(_selected) == 0 and not _allfree:\n # print(_temp)\n\n return _selected, r_, _freevar, _allfree, _terminate\n\n # sample for each random number\n i = 1\n _sample = list()\n\n for r in rands_:\n\n sample_time = time.time()\n\n # initialize variables\n number = r\n assigned = list()\n\n if ccres_[4]: # all variables free, sampling done\n assigned = assigned + set_freevar(ccres_[0], int(number))\n terminate = True\n elif ccres_[3] == 1:\n terminate = True\n else: # select cube to recurse\n cube, number, terminate = select_cube(ccres_[1], ccres_[2], number)\n assigned = assigned + cube\n\n if len(cube) == 0:\n print(\"ERROR: cube not selected\", flush=True)\n exit(1)\n\n k = 0\n # recurse\n while not terminate:\n cube, number, freevar, allfree, terminate = traverse(assigned, number)\n\n if terminate:\n assigned = assigned + cube\n elif allfree: # all variables free, sampling done\n assigned = assigned + set_freevar(freevar, int(number))\n terminate = True\n else: # select cube to recurse\n assigned = assigned + cube\n\n if len(cube) == 0:\n print(\"ERROR: cube not selected: \" + str(len(freevar)), flush=True)\n exit(1)\n\n k += 1\n\n # verify if sample is valid and assign dead variables using pycosat\n assigned = list(map(int, assigned))\n aclause = [assigned[i:i+1] for i in range(0, len(assigned))]\n cnf = clauses_ + aclause\n _sol = pycosat.solve(cnf)\n\n if _sol == 'UNSAT':\n print(\"ERROR: Sample Invalid\", flush=True)\n exit(1)\n else:\n # _sample.append(s)\n q.put(_sol)\n\n if not quiet_:\n print(str(pid) + \": Sampled \" + str(i) + \" with \" + str(r) + \" - \", end='')\n print(\"sampling time: \" + str(time.time() - sample_time), flush=True)\n i += 1\n\n # q.put(_sample)\n shutil.rmtree(_wdir)\n\n return\n\n\nif __name__ == \"__main__\":\n # test = True\n # if test:\n # # test script\n # n = 100\n # target = \"fiasco_17_10\"\n #\n # dimacs = srcdir + \"/FeatureModel/\" + target + \".dimacs\"\n # constfile = os.path.dirname(dimacs) + \"/constraints.txt\"\n # wdir = os.path.dirname(dimacs) + \"/smarch\"\n #\n # features, clauses, vcount = read_dimacs(dimacs)\n # const = read_constraints(constfile, features)\n #\n # start_time = time.time()\n # samples = master(vcount, clauses, n, wdir, const, 7, False)\n # print(\"--- total time: %s seconds ---\" % (time.time() - start_time))\n #\n # sys.exit(0)\n\n # run script\n # get external location for sharpSAT and march if needed\n if os.path.exists(srcdir + \"/links.txt\"):\n with open(srcdir + \"/links.txt\") as f:\n for line in f:\n link = list(line.split('='))\n if len(link) != 0 and link[0][0] != '#':\n if link[0] == \"SHARPSAT\":\n SHARPSAT = link[1]\n elif link[0] == \"MARCH\":\n MARCH = link[1]\n\n # check sharpSAT and march_cu existence\n if not os.path.exists(SHARPSAT):\n print(\"ERROR: sharpSAT not found\")\n\n if not os.path.exists(MARCH):\n print(\"ERROR: March solver not found\")\n\n # get parameters from console\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hc:o:p:q\", ['help', \"cfile=\", \"odir=\", \"threads=\", 
'quiet'])\n except getopt.GetoptError:\n print('smarch.py -c -o -p -q | ')\n sys.exit(2)\n\n if len(args) < 2:\n print('smarch.py -c -o -p -q | ')\n sys.exit(2)\n\n dimacs = args[0]\n n = int(args[1])\n\n print('Input file: ', dimacs)\n print('Number of samples: ', n)\n\n wdir = os.path.dirname(dimacs) + \"/smarch\"\n constfile = ''\n quiet = False\n out = False\n threads = 1\n\n # process parameters\n for opt, arg in opts:\n if opt == '-h':\n print('smarch.py -c -o -p -q | ')\n sys.exit()\n elif opt in (\"-c\", \"--cfile\"):\n constfile = arg\n print(\"Constraint file: \" + constfile)\n elif opt in (\"-o\", \"--odir\"):\n wdir = arg\n out = True\n if not os.path.exists(wdir):\n os.makedirs(wdir)\n print(\"Output directory: \" + wdir)\n elif opt in (\"-p\", \"--threads\"):\n threads = int(arg)\n elif opt in (\"-q\", \"--quiet\"):\n quiet = True\n else:\n print(\"Invalid option: \" + opt)\n\n # create working directory for smarch and CnC\n # create folder for file IO of this process\n if not os.path.exists(wdir):\n os.makedirs(wdir)\n\n # process dimacs file\n features, clauses, vcount = read_dimacs(dimacs)\n const = list()\n if constfile != '':\n const = read_constraints(constfile, features)\n\n # sample configurations\n start_time = time.time()\n samples = master(vcount, clauses, n, wdir, const, threads, quiet)\n if not quiet:\n print(\"--- total time: %s seconds ---\" % (time.time() - start_time))\n\n # output samples to a file\n base = os.path.basename(dimacs)\n target = os.path.splitext(base)[0]\n samplefile = wdir + \"/\" + target + \"_\" + str(n) + \".samples\"\n\n if out:\n of = open(samplefile, 'w')\n for s in samples:\n for v in s:\n of.write(str(v))\n of.write(\",\")\n of.write(\"\\n\")\n of.close()\n\n print('Output samples created on: ', samplefile)\n","repo_name":"jeho-oh/Smarch","sub_path":"smarch_opt.py","file_name":"smarch_opt.py","file_ext":"py","file_size_in_byte":22065,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"69858991626","text":"# run a bunch of models and save them\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport numpy as np\nimport os\nimport pandas as pd\nimport random\nrandom.seed(7)\n\nimport utils as u\nimport torch_utils as tu\nfrom torch_utils import DatasetSpec\n\nfrom models import DNA_Linear_Deep, Kmer_Linear, TINKER_DNA_CNN,DNA_LSTM,DNA_CNNLSTM\n\n\n\nDEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nDATASET_TYPES = [\n DatasetSpec('ohe'),\n DatasetSpec('kmer',k=3),\n DatasetSpec('kmer',k=6),\n]\n\ndef setup_config():\n\n # config = {\n # 'out_dir':'pipe2',\n # #'model_types':['LinearDeep','CNN32','CNN128','Kmer3','Kmer6'],\n # 'model_types':['CNN32','LSTM','CNNLSTM'],\n # 'learning_rates':[0.01,0.001],\n # #'sampler_types': [\"default\", \"rebalanced\"],\n # 'sampler_types': [\"rebalanced\"],\n # 'augmentation': [\n # (\"no\",{}),\n # (\"revslide\",{'stride':50}),\n # #(\"mutation\",{'mutation_rate':0.03}),\n # (\"mutation\",{'mutation_rate':0.1}),\n # ],\n # 'opt_types':['SGD','Adam','Adagrad','AdamW','RMSprop'],\n # 'target_cond':'highCu',\n # 'seq_col':'upstream_region',\n # 'id_col':'locus_tag',\n # 'loss_func':nn.CrossEntropyLoss(),\n # 'loss_label':'Cross Entropy Loss',\n # 'epochs':5000\n # }\n config = {\n 'out_dir':'hyak_test',\n 'model_types':['CNN32'],\n 'learning_rates':[0.01,0.001],\n 'sampler_types': [\"rebalanced\"],\n 'augmentation': [(\"no\",{})],\n 'opt_types':['SGD','Adam'],\n 
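# the entries below (target condition, sequence/ID columns, loss) are held fixed across every run in the sweep\n 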
'target_cond':'highCu',\n 'seq_col':'upstream_region',\n 'id_col':'locus_tag',\n 'loss_func':nn.CrossEntropyLoss(),\n 'loss_label':'Cross Entropy Loss',\n 'epochs':5000\n }\n\n return config\n\ndef get_model_choice(choice,seq_len):\n # LINEAR\n if choice == 'LinearDeep':\n lin_d = DNA_Linear_Deep(\n seq_len,\n h0_size=100,\n h1_size=100,\n )\n lin_d.to(DEVICE)\n return lin_d\n\n # CNN 32 filt\n elif choice == \"CNN32\":\n cnn = TINKER_DNA_CNN(\n seq_len,\n num_filters0=32,\n num_filters1=32,\n kernel_size0=8,\n kernel_size1=8,\n conv_pool_size0=3,\n fc_node_num0=10,\n fc_node_num1=10\n )\n cnn.to(DEVICE)\n return cnn\n\n # CNN 128 filt\n elif choice == \"CNN128\":\n cnn = TINKER_DNA_CNN(\n seq_len,\n num_filters0=128,\n num_filters1=32,\n kernel_size0=8,\n kernel_size1=8,\n conv_pool_size0=3,\n fc_node_num0=10,\n fc_node_num1=10\n )\n cnn.to(DEVICE)\n return cnn\n\n # LSTM\n elif choice == \"LSTM\":\n lstm = DNA_LSTM(\n seq_len,\n DEVICE,\n hidden_dim=100\n )\n lstm.to(DEVICE)\n return lstm\n\n # CNN-LSTM\n elif choice == \"CNNLSTM\":\n cnnlstm = DNA_CNNLSTM(\n seq_len,\n DEVICE,\n hidden_dim=100,\n num_filters=32,\n kernel_size=8\n )\n cnnlstm.to(DEVICE)\n return cnnlstm\n\n # Kmer 3\n elif choice == \"Kmer3\":\n kmer = Kmer_Linear(\n 64, # 4^3\n h1_size=100,\n h2_size=10,\n )\n kmer.to(DEVICE)\n return kmer\n\n # Kmer 6\n elif choice == \"Kmer6\":\n kmer = Kmer_Linear(\n 4096, # 4^6\n h1_size=1000,\n h2_size=10,\n )\n kmer.to(DEVICE)\n return kmer\n\n else:\n raise ValueError(f\"{choice} model choice not recognized. Options are: LinearDeep, CNN32, CNN128, LSTM, CNNLSTM, Kmer3, Kmer6\")\n\ndef get_augmentation_choice(choice,args,train_df,loc2flankseq):\n # No Augmentation\n if choice == 'no':\n aug_str = 'no_aug'\n return train_df,aug_str\n\n # revcomp slide\n elif choice == 'revslide':\n temp = u.augment_revcomp(train_df)\n temp = u.augment_slide(temp,300,loc2flankseq,s=args['stride'])\n aug_str = f\"revslide{args['stride']}\"\n return temp,aug_str\n\n # mutation \n elif choice == 'mutation':\n temp = u.augment_mutate(train_df,10,mutation_rate=args['mutation_rate'])\n aug_str = f\"mutation{args['mutation_rate']}\"\n return temp,aug_str\n\n else:\n raise ValueError(f\"{choice} data augmentation choice not recognized. Options are: no, revslide, mutation\")\n\ndef get_sampler_choice(choice,train_df,reg):\n # Default\n if choice == 'default':\n return None,True\n\n # rebalanced\n elif choice == 'rebalanced':\n sampler = tu.make_weighted_sampler(train_df,reg)\n return sampler, False\n\n else:\n raise ValueError(f\"{choice} sampler choice not recognized. Options are: default, rebalanced\")\n\ndef get_opt_choice(opt_str, model, lr):\n '''\n Given a string naming an optimizer, return that optimizer initialized with the model's parameters and learning rate\n '''\n if opt_str == \"SGD\":\n return torch.optim.SGD(model.parameters(), lr=lr)\n elif opt_str == \"Adam\":\n return torch.optim.Adam(model.parameters(), lr=lr)\n elif opt_str == \"Adagrad\":\n return torch.optim.Adagrad(model.parameters(), lr=lr)\n elif opt_str == \"AdamW\":\n return torch.optim.AdamW(model.parameters(), lr=lr)\n elif opt_str == \"RMSprop\":\n return torch.optim.RMSprop(model.parameters(), lr=lr)\n else:\n raise ValueError(f\"{opt_str} optimizer choice not recognized. 
Options are: SGD, Adam, Adagrad, AdamW, RMSprop\")\n\n\ndef filter_inactive_genes(df, tpm_thresh):\n # list of relevant condition names\n with open(\"data/conditions_to_include.txt\",'r') as f:\n conds = [x.strip() for x in f.readlines()]\n\n # df with actual TPM counts\n data_filename = \"data/XY_TPM_opFilt.tsv\"\n tpm_df = pd.read_csv(data_filename,sep='\\t')\n\n # collect genes that never express above a given threshold\n no_tx_genes = []\n for i, row, in tpm_df.iterrows():\n tpms = row[conds].values\n if max(tpms) < tpm_thresh:\n no_tx_genes.append(row['locus_tag'])\n \n # return only genes not in \"no transcription genes\"\n return df[~df['locus_tag'].isin(no_tx_genes)].reset_index().drop('index',axis=1)\n \n\ndef main():\n\n config = setup_config()\n target_cond = config['target_cond']\n seq_col = config['seq_col']\n id_col = config['id_col']\n loss_func = config['loss_func']\n loss_label = config['loss_label']\n epochs = config['epochs']\n out_dir = config['out_dir']\n\n if not os.path.isdir(out_dir):\n print(f\"creating dir {out_dir}\")\n os.mkdir(out_dir)\n #raise ValueError(f\"{out_dir} does not exist. Please make it.\")\n\n # locus to gene info\n locus_info_filename = 'data/locus2info.tsv'\n locus_info_df = pd.read_csv(locus_info_filename,sep='\\t')\n locus2info = u.make_info_dict(locus_info_df)\n\n # log ratio data file\n data_filename = \"data/XY_lr_noCu_opFilt.tsv\"\n XYdf_og = pd.read_csv(data_filename,sep='\\t')\n loc2seq = dict([(x,z) for (x,z) in XYdf_og[[id_col,seq_col]].values])\n\n # file with promoter regions with extra flanking sequence\n flank_data_filename = \"data/XY_lr_noCu_opFilt_-400:100.tsv\"\n XYdf_flank = pd.read_csv(flank_data_filename,sep='\\t')\n loc2flankseq = dict([(x,z) for (x,z) in XYdf_flank[[id_col,seq_col]].values])\n\n # filter out genes that never express above 2 tpm\n XYdf = filter_inactive_genes(XYdf_og,2)\n\n # set regulatory class\n reg = tu.set_reg_class_up_down(XYdf,target_cond,thresh=0.6)\n\n # get stratified train/test/val split\n # specs for class partition dict\n cpd = {\n 0: {'train_test':0.8, 'train_val':0.8},\n 1: {'train_test':0.8, 'train_val':0.8},\n 2: {'train_test':0.8, 'train_val':0.8},\n }\n\n full_train_df, \\\n test_df, \\\n train_df, \\\n val_df = tu.stratified_partition(XYdf, cpd, class_col=reg)\n\n # save the dfs to the outdir for future debugging\n train_df.to_csv(f'{out_dir}/train_df.tsv',sep='\\t',index=False)\n val_df.to_csv(f'{out_dir}/val_df.tsv',sep='\\t',index=False)\n test_df.to_csv(f'{out_dir}/test_df.tsv',sep='\\t',index=False)\n\n # print(\"Train\")\n # print(train_df[reg].value_counts())\n # print(\"Val\")\n # print(val_df[reg].value_counts())\n # print(\"Test\")\n # print(test_df[reg].value_counts())\n\n oracle = dict([(a,[b]) for a,b in XYdf[[id_col,reg]].values])\n seq_len = len(train_df[seq_col].values[0])\n\n # ** COLLECT RESULTS **\n res_rows = []\n loss_dict = {}\n \n # progress tracking\n n = len(config['augmentation']) * len(config['sampler_types']) * \\\n len(config['learning_rates']) * len(config['model_types']) * \\\n len(config['opt_types'])\n i = 0\n \n # DATA AUGMENTATION LOOP\n for aug_choice,args in config['augmentation']:\n # augment the train df if needed\n aug_df,aug_str = get_augmentation_choice(aug_choice,args,train_df,loc2flankseq)\n print(f\"Augmentation: {aug_str}\")\n aug_df.to_csv(f'{out_dir}/aug_train_df.tsv',sep='\\t',index=False)\n\n # sampler loop\n for sampler_choice in config['sampler_types']:\n print(f\"\\tSampler: {sampler_choice}\")\n sampler, shuffle = 
get_sampler_choice(sampler_choice,aug_df,reg)\n\n # learning rate loop\n for lr in config['learning_rates']:\n print(f\"\\t\\tLR: {lr}\")\n dls = tu.build_dataloaders_single(\n aug_df, \n val_df, \n DATASET_TYPES,\n seq_col=seq_col,\n target_col=reg,\n sampler=sampler,\n shuffle=shuffle\n )\n\n # *********************************************\n # Currently hardcoded to make these DataLoaders\n kmer6_train_dl,kmer6_val_dl = dls['kmer_6']\n kmer3_train_dl,kmer3_val_dl = dls['kmer_3']\n ohe_train_dl,ohe_val_dl = dls['ohe']\n # *********************************************\n\n # model type loop\n for model_choice in config['model_types']:\n print(f\"\\t\\t\\tModel: {model_choice}\")\n # result dict for this specific model\n res_dict = {}\n model = get_model_choice(model_choice,seq_len)\n\n if model_choice == \"Kmer3\":\n train_dl, val_dl = dls['kmer_3']\n ds = DatasetSpec('kmer',k=3)\n elif model_choice == \"Kmer6\":\n train_dl, val_dl = dls['kmer_6']\n ds = DatasetSpec('kmer',k=6)\n else: # catches anything not a kmer model with One-hot encoding\n train_dl, val_dl = dls['ohe']\n ds = DatasetSpec('ohe')\n\n # optimizer type loop\n for opt_choice in config['opt_types']:\n print(f\"\\t\\t\\tOptimizer: {opt_choice}\")\n opt = get_opt_choice(opt_choice, model, lr)\n\n\n print(\"\\t\\t\\t\\tTraining...\")\n train_losses, val_losses,estop,best_val_loss = tu.run_model(\n train_dl, \n val_dl, \n model,\n loss_func,\n DEVICE,\n lr=lr,\n epochs=epochs,\n opt=opt\n )\n\n data_label = [((train_losses,val_losses),model_choice,estop,best_val_loss)]\n\n\n # collect model results\n res_dict['train_losses'] = train_losses\n res_dict['val_losses'] = val_losses\n res_dict['estop'] = estop\n res_dict['best_val_loss'] = best_val_loss\n res_dict['data_label'] = data_label\n\n # save model itself\n print(\"\\t\\t\\t\\tSaving model...\")\n lr_str = f\"_lr{lr}\"\n sample_str = f\"_{sampler_choice}Sampler\"\n opt_str = f\"_{opt_choice}\"\n model_base_str = f\"{model_choice}{lr_str}{opt_str}{sample_str}_{aug_str}\"\n model_filename = f\"{model_base_str}.pth\"\n model_path = os.path.join(out_dir,model_filename)\n torch.save(model,model_path)\n\n # save loss data\n res_dict_filename = f\"{model_base_str}_loss_dict.npy\"\n res_dict_path = os.path.join(out_dir,res_dict_filename)\n np.save(res_dict_path, res_dict) \n\n loss_dict[model_base_str] = res_dict\n\n # get confusion data\n print(\"\\t\\t\\t\\tGetting Confusion data...\")\n train_seqs = aug_df[id_col].values\n train_conf_df = tu.get_confusion_data(model, model_choice, ds, train_seqs, oracle,loc2seq,DEVICE)\n tconfdf_fname = os.path.join(out_dir,f\"{model_base_str}_train_conf_df.tsv\")\n train_conf_df.to_csv(tconfdf_fname,sep='\\t',index=False)\n\n val_seqs = val_df[id_col].values\n val_conf_df = tu.get_confusion_data(model, model_choice, ds, val_seqs, oracle,loc2seq,DEVICE)\n vconfdf_fname = os.path.join(out_dir,f\"{model_base_str}_val_conf_df.tsv\")\n val_conf_df.to_csv(vconfdf_fname,sep='\\t',index=False)\n\n # get classification report\n cls_report = tu.cls_report(val_conf_df)\n\n # put into result row\n row = [\n model_base_str,\n model_choice,\n lr,\n opt_choice,\n sampler_choice,\n aug_str,\n estop,\n best_val_loss,\n cls_report['acc'],\n cls_report['mcc'],\n cls_report['mi_p'],\n cls_report['mi_r'],\n cls_report['mi_f1'],\n cls_report['ma_p'],\n cls_report['ma_r'],\n cls_report['ma_f1']\n ]\n res_rows.append(row)\n\n loss_dict[model_base_str] = res_dict\n i+=1\n print(f\"\\t\\t\\t\\tDone with {model_base_str}...\")\n print(f\"{i} of {n}\")\n cols = [\n 
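# one row per trained model: identifier, hyperparameter settings, early-stop epoch, and validation metrics\n 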
'model_desc',\n 'model_type',\n 'lr',\n 'opt',\n 'sampler',\n 'data_aug',\n 'epoch_stop',\n 'best_val_loss',\n 'acc',\n 'mcc',\n 'mi_p',\n 'mi_r',\n 'mi_f1',\n 'ma_p',\n 'ma_r',\n 'ma_f1'\n ]\n res_df = pd.DataFrame(res_rows,columns=cols)\n res_path = os.path.join(out_dir,'res_df.tsv')\n res_df.to_csv(res_path,sep='\\t',index=False)\n\n loss_dict_path = os.path.join(out_dir,'loss_dict.npy')\n np.save(loss_dict_path, loss_dict) \n print(\"Done\")\n\n\n \n\nif __name__ == '__main__':\n main()\n","repo_name":"erinhwilson/deep-5g-sandbox","sub_path":"modeling_script.py","file_name":"modeling_script.py","file_ext":"py","file_size_in_byte":15512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"39455934945","text":"import random\n\n\nclass eight_ball(object):\n def __init__(self, question):\n self.question = question\n def answer(self):\n l = ['Yes',\n 'No',]\n n = random.randint(0, len(l)-1)\n print(l[n])\nball8 = eight_ball('Am i cool')\nball8.answer() \n","repo_name":"AndrewW-coder/Coding-class","sub_path":"Homework/8ball_class.py","file_name":"8ball_class.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"27173327701","text":"import json\nimport csv\nimport numpy as np\n\nlabels = ['source', 'review', 'sentiment', 'prediction', 'id']\n\nwith open('all_fake_neg.txt', 'r') as f:\n fake_neg = f.readlines()\nfake_neg_final = []\nfor i, review in enumerate(fake_neg):\n fake_neg_final.append(['mturk', review.strip(), 0, 0, 'murk_n%i' %i])\n\nwith open('all_fake_pos.txt', 'r') as f:\n fake_pos = f.readlines()\nfake_pos_final = []\nfor i, review in enumerate(fake_pos):\n fake_pos_final.append(['mturk', review.strip(), 1, 0, 'murk_p%i' %i])\n\nwith open('all_unlabeled_neg.txt', 'r') as f:\n real_neg = f.readlines()\nreal_neg_final = []\nfor i, review in enumerate(real_neg):\n real_neg_final.append(['trip_advisor', review.strip(), 0, np.nan, 'ta_n%i' %i])\n\nwith open('all_unlabeled_pos.txt', 'r') as f:\n real_pos = f.readlines()\nreal_pos_final = []\nfor i, review in enumerate(real_pos):\n real_pos_final.append(['trip_advisor', review.strip(), 1, np.nan, 'ta_p%i' %i])\n\n\nwith open('expedia_data3.json') as f:\n expedia = json.load(f)\n\nta = []\nwith open('ta_new3.csv', 'r') as f:\n reader = csv.reader(f)\n for i, row in enumerate(reader):\n if \"Room Tip:\" in row[0]:\n review = row[0].split(\"Room Tip:\")\n review = review[0].strip()\n ta.append(['trip_advisor', review, np.nan, np.nan, 'ta_s%i' %i])\n elif \"More\" in row[0]:\n continue\n else:\n review = row[0].strip()\n ta.append(['trip_advisor', review, np.nan, np.nan, 'ta_s%i' %i])\n\n\n# ta = []\n# with open('ta_new3.csv', 'r') as f:\n# reader = csv.reader(f)\n# for row in reader:\n# ta.append(row)\n\n\n\n\nexpedia_final = []\ncounter = 0\nfor i, key in enumerate(expedia.keys()):\n if i != 0:\n counter += 1\n for j, review in enumerate(expedia[key]):\n if j != 0:\n counter += 1\n expedia_final.append(['expedia', review.strip(), np.nan, 1, 'ex%i' %counter])\n\n\nwith open('review_data_ids.csv', 'w') as fout:\n writer = csv.writer(fout)\n writer.writerow(labels)\n for row in fake_neg_final:\n writer.writerow(row)\n for row in fake_pos_final:\n writer.writerow(row)\n for row in real_neg_final:\n writer.writerow(row)\n for row in real_pos_final:\n writer.writerow(row)\n for row in expedia_final:\n writer.writerow(row)\n for row in ta:\n 
writer.writerow(row)\n","repo_name":"fmelp/fletcher","sub_path":"review_data/format_data.py","file_name":"format_data.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"14548138108","text":"#shift even and odd\ndef shift_text(even_shift, odd_shift):\n message_two = input('enter message')\n print(\"original message: \" + message_two)\n\n # build the result one character at a time, shifting by the even or odd offset based on position\n res = ''\n for i, char in enumerate(message_two):\n shift = even_shift if (i % 2) == 0 else odd_shift\n res = res + chr((ord(char) - 97 - shift) % 26 + 97)\n return res\n\neven_shift = 5\nodd_shift = 2\nprint(\"string after shift:\" + shift_text(even_shift, odd_shift))\n\n","repo_name":"MatthewJRusso/Lab-One-","sub_path":"LabOnePartThree.py","file_name":"LabOnePartThree.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23791919602","text":"from reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import A4, portrait\nfrom reportlab.lib.units import mm\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.pdfmetrics import registerFont\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.platypus import Table\n\npdfmetrics.registerFont(TTFont('Gothic','/usr/share/fonts/truetype/fonts-japanese-gothic.ttf'))\npsize = portrait(A4)\npwidth, pheight = psize\n\nxmargin = 10.0 * mm\nymargin = 10.0 * mm\nxdd = xmargin\nwdd = 7.2 * mm\nxww = xdd + wdd\nwww = 11.2 * mm\nxin = xww + www\nwin = 27.6 * mm\nxout = xin + win\nwout = 27.6 * mm\nxval = xout + wout\nwval = 27.6 * mm\nxreason = xval + wval\nwreason = pwidth - xmargin - xreason\n\nc = canvas.Canvas(\"test2.pdf\", pagesize=psize, bottomup=False)\nc.setFont('Gothic', 11)\n\ny = ymargin\nh = 4.5 * mm\nw = pwidth - xmargin - xdd\nc.rect(xdd, y, w, h, stroke=1, fill=0)\nc.drawString(xdd+2, y+h-2, 'yyyy年mm月')\nc.drawString(xout+2, y+h-2, '出勤簿')\nc.drawString(xreason+2, y+h-2, 'テスト 太郎')\n\ny = y + h\nc.rect(xdd, y, wdd, h, stroke=1, fill=0)\nc.rect(xww, y, www, h, stroke=1, fill=0)\nc.rect(xin, y, win, h, stroke=1, fill=0)\nc.rect(xout, y, wout, h, stroke=1, fill=0)\nc.rect(xval, y, wval, h, stroke=1, fill=0)\nc.rect(xreason, y, wreason, h, stroke=1, fill=0)\nc.drawString(xdd+2, y+h-2, '日')\nc.drawString(xww+2, y+h-2, '曜日')\nc.drawString(xin+2, y+h-2, '勤務開始時刻')\nc.drawString(xout+2, y+h-2, '勤務終了時刻')\n\n\nc.showPage()\nc.save()\n","repo_name":"abtoc/reportlab-sample","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40937350232","text":"import sys\nimport numpy as np # this goes into the vocaliser file\nimport torch # same\nfrom scipy.io.wavfile import write #same\n#import soundfile as sf # ogg codec crashes on long files\n\n# set up tacotron and waveglow from torch hub\ndef initializeTTSEngine():\n tacotron2 = torch.hub.load('nvidia/DeepLearningExamples:torchhub', 'nvidia_tacotron2')\n # modified this to allow longer sentences, unsure if it did anything at all.\n tacotron2.max_decoder_steps = 3000\n tacotron2 = tacotron2.to('cuda')\n tacotron2.eval()\n\n waveglow = torch.hub.load('nvidia/DeepLearningExamples:torchhub', 'nvidia_waveglow')\n waveglow = waveglow.remove_weightnorm(waveglow)\n waveglow = waveglow.to('cuda')\n waveglow.eval()\n return tacotron2, 
waveglow\n\n# just stuff i found here https://pytorch.org/hub/nvidia_deeplearningexamples_tacotron2/\n# This does the actual text to speech\ndef vocalise(subsentences, outputFile, outputFormat, speed, intermediaryFormat, tacotron2, waveglow):\n print(\"[+] Starting TTS on \"+outputFile.split(\"/\")[-1])\n rate = 22050\n # load a saved state if any\n audio_numpy, currentSubsentenceIndex = loadVocaliseState(outputFile)\n try:# this is here so we can save before Ctrl+C\n # process each sentence as tacotron2 -> waveglow -> whatever\n from tqdm import tqdm\n for text in tqdm(subsentences[currentSubsentenceIndex:],\n dynamic_ncols=True,\n initial=currentSubsentenceIndex,\n total=len(subsentences)):\n # preprocessing\n sequence = np.array(tacotron2.text_to_sequence(text, ['english_cleaners']))[None, :]\n sequence = torch.from_numpy(sequence).to(device='cuda', dtype=torch.int64)\n\n # run the models\n with torch.no_grad():\n _, mel, _, _ = tacotron2.infer(sequence)\n audio = waveglow.infer(mel)\n audio_numpy = np.concatenate((audio_numpy, audio[0].data.cpu().numpy()))\n\n # save current position in case TTS is interrupted\n currentSubsentenceIndex = currentSubsentenceIndex + 1\n\n # save resulting wav file\n write(outputFile+intermediaryFormat, rate, audio_numpy)\n # these are just alternative ways to write files, none seemed too good\n # sf.write(outputFile+\".wav\",audio_numpy, rate, format=\"ogg\")\n # writeAudio(outputFile+\".wav\",audio_numpy, rate)\n convertFormat(outputFile+intermediaryFormat, outputFormat, speed)\n cleanSaves(outputFile,intermediaryFormat)\n except KeyboardInterrupt:\n saveVocaliseState(outputFile,intermediaryFormat,audio_numpy,currentSubsentenceIndex,rate)\n sys.exit()\n\n\n# Saving and loading progress to disk\n\nimport io\nimport json, zlib, base64\nimport os\ndef saveVocaliseState(outputFile, intermediaryFormat, audio_numpy, currentSubsentenceIndex,rate):\n # saving binary array compressed and b64 json\n memfile = io.BytesIO()\n np.save(memfile, audio_numpy)\n memfile.seek(0)\n saveDict = {\n \"subsentenceIndex\" : currentSubsentenceIndex,\n \"binaryAudio_numpy\" : base64.b64encode(zlib.compress(memfile.read())).decode('ascii')\n }\n\n with open(outputFile+'.sav', 'w') as f:\n json.dump(saveDict, f)\n\n # this means there was a partial TTS, so we also save where we left off\n print(\"\\n[!] TTS interrupted, progress saved to \"+outputFile+\".sav\")\n print(\"[?] 
To continue from here, set the --output to the folder containing the .sav file\")\n return\n\ndef loadVocaliseState(loadPath):\n loadPath=loadPath+\".sav\"\n if os.path.exists(loadPath):\n with open(loadPath, 'r') as f:\n saveDict = json.load(f)\n memfile = io.BytesIO()\n memfile.write(zlib.decompress(base64.b64decode(saveDict[\"binaryAudio_numpy\"])))\n memfile.seek(0)\n audio_numpy = np.load(memfile)\n print(\"[+] Found saved state at \"+loadPath+\" continuing from subsentence \" + str(saveDict[\"subsentenceIndex\"]))\n return audio_numpy, saveDict[\"subsentenceIndex\"]\n else:\n print(\"[+] Saved state not found at \"+loadPath+\" starting from the beginning\")\n audio_numpy = np.ndarray(1)\n currentSubsentenceIndex = 0\n return audio_numpy, currentSubsentenceIndex\n\ndef cleanSaves(outputFile,intermediaryFormat):\n if os.path.exists(outputFile+\".sav\"):\n os.remove(outputFile+\".sav\")\n if os.path.exists(outputFile+intermediaryFormat+\".part\"):\n os.remove(outputFile+intermediaryFormat+\".part\")\n print(\"[+] Partial files \"+outputFile+\".sav, \"+outputFile+intermediaryFormat+\".part and \"+outputFile+intermediaryFormat+\" removed\")\n\nimport ffmpeg\ndef convertFormat(sourceFile, format, speed):\n sourceSize=os.stat(sourceFile).st_size\n stream = ffmpeg.input(sourceFile)\n stream = stream.audio.filter(\"atempo\", speed) # adjust playback tempo by the given speed factor\n stream = ffmpeg.output(stream, os.path.splitext(sourceFile)[0]+format)\n stream = stream.global_args('-loglevel', 'quiet')\n stream = stream.global_args('-y')\n ffmpeg.run(stream)\n convertedSize=os.stat(os.path.splitext(sourceFile)[0]+format).st_size\n os.remove(sourceFile)\n print(\"[+] Converted \"+ sourceFile+ \" (\"+str(round(sourceSize/1024**2,2))+\"MB) to \"+\n os.path.splitext(sourceFile)[0]+format+\" (\"+str(round(convertedSize/1024**2,2))+\n \"MB) and removed source file\" )\n","repo_name":"f-viktor/intzd","sub_path":"tts.py","file_name":"tts.py","file_ext":"py","file_size_in_byte":5425,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"28586621331","text":"import zmq, time, subprocess\r\nfrom multiprocessing import Process\r\n\r\ndef call_is_waiting_for_it():\r\n args = [\"python\",\r\n \"is_waiting_for_it.py\"]\r\n subprocess.call(args)\r\n\r\n\r\ncontext = zmq.Context()\r\nsocket = context.socket(zmq.PUB)\r\nsocket.bind(\"tcp://*:5556\")\r\n\r\nsocket_REP = context.socket(zmq.REP)\r\nsocket_REP.bind(\"tcp://*:5557\")\r\n\r\np = Process(target=call_is_waiting_for_it)\r\np.start()\r\n\r\ntime.sleep(1)\r\nfor i in range(10):\r\n print(f\"Sending {i}\")\r\n socket.send_string(f\"{i}\")\r\n message = socket_REP.recv_string()\r\n print(f\"{i} confirmed!\")\r\n socket_REP.send_string(\"OK!\")\r\n\r\np.join()\r\n\r\n","repo_name":"TheAmazingElys/random_scripts_and_notebooks","sub_path":"examples/zmq/wait_for_it.py","file_name":"wait_for_it.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3593110502","text":"print(\"Algorytm Euklidesa\")\n\na = int(input(\"Wpisz pierwszą liczbę: \"))\nb = int(input(\"Wpisz drugą liczbę: \"))\n\nwhile a != b:\n if a > b:\n a = a - b\n if a < b:\n b = b - a\nprint(\"Największy wspólny dzielnik wynosi: \", a)","repo_name":"arciol/pytong","sub_path":"ALGORYTM 
EUKLIDESA.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36664702264","text":"from sqlalchemy import Column, Integer, String, Float, Boolean, ForeignKey, create_engine, Index\nfrom sqlalchemy.dialects.mysql import DATETIME\nfrom sqlalchemy.orm import relationship, sessionmaker, declarative_base\n\nimport keys\n\nBase = declarative_base()\n\nclass Coin(Base):\n __tablename__ = 'coin'\n\n symbol = Column(String(20), primary_key=True)\n base_currency = Column(String(10))\n quote_currency = Column(String(10))\n\n __table_args__ = (\n Index('symbol', symbol),\n )\n\nclass Currency(Base):\n __tablename__ = 'currency'\n\n symbol = Column(String(10), primary_key=True)\n\n __table_args__ = (\n Index('symbol', symbol),\n )\n\nclass MarketData(Base):\n __tablename__ = 'market_data_binance'\n\n id = Column(Integer, primary_key=True)\n symbol = Column(String(10), ForeignKey('currency.symbol'))\n open_time = Column(DATETIME)\n open = Column(Float)\n high = Column(Float)\n low = Column(Float)\n close = Column(Float)\n volume = Column(Float)\n close_time = Column(DATETIME)\n quote_asset_volume = Column(Float)\n trades = Column(Integer)\n taker_buy_base = Column(Float)\n taker_buy_quote = Column(Float)\n ignore = Column(Float)\n quote_currency = Column(String(10))\n close_adj = Column(Float)\n\n __table_args__ = (\n Index('symbol', symbol, open_time),\n )\n\n\nengine = create_engine(keys.DB_CONNECTION)\n\nBase.metadata.create_all(engine)","repo_name":"sebadlf/crypto-portfolio-strategy","sub_path":"model_binance.py","file_name":"model_binance.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21485284959","text":"from django.http import Http404, HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import TemplateView\n\nfrom meow_auth.db import get_tenant_id, get_landlord_id\nfrom meow_contracts.db import create_rent_request, get_tenant_rent_requests, get_tenant_rent_contracts, \\\n get_landlord_rent_requests, get_landlord_rent_contracts, delete_rent_contract, start_rent, end_rent, \\\n get_rent_contract, create_rent_contract, delete_rent_request\nfrom meow_contracts.forms import RentRequestForm, RentContractForm\nfrom meow_main.db import has_privilege\nfrom meow_main.views import UserViewMixin\n\n\nclass RentRequestCreateView(UserViewMixin, TemplateView):\n\n template_name = 'meow_contracts/create_or_edit_page.html'\n\n def get(self, request, **kwargs):\n context_data = self.get_context_data(**kwargs)\n token = request.COOKIES.get('token')\n if not has_privilege(token, 'create_rent_request'):\n raise Http404()\n\n context_data['form'] = RentRequestForm()\n return render(request, self.template_name, context=context_data)\n\n def post(self, request, **kwargs):\n form = RentRequestForm(request.POST)\n context = self.get_context_data(**kwargs)\n area_id = kwargs.get('area_id', None)\n tenant_id = get_tenant_id(request.COOKIES['token'])\n if tenant_id is None:\n raise Http404()\n\n if form.is_valid():\n created = create_rent_request(tenant_id, area_id, form.cleaned_data['description'])\n if created:\n return redirect('rent-request-list')\n else:\n pass\n # form.add_error('non_fields_errors', 'Failed to create shopping mall :C')\n\n context['form'] = form\n return render(request, self.template_name, context=context)\n\n\nclass RentRequestListView(UserViewMixin, 
TemplateView):\n\n template_name = 'meow_contracts/rent_request_list.html'\n\n def get(self, request, **kwargs):\n token = request.COOKIES.get('token', None)\n context = self.get_context_data(**kwargs)\n if context['user_group_name'] == 'tenants':\n tenant_id = get_tenant_id(token)\n if tenant_id < 0:\n raise Http404()\n context['rent_requests'] = get_tenant_rent_requests(tenant_id)\n else:\n landlord_id = get_landlord_id(token)\n if landlord_id < 0:\n raise Http404()\n context['rent_requests'] = get_landlord_rent_requests(landlord_id)\n return render(request, self.template_name, context=context)\n\n\nclass RentContractListView(UserViewMixin, TemplateView):\n\n template_name = 'meow_contracts/rent_contract_list.html'\n\n def get(self, request, **kwargs):\n token = request.COOKIES.get('token', None)\n context = self.get_context_data(**kwargs)\n if context['user_group_name'] == 'tenants':\n tenant_id = get_tenant_id(token)\n if tenant_id < 0:\n raise Http404()\n context['rent_contracts'] = get_tenant_rent_contracts(tenant_id)\n else:\n landlord_id = get_landlord_id(token)\n if landlord_id < 0:\n raise Http404()\n context['rent_contracts'] = get_landlord_rent_contracts(landlord_id)\n return render(request, self.template_name, context=context)\n\n\nclass DeleteRentContractView(UserViewMixin, TemplateView):\n\n def get(self, request, **kwargs):\n token = request.COOKIES.get('token', None)\n contract_id = kwargs.get('contract_id')\n landlord_id = get_landlord_id(token)\n if landlord_id < 0:\n raise Http404()\n result = delete_rent_contract(landlord_id, contract_id)\n if result < 0:\n raise Http404()\n return redirect('rent-contract-list')\n\n\nclass DeleteRentRequestView(UserViewMixin, TemplateView):\n\n def get(self, request, **kwargs):\n tenant_id = kwargs.get('tenant_id')\n area_id = kwargs.get('area_id')\n result = delete_rent_request(tenant_id, area_id)\n if result < 0:\n raise Http404()\n return redirect('rent-request-list')\n\n\nclass StartRentView(UserViewMixin, TemplateView):\n\n def get(self, request, **kwargs):\n token = request.COOKIES.get('token', None)\n contract_id = kwargs.get('contract_id')\n landlord_id = get_landlord_id(token)\n if landlord_id < 0:\n raise Http404()\n result = start_rent(landlord_id, contract_id)\n if result < 0:\n raise Http404()\n return redirect('rent-contract-list')\n\n\nclass EndRentView(UserViewMixin, TemplateView):\n\n def get(self, request, **kwargs):\n token = request.COOKIES.get('token', None)\n contract_id = kwargs.get('contract_id')\n landlord_id = get_landlord_id(token)\n if landlord_id < 0:\n raise Http404()\n result = end_rent(landlord_id, contract_id)\n if result < 0:\n raise Http404()\n return redirect('rent-contract-list')\n\n\nclass RentContractCreateView(UserViewMixin, TemplateView):\n\n template_name = 'meow_contracts/create_or_edit_page.html'\n\n def get(self, request, **kwargs):\n context_data = self.get_context_data(**kwargs)\n token = request.COOKIES.get('token')\n landlord_id = get_landlord_id(token)\n if landlord_id < 0:\n raise Http404()\n if not has_privilege(token, 'create_or_update_contract'):\n raise Http404()\n\n contract_id = kwargs.get('contract_id', None)\n if contract_id is not None:\n contract = get_rent_contract(landlord_id, contract_id)\n if contract is None:\n raise Http404()\n data = {\n 'code': contract[1],\n 'start_date': contract[2],\n 'end_date': contract[3],\n 'price': contract[4],\n 'additional_payment': contract[5],\n 'discount': contract[6],\n 'checking_account': contract[7],\n 'requirements': contract[8],\n 
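# NOTE: these tuple indices assume the column order returned by get_rent_contract\n 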
}\n context_data['form'] = RentContractForm(data)\n else:\n context_data['form'] = RentContractForm()\n\n return render(request, self.template_name, context=context_data)\n\n def post(self, request, **kwargs):\n token = request.COOKIES.get('token')\n form = RentContractForm(request.POST)\n context = self.get_context_data(**kwargs)\n contract_id = kwargs.get('contract_id', None)\n area_id = kwargs.get('area_id', None)\n tenant_id = kwargs.get('tenant_id', None)\n landlord_id = get_landlord_id(token)\n if landlord_id < 0:\n raise Http404()\n if form.is_valid():\n delete_rent_request(tenant_id, area_id)\n created = create_rent_contract(contract_id, area_id, tenant_id, landlord_id, form.cleaned_data)\n if created:\n return redirect('rent-contract-list')\n else:\n pass\n\n context['form'] = form\n return render(request, self.template_name, context=context)\n","repo_name":"CatWantsMeow/uni","sub_path":"bachelor/mdisubd/project/meow/meow_contracts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"26568058869","text":"import os\n\nfrom filefind.config import load_config\n\n\ndef test_post_process_config():\n config = load_config(['-i', '*.cpp *.h', '--include', '*.hpp'])\n\n assert config.include == ['*.cpp', '*.h', '*.hpp']\n assert config.exclude == []\n assert config.directory == os.path.abspath('.')\n\n\ndef test_include_as_position_arguments():\n config = load_config(['-i', '*.cpp *.h', '*.hpp'])\n\n assert config.include == ['*.cpp', '*.h', '*.hpp']\n","repo_name":"agateau/filefind","sub_path":"tests/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5182825178","text":"import argparse\nimport pickle as pkl\nimport os\nimport numpy as np\n\nfrom IPython import embed\n\n\ndef compute_forgetting_statistics(stats, npresentations):\n presentations_needed_to_learn = {}\n unlearned_per_presentation = {}\n margins_per_presentation = {}\n first_learned = {}\n for example_id, example_stats in stats.items():\n # Forgetting event is a transition in accuracy from 1 to 0\n presentation_acc = np.array(example_stats['acc'][:npresentations])\n transitions = presentation_acc[1:] - presentation_acc[:-1]\n\n # embed()\n\n # Find all presentations when forgetting occurs\n if len(np.where(transitions == -1)[0]) > 0:\n unlearned_per_presentation[example_id] = np.where(\n transitions == -1)[0] + 1\n else:\n unlearned_per_presentation[example_id] = []\n\n # Find number of presentations needed to learn example,\n # e.g. last presentation when acc is 0\n if len(np.where(presentation_acc == 0)[0]) > 0:\n presentations_needed_to_learn[example_id] = np.where(\n presentation_acc == 0)[0][-1] + 1 #index of the last 0 in the acc list +1 (zero means never learnt)\n else:\n presentations_needed_to_learn[example_id] = 0\n\n # Find the misclassication margin for each presentation of the example\n margins_per_presentation[example_id] = np.array(example_stats['margin'][:npresentations])\n\n # Find the presentation at which the example was first learned,\n # e.g. 
first presentation when acc is 1\n if len(np.where(presentation_acc == 1)[0]) > 0:\n first_learned[example_id] = np.where(\n presentation_acc == 1)[0][0]\n else:\n first_learned[example_id] = np.nan\n\n return presentations_needed_to_learn, unlearned_per_presentation, margins_per_presentation, first_learned\n\n# Sorts examples by number of forgetting counts during training, in ascending order\n# If an example was never learned, it is assigned the maximum number of forgetting counts\n#\n# unlearned_per_presentation_all: dict mapping each example id to the epochs at which it was forgotten\n# first_learned_all: dict mapping each example id to the epoch at which it was first learned\n# npresentations: number of training epochs\n#\n# Returns 2 numpy arrays containing the sorted example ids and corresponding forgetting counts\n#\ndef sort_examples_by_forgetting(unlearned_per_presentation_all,\n first_learned_all, npresentations):\n # Initialize lists\n example_original_order = []\n example_stats = []\n\n for example_id in unlearned_per_presentation_all.keys():\n\n # Add current example to lists\n example_original_order.append(example_id)\n example_stats.append(0)\n\n # Get all presentations when current example was forgotten during current training run\n stats = unlearned_per_presentation_all[example_id]\n\n # If example was never learned during current training run, add max forgetting counts\n if np.isnan(first_learned_all[example_id]):\n example_stats[-1] += npresentations\n else:\n example_stats[-1] += len(stats)\n\n print('Number of unforgettable examples: {}'.format(\n len(np.where(np.array(example_stats) == 0)[0])))\n return np.array(example_original_order)[np.argsort(\n example_stats)], np.sort(example_stats)\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Options\")\n parser.add_argument('--exp', default=f'..{os.sep}results', required=True)\n parser.add_argument('--epochs', type=int, default=200)\n args = parser.parse_args()\n\n\n exp_path = os.path.join(f'..{os.sep}results', args.exp)\n for run in os.listdir(exp_path):\n run_path = os.path.join(exp_path, run, 'forgetting_stats')\n\n with open(os.path.join(run_path, 'stats.pkl'), 'rb') as fin:\n loaded = pkl.load(fin)\n\n # embed()\n\n # Compute the forgetting statistics per example for training run\n _, unlearned_per_presentation, _, first_learned = compute_forgetting_statistics(\n loaded, args.epochs)\n\n # embed()\n\n # Sort examples by forgetting counts in ascending order for this training run\n # ordered_examples are the indices of the dataset, sorted by forgetting count\n # ordered_values are the corresponding numbers of forgetting events\n ordered_examples, ordered_values = sort_examples_by_forgetting(\n unlearned_per_presentation, first_learned, args.epochs)\n\n\n # embed()\n","repo_name":"matttrd/information_sampler","sub_path":"code/analyze_forgetting_stats.py","file_name":"analyze_forgetting_stats.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"29381860739","text":"from matplotlib import rc\nimport matplotlib.pyplot as plt\nfrom numpy import * \n\nx1 = arange(0, 2.5, .1);\ny1 = -x1+2.5;\nx2 = arange(2.5, 5.1, .1);\ny2 = x2+2.5;\n\nrc('text', usetex=True)\nrc('font', family='serif')\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.plot(x1, y1, color='b')\nax.plot(x2, y2, 
color='g')\nax.set_xlabel('$x$');\nax.set_ylabel('$p(x)$');\nplt.savefig('score_prior.pdf')","repo_name":"ssanner/xadd-inference","sub_path":"src/prefs/ploting/plot_experiments.py","file_name":"plot_experiments.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"21348025012","text":"from pygments.style import Style\nfrom pygments.token import Keyword, Name, Comment, String, Error, Text, \\\n Number, Operator, Generic, Whitespace, Punctuation, Other, Literal\n\nclass SourcererStyle(Style):\n \"\"\"\n This style mimics the sourcerer color scheme.\n \"\"\"\n\n background_color = \"#222222\"\n highlight_color = \"#49483e\"\n\n styles = {\n # No corresponding class for the following:\n Text: \"#AFAFAF\", # class: ''\n Whitespace: \"\", # class: 'w'\n Error: \"#960050 bg:#1e0010\", # class: 'err'\n Other: \"\", # class 'x'\n\n Comment: \"#5f5f5f\", # class: 'c'\n Comment.Multiline: \"#5f5f5f\", # class: 'cm'\n Comment.Preproc: \"\", # class: 'cp'\n Comment.Single: \"\", # class: 'c1'\n Comment.Special: \"\", # class: 'cs'\n\n Keyword: \"#90B0D1\", # class: 'k'\n Keyword.Constant: \"\", # class: 'kc'\n Keyword.Declaration: \"\", # class: 'kd'\n Keyword.Namespace: \"#6688AA\", # class: 'kn'\n Keyword.Pseudo: \"\", # class: 'kp'\n Keyword.Reserved: \"\", # class: 'kr'\n Keyword.Type: \"\", # class: 'kt'\n\n Operator: \"#D0D0D0\", # class: 'o'\n Operator.Word: \"\", # class: 'ow' - like keywords\n\n Punctuation: \"#D0D0D0\", # class: 'p'\n\n Name: \"#AfAfAf\", # class: 'n'\n Name.Attribute: \"#87875f\", # class: 'na' - to be revised\n Name.Builtin: \"\", # class: 'nb'\n Name.Builtin.Pseudo: \"\", # class: 'bp'\n Name.Class: \"#87875f\", # class: 'nc' - to be revised\n Name.Constant: \"#90B0D1\", # class: 'no' - to be revised\n Name.Decorator: \"#87875f\", # class: 'nd' - to be revised\n Name.Entity: \"\", # class: 'ni'\n Name.Exception: \"#87875f\", # class: 'ne'\n Name.Function: \"#87875f\", # class: 'nf'\n Name.Property: \"\", # class: 'py'\n Name.Label: \"\", # class: 'nl'\n Name.Namespace: \"\", # class: 'nn' - to be revised\n Name.Other: \"#87875f\", # class: 'nx'\n Name.Tag: \"#D0D0D0\", # class: 'nt' - like a keyword\n Name.Variable: \"\", # class: 'nv' - to be revised\n Name.Variable.Class: \"\", # class: 'vc' - to be revised\n Name.Variable.Global: \"\", # class: 'vg' - to be revised\n Name.Variable.Instance: \"\", # class: 'vi' - to be revised\n\n Number: \"#8181A6\", # class: 'm'\n Number.Float: \"\", # class: 'mf'\n Number.Hex: \"\", # class: 'mh'\n Number.Integer: \"\", # class: 'mi'\n Number.Integer.Long: \"\", # class: 'il'\n Number.Oct: \"\", # class: 'mo'\n\n Literal: \"#8181A6\", # class: 'l'\n Literal.Date: \"#87875f\", # class: 'ld'\n\n String: \"#87875f\", # class: 's'\n String.Backtick: \"\", # class: 'sb'\n String.Char: \"\", # class: 'sc'\n String.Doc: \"\", # class: 'sd' - like a comment\n String.Double: \"\", # class: 's2'\n String.Escape: \"#8181A6\", # class: 'se'\n String.Heredoc: \"\", # class: 'sh'\n String.Interpol: \"\", # class: 'si'\n String.Other: \"\", # class: 'sx'\n String.Regex: \"\", # class: 'sr'\n String.Single: \"\", # class: 's1'\n String.Symbol: \"\", # class: 'ss'\n\n Generic: \"\", # class: 'g'\n Generic.Deleted: \"#D0D0D0\", # class: 'gd',\n Generic.Emph: \"\", # class: 'ge'\n Generic.Error: \"\", # class: 'gr'\n Generic.Heading: \"\", # class: 'gh'\n Generic.Inserted: \"#87875f\", # class: 'gi'\n Generic.Output: \"\", # class: 'go'\n 
Generic.Prompt: \"\", # class: 'gp'\n Generic.Strong: \"\", # class: 'gs'\n Generic.Subheading: \"#FF9800\", # class: 'gu'\n Generic.Traceback: \"\", # class: 'gt'\n }\n","repo_name":"xero/sourcerer","sub_path":"sourcerer.py","file_name":"sourcerer.py","file_ext":"py","file_size_in_byte":4758,"program_lang":"python","lang":"en","doc_type":"code","stars":146,"dataset":"github-code","pt":"81"} +{"seq_id":"13250762040","text":"from dataclasses import dataclass\nfrom typing import List\nfrom teamiclink.slack.model import Goal\nfrom teamiclink.database import Database\nfrom uuid import UUID\n\n\n@dataclass\nclass GoalStore:\n data_source_name: str\n\n def create_goal(self, content: str, slack_team_id: str) -> Goal:\n query = \"\"\"\n INSERT INTO teamiclink.goal\n (slack_team_id, content)\n VALUES\n (%(slack_team_id)s, %(content)s)\n RETURNING slack_team_id, content, id;\n \"\"\"\n query_params = dict(\n slack_team_id=slack_team_id,\n content=content,\n )\n with Database.connect(data_source_name=self.data_source_name) as connection:\n with Database.create_cursor(connection=connection) as cursor:\n cursor.execute(query, query_params)\n response = cursor.fetchone()\n\n return Goal(**response)\n\n def read_goals(self, slack_team_id: str) -> List[Goal]:\n query = \"\"\"\n SELECT slack_team_id, content, id\n FROM teamiclink.goal\n WHERE slack_team_id = %(slack_team_id)s\n ORDER BY content ASC;\n \"\"\"\n query_params = dict(slack_team_id=slack_team_id)\n with Database.connect(data_source_name=self.data_source_name) as connection:\n with Database.create_cursor(connection=connection) as cursor:\n cursor.execute(query, query_params)\n response = cursor.fetchall()\n\n return [Goal(**goal) for goal in response]\n\n def delete_goal(self, id: UUID) -> int:\n query = \"\"\"\n DELETE FROM teamiclink.goal\n WHERE id=%(id)s;\n \"\"\"\n query_params = dict(id=id)\n with Database.connect(data_source_name=self.data_source_name) as connection:\n with Database.create_cursor(connection=connection) as cursor:\n cursor.execute(query, query_params)\n\n return cursor.rowcount\n","repo_name":"e1004/teamiclink","sub_path":"teamiclink/slack/store_goal.py","file_name":"store_goal.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72762335624","text":"import torch\nfrom torch import nn\n\nfrom ... 
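The sourcerer.py record above only defines a Pygments style class; rendering with it means handing the class to a formatter. A hedged usage sketch (the `sourcerer` import path is an assumption for illustration):

```python
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
from sourcerer import SourcererStyle  # assumes sourcerer.py is importable

# HtmlFormatter accepts a Style subclass directly; full=True emits a standalone page.
formatter = HtmlFormatter(style=SourcererStyle, full=True)
print(highlight("print('hello')", PythonLexer(), formatter)[:80])
```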
import epsilon\n\n\nclass SpatialConvOrderK(nn.Module):\n \"\"\"\n Spatial convolution of order K with possibly different diffusion matrices (useful for directed graphs)\n\n Efficient implementation inspired from graph-wavenet codebase\n \"\"\"\n\n def __init__(self, c_in, c_out, support_len=3, order=2, include_self=True):\n super(SpatialConvOrderK, self).__init__()\n self.include_self = include_self\n c_in = (order * support_len + (1 if include_self else 0)) * c_in\n self.mlp = nn.Conv2d(c_in, c_out, kernel_size=1)\n self.order = order\n\n @staticmethod\n def compute_support(adj, device=None):\n if device is not None:\n adj = adj.to(device)\n adj_bwd = adj.T\n adj_fwd = adj / (adj.sum(1, keepdims=True) + epsilon)\n adj_bwd = adj_bwd / (adj_bwd.sum(1, keepdims=True) + epsilon)\n support = [adj_fwd, adj_bwd]\n return support\n\n @staticmethod\n def compute_support_orderK(adj, k, include_self=False, device=None):\n if isinstance(adj, (list, tuple)):\n support = adj\n else:\n support = SpatialConvOrderK.compute_support(adj, device)\n supp_k = []\n for a in support:\n ak = a\n for i in range(k - 1):\n ak = torch.matmul(ak, a.T)\n if not include_self:\n ak.fill_diagonal_(0.)\n supp_k.append(ak)\n return support + supp_k\n\n def forward(self, x, support):\n # [batch, features, nodes, steps]\n if x.dim() < 4:\n squeeze = True\n x = torch.unsqueeze(x, -1)\n else:\n squeeze = False\n out = [x] if self.include_self else []\n if (type(support) is not list):\n support = [support]\n for a in support:\n x1 = torch.einsum('ncvl,wv->ncwl', (x, a)).contiguous()\n out.append(x1)\n for k in range(2, self.order + 1):\n x2 = torch.einsum('ncvl,wv->ncwl', (x1, a)).contiguous()\n out.append(x2)\n x1 = x2\n\n out = torch.cat(out, dim=1)\n out = self.mlp(out)\n if squeeze:\n out = out.squeeze(-1)\n return out\n","repo_name":"Graph-Machine-Learning-Group/grin","sub_path":"lib/nn/layers/spatial_conv.py","file_name":"spatial_conv.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"81"} +{"seq_id":"30081177434","text":"class ThailandPackage:\n def __init__(self,price,day):\n self.price = price\n self.day = day\n def detail(self):\n print(\"태국 패키지 3박5일\")\n \n def detail1(self):\n print(\"한국 패키지 1박2일\")\n \nif __name__ == \"__main__\":\n print(\"Thailand 모듈을 직접실행\")\n trip_to = ThailandPackage(3000,5)\n trip_to.detail()\n # print(\"{0}원 {1}일 여행\".format(trip_to.price,trip_to.day))\n print(\"%s 원 %s일 여행\" % (trip_to.price , trip_to.day))\nelse:\n print(\"Thailand 외부에서 실행\")\n \ntrip_to =ThailandPackage(5000, 3)\ntrip_to.detail()\ntrip_to.detail1()","repo_name":"hgh1025/albo-main","sub_path":"ExcelCalculate/main/templates/thailand.py","file_name":"thailand.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10312832637","text":"import requests\r\nimport time\r\nimport sqlite3\r\nimport json\r\n\r\ndef create_annotations(texts):\r\n url = \"https://api.aicloud.sbercloud.ru/public/v2/summarizator/predict\"\r\n headers = {\r\n \"accept\": \"application/json\",\r\n \"Content-Type\": \"application/json\"\r\n }\r\n for text in texts:\r\n try:\r\n text=text[0]\r\n request = {\r\n \"instances\": [\r\n {\r\n \"text\": text,\r\n \"num_beams\": 3,\r\n \"num_return_sequences\": 6,\r\n \"length_penalty\": 1.5,\r\n \"repetition_penalty\": 1.5,\r\n \"genstrategy\": \"beamsearch\"\r\n }\r\n ]\r\n }\r\n res = 
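SpatialConvOrderK.compute_support in the record above row-normalizes the adjacency matrix in both edge directions so that directed graphs diffuse information both forward and backward. The normalization in isolation (`epsilon` here stands in for the package-level constant the record imports):

```python
import torch

epsilon = 1e-8  # assumption: mirrors the imported package constant

adj = torch.tensor([[0., 1., 0.],
                    [0., 0., 1.],
                    [1., 0., 0.]])
adj_fwd = adj / (adj.sum(1, keepdims=True) + epsilon)      # outgoing edges sum to ~1
adj_bwd = adj.T / (adj.T.sum(1, keepdims=True) + epsilon)  # incoming edges sum to ~1

x = torch.randn(3, 4)                         # [nodes, features]
out = sum(a @ x for a in (adj_fwd, adj_bwd))  # one order-1 diffusion step
print(out.shape)                              # torch.Size([3, 4])
```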
requests.post(url=url,headers=headers,json=request)\r\n print(res.status_code)\r\n annotation = json.loads(res.text)\r\n if(res.status_code==200 and annotation[\"comment\"]==\"Ok!\"):\r\n annotation = annotation[\"prediction_best\"][\"bertscore\"]\r\n with sqlite3.connect(\"news.db\") as con:\r\n cursor = con.cursor()\r\n flag=1\r\n cursor.execute(\"UPDATE news SET annotation = ?, flag_annotation = ? WHERE content = ?\", (annotation, flag, text))\r\n con.commit()\r\n else:\r\n with sqlite3.connect(\"news.db\") as con:\r\n cursor = con.cursor()\r\n flag=1\r\n cursor.execute(\"UPDATE news SET annotation = ?, flag_annotation = ? WHERE content = ?\", (\"-\", flag, text))\r\n con.commit()\r\n\r\n time.sleep(1)\r\n except Exception as e:\r\n print(f\"Ошибка при получении доступа к api: {e}\")\r\n with sqlite3.connect(\"news.db\") as con:\r\n cursor = con.cursor()\r\n flag = 1\r\n cursor.execute(\"UPDATE news SET annotation = ?, flag_annotation = ? WHERE content = ?\",\r\n (\"-\", flag, text))\r\n con.commit()\r\n","repo_name":"nikitosik258/Tg_bot_news","sub_path":"news_telebot/annotation.py","file_name":"annotation.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74422075785","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 7 15:52:29 2020\n\n@author: dustan\n\"\"\"\n\nimport pandas as pd\n# df = pd.read_csv(\"https://github.com/neurospin/pystatsml/tree/master/datasets/iris.csv\")\ndf = pd.read_csv(\"https://raw.githubusercontent.com/neurospin/pystatsml/master/datasets/iris.csv\")\nprint(df.columns)\nnumerical = df.select_dtypes(include=\"number\")\nstats = df.groupby('species').mean()","repo_name":"dustanlevenstein/StatisticalMachineLearning","sub_path":"section_03_02_13_exercise.py","file_name":"section_03_02_13_exercise.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"33228426326","text":"import requests\nimport time\nfrom influx_db_auth import InfluxDBAuth\nfrom influx_db_common import decode_csv_data, get_time_to_wait\nfrom safe_logger import SafeLogger\nfrom influx_db_constants import InfluxDBConstants\n\n\nTRY_AGAIN_LATER = 409\n\n\nlogger = SafeLogger(\"influx-db plugin\", forbidden_keys=[\"token\", \"password\"])\n\n\nclass InfluxDBSessionError(ValueError):\n pass\n\n\nclass InfluxDBSession(object):\n def __init__(self, server_url, org=None, bucket_id=None, username=None,\n password=None, token=None, generate_verbose_logs=False,\n is_ssh_check_disabled=False):\n assert_valid_url(server_url)\n self.server_url = server_url.strip(\"/\")\n self.session = requests.Session()\n if is_ssh_check_disabled:\n self.session.verify = True\n if token:\n self.session.auth = InfluxDBAuth(server_url, username, password, token)\n else:\n self.session.auth = (username, password)\n self.buffer = []\n self.buffer_size = 0\n self.bucket_id = bucket_id\n self.org = org\n\n def get_bucket_list(self):\n url = \"{}/api/v2/buckets\".format(self.server_url)\n response = self.get(url)\n json_response = response.json()\n buckets = json_response.get(\"buckets\", [])\n return buckets\n\n def get_organization_list(self):\n url = \"{}/api/v2/orgs\".format(self.server_url)\n response = self.get(url)\n json_response = response.json()\n orgs = json_response.get(\"orgs\", [])\n return orgs\n\n def get(self, url, headers=None, params=None):\n headers = headers or {}\n 
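annotation.py above fires one request per text with a fixed one-second sleep between items, and any failure simply marks the row with "-". A retry-with-backoff wrapper one might put in front of such an endpoint (the retry policy is my assumption, not the record's behavior):

```python
import time
import requests

def post_with_retry(url, payload, retries=3, backoff=2.0):
    """POST JSON, retrying on connection errors and 5xx responses."""
    for attempt in range(retries):
        try:
            res = requests.post(url, json=payload, timeout=30)
            if res.status_code < 500:
                return res
        except requests.RequestException as exc:
            print(f"attempt {attempt + 1} failed: {exc}")
        time.sleep(backoff * (attempt + 1))  # linear backoff between attempts
    return None
```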
headers.update({\n \"Accept-Encoding\": \"gzip\",\n \"Content-Type\": \"application/json\"\n })\n retry_number = 0\n while retry_number < InfluxDBConstants.MAX_RETRY:\n response = self.session.get(url, headers=headers, params=params)\n if response.status_code == TRY_AGAIN_LATER:\n time_to_wait = get_time_to_wait(response)\n retry_number = retry_number + 1\n logger.warning(\"Error 409 on attempt {}, waiting {} seconds\".format(retry_number, time_to_wait))\n time.sleep(time_to_wait)\n else:\n break\n assert_response_ok(response)\n return response\n\n def batch_write(self, measurement, timestamp, tags, fields):\n self.buffer.append(to_influx_line_format(measurement, timestamp, tags, fields))\n self.buffer_size += 1\n if self.buffer_size > InfluxDBConstants.DEFAULT_BATCH_SIZE:\n logger.info(\"Max batch size reached, flushing\")\n self.flush()\n\n def write(self, buffer):\n url = \"{}/api/v2/write\".format(self.server_url)\n params = {\n \"bucket\": self.bucket_id,\n \"org\": self.org\n }\n headers = {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"text/plain; charset=utf-8\"\n }\n data = '\\n'.join(buffer)\n response = self.post(url, data=data, params=params, headers=headers)\n return response\n\n def post(self, url, headers=None, data=None, params=None):\n headers = headers or {}\n retry_number = 0\n while retry_number < InfluxDBConstants.MAX_RETRY:\n response = self.session.post(url, data=data, headers=headers, params=params)\n if response.status_code == TRY_AGAIN_LATER:\n time_to_wait = 30\n time.sleep(time_to_wait)\n logger.warning(\"Error 409, waiting {} seconds\".format(time_to_wait))\n retry_number = retry_number + 1\n else:\n break\n assert_response_ok(response)\n return response\n\n def flush(self):\n self.write(self.buffer)\n self.buffer = []\n self.buffer_size = 0\n return 0\n\n def close(self):\n row_processed = self.flush()\n return row_processed\n\n def query(self, sql_query):\n results = []\n url = \"{}/api/v2/query\".format(self.server_url)\n params = {\"org\": self.org}\n headers = {\n \"Content-Type\": \"application/vnd.flux\",\n \"Accept\": \"application/csv\"\n }\n response = self.post(url, params=params, headers=headers, data=sql_query)\n results = decode_csv_data(response.content)\n for result in results:\n yield result\n\n\ndef assert_response_ok(response, context=None, can_raise=True, generate_verbose_logs=False):\n error_message = \"\"\n error_context = \" while {} \".format(context) if context else \"\"\n if not isinstance(response, requests.models.Response):\n error_message = \"Did not return a valide response\"\n else:\n status_code = response.status_code\n if status_code >= 400:\n error_message = \"Error {}{}\".format(status_code, error_context)\n json_content = \"\"\n message = \"\"\n json_content = safe_json_extract(response, default={})\n message = json_content.get(\"message\")\n content = response.content\n if message:\n error_message += \". \" + message\n elif json_content:\n error_message += \". 
\" + json_content\n logger.error(error_message)\n logger.error(content)\n if error_message and can_raise:\n if generate_verbose_logs:\n logger.error(\"last requests url={}, body={}\".format(response.request.url, response.request.body))\n pass\n raise Exception(error_message)\n return error_message\n\n\ndef safe_json_extract(response, default=None):\n json = default\n try:\n json = response.json()\n except Exception as error_message:\n logging.error(\"Error '{}' while decoding json\".format(error_message))\n pass\n return json\n\n\ndef to_influx_line_format(measurement, timestamp, tags, fields):\n # batch_write:timestamp=0, tags=[{'location': 'Kalmath'}], fields=[{'census': 23}]\n tag_tokens = []\n for tag in tags:\n for key in tag:\n tag_tokens.append(\"{}={}\".format(key, tag.get(key)))\n tags_string = \",\".join(tag_tokens)\n if tag_tokens:\n tags_string = \",\"+tags_string\n fields_tokens = []\n for field in fields:\n for key in field:\n fields_tokens.append(\"{}={}\".format(key, field.get(key)))\n fields_string = \",\".join(fields_tokens)\n line = \"{}{} {} {}\".format(measurement, tags_string, fields_string, timestamp)\n # airSensors,sensor_id=TLM0201 temperature=73.97038159354763,humidity=35.23103248356096,co=0.48445310567793615 1630424257000000000\n # [,=[,=]] =[,=] []\n return line\n\n\ndef assert_valid_url(url):\n if not url:\n raise InfluxDBSessionError(\"Server URL not valid\")\n","repo_name":"alexbourret/dss-plugin-influx-db","sub_path":"python-lib/influx_db_session.py","file_name":"influx_db_session.py","file_ext":"py","file_size_in_byte":6877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"6435160144","text":"import tkinter as tk\n\n\nclass SliderInterval:\n def __init__(self, slider:tk.Canvas, start:float, stop:float):\n # Variables\n self.slider = slider\n self.start = start\n self.stop = stop\n self.state_selected = False\n # Colors\n self.color_normal = \"#25d800\"\n self.color_selected = \"#45ff45\"\n # Draw elements\n pad_x = self.slider.pad_x\n pad_y = self.slider.pad_y + self.slider.cursor_radius\n size = self.slider.size\n self.line_iid = self.slider.create_line(\n pad_x + size * self.start, pad_y + 1,\n pad_x + size * self.stop, pad_y + 1,\n fill=self.color_normal,\n width=4\n )\n self.slider.itemconfigure(self.line_iid, tag=f\"interval{self.line_iid}\")\n\n self.slider.tag_bind(f\"interval{self.line_iid}\", \"\", self._on_click_interval)\n\n # Magics\n\n def __repr__(self) -> str:\n return f\"SliderInterval({round(self.start, 2)}, {round(self.stop, 2)})\"\n\n def __str__(self) -> str:\n return f\"SliderInterval({round(self.start, 2)}, {round(self.stop, 2)})\"\n\n def __eq__(self, other) -> bool:\n \"\"\"\n Returns if the interval is equal to another interval.\n\n Args:\n other (int or SliderInterval): The iid of the other interval or the interval.\n \"\"\"\n if isinstance(other, SliderInterval):\n return self.line_iid == other.line_iid\n else:\n return self.line_iid == other\n\n # Events\n\n def _on_click_interval(self, event):\n \"\"\"\n Updates the color when the interval is selected and generates an event of the state change.\n Generates <> when the interval is clicked a first time.\n Genrates <> when the interval is clicked another time.\n \"\"\"\n if not self.state_selected:\n self.state_selected = True\n self.slider.event_generate(\"<>\", x=self.line_iid)\n else:\n self.state_selected = False\n self.slider.event_generate(\"<>\", x=self.line_iid)\n\n # Methods\n\n def is_selected(self) -> bool:\n 
\"\"\"\n Returns the selection state of the interval.\n \"\"\"\n return self.state_selected\n\n def update_line(self):\n \"\"\"\n Updates the interval in the slider canvas.\n \"\"\"\n # Get new dimensions\n pad_x = self.slider.pad_x\n pad_y = self.slider.pad_y + self.slider.cursor_radius\n size = self.slider.size\n # Update element\n self.slider.coords(\n self.line_iid,\n pad_x + size * self.start, pad_y + 1,\n pad_x + size * self.stop, pad_y + 1,\n )\n\n def update_color(self):\n \"\"\"\n Updates the color of the interval based on its state.\n \"\"\"\n if self.state_selected:\n self.slider.itemconfigure(self.line_iid, fill=self.color_selected)\n else:\n self.slider.itemconfigure(self.line_iid, fill=self.color_normal)\n","repo_name":"deplanty/video-cutter-tags","sub_path":"src/objects/slider_interval.py","file_name":"slider_interval.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"28616345919","text":"from flink.connection import Connection, Iterator, Collector\nfrom flink.functions import RuntimeContext, Function\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nclass FileInputSplit(object):\n def __init__(self, path, start, end, hosts, additional):\n self.path = path\n self.start = start\n self.end = end\n self.hosts = hosts\n self.additional = additional\n\n\nclass PythonInputFormat(Function.Function):\n def __init__(self):\n super(PythonInputFormat, self).__init__()\n self.close_called = False\n\n def _run(self):\n collector = self._collector\n function = self.deliver\n split = self._iterator.next()\n if split[0] == \"close\":\n self.close_called = True\n while split is not None and not self.close_called:\n try:\n function(split, collector)\n self._iterator._reset()\n self._connection.send_end_signal()\n split = self._iterator.next()\n except Exception:\n logger.exception(\"Exception in udf call.\")\n raise\n if split[0] == \"close\":\n self.close_called = True\n\n collector._close()\n\n def deliver(self, path, collector):\n pass\n\n def computeSplits(self, env, con):\n iterator = Iterator.PlanIterator(con, env)\n collector = Collector.SplitCollector(con, env)\n\n min_num_splits = iterator.next()\n path = iterator.next()\n\n self.createInputSplits(min_num_splits, path, collector)\n\n collector._close()\n \n def createInputSplits(self, minNumSplits, path, collector):\n pass\n","repo_name":"mathiaspet/pyflink","sub_path":"flink-libraries/flink-python/src/main/python/org/apache/flink/python/api/flink/io/PythonInputFormat.py","file_name":"PythonInputFormat.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"81"} +{"seq_id":"10380970334","text":"two_sum = lambda x, y: x + y\nsum = lambda x, y=100: x + y\n# sum_with_100 = sum(100)\n# result = sum_with_100(200)\n# print(result)\n\nlower = lambda x, y: x if x < y else y\nprint(lower(7, 10))\n\nd = [{\"order\": 3}, {\"order\": 1}, {\"order\": 2}]\nd.sort(key=lambda x: x['order'])\nprint(d)\n","repo_name":"strawsyz/straw","sub_path":"study/lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"11348297023","text":"#Import libraries\nimport clean_data as clean #script to clean data\nimport ml #script to apply machine learning algorithms\n\n\ndef main():\n\t#build dataframe for analysis\n\ttrain_data = 
'/home/andy/Projects/Kaggle/Titanic/Data/train.csv'\n\ttest_data = '/home/andy/Projects/Kaggle/Titanic/Data/test.csv'\n\tdf_train, ids_train = clean.build_df(train_data)\n\tdf_test, ids_test = clean.build_df(test_data)\n\n\n\t#build training sets\n\ttrain_X = df_train[0::,1::] #predictor features span all rows and 1st column on\n\ttrain_y = df_train[0::,0] #response variables span all rows and 0th column only\n\n\t#train and predict\n\tml.learn(train_X,train_y,df_test,ids_test)\n\t\n\n\t\nif __name__ == '__main__':\n    main()","repo_name":"andylee024/Titanic-Kaggle-","sub_path":"titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72127769224","text":"import sys\nimport math\n\ndef func(N):\n    if inpNum == 1:\n        return 0\n    sum = 1\n    upperBound = int(math.sqrt(N)) + 1\n    for index in range(2,upperBound):\n        if N % index == 0:\n            if (index == N//index):\n                sum += index\n            else:\n                sum += (N//index) + index\n    return sum\n\n\nnumOfInp = int(sys.stdin.readline())\nfor i in range(int(numOfInp)):\n    inpNum = int(sys.stdin.readline())\n    print(func(inpNum)) \n    \n","repo_name":"muditjain1987/master","sub_path":"Programming/SumDivisor/sumDiv2.py","file_name":"sumDiv2.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"32536942995","text":"from bs4 import BeautifulSoup\nfrom collections import OrderedDict\nfrom datetime import timedelta, datetime\nfrom sc_exceptions import *\nfrom sc_helpers import render_page\nimport multiprocessing\nimport os\nimport re\nimport requests\nimport requests_cache\nimport youtube_dl\nimport redis\n\n# process synchronization for video download\nlck = multiprocessing.Lock()\ncv = multiprocessing.Condition(lck)\nWORKING = 'sc_working_videos'\nr = redis.Redis(host='localhost', port=6379, db=0)\nr.delete(WORKING)\n\nreq_expire_after = timedelta(seconds=600)\ncached_req_session = requests_cache.CachedSession('sc_cache', backend='sqlite', expire_after=req_expire_after)\n\n\ndef get_parsed_html(path):\n    # request DC app webpage\n    try:\n        try:\n            response = cached_req_session.get(path, timeout=30)\n        except requests.exceptions.MissingSchema:\n            path = 'https://' + path\n            response = cached_req_session.get(path, timeout=30)\n    except requests.exceptions.Timeout:\n        raise DCAppError('Request timed out')\n\n    if response.status_code != 200:\n        raise DCAppError(f'Error code {response.status_code}')\n\n    source = response.text\n    return BeautifulSoup(source, features='html.parser')\n\n\ndef find_video(parsed_html):\n    # try to find video\n    return parsed_html.body.find('video').find('source').attrs['src']\n\n\ndef extract_id(url):\n    # extract id\n    x = re.search(r\"dreamcatcher\\.candlemystar\\.com\\/post\\/(\\d+)\", url)\n    if x:\n        return x.group(1)\n    return None\n\n\ndef dc_app(path):\n    \"\"\"Get HQ pictures from DC app\"\"\"\n    parsed_html = get_parsed_html(path)\n\n    app_images = None\n    app_video = None\n    app_video_poster = None\n    dcapp_id = extract_id(path)\n\n    # match image urls\n    regex = r\"(?P<protocol>http:\\/\\/|https:\\/\\/)file\\.candlemystar\\.com\\/(?P<cache>cache\\/)?.*?(?P<imgdim>_\\d+x\\d+)?\\.\\w+\"\n\n    try:\n        # try to find video\n        app_video = find_video(parsed_html)\n        app_video_poster = parsed_html.body.find('video').attrs['poster']\n    except:\n        # find all images from app post\n        images_html = ''.join([str(h) for h in parsed_html.body.find_all('div', attrs={'class': 'img-box'})])\n        x = re.finditer(regex, images_html)\n\n        # create urls for full-size images\n        files = []\n        for match in x:\n            url = match.group(0)\n            if match.groupdict()[\"cache\"] is not None:\n                url = url.replace(match.groupdict()[\"cache\"], '')\n                url = url.replace('thumb-', '')\n            if match.groupdict()[\"imgdim\"] is not None:\n                url = url.replace(match.groupdict()[\"imgdim\"], '')\n            files.append(url)\n\n        # remove duplicates\n        app_images = list(OrderedDict.fromkeys(files))\n\n    # find post username and text\n    app_poster = parsed_html.body.find('div', attrs={'class': 'card-name'}).text.strip()\n    app_text = parsed_html.body.find('div', attrs={'class': 'card-text'}).text.strip()\n\n    # find profile picture\n    profile_pic = parsed_html.body.find('div', attrs={'class': 'profile-img'}).find('img').attrs['src']\n    try:\n        match = re.match(regex, profile_pic)\n        url = match.group(0)\n        if match.groupdict()[\"cache\"] is not None:\n            url = url.replace(match.groupdict()[\"cache\"], '')\n            url = url.replace('thumb-', '')\n        if match.groupdict()[\"imgdim\"] is not None:\n            url = url.replace(match.groupdict()[\"imgdim\"], '')\n        profile_pic = url\n    except Exception as e:\n        print(f\"Error getting full size profile picture {e}\")\n\n    kwargs = {}\n    kwargs['dcapp_id'] = dcapp_id\n    kwargs['app_video'] = app_video\n    kwargs['app_video_poster'] = app_video_poster\n    kwargs['app_images'] = app_images\n    kwargs['app_poster'] = app_poster\n    kwargs['app_text'] = app_text\n    kwargs['profile_pic'] = profile_pic\n    kwargs['url'] = path\n    kwargs['page_title'] = f'DC App #{dcapp_id}'\n\n    return render_page('dc_app.html', **kwargs)\n\ndef dc_app_image(path):\n    \"\"\"Get HQ version of DC app picture\"\"\"\n    # verify link\n    x = re.match(r\"((http://|https://)?file\\.candlemystar\\.com/cache/.*(_\\d+x\\d+)\\.\\w+$)\", path)\n    if x is None:\n        raise FullSizeDCAppImage\n    else:\n        # get full size image\n        image_link = path.replace('cache/', '')\n        image_link = image_link.replace('thumb-', '')\n        image_link = image_link.replace(x.groups()[2], '')\n\n        # request image link\n        if False:\n            try:\n                response = cached_req_session.get(image_link, timeout=30)\n            except requests.exceptions.MissingSchema:\n                image_link = 'https://' + image_link\n                response = cached_req_session.get(image_link, timeout=30)\n\n            if response.status_code == 200:\n                app_direct_image = True\n            else:\n                error_msg = 'Error: Image could not be found'\n                raise InvalidDCAppLink\n\n\n    app_images = f'<img src=\"{image_link}\">\\n'\n\n    kwargs = {}\n    kwargs['image_link'] = image_link\n    kwargs['url'] = path\n\n    return render_page('dc_app_image.html', **kwargs)\n\n\ndef get_video_link(url):\n    # extract id\n    dcapp_id = extract_id(url)\n    if dcapp_id is None:\n        raise VideoDownloadError\n\n    # find m3u8 url\n    try:\n        video_url = find_video(get_parsed_html(url))\n    except Exception as e:\n        print(e)\n        raise VideoDownloadError\n\n    filename = f\"{dcapp_id}.mp4\"\n    path = f'dcapp_videos/{filename}'\n    temppath = f'{path}.temp'\n\n    with cv:\n        while r.sismember(WORKING, dcapp_id):\n            if not cv.wait(timeout=30):\n                raise VideoDownloadError\n        r.sadd(WORKING, dcapp_id)\n\n    if not os.path.exists(path):\n        opts = {\n            'outtmpl': temppath,\n            'noplaylist' : True,\n        }\n        try:\n            with youtube_dl.YoutubeDL(opts) as ydl:\n                result = ydl.download([video_url])\n        except youtube_dl.utils.DownloadError as e:\n            raise VideoDownloadError \n\n        os.rename(temppath, path)\n\n    with cv:\n        r.srem(WORKING, dcapp_id)\n        cv.notify_all()\n\n    return 
filename\n","repo_name":"evanc577/sourcecatcher","sub_path":"dcapp.py","file_name":"dcapp.py","file_ext":"py","file_size_in_byte":6159,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"81"} +{"seq_id":"73557224266","text":"'''\nScript referente a PARTE II do enunciado do Projecto 1 - UFCD10793\nFaça um programa para traduzir as coordenadas \"simbólicas\" do Excel para coordenadas lineares.\nPor exemplo, em Excel, internamente, a célula A1 corresponde à célula na linha 0 e coluna 0.\n\nProjecto realizado por:\nAdriana de Souza Gama\nCarlo Braga\n'''\nwhile True:\n\t#Primeiro pedimos ao utilizador para nos indicar as coordenadas pretendidas\n\traw = input(\"Indique as coordenadas: \")\n\t\n\tif raw.lower() == \"sair\": #Verifica se o utilizador quer continuar a usar o programa\n\t\tbreak\n\t\n\tdados = raw.split()\n\tcolunaTemp = dados[0] #Primeiro a coluna é carregada (como letras)\n\tcoluna = 0\n\tlinha = int(dados[1]) - 1 #Os números das linhas começam no 0\n\t\n\tdef converter(char): #Para facilitar o código, podemos definir uma função para converter letras em números\n\t\tletras = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\"]\n\t\tfor i in range(len(letras)):\n\t\t\tif char.lower() == letras[i]:\n\t\t\t\treturn i + 1 #O indice da letra vai ser igual à sua posição no alfabeto (a = 1, b = 2, c = 3, ...)\n\t\n\tfor i in range(len(colunaTemp)):\n\t\tcoluna += converter(colunaTemp[-i - 1]) * 26 ** i #O número é convertido da base 26 para a base 10 segundo esta fórmula\n \n\tcoluna -= 1 #Os números das colunas começam no 0\n\tprint(\"Linha:\", linha, \"Coluna:\", coluna,\"\\n-------\") #O resultado é apresentado\nprint(\"fim do programa\")","repo_name":"Adriana/Projecto1-Python","sub_path":"excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"22850640183","text":"import requests\nimport pprint\nfrom bs4 import BeautifulSoup\n\npp = pprint.PrettyPrinter(indent=4)\n\nclass Article:\n\n\tdef __init__(self, url=None) -> None:\n\t\tself.url = url\n\t\tself.raw = None\n\t\tself.data = {}\n\t\n\tdef getInfo(self):\n\t\treturn self.data\n\n\tdef scrape(self):\n\t\tif not self.url:\n\t\t\traise Exception(\"Article url seems to be empty. Try passing it via the constructor!\")\n\t\tresponse = requests.get(self.url)\n\t\tif response.status_code != 200:\n\t\t\traise Exception(\"Something wen't wrong! 
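get_video_link in the dcapp.py record keeps concurrent requests for the same video from downloading it twice: a Redis set records in-flight ids and a condition variable parks duplicate callers. The coordination skeleton, separated from the youtube-dl work (requires a local Redis; the work callable is a stand-in):

```python
import multiprocessing
import redis

WORKING = "sc_working_videos"
cv = multiprocessing.Condition(multiprocessing.Lock())
r = redis.Redis(host="localhost", port=6379, db=0)

def run_exclusively(job_id, work):
    with cv:
        while r.sismember(WORKING, job_id):   # someone else is already on it
            if not cv.wait(timeout=30):
                raise TimeoutError(job_id)
        r.sadd(WORKING, job_id)
    try:
        work()                                # the slow part runs outside the lock
    finally:
        with cv:
            r.srem(WORKING, job_id)
            cv.notify_all()
```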
Couldn't download article\")\n\t\tself.raw = response.content\n\n\tdef parse(self):\n\t\tsoup = BeautifulSoup(self.raw, 'html.parser')\n\t\tself.data[\"Heading\"] = soup.h1.string\n\t\tself.data[\"Author\"] = soup.find(itemprop=\"name\").get_text()\n\t\tself.data[\"PublishedAt\"] = soup.time.get_text()\n\t\tparagraphs = []\n\t\tfor paragraph in soup.find_all(attrs={'class':\"css-axufdj evys1bk0\"}):\n\t\t\tparagraphs.append(paragraph.get_text())\n\t\tself.data[\"Content\"] = paragraphs\n\nif __name__ == \"__main__\":\n\tnyTimes = Article(url='https://www.nytimes.com/2021/08/07/sports/olympics/covid-closing-ceremony-athletes.html')\n\t# Retreive the Article\n\tnyTimes.scrape()\n\t# Extract Required Information\n\tnyTimes.parse()\n\t# Pretty Print\n\tpp.pprint(nyTimes.getInfo())\n","repo_name":"tnvmadhav/NYtimesScraper","sub_path":"nytimesScraper.py","file_name":"nytimesScraper.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30150060095","text":"import collections\nRectangle = collections.namedtuple(\"Rectangle\", [\"x1\", \"x2\", \"y1\",\"y2\"])\nPoint = collections.namedtuple(\"Point\", (\"x\", \"y\"))\n\ndef find_intersection(R, S):\n x1, x2 = max(R.x1, S.x1), min(R.x2, S.x2)\n y1, y2 = max(R.y1, S.y1), min(R.y2, S.y2)\n return Rectangle(x1, x2, y1, y2) if (x1<=x2 and y1<=y2) else None\n\ndef is_rectangle(A,B,C,D):\n def gradient(P,Q):\n return float('inf') if P.x == Q.x else (P.y - Q.y)/(P.x - Q.x)\n return (gradient(A,B) == gradient(C,D) and gradient(A,D) == gradient(B,C) and\n (gradient(A,B) * gradient(A,D) == -1 ) or (gradient(A,B) == 0 and\n gradient(A,D) == float('inf')) or (gradient(A,D) == 0 and\n gradient(A,B) == float('inf')))\n\n\nprint(find_intersection(Rectangle(1,7,2,5), Rectangle(5,6,0,8)))\nprint(find_intersection(Rectangle(0,2,0,2), Rectangle(2,4,2,4)))\nprint(is_rectangle(Point(0,0), Point(1,1), Point(0,2), Point(-1,2)))\n","repo_name":"finnegan2000/epp","sub_path":"4_Primitives/11_rectangles.py","file_name":"11_rectangles.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"40273431413","text":"import mediapipe as mp # Import mediapipe\nimport cv2 # Import opencv\nimport csv\nimport os\nimport numpy as np\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn.linear_model import LogisticRegression, RidgeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\n\nfrom sklearn.metrics import accuracy_score # Accuracy metrics\nimport pickle\n\nmp_drawing = mp.solutions.drawing_utils # Drawing helpers\nmp_holistic = mp.solutions.holistic # Mediapipe Solutions\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfls = input('enter the video file name :')\n\ndf = pd.read_csv('coords.csv')\n\nX = df.drop('class', axis=1) # features\ny = df['class'] # target value\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1234)\n\n\npipelines = {\n\n 'rc':make_pipeline(StandardScaler(), RidgeClassifier()),\n 'rf':make_pipeline(StandardScaler(), RandomForestClassifier()),\n 'gb':make_pipeline(StandardScaler(), GradientBoostingClassifier()),\n}\n\nfit_models = {}\nfor algo, pipeline in pipelines.items():\n model = pipeline.fit(X_train, y_train)\n fit_models[algo] = 
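The rectangle record reduces intersection to clamping the two coordinate ranges; note that rectangles sharing only an edge or a corner still intersect in a degenerate rectangle rather than returning None. Worked cases for the record's two example calls plus a disjoint pair:

```python
import collections
Rectangle = collections.namedtuple("Rectangle", ["x1", "x2", "y1", "y2"])

def find_intersection(R, S):
    x1, x2 = max(R.x1, S.x1), min(R.x2, S.x2)
    y1, y2 = max(R.y1, S.y1), min(R.y2, S.y2)
    return Rectangle(x1, x2, y1, y2) if (x1 <= x2 and y1 <= y2) else None

assert find_intersection(Rectangle(1, 7, 2, 5), Rectangle(5, 6, 0, 8)) == Rectangle(5, 6, 2, 5)
assert find_intersection(Rectangle(0, 2, 0, 2), Rectangle(2, 4, 2, 4)) == Rectangle(2, 2, 2, 2)
assert find_intersection(Rectangle(0, 1, 0, 1), Rectangle(2, 3, 2, 3)) is None
```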
model\n\n\nprint(fit_models)\n\nfit_models['rc'].predict(X_test)\n\n\nfor algo, model in fit_models.items():\n yhat = model.predict(X_test)\n print(algo, accuracy_score(y_test, yhat))\n\n\nfit_models['rf'].predict(X_test)\n\n\nwith open('body_language.pkl', 'wb') as f:\n pickle.dump(fit_models['rf'], f)\n\n\nimport pickle\nwith open('body_language.pkl', 'rb') as f:\n model = pickle.load(f)\n\n\nprint(model)\n\ncap = cv2.VideoCapture(fls)\n# Initiate holistic model\nwith mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:\n\n while cap.isOpened():\n ret, frame = cap.read()\n \n if ret: \n # Recolor Feed\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image.flags.writeable = False\n else:\n break\n\n # Make Detections\n results = holistic.process(image)\n # print(results.face_landmarks)\n\n # face_landmarks, pose_landmarks, left_hand_landmarks, right_hand_landmarks\n\n # Recolor image back to BGR for rendering\n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n\n mp_drawing.draw_landmarks(image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS,\n mp_drawing.DrawingSpec(color=(80,22,10), thickness=2, circle_radius=4),\n mp_drawing.DrawingSpec(color=(80,44,121), thickness=2, circle_radius=2)\n )\n\n\n try:\n # Extract Pose landmarks\n pose = results.right_hand_landmarks.landmark\n pose_row = list(np.array([[landmark.x, landmark.y, landmark.z] for landmark in pose]).flatten())\n\n\n row = pose_row\n\n# # Append class name\n# row.insert(0, class_name)\n\n# # Export to CSV\n# with open('coords.csv', mode='a', newline='') as f:\n# csv_writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n# csv_writer.writerow(row)\n\n # Make Detections\n X = pd.DataFrame([row])\n body_language_class = model.predict(X)[0]\n body_language_prob = model.predict_proba(X)[0]\n print(body_language_class, body_language_prob)\n\n # Grab ear coords\n coords = tuple(np.multiply(\n np.array(\n (results.right_hand_landmarks.landmark[0].x,\n results.right_hand_landmarks.landmark[0].y))\n , [640,480]).astype(int))\n\n cv2.rectangle(image,\n (coords[0], coords[1]+5),\n (coords[0]+len(body_language_class)*20, coords[1]-30),\n (245, 117, 16), -1)\n cv2.putText(image, body_language_class, coords,\n cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n\n # Get status box\n cv2.rectangle(image, (0,0), (250, 60), (245, 117, 16), -1)\n\n # Display Class\n cv2.putText(image, 'CLASS'\n , (95,12), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)\n cv2.putText(image, body_language_class.split(' ')[0]\n , (90,40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n\n # Display Probability\n cv2.putText(image, 'PROB'\n , (15,12), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)\n cv2.putText(image, str(round(body_language_prob[np.argmax(body_language_prob)],2))\n , (10,40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n\n except:\n pass\n\n cv2.imshow('Raw Webcam Feed', image)\n\n if cv2.waitKey(10) & 0xFF == ord('q'):\n os.remove('body_language.pkl')\n break\n\ncap.release()\n\n\n","repo_name":"MMj4beer/ASL-alphabet-detection-and-recognition","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"74812106506","text":"from __future__ import print_function\nfrom __future__ import unicode_literals\nfrom localization import N_\nfrom 
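detect.py above trains several scikit-learn pipelines, keeps the random forest, and pickles it for the video loop. The save/load round trip in isolation, on synthetic data shaped like flattened hand landmarks (shapes are assumptions):

```python
import pickle
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X = np.random.rand(100, 63)               # e.g. 21 landmarks x (x, y, z)
y = np.random.choice(["A", "B"], size=100)

model = make_pipeline(StandardScaler(), RandomForestClassifier()).fit(X, y)
with open("body_language.pkl", "wb") as f:
    pickle.dump(model, f)

with open("body_language.pkl", "rb") as f:
    reloaded = pickle.load(f)
assert (reloaded.predict(X) == model.predict(X)).all()
```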
outputable import Outputable\nfrom changes import FileDiff\nimport comment\nimport changes\nimport filtering\nimport format\nimport gravatar\nimport interval\nimport missing\nimport multiprocessing\nimport re\nimport subprocess\nimport sys\nimport terminal\nimport textwrap\nimport threading\n\nNUM_THREADS = multiprocessing.cpu_count()\n\nclass BlameEntry:\n\trows = 0\n\tcomments = 0\n\n__thread_lock__ = threading.BoundedSemaphore(NUM_THREADS)\n__blame_lock__ = threading.Lock()\n\nclass BlameThread(threading.Thread):\n\tdef __init__(self, blame_string, extension, blames, filename):\n\t\t__thread_lock__.acquire() # Lock controlling the number of threads running\n\t\tthreading.Thread.__init__(self)\n\n\t\tself.blame_string = blame_string\n\t\tself.extension = extension\n\t\tself.blames = blames\n\t\tself.filename = filename\n\n\tdef run(self):\n\t\tgit_blame_r = subprocess.Popen(self.blame_string, shell=True, bufsize=1, stdout=subprocess.PIPE).stdout\n\t\tis_inside_comment = False\n\n\t\tfor j in git_blame_r.readlines():\n\t\t\tj = j.decode(\"utf-8\", \"replace\")\n\t\t\tif Blame.is_blame_line(j):\n\t\t\t\tauthor_mail = Blame.get_author_mail(j)\n\t\t\t\tcontent = Blame.get_content(j)\n\t\t\t\t__blame_lock__.acquire() # Global lock used to protect calls from here...\n\n\t\t\t\tif self.blames.get((author_mail, self.filename), None) == None:\n\t\t\t\t\tself.blames[(author_mail, self.filename)] = BlameEntry()\n\n\t\t\t\t(comments, is_inside_comment) = comment.handle_comment_block(is_inside_comment, self.extension, content)\n\t\t\t\tself.blames[(author_mail, self.filename)].comments += comments\n\t\t\t\tself.blames[(author_mail, self.filename)].rows += 1\n\t\t\t\t__blame_lock__.release() # ...to here.\n\n\t\tgit_blame_r.close()\n\t\t__thread_lock__.release() # Lock controlling the number of threads running\n\nPROGRESS_TEXT = N_(\"Checking how many rows belong to each author (Progress): {0:.0f}%\")\n\nclass Blame:\n\tdef __init__(self, hard):\n\t\tself.blames = {}\n\t\tls_tree_r = subprocess.Popen(\"git ls-tree --name-only -r \" + interval.get_ref(), shell=True, bufsize=1,\n\t\t stdout=subprocess.PIPE).stdout\n\t\tlines = ls_tree_r.readlines()\n\n\t\tfor i, row in enumerate(lines):\n\t\t\trow = row.strip().decode(\"unicode_escape\", \"ignore\")\n\t\t\trow = row.encode(\"latin-1\", \"replace\")\n\t\t\trow = row.decode(\"utf-8\", \"replace\").strip(\"\\\"\").strip(\"'\").strip()\n\n\t\t\tif FileDiff.is_valid_extension(row) and not filtering.set_filtered(FileDiff.get_filename(row)):\n\t\t\t\tif not missing.add(row):\n\t\t\t\t\tblame_string = \"git blame -w {0} \".format(\"-C -C -M\" if hard else \"\") + \\\n\t\t\t\t\t interval.get_since() + interval.get_ref() + \" -- \\\"\" + row + \"\\\"\"\n\t\t\t\t\tthread = BlameThread(blame_string, FileDiff.get_extension(row), self.blames, row.strip())\n\t\t\t\t\tthread.daemon = True\n\t\t\t\t\tthread.start()\n\n\t\t\t\t\tif hard:\n\t\t\t\t\t\tBlame.output_progress(i, len(lines))\n\n\t\t# Make sure all threads have completed.\n\t\tfor i in range(0, NUM_THREADS):\n\t\t\t__thread_lock__.acquire()\n\n\t@staticmethod\n\tdef output_progress(pos, length):\n\t\tif sys.stdout.isatty() and format.is_interactive_format():\n\t\t\tterminal.clear_row()\n\t\t\tprint(\"\\b\" + _(PROGRESS_TEXT).format(100 * pos / length), end=\"\")\n\t\t\tsys.stdout.flush()\n\n\t@staticmethod\n\tdef is_blame_line(string):\n\t\treturn string.find(\" (\") != -1\n\n\t@staticmethod\n\tdef get_author_mail(string):\n\t\tauthor_mail = re.search(\" \\((.*?)\\d\\d\\d\\d-\\d\\d-\\d\\d\", 
string)\n\t\treturn author_mail.group(1).strip().lstrip(\"<\").rstrip(\">\")\n\n\t@staticmethod\n\tdef get_content(string):\n\t\tcontent = re.search(\" \\d+\\)(.*)\", string)\n\t\treturn content.group(1).lstrip()\n\n\tdef get_summed_blames(self):\n\t\tsummed_blames = {}\n\t\tfor i in self.blames.items():\n\t\t\tif summed_blames.get(i[0][0], None) == None:\n\t\t\t\tsummed_blames[i[0][0]] = BlameEntry()\n\n\t\t\tsummed_blames[i[0][0]].rows += i[1].rows\n\t\t\tsummed_blames[i[0][0]].comments += i[1].comments\n\n\t\treturn summed_blames\n\n__blame__ = None\n\ndef get(hard):\n\tglobal __blame__\n\tif __blame__ == None:\n\t\t__blame__ = Blame(hard)\n\n\treturn __blame__\n\nBLAME_INFO_TEXT = N_(\"Below are the number of rows from each author that have survived and are still \"\n                     \"intact in the current revision\")\n\nclass BlameOutput(Outputable):\n\tdef __init__(self, hard):\n\t\tself.hard = hard\n\t\tself.changes = changes.get(hard)\n\t\tOutputable.__init__(self)\n\n\tdef output_html(self):\n\t\tget(self.hard)\n\n\t\tblame_xml = \"<div><div class=\\\"box\\\">\"\n\t\tblame_xml += \"<p>\" + _(BLAME_INFO_TEXT) + \".</p><div><table id=\\\"blame\\\" class=\\\"git\\\">\"\n\t\tblame_xml += \"<thead><tr> <th>{0}</th> <th>{1}</th> <th>{2}</th> </tr></thead>\".format(_(\"Author\"),\n\t\t              _(\"Rows\"), _(\"% in comments\"))\n\t\tblame_xml += \"<tbody>\"\n\t\tchart_data = \"\"\n\t\tblames = sorted(__blame__.get_summed_blames().items())\n\t\ttotal_blames = 0\n\n\t\tfor i in blames:\n\t\t\ttotal_blames += i[1].rows\n\n\t\tfor i, entry in enumerate(blames):\n\t\t\twork_percentage = str(\"{0:.2f}\".format(100.0 * entry[1].rows / total_blames))\n\t\t\tblame_xml += \"<tr \" + (\"class=\\\"odd\\\">\" if i % 2 == 1 else \">\")\n\n\t\t\tif format.get_selected() == \"html\":\n\t\t\t\tauthor_email = self.changes.get_author_email(entry[0])\n\t\t\t\tblame_xml += \"<td><img src=\\\"{0}\\\"/>{1}</td>\".format(gravatar.get_url(author_email), entry[0])\n\t\t\telse:\n\t\t\t\tblame_xml += \"<td>\" + entry[0] + \"</td>\"\n\n\t\t\tblame_xml += \"<td>\" + str(entry[1].rows) + \"</td>\"\n\t\t\tblame_xml += \"<td>\" + \"{0:.2f}\".format(100.0 * entry[1].comments / entry[1].rows) + \"</td>\"\n\t\t\tblame_xml += \"<td style=\\\"display: none\\\">\" + work_percentage + \"</td>\"\n\t\t\tblame_xml += \"</tr>\"\n\t\t\tchart_data += \"{{label: \\\"{0}\\\", data: {1}}}\".format(entry[0], work_percentage)\n\n\t\t\tif blames[-1] != entry:\n\t\t\t\tchart_data += \", \"\n\n\t\tblame_xml += \"</tbody></table>\"\n\t\tblame_xml += \"<div class=\\\"chart\\\" id=\\\"blame_chart\\\"></div></div>\"\n\t\tblame_xml += \"<script type=\\\"text/javascript\\\">\"\n\t\tblame_xml += \"    blame_plot = $.plot($(\\\"#blame_chart\\\"), [{0}], {{\".format(chart_data)\n\t\tblame_xml += \"        series: {pie: {show: true}}\"\n\t\tblame_xml += \"    });\"\n\t\tblame_xml += \"</script></div></div>\"\n\n\t\tprint(blame_xml)\n\n\tdef output_text(self):\n\t\tprint(\"\")\n\t\tget(self.hard)\n\n\t\tif self.hard and sys.stdout.isatty():\n\t\t\tterminal.clear_row()\n\n\t\tprint(textwrap.fill(_(BLAME_INFO_TEXT) + \":\", width=terminal.get_size()[0]) + \"\\n\")\n\t\tterminal.printb(_(\"Author\").ljust(21) + _(\"Rows\").rjust(10) + _(\"% in comments\").rjust(20))\n\n\t\tfor i in sorted(__blame__.get_summed_blames().items()):\n\t\t\tprint(i[0].ljust(20)[0:20], end=\" \")\n\t\t\tprint(str(i[1].rows).rjust(10), end=\" \")\n\t\t\tprint(\"{0:.2f}\".format(100.0 * i[1].comments / i[1].rows).rjust(19))\n\n\tdef output_xml(self):\n\t\tget(self.hard)\n\n\t\tmessage_xml = \"\\t\\t<message>\" + _(BLAME_INFO_TEXT) + \"</message>\\n\"\n\t\tblame_xml = \"\"\n\n\t\tfor i in sorted(__blame__.get_summed_blames().items()):\n\t\t\tauthor_email = self.changes.get_author_email(i[0])\n\n\t\t\tname_xml = \"\\t\\t\\t\\t<name>\" + i[0] + \"</name>\\n\"\n\t\t\tgravatar_xml = \"\\t\\t\\t\\t<gravatar>\" + gravatar.get_url(author_email) + \"</gravatar>\\n\"\n\t\t\trows_xml = \"\\t\\t\\t\\t<rows>\" + str(i[1].rows) + \"</rows>\\n\"\n\t\t\tpercentage_in_comments_xml = (\"\\t\\t\\t\\t<percentage-in-comments>\" + \"{0:.2f}\".format(100.0 * i[1].comments / i[1].rows) +\n\t\t\t                              \"</percentage-in-comments>\\n\")\n\t\t\tblame_xml += \"\\t\\t\\t<author>\\n\" + name_xml + gravatar_xml + rows_xml + percentage_in_comments_xml + \"\\t\\t\\t</author>\\n\"\n\n\t\tprint(\"\\t<blame>\\n\" + message_xml + \"\\t\\t<authors>\\n\" + blame_xml + \"\\t\\t</authors>\\n\\t</blame>\")\n","repo_name":"geekan/cowry","sub_path":"code/python/gitstat/gitinspector-0.3.1/gitinspector/blame.py","file_name":"blame.py","file_ext":"py","file_size_in_byte":7849,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"81"} +{"seq_id":"10371480014","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n# @Time : 2022/2/7 14:46\n# @Author : strawsyz\n# @File : args_utils.py\n# @desc:\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n\nparser = ArgumentParser(description='transformer for soccernet', formatter_class=ArgumentDefaultsHelpFormatter)\n\nparser.add_argument('--GPU', required=False, type=str, default=GPU, help='ID of the GPU to use')\nparser.add_argument(\"--split_data\", required=False, type=int, default=1,\n                    help='split_data')\nparser.add_argument(\"--model_names_in_type1\", nargs='+', required=False, type=str,\n                    default=[\"MyOptimizeTransformer6\", \"MyOptimizeTransformer7\", \"MyOptimizeTransformer8\",\n                             \"MyOptimizeTransformer61\"],\n                    help='spot_model_path')\nparser.add_argument(\"--test_4_highlights\", required=False, type=bool, default=False,\n                    help='test_4_highlights')\nparser.add_argument(\"--loss_weight\", required=False, type=float, default=False,\n                    help='loss_weight')\n\nargs = parser.parse_args()\n\n\n","repo_name":"strawsyz/straw","sub_path":"EasyDeep/utils/args_utils.py","file_name":"args_utils.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"17891498837","text":"\nfrom operators.ms_teams_webhook import MSTeamsWebhookOperator\nfrom datetime import datetime, timedelta\n\n\ndef on_failure(context, **kwargs):\n    owner = context['dag'].default_args['owner']\n    message = f\"\"\"💩 💩 💩 💩 {owner}\"\"\"\n    teams_notification = MSTeamsWebhookOperator(\n        status=\"FAILED\",\n        task_id=\"msteams_notify_failure\",\n        owner=f'{owner}',\n        trigger_rule=\"all_done\",\n        message=message,\n        button_text=\"View log\",\n        theme_color=\"FF0000\",\n        http_conn_id='ms_team_conn_failure')\n    teams_notification.execute(context)\n\n\ndef on_success(context, **kwargs):\n    
owner = context['dag'].default_args['owner']\n message = f\"\"\"A may ding, gut chop 💞 💞 {owner}\"\"\"\n teams_notification = MSTeamsWebhookOperator(\n status=\"SUCCESS\",\n task_id=\"msteams_notify_success\",\n owner=f'{owner}',\n trigger_rule=\"all_done\",\n message=message,\n button_text=\"View log\",\n theme_color=\"0072C6\",\n http_conn_id='ms_team_conn_success')\n teams_notification.execute(context)\n\n\ndef conditionally_trigger(context, dag_run_obj):\n \"\"\"This function decides whether or not to Trigger the remote DAG\"\"\"\n c_p = context['params']['condition_param']\n print(\"Controller DAG : conditionally_trigger = {}\".format(c_p))\n if c_p:\n consistent = context['params'].get('consistent_run_date', True) # set default as True\n if consistent:\n run_date = context['dag_run'].conf.get('run_date')\n else:\n run_date = (context['dag_run'].execution_date + timedelta(hours=7)).strftime(\"%Y-%m-%d\")\n dag_run_obj.payload = {'message': context['params']['message'],\n 'run_date': run_date,\n \"dag_controller_id\": context['dag_run'].dag_id,\n \"task_controller_id\": context['ti'].task_id}\n print(dag_run_obj.payload)\n return dag_run_obj\n\n\ndef receive_trigger_payload(ds, **kwargs):\n \"\"\"\n Print the payload \"message\" passed to the DagRun conf attribute.\n :param context: The execution context\n :type context: dict\n \"\"\"\n print(\"Received trigger from task {task} in dag {dag} on {run_date}\".format(\n dag=kwargs[\"dag_run\"].conf.get('dag_controller_id', ''),\n task=kwargs[\"dag_run\"].conf.get('task_controller_id', ''),\n run_date=kwargs[\"dag_run\"].conf.get(\"run_date\", None)\n ))\n","repo_name":"tungduongbk/airflow-custom-plugins","sub_path":"utils/python_callable.py","file_name":"python_callable.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"5357226034","text":"\"\"\"Library rules\"\"\"\n\nload(\"@rules_cc//cc:defs.bzl\", \"cc_library\", \"objc_import\", \"objc_library\")\nload(\"@bazel_skylib//lib:paths.bzl\", \"paths\")\nload(\"@bazel_skylib//lib:sets.bzl\", \"sets\")\nload(\"@build_bazel_rules_apple//apple:apple.bzl\", \"apple_dynamic_framework_import\", \"apple_static_framework_import\")\nload(\"@build_bazel_rules_apple//apple:resources.bzl\", \"apple_resource_bundle\")\nload(\"@build_bazel_rules_swift//swift:swift.bzl\", \"swift_library\")\nload(\"//rules:hmap.bzl\", \"headermap\")\nload(\"//rules:substitute_build_settings.bzl\", \"substitute_build_settings\")\nload(\"//rules/library:resources.bzl\", \"wrap_resources_in_filegroup\")\nload(\"//rules/library:xcconfig.bzl\", \"settings_from_xcconfig\")\n\nPrivateHeadersInfo = provider(\n doc = \"Propagates private headers, so they can be accessed if necessary\",\n fields = {\n \"headers\": \"Private headers\",\n },\n)\n\n_MANUAL = [\"manual\"]\n\ndef _private_headers_impl(ctx):\n return [\n PrivateHeadersInfo(\n headers = depset(direct = ctx.files.headers),\n ),\n apple_common.new_objc_provider(),\n ]\n\n_private_headers = rule(\n implementation = _private_headers_impl,\n attrs = {\n \"headers\": attr.label_list(mandatory = True, allow_files = [\".h\", \".hh\", \".hpp\"]),\n },\n)\n\ndef _write_file_impl(ctx):\n ctx.actions.write(\n output = ctx.outputs.destination,\n content = ctx.attr.content,\n )\n\nwrite_file = rule(\n implementation = _write_file_impl,\n attrs = {\n \"content\": attr.string(mandatory = True),\n \"destination\": attr.output(mandatory = True),\n },\n doc = \"Writes out a file 
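on_failure and on_success in the Airflow record are meant to be attached as DAG-level callbacks. A hedged wiring sketch for Airflow 2.x (the callbacks' import path and the DAG id are placeholders):

```python
from datetime import datetime
from airflow import DAG
from airflow.operators.dummy import DummyOperator
from utils.python_callable import on_failure, on_success  # assumed module path

with DAG(
    dag_id="example_with_teams_alerts",
    start_date=datetime(2023, 1, 1),
    schedule_interval="@daily",
    default_args={"owner": "data-team"},
    on_failure_callback=on_failure,
    on_success_callback=on_success,
    catchup=False,
) as dag:
    DummyOperator(task_id="noop")
```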
verbatim\",\n)\n\ndef _extend_modulemap_impl(ctx):\n args = ctx.actions.args()\n args.add(\"\"\"\nmodule {module_name}.Swift {{\n header \"{swift_umbrella_header}\"\n requires objc\n}}\"\"\".format(\n module_name = ctx.attr.module_name,\n swift_umbrella_header = ctx.attr.swift_header,\n ))\n args.add(ctx.file.source)\n args.add(ctx.outputs.destination)\n ctx.actions.run_shell(\n inputs = [ctx.file.source],\n outputs = [ctx.outputs.destination],\n mnemonic = \"ExtendModulemap\",\n progress_message = \"Extending %s\" % ctx.file.source.basename,\n command = \"echo \\\"$1\\\" | cat <(echo -n 'framework ') $2 - > $3\",\n arguments = [args],\n )\n\nextend_modulemap = rule(\n implementation = _extend_modulemap_impl,\n attrs = {\n \"source\": attr.label(\n doc = \"\",\n allow_single_file = True,\n ),\n \"destination\": attr.output(),\n \"module_name\": attr.string(\n mandatory = True,\n ),\n \"swift_header\": attr.string(\n doc = \"\",\n ),\n },\n doc = \"Extends a modulemap with a Swift submodule\",\n)\n\ndef _write_modulemap(name, library_tools, umbrella_header = None, public_headers = [], private_headers = [], module_name = None, framework = False, **kwargs):\n basename = \"{}.modulemap\".format(name)\n destination = paths.join(name + \"-modulemap\", basename)\n if not module_name:\n module_name = name\n content = \"\"\"\\\nmodule {module_name} {{\n umbrella header \"{umbrella_header}\"\n\n export *\n module * {{ export * }}\n}}\n\"\"\".format(\n module_name = module_name,\n umbrella_header = umbrella_header,\n )\n if framework:\n content = \"framework \" + content\n\n write_file(\n name = basename + \"~\",\n destination = destination,\n content = content,\n tags = _MANUAL,\n )\n return destination\n\ndef _write_umbrella_header(name, library_tools, public_headers = [], private_headers = [], module_name = None, **kwargs):\n basename = \"{name}-umbrella.h\".format(name = name)\n destination = paths.join(name + \"-modulemap\", basename)\n if not module_name:\n module_name = name\n content = \"\"\"\\\n#ifdef __OBJC__\n# import \n# if __has_include()\n# import \n# endif\n#else\n# ifndef FOUNDATION_EXPORT\n# if defined(__cplusplus)\n# define FOUNDATION_EXPORT extern \"C\"\n# else\n# define FOUNDATION_EXPORT extern\n# endif\n# endif\n#endif\n\n\"\"\"\n\n for header in public_headers:\n content += \"#import \\\"{header}\\\"\\n\".format(header = paths.basename(header))\n\n content += \"\"\"\nFOUNDATION_EXPORT double {module_name}VersionNumber;\nFOUNDATION_EXPORT const unsigned char {module_name}VersionString[];\n\"\"\".format(\n module_name = module_name,\n )\n\n write_file(\n name = basename + \"~\",\n destination = destination,\n content = content,\n tags = _MANUAL,\n )\n return destination\n\ndef _generate_resource_bundles(name, library_tools, module_name, resource_bundles, **kwargs):\n bundle_target_names = []\n for bundle_name in resource_bundles:\n target_name = \"%s-%s\" % (name, bundle_name)\n substitute_build_settings(\n name = name + \".info.plist\",\n source = \"@build_bazel_rules_ios//rules/library:resource_bundle.plist\",\n variables = {\n \"PRODUCT_BUNDLE_IDENTIFIER\": \"com.cocoapods.%s\" % bundle_name,\n \"PRODUCT_NAME\": bundle_name,\n },\n tags = _MANUAL,\n )\n apple_resource_bundle(\n name = target_name,\n bundle_name = bundle_name,\n resources = [\n library_tools[\"wrap_resources_in_filegroup\"](name = target_name + \"_resources\", srcs = resource_bundles[bundle_name]),\n ],\n infoplists = [name + \".info.plist\"],\n tags = _MANUAL,\n )\n bundle_target_names.append(target_name)\n 
return bundle_target_names\n\ndef _error_on_default_xcconfig(name, library_tools, default_xcconfig_name, **kwargs):\n fail(\"{name} specifies a default xcconfig ({default_xcconfig_name}). You must override fetch_default_xcconfig to use this feature.\".format(\n name = name,\n default_xcconfig_name = default_xcconfig_name,\n ))\n\n_DEFAULT_LIBRARY_TOOLS = {\n \"modulemap_generator\": _write_modulemap,\n \"umbrella_header_generator\": _write_umbrella_header,\n \"resource_bundle_generator\": _generate_resource_bundles,\n \"wrap_resources_in_filegroup\": wrap_resources_in_filegroup,\n \"fetch_default_xcconfig\": _error_on_default_xcconfig,\n}\n\ndef _prepend(list, other):\n for item in reversed(other):\n list.insert(0, item)\n\ndef _prepend_copts(copts_struct, objc_copts, cc_copts, swift_copts, linkopts, ibtool_copts, momc_copts, mapc_copts):\n _prepend(objc_copts, copts_struct.objc_copts)\n _prepend(copts_struct.cc_copts, cc_copts)\n _prepend(copts_struct.swift_copts, swift_copts)\n _prepend(copts_struct.linkopts, linkopts)\n _prepend(copts_struct.ibtool_copts, ibtool_copts)\n _prepend(copts_struct.momc_copts, momc_copts)\n _prepend(copts_struct.mapc_copts, mapc_copts)\n\ndef _append_headermap_copts(hmap, flag, objc_copts, swift_copts, cc_copts):\n copt = flag + \"$(execpath :{hmap})\".format(hmap = hmap)\n\n objc_copts.append(copt)\n cc_copts.append(copt)\n swift_copts.extend((\"-Xcc\", copt))\n\ndef _uppercase_string(s):\n return s.upper()\n\ndef _canonicalize_swift_version(swift_version):\n if not swift_version:\n return None\n\n version_parts = swift_version.split(\".\", 2)\n\n if int(version_parts[0]) >= 5:\n # Swift 5+ doesn't allow the minor version to be supplied, though Xcode is more lenient\n swift_version = version_parts[0]\n else:\n # Drop any trailing \".0\" versions\n version_parts_scrubbed = []\n only_zeros_seen = True\n for part in reversed(version_parts):\n if part == \"0\" and only_zeros_seen:\n continue\n only_zeros_seen = False\n version_parts_scrubbed.insert(0, part)\n swift_version = \".\".join(version_parts_scrubbed)\n\n return swift_version\n\ndef apple_library(name, library_tools = {}, export_private_headers = True, namespace_is_module_name = True, default_xcconfig_name = None, xcconfig = {}, **kwargs):\n \"\"\"Create libraries for native source code on Apple platforms.\n\n Automatically handles mixed-source libraries and comes with\n reasonable defaults that mimic Xcode's behavior.\n\n Args:\n name: The base name for all of the underlying targets.\n library_tools: An optional dictionary containing overrides for\n default behaviors.\n export_private_headers: Whether private headers should be exported via\n a `PrivateHeadersInfo` provider.\n namespace_is_module_name: Whether the module name should be used as the\n namespace for header imports, instead of the target name.\n default_xcconfig_name: The name of a default xcconfig to be applied to this target.\n xcconfig: A dictionary of Xcode build settings to be applied to this target in the\n form of different `copt` attributes.\n **kwargs: keyword arguments.\n\n Returns:\n Struct with a bunch of info\n \"\"\"\n library_tools = dict(_DEFAULT_LIBRARY_TOOLS, **library_tools)\n swift_sources = []\n objc_sources = []\n objc_non_arc_sources = []\n cpp_sources = []\n public_headers = kwargs.pop(\"public_headers\", [])\n private_headers = kwargs.pop(\"private_headers\", [])\n objc_hdrs = [f for f in public_headers if f.endswith((\".h\", \".hh\"))]\n objc_non_exported_hdrs = []\n objc_private_hdrs = [f for f in private_headers 
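_canonicalize_swift_version above keeps only the major version for Swift 5+ and strips trailing ".0" components below that. Since the rule itself is Starlark, here is the same logic restated in Python with worked cases:

```python
def canonicalize_swift_version(version):
    if not version:
        return None
    parts = version.split(".", 2)
    if int(parts[0]) >= 5:
        return parts[0]                      # Swift 5+: major version only
    while len(parts) > 1 and parts[-1] == "0":
        parts.pop()                          # drop trailing ".0" components
    return ".".join(parts)

assert canonicalize_swift_version("5.2") == "5"
assert canonicalize_swift_version("4.2.0") == "4.2"
assert canonicalize_swift_version("4.0.1") == "4.0.1"
```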
if f.endswith((\".h\", \".hh\"))]\n if public_headers:\n public_headers = sets.make(public_headers)\n if private_headers:\n private_headers = sets.make(private_headers)\n for f in sorted(kwargs.pop(\"non_arc_srcs\", []), key = _uppercase_string):\n if f.endswith((\".m\", \".mm\")):\n objc_non_arc_sources.append(f)\n else:\n kwargs[\"srcs\"] = kwargs.pop(\"srcs\", []) + [f]\n for f in sorted(kwargs.pop(\"srcs\", []), key = _uppercase_string):\n if f.endswith((\".h\", \".hh\")):\n if (private_headers and sets.contains(private_headers, f)) or \\\n (public_headers and sets.contains(public_headers, f)):\n pass\n elif public_headers and private_headers:\n objc_non_exported_hdrs.append(f)\n elif public_headers:\n objc_private_hdrs.append(f)\n else:\n objc_hdrs.append(f)\n elif f.endswith((\".m\", \".mm\", \".c\")):\n objc_sources.append(f)\n elif f.endswith((\".swift\")):\n swift_sources.append(f)\n elif f.endswith((\".cc\", \".cpp\")):\n cpp_sources.append(f)\n else:\n fail(\"Unable to compile %s in apple_framework %s\" % (f, name))\n\n module_name = kwargs.pop(\"module_name\", name)\n namespace = module_name if namespace_is_module_name else name\n module_map = kwargs.pop(\"module_map\", None)\n cc_copts = kwargs.pop(\"cc_copts\", [])\n swift_copts = kwargs.pop(\"swift_copts\", [])\n ibtool_copts = kwargs.pop(\"ibtool_copts\", [])\n momc_copts = kwargs.pop(\"momc_copts\", [])\n mapc_copts = kwargs.pop(\"mapc_copts\", [])\n linkopts = kwargs.pop(\"linkopts\", [])\n objc_copts = kwargs.pop(\"objc_copts\", [])\n other_inputs = kwargs.pop(\"other_inputs\", [])\n sdk_dylibs = kwargs.pop(\"sdk_dylibs\", [])\n sdk_frameworks = kwargs.pop(\"sdk_frameworks\", [])\n weak_sdk_frameworks = kwargs.pop(\"weak_sdk_frameworks\", [])\n sdk_includes = kwargs.pop(\"sdk_includes\", [])\n pch = kwargs.pop(\"pch\", \"@build_bazel_rules_ios//rules/library:common.pch\")\n deps = kwargs.pop(\"deps\", [])\n data = kwargs.pop(\"data\", [])\n tags = kwargs.pop(\"tags\", [])\n tags_manual = tags if \"manual\" in tags else tags + _MANUAL\n internal_deps = []\n lib_names = []\n\n if default_xcconfig_name:\n for (setting, value) in library_tools[\"fetch_default_xcconfig\"](name, library_tools, default_xcconfig_name, **kwargs).items():\n if not setting in xcconfig:\n xcconfig[setting] = value\n xcconfig_settings = settings_from_xcconfig(xcconfig)\n _prepend_copts(xcconfig_settings, objc_copts, cc_copts, swift_copts, linkopts, ibtool_copts, momc_copts, mapc_copts)\n\n for (k, v) in {\"momc_copts\": momc_copts, \"mapc_copts\": mapc_copts, \"ibtool_copts\": ibtool_copts}.items():\n if v:\n fail(\"Specifying {attr} for {name} is not yet supported. 
Given: {opts}\".format(\n attr = k,\n name = name,\n opts = repr(v),\n ))\n\n if linkopts:\n linkopts_name = \"%s_linkopts\" % (name)\n\n # https://docs.bazel.build/versions/master/be/c-cpp.html#cc_library\n cc_library(\n name = linkopts_name,\n linkopts = linkopts,\n )\n internal_deps.append(linkopts_name)\n\n for vendored_static_framework in kwargs.pop(\"vendored_static_frameworks\", []):\n import_name = \"%s-%s-import\" % (name, paths.basename(vendored_static_framework))\n apple_static_framework_import(\n name = import_name,\n framework_imports = native.glob([\"%s/**/*\" % vendored_static_framework]),\n tags = _MANUAL,\n )\n deps.append(import_name)\n for vendored_dynamic_framework in kwargs.pop(\"vendored_dynamic_frameworks\", []):\n import_name = \"%s-%s-import\" % (name, paths.basename(vendored_dynamic_framework))\n apple_dynamic_framework_import(\n name = import_name,\n framework_imports = native.glob([\"%s/**/*\" % vendored_dynamic_framework]),\n deps = [],\n tags = _MANUAL,\n )\n deps.append(import_name)\n for vendored_static_library in kwargs.pop(\"vendored_static_libraries\", []):\n import_name = \"%s-%s-library-import\" % (name, paths.basename(vendored_static_library))\n objc_import(\n name = import_name,\n archives = [vendored_static_library],\n tags = _MANUAL,\n )\n deps.append(import_name)\n for vendored_dynamic_library in kwargs.pop(\"vendored_dynamic_libraries\", []):\n fail(\"no import for %s\" % vendored_dynamic_library)\n\n resource_bundles = library_tools[\"resource_bundle_generator\"](\n name = name,\n library_tools = library_tools,\n resource_bundles = kwargs.pop(\"resource_bundles\", {}),\n module_name = module_name,\n **kwargs\n )\n deps += resource_bundles\n\n # TODO: remove framework if set\n # Needs to happen before headermaps are made, so the generated umbrella header gets added to those headermaps\n if namespace_is_module_name and not module_map and \\\n (objc_hdrs or objc_private_hdrs or swift_sources or objc_sources or cpp_sources):\n umbrella_header = library_tools[\"umbrella_header_generator\"](\n name = name,\n library_tools = library_tools,\n public_headers = objc_hdrs,\n private_headers = objc_private_hdrs,\n module_name = module_name,\n **kwargs\n )\n if umbrella_header:\n objc_hdrs.append(umbrella_header)\n module_map = library_tools[\"modulemap_generator\"](\n name = name,\n library_tools = library_tools,\n umbrella_header = paths.basename(umbrella_header),\n public_headers = objc_hdrs,\n private_headers = objc_private_hdrs,\n module_name = module_name,\n framework = False if swift_sources else True,\n **kwargs\n )\n\n ## BEGIN HMAP\n\n public_hmap_name = name + \"_public_hmap\"\n public_hdrs_filegroup = name + \"_public_hdrs\"\n native.filegroup(\n name = public_hdrs_filegroup,\n srcs = objc_hdrs,\n tags = _MANUAL,\n )\n\n # Public hmaps are for vendored static libs to export their header only.\n # Other dependencies' headermaps will be generated by li_ios_framework\n # rules.\n headermap(\n name = public_hmap_name,\n namespace = namespace,\n hdrs = [public_hdrs_filegroup],\n tags = _MANUAL,\n )\n internal_deps.append(public_hmap_name)\n\n private_hmap_name = name + \"_private_hmap\"\n private_angled_hmap_name = name + \"_private_angled_hmap\"\n private_hdrs_filegroup = name + \"_private_hdrs\"\n private_angled_hdrs_filegroup = name + \"_private_angled_hdrs\"\n native.filegroup(\n name = private_hdrs_filegroup,\n srcs = objc_non_exported_hdrs + objc_private_hdrs + objc_hdrs,\n tags = _MANUAL,\n )\n native.filegroup(\n name = 
private_angled_hdrs_filegroup,\n srcs = objc_non_exported_hdrs + objc_private_hdrs,\n tags = _MANUAL,\n )\n\n headermap(\n name = private_hmap_name,\n hdrs = [private_hdrs_filegroup],\n tags = _MANUAL,\n )\n internal_deps.append(private_hmap_name)\n headermap(\n name = private_angled_hmap_name,\n namespace = namespace,\n hdrs = [private_angled_hdrs_filegroup],\n tags = _MANUAL,\n )\n internal_deps.append(private_angled_hmap_name)\n\n ## END HMAP\n\n # vfs_name = name + '_vfs'\n # vfs_overlay(name = vfs_name, deps = deps)\n # internal_deps.append(vfs_name)\n\n _append_headermap_copts(private_hmap_name, \"-I\", objc_copts, swift_copts, cc_copts)\n _append_headermap_copts(public_hmap_name, \"-I\", objc_copts, swift_copts, cc_copts)\n _append_headermap_copts(private_angled_hmap_name, \"-I\", objc_copts, swift_copts, cc_copts)\n _append_headermap_copts(private_hmap_name, \"-iquote\", objc_copts, swift_copts, cc_copts)\n\n objc_copts += [\n \"-fmodules\",\n \"-fmodule-name=%s\" % module_name,\n \"-gmodules\",\n ]\n\n swift_copts += [\n \"-Xcc\",\n \"-D__SWIFTC__\",\n ]\n\n swift_version = _canonicalize_swift_version(kwargs.pop(\"swift_version\", None))\n if swift_version:\n swift_copts += [\"-swift-version\", swift_version]\n\n objc_libname = \"%s_objc\" % name\n swift_libname = \"%s_swift\" % name\n cpp_libname = \"%s_cpp\" % name\n\n if swift_sources:\n swift_copts.extend((\"-Xcc\", \"-I.\"))\n if module_map:\n swift_copts += [\n \"-Xcc\",\n \"-fmodule-map-file=\" + \"$(execpath \" + module_map + \")\",\n \"-import-underlying-module\",\n ]\n swiftc_inputs = other_inputs + objc_hdrs\n if module_map:\n swiftc_inputs.append(module_map)\n generated_swift_header_name = module_name + \"-Swift.h\"\n\n swift_library(\n name = swift_libname,\n module_name = module_name,\n generated_header_name = generated_swift_header_name,\n srcs = swift_sources,\n copts = swift_copts,\n deps = deps + internal_deps + lib_names,\n swiftc_inputs = swiftc_inputs,\n features = [\"swift.no_generated_module_map\"],\n tags = tags_manual,\n **kwargs\n )\n lib_names.append(swift_libname)\n\n # Add generated swift header to header maps for angle bracket imports\n swift_doublequote_hmap_name = name + \"_swift_doublequote_hmap\"\n headermap(\n name = swift_doublequote_hmap_name,\n namespace = namespace,\n hdrs = [],\n direct_hdr_providers = [swift_libname],\n tags = _MANUAL,\n )\n internal_deps.append(swift_doublequote_hmap_name)\n _append_headermap_copts(swift_doublequote_hmap_name, \"-iquote\", objc_copts, swift_copts, cc_copts)\n\n # Add generated swift header to header maps for double quote imports\n swift_angle_bracket_hmap_name = name + \"_swift_angle_bracket_hmap\"\n headermap(\n name = swift_angle_bracket_hmap_name,\n namespace = namespace,\n hdrs = [],\n direct_hdr_providers = [swift_libname],\n tags = _MANUAL,\n )\n internal_deps.append(swift_angle_bracket_hmap_name)\n _append_headermap_copts(swift_angle_bracket_hmap_name, \"-I\", objc_copts, swift_copts, cc_copts)\n\n if module_map:\n extend_modulemap(\n name = module_map + \".extended.\" + name,\n destination = \"%s.extended.modulemap\" % name,\n source = module_map,\n swift_header = generated_swift_header_name,\n module_name = module_name,\n tags = _MANUAL,\n )\n module_map = \"%s.extended.modulemap\" % name\n\n if cpp_sources and False:\n cc_copts.append(\"-I.\")\n cc_library(\n name = cpp_libname,\n srcs = cpp_sources + objc_private_hdrs,\n hdrs = objc_hdrs,\n copts = cc_copts,\n deps = deps,\n tags = tags_manual,\n )\n lib_names.append(cpp_libname)\n\n 
objc_library_data = library_tools[\"wrap_resources_in_filegroup\"](name = objc_libname + \"_data\", srcs = data)\n objc_copts.append(\"-I.\")\n objc_library(\n name = objc_libname,\n srcs = objc_sources + objc_private_hdrs + objc_non_exported_hdrs,\n non_arc_srcs = objc_non_arc_sources,\n hdrs = objc_hdrs,\n copts = objc_copts,\n deps = deps + internal_deps + lib_names,\n module_map = module_map,\n sdk_dylibs = sdk_dylibs,\n sdk_frameworks = sdk_frameworks,\n weak_sdk_frameworks = weak_sdk_frameworks,\n sdk_includes = sdk_includes,\n pch = pch,\n data = [objc_library_data],\n tags = tags_manual,\n **kwargs\n )\n launch_screen_storyboard_name = name + \"_launch_screen_storyboard\"\n native.filegroup(\n name = launch_screen_storyboard_name,\n srcs = [objc_library_data],\n output_group = \"launch_screen_storyboard\",\n tags = _MANUAL,\n )\n lib_names.append(objc_libname)\n\n if export_private_headers:\n private_headers_name = \"%s_private_headers\" % name\n lib_names.append(private_headers_name)\n _private_headers(name = private_headers_name, headers = objc_private_hdrs, tags = _MANUAL)\n\n return struct(\n lib_names = lib_names,\n transitive_deps = deps,\n deps = lib_names + deps,\n module_name = module_name,\n launch_screen_storyboard_name = launch_screen_storyboard_name,\n namespace = namespace,\n linkopts = linkopts,\n )\n","repo_name":"PicPay/rules_ios","sub_path":"rules/library.bzl","file_name":"library.bzl","file_ext":"bzl","file_size_in_byte":22183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"} +{"seq_id":"74578370503","text":"from models.conquest.conquest_hedron import ConquestHedron, EmpireHedron, FederationHedron\nfrom models.conquest.conquest_prism import ConquestPrism, FederationPrism, EmpirePrism\nfrom models.conquest.vehicles.crafts.space.fighters import SpaceFighterLaser\nfrom models.conquest.vehicles.crafts.space.space_craft import SpaceCraft, SpaceCraftType, SpaceCraftCockpit, \\\n SpaceCraftEngine, SpaceCraftLifeSupport, SpaceCraftArmor, SpaceCraftShields, SpaceCraftLaser\nfrom models.conquest.vehicles.crafts.squadrons import SpaceCraftSquadron\n\n\nclass ShuttleArmor(SpaceCraftArmor):\n def __init__(self):\n shuttle_armor = {\n \"Cockpit\": 5,\n \"Shields\": 5,\n \"Engine\": 5,\n \"LifeSupport\": 5,\n \"PrimaryWeapon\": 5,\n \"SecondaryWeapon\": 5,\n }\n super().__init__(shuttle_armor)\n\n\nclass ShuttleEngine(SpaceCraftEngine):\n def __init__(self):\n super().__init__(engine_health=7)\n\n\nclass ShuttleLifeSupport(SpaceCraftLifeSupport):\n def __init__(self):\n super().__init__(life_support_health=7)\n\n\nclass ShuttleShields(SpaceCraftShields):\n def __init__(self):\n super().__init__(shield_health=5)\n\n\nclass SpaceShuttle(SpaceCraft):\n def __init__(self,\n name,\n pilot: ConquestPrism,\n crew: ConquestHedron = None\n ):\n super().__init__(name,\n SpaceCraftType.Shuttle,\n SpaceCraftCockpit(pilot),\n ShuttleShields(),\n ShuttleArmor(),\n ShuttleEngine(),\n ShuttleLifeSupport(),\n SpaceFighterLaser(),\n SpaceFighterLaser())\n self.crew = crew\n\n def load_crew(self, new_crew):\n self.crew = new_crew\n\n def deploy_crew(self):\n old_crew = self.crew\n self.crew = None\n return old_crew\n\n def is_online(self):\n if self.crew is None:\n return False\n online_status = super().is_online()\n return online_status\n\n\nclass EmpireSpaceShuttle(SpaceShuttle):\n def __init__(self, pilot: EmpirePrism, crew: EmpireHedron = None):\n super().__init__(\n f\"{pilot.first_name}'s Empire Space Shuttle\",\n pilot,\n crew\n )\n\n\nclass 
FederationSpaceShuttle(SpaceShuttle):\n def __init__(self, pilot: FederationPrism, crew: FederationHedron = None):\n super().__init__(\n f\"{pilot.first_name}'s Federation Space Shuttle\",\n pilot,\n crew\n )\n\n\nclass SpaceShuttleSquadron(SpaceCraftSquadron):\n def __init__(self, shuttle: SpaceShuttle):\n super().__init__(SpaceCraftType.Shuttle, [shuttle])\n\n def deploy_crew(self):\n return self.squad[0].deploy_crew()\n\n def load_crew(self, crew: ConquestHedron):\n self.squad[0].load_crew(crew)\n\n def is_online(self):\n is_squad_online = super().is_online()\n if len(self.squad) <= 0:\n return False\n for member in self.squad:\n is_squad_online |= member.is_online()\n return is_squad_online\n\n\nclass EmpireSpaceShuttleSquadron(SpaceShuttleSquadron):\n def __init__(self, pilot: EmpirePrism, crew: EmpireHedron):\n super().__init__(EmpireSpaceShuttle(pilot, crew))\n\n\nclass FederationSpaceShuttleSquadron(SpaceShuttleSquadron):\n def __init__(self, pilot: FederationPrism, crew: FederationHedron):\n super().__init__(FederationSpaceShuttle(pilot, crew))\n\n","repo_name":"WestiferRobin/mu-prism","sub_path":"models/conquest/vehicles/crafts/space/shuttles.py","file_name":"shuttles.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8105386529","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n'''\r\npython 起始ip和ip总数列出每个ip\r\n\r\n'''\r\n\r\n\r\nimport xlrd\r\nimport sys\r\nreload(sys)\r\nsys.setdefaultencoding('utf-8')\r\ndata = xlrd.open_workbook('X:\\ip.xlsx')\r\nfilewrite = open(\"D:\\eachip.txt\", 'a')\r\ntable = data.sheets()[0]\r\nnrows = table.nrows\r\ncountlist = []\r\niplist = []\r\nfor x in range(nrows-1):\r\n countlist.append(int(table.cell(x+1, 2).value))\r\nfor y in range(nrows-1):\r\n iplist.append(table.cell(y + 1, 1).value)\r\nfor i in range(len(countlist)):\r\n ip = iplist[i]\r\n count = countlist[i]\r\n ss = ip.split('.')\r\n num1 = int(ss[0])\r\n num2 = int(ss[1])\r\n num3 = int(ss[2])\r\n num4 = int(ss[3])\r\n\r\n for i in range(count):\r\n\r\n if num4 > 254:\r\n\r\n num3 +=1\r\n num4 = 1\r\n if num3 > 254:\r\n\r\n num2 += 1\r\n num3 = 1\r\n num4 = 1\r\n\r\n\r\n if num2 > 254:\r\n\r\n num1 += 1\r\n num2 = 1\r\n num3 = 1\r\n num4 = 1\r\n num4 += 1\r\n resaut = str(num1) + str('.') + str(num2) + str('.') + str(num3) + str('.') + str(num4)\r\n print (resaut)\r\n filewrite.write(resaut + '\\n')\r\n\r\n# ip = \"1.34.0.0\"\r\n# count = 131072\r\n","repo_name":"noobxx/eachip","sub_path":"eachip.py","file_name":"eachip.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12276295916","text":"#!/usr/bin/env python3\n\n#usage:error study\n#date:190819\n#author:Distantskyline\n\n\nimport random\n\n\ndef verificationCode(number):\n \"\"\"\n product verification code of user input number\n :param number: verification code's number\n :return: verification code\n \"\"\"\n vCode = ''\n for n in range(number):\n nums = random.randint(0, 9)\n word = chr(random.randint(65, 90))\n middle = random.choice([nums, word])\n vCode += str(middle)\n return vCode\n\n\nverCode = verificationCode(4)\nprint(verCode)\nuserInput = input('Please input verification code: ')\nif userInput.lower() != verCode.lower():\n print('your Stupid !')\nelse:\n print('please load...')\n\n\n\n\n\ntry:\n file = open('file.txt', 'r')\n content = file.read()\n print(content)\nexcept Exception as error:\t\t## 
Exception is the base class of all other non-system exceptions, so it matches any non-system exception\n    print('[ERROR] {}'.format(error))\nfinally:\n    print('program end')\n\nprint('continue running...')\nnumber = [1, 2, 3, 4, 5]\nfor element in number:\n    if element % 2 == 0:\n        print(element)\n\n\n\n#except: ## a bare except (no exception type) catches all exceptions\n#    print('Other error')\n\n\n\n## define and raise a custom exception\nclass MyInputError(Exception):\n    def __init__(self, value):\n        self.value = value\n\n    def __str__(self):\n        return '{} is invalid input'.format(self.value)\n\n\ntry:\n    string = input('>>>Input: ')\n    if 'abcd' in string:\n        raise MyInputError(string) # raise the custom exception\n    else:\n        print(string)\nexcept MyInputError as error:\n    print(error)","repo_name":"Distantskyline/python3","sub_path":"2.面向对象/5.error/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"3623859890","text":"def sumar(a, b):\n    sum = 0\n    if a > b:\n        for i in range(b, a+1, 1):\n            sum += i\n    elif b > a:\n        for i in range(a, b+1, 1):\n            sum += i\n    else:\n        sum = 0\n    return sum\n\n# Main program\n\na = int(input(\"Introdueix primer número: \"))\nb = int(input(\"Introdueix segon número: \"))\nc = sumar(a, b)\nprint(\"La suma dels números entre {} i {} és {}\".format(a, b, c))","repo_name":"DNG222-code/Python","sub_path":"ex52.py","file_name":"ex52.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"ca","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"40794382027","text":"\"\"\"\nParsing routines for different data structures.\n\nAll functions in this module work with Python3 native\ndata structures.\nInspired from:\nhttps://github.com/julie-forman-kay-lab/IDPConformerGenerator/blob/3aef6b085ec09eeebc5812639a5eb6832c0215cd/src/idpconfgen/libs/libparse.py\n\"\"\"\nimport ast\n\n\ndef values_to_dict(values):\n    \"\"\"\n    Generalization of converting parameters to dict.\n    \n    Adapted from:\n    https://github.com/joaomcteixeira/taurenmd/blob/6bf4cf5f01df206e9663bd2552343fe397ae8b8f/src/taurenmd/libs/libcli.py#L94-L138\n    \n    Parameters\n    ----------\n    values : string\n        List of values with the format \"par1=1 par2='string' par3=[1,2,3]\"\n    \n    Returns\n    -------\n    param_dict : dictionary\n        Converted string above to dictionary with `=` denoting linkage\n        E.g. 
{'par1': 1, 'par2':'string', 'par3': [1,2,3]}\n \"\"\"\n bool_value = {\n 'true': True,\n 'false': False,\n }\n\n param_dict = {}\n for kv in values:\n # print(param_dict, kv)\n try:\n k, v = kv.split('=')\n except ValueError:\n param_dict[kv] = True\n else:\n if ',' in v:\n vs = v.split(',')\n try:\n param_dict[k] = tuple(ast.literal_eval(i) for i in vs)\n except (ValueError, TypeError, SyntaxError):\n param_dict[k] = tuple(i for i in vs)\n else:\n try:\n param_dict[k] = ast.literal_eval(v)\n except (ValueError, TypeError): # is string or list\n param_dict[k] = bool_value.get(v.lower(), v)\n except (SyntaxError):\n param_dict[k] = v\n\n return param_dict\n","repo_name":"THGLab/X-EISDv2","sub_path":"src/xeisd/libs/libparse.py","file_name":"libparse.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8891095176","text":"import tensorflow as tf\nimport numpy as np\n\nfrom align import ALIGN\n\n\ndef predict_text(self, features):\n idx, text = features\n return idx, self.text_encoder(text, training=False)\n\ndef predict_image(self, features):\n idx, image = features\n return idx, self.image_encoder(image, training=False)\n\ndef eval_retrieval(dataset, model):\n\n ALIGN.predict_step = predict_image\n model.compile()\n img_idxs, img_embs = model.predict(dataset.get_image_dataset(), verbose=1,\n steps=dataset.num_steps())\n\n img_idxs = img_idxs[:dataset.num_samples(),...]\n img_embs = img_embs[:dataset.num_samples(),...]\n\n ALIGN.predict_step = predict_text\n model.compile()\n txt_idxs, txt_embs = model.predict(dataset.get_text_dataset(), verbose=1)\n\n # np.save(f'eval_img_idxs.npy', img_idxs)\n # np.save(f'eval_img_embs.npy', img_embs)\n # np.save(f'eval_txt_idxs.npy', txt_idxs)\n # np.save(f'eval_txt_embs.npy', txt_embs)\n\n return retrieval_score(img_idxs, img_embs, txt_idxs, txt_embs)\n\ndef cosine_similarity(x, y):\n x /= tf.norm(x, ord=2, axis=-1, keepdims=True)\n y /= tf.norm(y, ord=2, axis=-1, keepdims=True)\n\n return tf.matmul(x, y, transpose_b=True)\n\ndef retrieval_score(img_idxs, img_embs, txt_idxs, txt_embs):\n\n cosine_sim = cosine_similarity(img_embs, txt_embs)\n\n def calc_recall_at_k(from_idxs, to_idxs, similiarity_mat):\n # transpose back and forth to top_k in axis=0 so that k shape can be broadcasted\n _, sorted_idx = tf.math.top_k(tf.transpose(similiarity_mat), k=10, sorted=True)\n sorted_idx = tf.transpose(sorted_idx).numpy()\n\n ret = {}\n for k in (1, 5, 10):\n top_k_match = (from_idxs == to_idxs[sorted_idx[:k,:]]).any(axis=0)\n recall_k = top_k_match.mean()\n ret[f'R@{k}'] = recall_k\n return ret\n\n # t2i\n t2i = calc_recall_at_k(txt_idxs, img_idxs, cosine_sim)\n t2i = {k:f'{v*100:.3f}' for k, v in t2i.items()}\n\n # i2t\n i2t = calc_recall_at_k(img_idxs, txt_idxs, tf.transpose(cosine_sim))\n i2t = {k:f'{v*100:.3f}' for k, v in i2t.items()}\n\n return {'I2T': i2t, 'T2I': t2i}\n\n\nif __name__ == '__main__':\n img_idxs = np.load(f'eval_img_idxs.npy')\n img_embs = np.load(f'eval_img_embs.npy')\n txt_idxs = np.load(f'eval_txt_idxs.npy')\n txt_embs = np.load(f'eval_txt_embs.npy')\n retrieval_score(img_idxs, img_embs, txt_idxs, txt_embs)\n","repo_name":"kakaobrain/coyo-align","sub_path":"evaluate/eval_retrieval.py","file_name":"eval_retrieval.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"81"} +{"seq_id":"2055046600","text":"import os\nfrom time import strftime, gmtime\n\nfrom 
Parser.FindFolderName import find_folder\n\n\ndef compile_html(sub, dir_path):\n file = sub.file_link\n new_name = file.replace(\".tex\", \".html\")\n code = \"pandoc -s \\\"\" + file +\"\\\" -o \\\"\" +new_name + \"\\\"\"\n sub.htmlLink = new_name\n os.system(code)\ndef compile_root(dir_path, filename, folder):\n filename = dir_path + \"/\" + filename\n [_, file] = find_folder(filename)\n \"\"\"\n \n \n new_path_tex = filename.replace(\".tex\",\"_supr.tex\")\n with open(filename , 'r') as myfile:\n data=myfile.read()\n ne = data.replace(\"\\input\",\"%\\input\")\n with open(new_path_tex,'w') as f:\n f.write(ne)\n f.close()\n \n \n code = \"cd \\\"\" + folder + \"\\\" && r | htlatex \"+ new_path_tex + \"\"\n \n os.system(code)\n copyfile(new_path_tex.replace(\".tex\",\".html\"), new_path)\n copyfile(new_path_tex.replace(\".tex\",\".css\"), new_path.replace(\".html\",\"_supr.css\"))\n \"\"\"\n new_path = dir_path + \"/compiled/pages\" + file.replace(\".tex\",\".html\")\n code = \"pandoc -s \\\"\" + filename +\"\\\" -o \\\"\" +new_path + \"\\\"\"\n os.system(code)\n with open(new_path , 'r') as myfile:\n data=myfile.read()\n new_d = data.replace(\"\", \"\" + strftime(\"%a, %d %b %Y %H:%M:%S\", gmtime()) + \"\")\n with open(new_path,'w') as f:\n f.write(new_d)\n f.close()\n return new_path","repo_name":"moranabadie/TexToWiki","sub_path":"HTMLCoverter/compileHTML.py","file_name":"compileHTML.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"173830069","text":"import random\nfrom iso import *\nfrom constants import *\nfrom heading import *\nfrom assets import *\n\n\n\nclass Car(Sprite):\n '''\n Car class for cars travelling the screen.\n '''\n def __init__(self, path):\n Sprite.__init__(self, self.get_random_car_image(path.heading))\n self.scale_up()\n self.path = path\n self.interpolator = 0\n self.set_location(path.start)\n self.layer = Layer.OBJECTS_LAYER\n pass\n\n\n def get_random_car_image(self, heading):\n '''\n Get a random car image for vehicles to vary.\n '''\n options = [\"bmw\", \"dodge\", \"peterbilt\", \"van\", \"bmw_blue\", \"impreza\"]\n heading_str = \"north\" if heading == Heading.NORTH else \"south\"\n car_name = random.choice(options)\n img_name = car_name + \"_\" + heading_str + \".png\"\n return load_image(\"cars/\" + img_name)\n\n def scale_up(self):\n '''\n Scale up 2x the vehicle image since the images are small and borrowed from my other project.\n '''\n w, h = self.image.get_size()\n scaled_image = pygame.Surface((w * 2, h * 2)).convert_alpha()\n pygame.transform.scale2x(self.image, scaled_image)\n self.image = scaled_image\n self.anchor = vec2(w, h)\n\n def update(self, clock):\n '''\n Update the car position by interpolating the vehicle path, and play the sound of a car randomly\n '''\n\n rnd = random.randint(0,11000)\n if rnd == 0 or rnd == 1:\n car_sound = load_sound(f\"car_{rnd + 1}.wav\")\n car_sound.play()\n\n loc = self.path.start.lerp(self.path.end, self.interpolator)\n distance = self.path.start.distance(self.path.end)\n self.set_location(loc)\n self.interpolator += (0.01 / distance) * (clock.get_time() / 30)\n if self.interpolator < 1:\n return True\n else:\n return False\n","repo_name":"ThinhNgo123/learn_python","sub_path":"SREM/SREM-master/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"38566405745","text":"\"\"\"View 
module for handling requests about feat types\"\"\"\nfrom django.http import HttpResponseServerError\nfrom rest_framework.viewsets import ViewSet\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import action\nfrom rest_framework import serializers, status\nfrom HeroForgeApi.models import Feat, FeatSet, FeatOption\nfrom HeroForgeApi.models.character import Character\n\n\nclass FeatSetView(ViewSet):\n \"\"\"Level up featset types view\"\"\"\n\n def retrieve(self, request, pk):\n \"\"\"Handle GET requests for single featset\n\n Returns:\n Response -- JSON serialized featset\n \"\"\"\n try:\n featset = FeatSet.objects.get(pk=pk)\n serializer = FeatSetSerializer(featset)\n return Response(serializer.data)\n except FeatSet.DoesNotExist as exception:\n return Response({'message': exception.args[0]}, status=status.HTTP_404_NOT_FOUND)\n\n def list(self, request):\n \"\"\"Handle GET requests to get all featsets\n\n Returns:\n Response -- JSON serialized list of featsets\n \"\"\"\n featsets = FeatSet.objects.all()\n serializer = FeatSetSerializer(featsets, many=True)\n return Response(serializer.data)\n\n def create(self, request):\n \"\"\"Handle POST operations\n\n Returns\n Response -- JSON serialized featset instance\n \"\"\"\n if request.auth.user.is_staff: # only admins can C-UD\n featset = FeatSet.objects.create(\n name=request.data['name'],\n )\n serializer = FeatSetSerializer(featset)\n return Response(serializer.data, status=201)\n else:\n return Response({'message': \"how did you find this\"}, status=403)\n\n def destroy(self, request, pk):\n \"\"\"Handle Delete operations submitted by staff\n\n Returns\n Response --- 204 no content\n \"\"\"\n if request.auth.user.is_staff: # only admins can C-UD\n featset = FeatSet.objects.get(pk=pk)\n featset.delete()\n return Response(None, status=status.HTTP_204_NO_CONTENT)\n else:\n return Response({'message': \"how did you find this\"}, status=403)\n\n def update(self, request, pk):\n \"\"\"Handle PUT requests for a category\n\n Returns:\n Response -- Empty body with 204 status code\n \"\"\"\n if request.auth.user.is_staff: # only admins can C-UD\n featset = FeatSet.objects.get(pk=pk)\n featset.name = request.data['name']\n featset.save()\n return Response(None, status=status.HTTP_204_NO_CONTENT)\n else:\n return Response({'message': \"how did you find this\"}, status=403)\n \n @action(methods=['POST'], detail=True)\n def expandSet(self, request, pk):\n \"\"\"lets a character learn a feat\"\"\"\n if (request.auth.user.is_staff):\n FeatOption.objects.create(\n feat = Feat.objects.get(pk=request.data[\"feat\"]),\n featSet = FeatSet.objects.get(pk=pk)\n )\n featSet = FeatSet.objects.get(pk=pk)\n serializer = FeatSetSerializer(featSet)\n return Response(serializer.data, status=201)\n else:\n return Response({'message': \"how did you find this\"}, status=403)\n \n @action(methods=['DELETE'], detail=True)\n def reduceSet(self, request, pk):\n \"\"\"lets a character learn a feat\"\"\"\n if (request.auth.user.is_staff):\n featOption = FeatOption.objects.get(\n feat = Feat.objects.get(pk=request.data[\"feat\"]),\n featSet = FeatSet.objects.get(pk=pk)\n )\n featOption.delete()\n featSet = FeatSet.objects.get(pk=pk)\n serializer = FeatSetSerializer(featSet)\n return Response(serializer.data, status=204)\n else:\n return Response({'message': \"how did you find this\"}, status=403)\n\n\n\nclass FeatSetSerializer(serializers.ModelSerializer):\n \"\"\"JSON serializer for featsets\n \"\"\"\n class Meta:\n model = FeatSet\n fields = 
('id', 'name', 'featOptions')\n        depth = 1","repo_name":"Dervalanana/HeroForgeServerFullStack","sub_path":"HeroForgeApi/views/featSet.py","file_name":"featSet.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"351727662","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import SGD  # older Keras API, matching the lr=/decay= arguments below\n\nmodel = Sequential()  # assumed: 'model' was used below without being defined\n\ndropout_rate = 0.5\nnb_classes = 6\n# whatever, let's dance\n# 3 inputs, 24 outputs\nmodel.add(Dense(24, input_dim=3, activation='tanh'))\n# dropout to reduce overfitting\nmodel.add(Dropout(dropout_rate))\n# model.add(Dense(6, activation='tanh'))\n# model.add(Dropout(dropout_rate))\n# softmax for multi-class classification\nmodel.add(Dense(nb_classes, activation='softmax'))\n\n# note: sgd is defined but unused; compile() below uses 'adam'\nsgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(loss='categorical_crossentropy',\n              optimizer='adam',\n              metrics=['accuracy'])\n\n# x_train and Y_train must be defined before this point\nmodel.fit(x_train, Y_train,\n          epochs=7000,\n          batch_size=2000)","repo_name":"nervoushark/fqc-files","sub_path":"keras81.py","file_name":"keras81.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"74654534023","text":"from PIL import Image\nfrom PIL import ImageDraw\nfrom math import ceil, pi, sin, cos\n\n\ndef affichagePointsDeCouleur(nomFichier, pointsDeCouleurExtraits, diametreLed):\n    \"\"\"Renders an image containing ellipses at picked points positions with the picked color\n\n    Args:\n        nomFichier (string): The rendered image output nomFichier\n        pointsDeCouleurExtraits (array): The list of picked point colors\n        diametreLed ([type]): The diameter of each ellipse representing a picked color point\n    \"\"\"\n    maxSizingColorPoint = max(pointsDeCouleurExtraits,\n                              key=(lambda x: max(x[0][1])))\n    size = max(maxSizingColorPoint[0][1])\n    size += diametreLed\n    size = ceil(size)\n    outputImg = Image.new('RGB', (size, )*2, (0, 0, 0))\n    draw = ImageDraw.Draw(outputImg)\n    draw.rectangle([(0, 0), (size, )*2], 'black')\n    for pickedColorPoint in pointsDeCouleurExtraits:\n        upperLeftCornerCoordinates = (\n            pickedColorPoint[0][1][0]-diametreLed/2, pickedColorPoint[0][1][1]-diametreLed/2)\n        bottomRightCornerCoordinates = (\n            upperLeftCornerCoordinates[0]+diametreLed, upperLeftCornerCoordinates[1]+diametreLed)\n        draw.ellipse([upperLeftCornerCoordinates,\n                      bottomRightCornerCoordinates], fill=pickedColorPoint[1])\n    outputImg.save(nomFichier)\n\n\ndef arrayPickedColors(pickedColors):\n    cleanedPickedColors = []\n    lastTheta = 0\n    thetaIndex = 0\n    for pickedColor in pickedColors:\n        if pickedColor[0][0][0] != lastTheta:\n            thetaIndex += 1\n            lastTheta = pickedColor[0][0][0]\n        cleanedPickedColors.append(\n            (thetaIndex, pickedColor[0][0][1], pickedColor[1][0], pickedColor[1][1], pickedColor[1][2]))\n    cleanedPickedColors.sort(key=lambda x: (x[0], x[1]))\n\n    return cleanedPickedColors\n\n\ndef extractionCouleurs(pointsDextraction, image):\n    \"\"\"Pick pixel colors on image\n\n    Args:\n        pointsDextraction (array): Array of tuples containing picking points coordinates\n        image (PIL.Image): The image where colors must be picked\n\n    Returns:\n        Array: List of picked colors in the form (((theta,rIndex),(x,y)),(r,g,b)))\n    \"\"\"\n    pointsDeCouleurExtraits = []\n    for pointsDextraction in pointsDextraction:\n        pixel = image.getpixel(pointsDextraction[1])\n        pointsDeCouleurExtraits.append((pointsDextraction, pixel))\n    return pointsDeCouleurExtraits\n\n\ndef coordonneePointsLigneDeDiametre(diametreZoneDextraction, nbPoints, angle):\n    \"\"\"Computes the coordinates 
of aligned points where color must be picked on image\n\n Args:\n diametreZoneDextraction (int): The diameter of the area where points must be picked\n nbPoints (int): The expected number of points\n angle (float): The angle of the diameter line where holes must be alligned\n\n Returns:\n array: Array of (x,y) tuples containing holes center positions\n \"\"\"\n spaceBetweenPoints = diametreZoneDextraction / nbPoints\n radialCoordinates = []\n if (nbPoints % 2): # number of points is odd, so we place the first one on the center\n radialOrigin = 0\n nbPoints -= 1\n else:\n radialOrigin = spaceBetweenPoints/2\n nbPoints -= 2\n radialCoordinates.append(radialOrigin)\n\n for i in range(0, nbPoints//2):\n radialCoordinates.append(radialOrigin+(i+1)*spaceBetweenPoints)\n pointsPositions = []\n for radialIndex, radial in enumerate(radialCoordinates):\n pointsPositions.append(((angle, radialIndex), (diametreZoneDextraction /\n 2+radial*sin(angle), diametreZoneDextraction/2+radial*cos(angle))))\n pointsPositions.append(((angle, -radialIndex), (diametreZoneDextraction /\n 2-radial*sin(angle), diametreZoneDextraction/2-radial*cos(angle))))\n return pointsPositions\n\n\ndef pointsDextraction(diametreZoneDextraction, nbPointsParLigne, angleAvance, angleMin=0, angleMax=180):\n \"\"\"Builds \n\n Args:\n diametreZoneDextraction (int): The diameter of the area where points must be picked\n nbHoles (int): The expected number of holes in each diametral line\n angleAvance (int): The angle between two consecutive diametral lines\n angleMin (int, optional): The angle of the first line of holes. Defaults to 0.\n angleMax (int, optional): The angle of the last line of holes. Defaults to 180.\n\n Returns:\n array: Array of picking points\n \"\"\"\n points = []\n for i in range(angleMin, angleMax, angleAvance):\n angle_rad = pi/180.0 * i\n points += coordonneePointsLigneDeDiametre(\n diametreZoneDextraction, nbPointsParLigne, angle_rad)\n if (nbPointsParLigne % 2): # if the number of points per line is odd then we add the center point once\n points.append(((0, 0), (diametreZoneDextraction//2,)*2))\n return points\n\n\n# Configuration variables\nnbLedsHelice = 2*29\nnomFichier = 'pepsi.png'\nnomFichierSortie = 'result_picking.png'\nangleAvance = 20\nzoomFactor = 100 # zoom factor in percent\n\nif __name__ == \"__main__\":\n imageSource = Image.open(nomFichier)\n imageSource.convert('RGBA')\n imgMaxSize = max(imageSource.size)\n baseImgSize = imgMaxSize*100//zoomFactor\n imageCentree = Image.new('RGB', (baseImgSize,)*2, 'black')\n\n imageCentree.paste(imageSource, ((\n baseImgSize-imageSource.size[0])//2, (baseImgSize-imageSource.size[1])//2))\n imageCentree.save('TEST.png')\n\n pointsDextraction = pointsDextraction(\n baseImgSize, nbLedsHelice, angleAvance)\n pointsDeCouleur = extractionCouleurs(pointsDextraction, imageCentree)\n\n arrayPoints = arrayPickedColors(pointsDeCouleur)\n # print(arrayPoints)\n\n colorsForStrip = [[[0 for _ in range(3)]\n for _ in range(29)] for _ in range(9)]\n colorsForStrip2 = [[[0 for _ in range(3)]\n for _ in range(29)] for _ in range(9)]\n\n for i in range(len(arrayPoints)):\n sectorIndex = arrayPoints[i][0]\n rIndex = arrayPoints[i][1]\n\n if rIndex >= 0:\n colorsForStrip[sectorIndex][rIndex][0] = arrayPoints[i][2]\n colorsForStrip[sectorIndex][rIndex][1] = arrayPoints[i][3]\n colorsForStrip[sectorIndex][rIndex][2] = arrayPoints[i][4]\n else:\n rIndex = -rIndex\n colorsForStrip2[sectorIndex][rIndex][0] = arrayPoints[i][2]\n colorsForStrip2[sectorIndex][rIndex][1] = 
arrayPoints[i][3]\n colorsForStrip2[sectorIndex][rIndex][2] = arrayPoints[i][4]\n\n print(str(colorsForStrip).replace(\"[\", \"{\").replace(\"]\", \"}\"))\n\n with open(\"./test_color_array.h\", 'w') as outfile:\n outfile.write(\"uint8_t colorsForStrip[9][29][3] =\" + str(\n colorsForStrip).replace(\"[\", \"{\").replace(\"]\", \"}\") + \";\\n\" + \"uint8_t colorsForStrip2[9][29][3] =\" + str(\n colorsForStrip2).replace(\"[\", \"{\").replace(\"]\", \"}\") + \";\\n\"\n )\n\n affichagePointsDeCouleur(nomFichierSortie, pointsDeCouleur, 10)\n","repo_name":"ziedAlmia7/heliceHolo_3il","sub_path":"ExtractionPointsCouleur/extractionPointCouleur.py","file_name":"extractionPointCouleur.py","file_ext":"py","file_size_in_byte":6954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35290106718","text":"#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#PROBLEMA CU DISTRIBUTIE BITI RNG:\n#\n#Împreună cu echipa de la firmă ați inventat un nou algoritm de generare de numere pseudo-aleatoare.\n#Pentru a valida că generatorul poate fi folosit în algoritmi criptografici (cryptographically secure)\n#trebuie să implementați și să rulați o baterie de teste. Unul din aceste teste verifică numărul de apariții\n#pentru fiecare secvență posibilă de doi biți: 00, 01, 10 și 11 cât și raportul între numărul de biți de 0\n#și de 1. Pentru ca secvența de biți să fie aleatoare, trebuie ca numărul de apariții pentru fiecare din\n#cele patru perechi să fie aproximativ egale și în același timp numărul de biți de 0 să fie aproximativ\n#egal cu cei de 1. Mai precis, trebuie ca raporturile R1 dintre numărul de apariții a perechii care apare\n#de cele mai multe ori și numărul de apariții a perechii care apare de cele mai puține ori, cât și raportul\n#R2 între numărul de apariții ale celui mai frecvent bit și numărul de apariții ale celui mai puțin frecvent\n#bit să fie mai mici sau egale cu 110%.\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#CERINTA:\n#\n#Dându-se un număr n reprezentând numărul de biți generat de RNG și secvența de n biți, să se\n#calculeze raporturile R1 și R2 și să se decidă dacă generatorul este valid sau nu.\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#DATE DE INTRARE:\n#\n#Pe prima linie se află n, numărul de biți generați. 
Pe a doua linie se află o secvență continuă de n biți\n#(valori de 0 sau 1), ne-separați prin spații.\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#DATE DE IESIRE:\n#\n#Programul va afișa în consolă (pe stream-ul stdout) pe prima linie raporturile R1 și R2 calculate\n#conform descrierii, valori fracționare cu două zecimale, separate prin spațiu, iar pe a doua linie\n#valoarea 1 dacă generatorul este valid sau 0 dacă nu este.\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n#REZOLVARE:\n\nn = int(input(\"Number n, number of bits RNG: \"))\nbits = str(input(\"Bits sequence: \"))\n\nbits_dict = {\n \"00\": 0,\n \"01\": 0,\n \"10\": 0,\n \"11\": 0\n }\nbit_0 = 0\nbit_1 = 0\ni = 0\nR1 = 0\nR2 = 0\n\nwhile i < n: #number of bit pairs (00, 01, 10, 11)\n bits_dict[bits[i:i+2]] += 1\n i += 2\n\nfor i in bits: #number of bits \"0\" and \"1\"\n if(i == '0'):\n bit_0 += 1\n if(i == '1'):\n bit_1 += 1\n \nif min(bits_dict.values()) > 0: #calculate value of R1 and R2 with function min and max\n R1 = max(bits_dict.values())/min(bits_dict.values())\nif min(bit_0,bit_1) > 0:\n R2 = max(bit_0,bit_1)/min(bit_0,bit_1)\n\nprint(\"%.2f\" %R1,\"%.2f\" %R2)\nif R1 <= 1.1 and R2 <= 1.1: #validity check\n print(\"1\")\nelse:\n print(\"0\")\n\n\n#TIMP NECESAR = 60 min\n","repo_name":"helga99/Practica","sub_path":"Distributie_biti_RNG.py","file_name":"Distributie_biti_RNG.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8645288361","text":"from PIL import ImageEnhance\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random\r\nfrom PIL import Image, ImageDraw\r\n\r\ns = int(input('Choose(0-6) :'))\r\nimage = Image.open(\"nebo.jpg\")\r\ndraw = ImageDraw.Draw(image)\r\nwidth = image.size[0]\r\nheight = image.size[1]\r\npix = image.load()\r\n\r\nif (s == 0):\r\n\tfor i in range(width):\r\n\t\tfor j in range(height):\r\n\t\t\tx = pix[i, j][0]\r\n\t\t\ty = pix[i, j][1]\r\n\t\t\tz = pix[i, j][2]\r\n\t\t\tS = (x + y + z) // 3\r\n\t\t\tdraw.point((i, j), (S, S, S))\r\n\r\nif (s == 1):\r\n\tdepth = int(input('depth:'))\r\n\tfor i in range(width):\r\n\t\tfor j in range(height):\r\n\t\t\tx = pix[i, j][0]\r\n\t\t\ty = pix[i, j][1]\r\n\t\t\tz = pix[i, j][2]\r\n\t\t\tS = (x + y + z) // 3\r\n\t\t\tx = S + depth * 2\r\n\t\t\ty = S + depth\r\n\t\t\tz = S\r\n\t\t\tif (x > 255):\r\n\t\t\t\tx = 255\r\n\t\t\tif (y > 255):\r\n\t\t\t\ty = 255\r\n\t\t\tif (z > 255):\r\n\t\t\t\tz = 255\r\n\t\t\tdraw.point((i, j), (x, y, z))\r\n\r\nif (s == 2):\r\n\tfor i in range(width):\r\n\t\tfor j in range(height):\r\n\t\t\tx = pix[i, j][0]\r\n\t\t\ty = pix[i, j][1]\r\n\t\t\tz = pix[i, j][2]\r\n\t\t\tdraw.point((i, j), (255 - x, 255 - y, 255 - z))\r\n\r\nif (s == 3):\r\n\tfactor = int(input('factor:'))\r\n\tfor i in range(width):\r\n\t\tfor j in range(height):\r\n\t\t\trand = random.randint(-factor, factor)\r\n\t\t\tx = pix[i, j][0] + rand\r\n\t\t\ty = pix[i, j][1] + rand\r\n\t\t\tz = pix[i, j][2] + rand\r\n\t\t\tif (x < 0):\r\n\t\t\t\tx = 0\r\n\t\t\tif (y < 0):\r\n\t\t\t\ty = 0\r\n\t\t\tif (z < 0):\r\n\t\t\t\tz = 0\r\n\t\t\tif (x > 255):\r\n\t\t\t\tx = 255\r\n\t\t\tif (y > 255):\r\n\t\t\t\ty = 255\r\n\t\t\tif (z > 255):\r\n\t\t\t\tz = 255\r\n\t\t\tdraw.point((i, j), (x, y, z))\r\n\r\nif (s == 4):\r\n\tfactor = int(input('factor:'))\r\n\tfor i in range(width):\r\n\t\tfor j in 
range(height):\r\n\t\t\tx = pix[i, j][0] + factor\r\n\t\t\ty = pix[i, j][1] + factor\r\n\t\t\tz = pix[i, j][2] + factor\r\n\t\t\tif (x < 0):\r\n\t\t\t\tx = 0\r\n\t\t\tif (y < 0):\r\n\t\t\t\ty = 0\r\n\t\t\tif (z < 0):\r\n\t\t\t\tz = 0\r\n\t\t\tif (x > 255):\r\n\t\t\t\tx = 255\r\n\t\t\tif (y > 255):\r\n\t\t\t\ty = 255\r\n\t\t\tif (z > 255):\r\n\t\t\t\tz = 255\r\n\t\t\tdraw.point((i, j), (x, y, z))\r\n\r\nif (s == 5):\r\n\tfactor = int(input('factor:'))\r\n\tfor i in range(width):\r\n\t\tfor j in range(height):\r\n\t\t\tx = pix[i, j][0]\r\n\t\t\ty = pix[i, j][1]\r\n\t\t\tz = pix[i, j][2]\r\n\t\t\tS = x + y + z\r\n\t\t\tif (S > (((255 + factor) // 2) * 3)):\r\n\t\t\t\tx, y, z = 255, 255, 255\r\n\t\t\telse:\r\n\t\t\t\tx, y, z = 0, 0, 0\r\n\t\t\tdraw.point((i, j), (x, y, z))\r\n\r\nif (s == 6):\r\n\tim = Image.open(\"nebo.jpg\")\r\n\r\n\tenhancer = ImageEnhance.Contrast(im)\r\n\r\n\tfactor = 1\r\n\tim_output = enhancer.enhance(factor)\r\n\tim_output.save('original-image.png')\r\n\r\n\tfactor = 0.5\r\n\tim_output = enhancer.enhance(factor)\r\n\tim_output.save('less-contrast-image.png')\r\n\r\n\tfactor = 1.5\r\n\tim_output = enhancer.enhance(factor)\r\n\tim_output.save('more-contrast-image.png')\r\n\r\n\timg = cv2.imread('nebo.jpg')\r\n\timg_RGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n\r\n\tgrayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n\tgaussianBlur = cv2.GaussianBlur(grayImage, (3, 3), 0)\r\n\r\n\tret, binary = cv2.threshold(gaussianBlur, 127, 255, cv2.THRESH_BINARY)\r\n\r\n\tkernelx = np.array([[-1, 0], [0, 1]], dtype=int)\r\n\tkernely = np.array([[0, -1], [1, 0]], dtype=int)\r\n\tx = cv2.filter2D(binary, cv2.CV_16S, kernelx)\r\n\ty = cv2.filter2D(binary, cv2.CV_16S, kernely)\r\n\tabsX = cv2.convertScaleAbs(x)\r\n\tabsY = cv2.convertScaleAbs(y)\r\n\tRoberts = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)\r\n\r\n\tkernelx = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]], dtype=int)\r\n\tkernely = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]], dtype=int)\r\n\tx = cv2.filter2D(binary, cv2.CV_16S, kernelx)\r\n\ty = cv2.filter2D(binary, cv2.CV_16S, kernely)\r\n\tabsX = cv2.convertScaleAbs(x)\r\n\tabsY = cv2.convertScaleAbs(y)\r\n\tPrewitt = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)\r\n\r\n\tx = cv2.Sobel(binary, cv2.CV_16S, 1, 0)\r\n\ty = cv2.Sobel(binary, cv2.CV_16S, 0, 1)\r\n\tabsX = cv2.convertScaleAbs(x)\r\n\tabsY = cv2.convertScaleAbs(y)\r\n\tSobel = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)\r\n\r\n\tdst = cv2.Laplacian(binary, cv2.CV_16S, ksize=3)\r\n\tLaplacian = cv2.convertScaleAbs(dst)\r\n\r\n\tplt.subplot(231), plt.imshow(img_RGB), plt.axis('off')\r\n\tplt.subplot(232), plt.imshow(gaussianBlur, cmap=plt.cm.gray), plt.axis('off')\r\n\tplt.subplot(233), plt.imshow(Roberts, cmap=plt.cm.gray), plt.axis('off')\r\n\tplt.subplot(234), plt.imshow(Prewitt, cmap=plt.cm.gray), plt.axis('off')\r\n\tplt.subplot(235), plt.imshow(Sobel, cmap=plt.cm.gray), plt.axis('off')\r\n\tplt.subplot(236), plt.imshow(Laplacian, cmap=plt.cm.gray), plt.axis('off')\r\n\r\n\tplt.show()\r\n\r\nimage.save(\"result.jpg\", \"JPEG\")\r\ndel draw","repo_name":"Slipvin/Projekt2","sub_path":"PhotoshopVL.py","file_name":"PhotoshopVL.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"16768528744","text":"\nHoMeP_ZaDaHuA=int(1)\nwhile ((HoMeP_ZaDaHuA>0)&(HoMeP_ZaDaHuA!=10)):\n print (\"1-if5; 2-if26; 3-for11; 4-for2; 5-while25; 6-while13; 7-series35; 8-series33, 9-ничего; 10-выход\")\n HoMeP_ZaDaHuA=int(input(\"введите 
номер задания:\"))\n if (HoMeP_ZaDaHuA==1):\n print(\"выбрана программа if5, выводит сколько полож. и отр. чисел (из трех), +от меня количесто нулей\")\n pol=int(0)\n nol=int(0)\n otr=int(0)\n i=int(0)\n while (i<3):\n i+=1\n chislo=int(input(\"введите число:\"))\n if (chislo<0): otr+=1\n if (chislo>0): pol+=1\n if (chislo==0): nol+=1\n print (\"положительных:\", pol)\n print (\"отрицетальных:\", otr)\n print (\"нулей:\", nol)\n if (HoMeP_ZaDaHuA==2):\n print(\"выбрана программа if26, сложное описание пропущено...\")\n x=float(input(\"введите х:\"))\n if (x<=0): print(-x)\n if ((x>0)&(x<2)): print(x*x)\n if (x>=2): print(\"4\")\n if (HoMeP_ZaDaHuA==3):\n print(\"выбрана программа for11, опять сложное(лень писать) описание...\")\n i=int(0)\n N=int(input(\"введите N:\"))\n summ=(N**2)\n for i in range(N):\n i+=1\n summ+=((N+i)**2)\n print(\"получилось:\", summ)\n if (HoMeP_ZaDaHuA==4):\n print(\"выбрана программа for2, вывести числа от меньш. к больш. +их кол-во\")\n A=int(input(\"введите меньшее число:\"))\n B=int(input(\"введите большее число:\"))\n N=int(B-A+1)\n i=int(0)\n for i in range(B-A+1):\n print (A+i)\n i+=1\n print(\"всего получилось чисел:\",N)\n if (HoMeP_ZaDaHuA==5):\n print('выбрана программа while25, найти первое число Фибоначчи, большее N')\n F3=int(2)\n F2=int(1)\n F1=int(1)\n N=int(input(\"введите N:\"))\n while(F3<=N):\n F1=F2\n F2=F3\n F3=(F1+F2)\n print(F3)\n if (HoMeP_ZaDaHuA==6):\n print(\"выбрана программа while13, выводит какое-то число и сумму\")\n A=float(input(\"введите А>1:\"))\n K=int(1)\n summ=float(1)\n while (summ<=A):\n K+=1\n summ+=(1/K)\n print(\"K =\", K)\n print(\"сумма =\", summ)\n if (HoMeP_ZaDaHuA==7):\n print(\"выбрана программа series35, какое-то К, наборы с К, очень интересно, спасибо\") \n K=int(input(\"введите К:\"))\n i=int(1)\n elem_B=int(0)\n m=[0]*K\n while (i<=K):\n i+=1\n print(\"след.массив\")\n x=int(input(\"введите число (0-заканчивает):\"))\n while (x!=0):\n elem_B+=1\n m[i-2]+=1\n x=int(input(\"введите число (0-заканчивает):\"))\n i=0\n while(i0): \n print(\"последняя двойка находится на месте:\",aaaaaaaa)\n else:\n print(\"тут двоек нет! 
держите '0', как просится в условии\")\n    if (HoMeP_ZaDaHuA==10):\n        print(\"спасибо, что проверили мою работу, досвиданья!\")\n    if (HoMeP_ZaDaHuA>10):\n        print(\"тут всего 8 заданий, выбранное отсутствует\") #как и мой креатив после 8ми заданий\n    ","repo_name":"andrey-u/name1","sub_path":"от 20.09.2020.py","file_name":"от 20.09.2020.py","file_ext":"py","file_size_in_byte":5114,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} {"seq_id":"11872311889","text":"import os\r\nfrom shutil import move, copy\r\nfrom sklearn.model_selection import train_test_split\r\nimport glob\r\nimport json\r\nimport cv2\r\nfrom xml.dom.minidom import Document\r\nimport xml.etree.ElementTree as ET\r\nimport torch\r\nimport numpy as np\r\n\r\nclass Label():\r\n    def __init__(self, label_dir, img_dir, out_dir=None):\r\n        self.label_dir = label_dir\r\n        self.img_dir = img_dir\r\n        self.out_dir = out_dir\r\n\r\n    def no_label_search (self):\r\n        label_list = []\r\n        img_list = []\r\n\r\n        for label in os.listdir(self.label_dir):\r\n            label_list.append(label[:-4])\r\n        for img in os.listdir(self.img_dir):\r\n            img_list.append(img[:-4])\r\n\r\n        no_label_img_list = set(label_list)^set(img_list)\r\n\r\n        print('No labeled image ID are ')\r\n        print(no_label_img_list)\r\n\r\n        out_dir = self.out_dir\r\n        if out_dir == None:\r\n            out_dir = './out_dir/no_label_output'\r\n            if not os.path.exists(out_dir):\r\n                os.makedirs(out_dir)\r\n        else:\r\n            out_dir = os.path.join(out_dir,'no_label_output')\r\n            if not os.path.exists(out_dir):\r\n                os.makedirs(out_dir)\r\n\r\n        for no_label_img in no_label_img_list:\r\n            img = no_label_img+'.jpg'\r\n            src_path = os.path.join(self.img_dir,img)\r\n            dst_path = os.path.join(out_dir,img)\r\n\r\n            move(src_path, dst_path)\r\n\r\n    def no_img_search(self):\r\n        label_list = []\r\n        img_list = []\r\n\r\n        for label in os.listdir(self.label_dir):\r\n            label_list.append(label[:-4])\r\n        for img in os.listdir(self.img_dir):\r\n            img_list.append(img[:-4])\r\n\r\n        no_image_label_list = set(img_list) ^ set(label_list)\r\n\r\n        print('No imaging label ID are ')\r\n        print(no_image_label_list)\r\n\r\n        out_dir = self.out_dir\r\n        if out_dir == None:\r\n            out_dir = './out_dir/no_img_output'\r\n            if not os.path.exists(out_dir):\r\n                os.makedirs(out_dir)\r\n        else:\r\n            out_dir = os.path.join(out_dir, 'no_img_output')\r\n            if not os.path.exists(out_dir):\r\n                os.makedirs(out_dir)\r\n\r\n        for no_img_label in no_image_label_list:\r\n            label = no_img_label + '.txt'\r\n            src_path = os.path.join(self.label_dir, label)\r\n            dst_path = os.path.join(out_dir, label)\r\n\r\n            move(src_path, dst_path)\r\n\r\n\r\n    def train_val (self, ratio=0.2, random_state=None, test=False):\r\n        label_list = []\r\n        img_list = []\r\n\r\n        for label in os.listdir(self.label_dir):\r\n            label_list.append(label[:-4])\r\n        for img in os.listdir(self.img_dir):\r\n            img_list.append(img[:-4])\r\n\r\n        LabelinImg = [False for c in label_list if c not in img_list]\r\n        ImginLabel = [False for c in img_list if c not in label_list]\r\n\r\n        if LabelinImg or ImginLabel:  # a non-empty list means some files are unmatched\r\n            print('Image or label not match !')\r\n\r\n        else:\r\n            print('Split process start !')\r\n            out_dir = self.out_dir\r\n            if out_dir == None:\r\n                out_dir = './out_dir/split_dataset'\r\n                if not os.path.exists(out_dir):\r\n                    os.makedirs(out_dir)\r\n            else:\r\n                out_dir = os.path.join(out_dir, 'split_dataset')\r\n                if not os.path.exists(out_dir):\r\n
                    os.makedirs(out_dir)\r\n\r\n            img_train_dir = os.path.join(out_dir,'images', 'train')\r\n            img_val_dir = os.path.join(out_dir, 'images', 'val')\r\n            label_train_dir = os.path.join(out_dir,'labels', 'train')\r\n            label_val_dir = os.path.join(out_dir, 'labels', 'val')\r\n\r\n            if not os.path.exists(img_train_dir):\r\n                os.makedirs(img_train_dir)\r\n            if not os.path.exists(img_val_dir):\r\n                os.makedirs(img_val_dir)\r\n            if not os.path.exists(label_train_dir):\r\n                os.makedirs(label_train_dir)\r\n            if not os.path.exists(label_val_dir):\r\n                os.makedirs(label_val_dir)\r\n\r\n\r\n            if random_state == None:\r\n                train_list, val_list = train_test_split(label_list, test_size=ratio)\r\n\r\n            else:\r\n                train_list, val_list = train_test_split(label_list, test_size=ratio, random_state=random_state)\r\n\r\n            print('Train list number: ' + str(len(train_list)) + ' Val list number: ' + str(len(val_list)))\r\n\r\n\r\n            for item in train_list:\r\n                img_name = item+'.jpg'\r\n\r\n                src_path = os.path.join(self.img_dir, img_name)\r\n                dst_path = os.path.join(img_train_dir, img_name)\r\n\r\n                copy(src_path,dst_path)\r\n\r\n                label_name = item + '.txt'\r\n\r\n                label_src_path = os.path.join(self.label_dir, label_name)\r\n                label_dst_path = os.path.join(label_train_dir, label_name)\r\n\r\n                copy(label_src_path,label_dst_path)\r\n\r\n            if test == False:\r\n                for item_val in val_list:\r\n                    img_name = item_val + '.jpg'\r\n\r\n                    src_path = os.path.join(self.img_dir, img_name)\r\n                    dst_path = os.path.join(img_val_dir, img_name)\r\n\r\n                    copy(src_path, dst_path)\r\n\r\n                    label_name = item_val + '.txt'\r\n\r\n                    label_src_path = os.path.join(self.label_dir, label_name)\r\n                    label_dst_path = os.path.join(label_val_dir, label_name)\r\n\r\n                    copy(label_src_path, label_dst_path)\r\n            else:\r\n                if random_state == None:\r\n                    val_list, test_list = train_test_split(val_list, test_size=ratio)\r\n\r\n                else:\r\n                    val_list, test_list = train_test_split(val_list, test_size=ratio, random_state=random_state)\r\n\r\n                img_test_dir = os.path.join(out_dir, 'images', 'test')\r\n                label_test_dir = os.path.join(out_dir, 'labels', 'test')\r\n\r\n                if not os.path.exists(img_test_dir):\r\n                    os.makedirs(img_test_dir)\r\n                if not os.path.exists(label_test_dir):\r\n                    os.makedirs(label_test_dir)\r\n\r\n                for item_val in val_list:\r\n                    img_name = item_val + '.jpg'\r\n\r\n                    src_path = os.path.join(self.img_dir, img_name)\r\n                    dst_path = os.path.join(img_val_dir, img_name)\r\n\r\n                    copy(src_path, dst_path)\r\n\r\n                    label_name = item_val + '.txt'\r\n\r\n                    label_src_path = os.path.join(self.label_dir, label_name)\r\n                    label_dst_path = os.path.join(label_val_dir, label_name)\r\n\r\n                    copy(label_src_path, label_dst_path)\r\n\r\n                for item_test in test_list:\r\n                    img_name = item_test + '.jpg'\r\n\r\n                    src_path = os.path.join(self.img_dir, img_name)\r\n                    dst_path = os.path.join(img_test_dir, img_name)\r\n\r\n                    copy(src_path, dst_path)\r\n\r\n                    label_name = item_test + '.txt'\r\n\r\n                    label_src_path = os.path.join(self.label_dir, label_name)\r\n                    label_dst_path = os.path.join(label_test_dir, label_name)\r\n\r\n                    copy(label_src_path, label_dst_path)\r\n\r\n            print('Split finish !!!')\r\n\r\n\r\n    def label_replace(self, replaced_label, replaced_label_dir=None):\r\n        '''\r\n\r\n        :param replaced_label:\r\n        {'1':0,\r\n         '2':0}\r\n        :param replaced_label_dir: '/output'\r\n        :return:\r\n        '''\r\n        if replaced_label_dir == None:\r\n            replaced_label_dir = self.label_dir\r\n\r\n        if len(os.listdir(replaced_label_dir)) != len(glob.glob(os.path.join(replaced_label_dir, \"*.txt\"))):\r\n            print('Some files are not label file, this function only 
support YOLO label format !'\r\n 'You can use label convert to change your label format !')\r\n else:\r\n print('Replace label process start !')\r\n\r\n out_dir = self.out_dir\r\n if out_dir == None:\r\n out_dir = './out_dir/replaced_label'\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n else:\r\n out_dir = os.path.join(out_dir,'replaced_label')\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n\r\n key_list = list(replaced_label.keys())\r\n\r\n for label in os.listdir(replaced_label_dir):\r\n new_label = []\r\n label_path = os.path.join(replaced_label_dir, label)\r\n\r\n for line in open(label_path):\r\n line_list = line.split(' ')\r\n if line_list[0] in key_list:\r\n line_list[0] = str(replaced_label[line_list[0]])\r\n str_1 = ' '\r\n new_line = str_1.join(line_list)\r\n new_label.append(new_line)\r\n\r\n label_name = os.path.join(out_dir, label)\r\n yolo_txt = open(label_name, 'a')\r\n for label in new_label:\r\n yolo_txt.write(label)\r\n\r\n print('Replace label process finish !!!')\r\n\r\n def label_remove(self, remove_label, remove_label_dir=None):\r\n num_remove = 0\r\n if remove_label_dir == None:\r\n remove_label_dir = self.label_dir\r\n\r\n if len(os.listdir(remove_label_dir)) != len(glob.glob(os.path.join(remove_label_dir, \"*.txt\"))):\r\n print('Some files are not label file, this function only support YOLO label format !'\r\n 'You can use label convert to change your label format !')\r\n else:\r\n print('Remove label process start !')\r\n out_dir = self.out_dir\r\n if out_dir == None:\r\n out_dir = './out_dir/removed_label'\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n else:\r\n out_dir = os.path.join(out_dir,'removed_label')\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n\r\n for label in os.listdir(remove_label_dir):\r\n new_label = []\r\n label_path = os.path.join(remove_label_dir, label)\r\n\r\n for line in open(label_path):\r\n line_list = line.split(' ')\r\n if line_list[0] not in remove_label:\r\n str_1 = ' '\r\n new_line = str_1.join(line_list)\r\n new_label.append(new_line)\r\n elif line_list[0] in remove_label:\r\n num_remove = num_remove + 1\r\n\r\n label_name = os.path.join(out_dir, label)\r\n yolo_txt = open(label_name, 'a')\r\n for label in new_label:\r\n yolo_txt.write(label)\r\n\r\n print('Remove label process finish !!! 
Remove number: ' + str(num_remove))\r\n\r\n def label_select(self, select_label, select_label_dir=None):\r\n num_selected = 0\r\n if select_label_dir == None:\r\n select_label_dir = self.label_dir\r\n\r\n if len(os.listdir(select_label_dir)) != len(glob.glob(os.path.join(select_label_dir, \"*.txt\"))):\r\n print('Some files are not label file, this function only support YOLO label format !'\r\n 'You can use label convert to change your label format !')\r\n else:\r\n print('Selected label process start !')\r\n out_dir = self.out_dir\r\n selected_label_folder = 'selected_label_' + str(select_label)\r\n if out_dir == None:\r\n out_dir = os.path.join('./out_dir',selected_label_folder)\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n else:\r\n out_dir = os.path.join(out_dir,selected_label_folder)\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n\r\n for label in os.listdir(select_label_dir):\r\n label_path = os.path.join(select_label_dir, label)\r\n\r\n for line in open(label_path):\r\n line_list = line.split(' ')\r\n if line_list[0] == select_label:\r\n src_path = os.path.join(self.label_dir, label)\r\n dst_path = os.path.join(out_dir, label)\r\n\r\n if not os.path.exists(dst_path):\r\n copy(src_path, dst_path)\r\n\r\n num_selected = num_selected + 1\r\n\r\n print('Selected label process finish !!! Select number: ' + str(num_selected))\r\n\r\n def emtpy_label(self, empty_img_dir = None):\r\n\r\n out_dir = self.out_dir\r\n if out_dir == None:\r\n out_dir = './out_dir/empty_label_output'\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n else:\r\n out_dir = os.path.join(out_dir, 'empty_label_output')\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n\r\n target_txt_path = out_dir\r\n if empty_img_dir == None:\r\n img_dir = self.img_dir\r\n else:\r\n img_dir = empty_img_dir\r\n\r\n img_list = os.listdir(img_dir)\r\n\r\n label_num = 0\r\n for img_name in img_list:\r\n txt = str(img_name[:-4])\r\n with open(os.path.join(target_txt_path, '{}.txt'.format(txt)), 'w', encoding='utf-8') as f:\r\n f.write('')\r\n label_num = label_num + 1\r\n\r\n print('Total image number is ' + str(len(img_list)) + ' and empty label is ' + str(label_num))\r\n\r\n def xywhn2xyxy(self, x, w, h, padw=0, padh=0):\r\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\r\n y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x\r\n y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y\r\n y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x\r\n y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y\r\n return y\r\n\r\n def crop_img_yolo(self, img_path, label_path, save_path, img_id):\r\n if os.path.getsize(label_path) != 0:\r\n with open(label_path, 'r') as f:\r\n lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels\r\n img = cv2.imread(img_path)\r\n\r\n h, w = img.shape[:2]\r\n\r\n lb[:, 1:] = self.xywhn2xyxy(lb[:, 1:], w, h, 0, 0)\r\n\r\n n = 0\r\n for _, x in enumerate(lb):\r\n class_id = int(x[0])\r\n if not os.path.exists(os.path.join(save_path,str(class_id))):\r\n os.makedirs(os.path.join(save_path,str(class_id)))\r\n x0 = int(x[1])\r\n y0 = int(x[2])\r\n x1 = int(x[3])\r\n y1 = int(x[4])\r\n crop = img[y0: y1, x0: x1]\r\n if crop.size != 0:\r\n save_img = str(img_id) + str(n) + '.JPG'\r\n save = os.path.join(save_path,str(class_id), save_img)\r\n cv2.imwrite(filename=save, img=crop)\r\n n += 1\r\n\r\n def crop_img(self):\r\n print('Label format should be YOLO format')\r\n img_dir = 
self.img_dir\r\n        label_dir = self.label_dir\r\n\r\n        img_list = os.listdir(img_dir)\r\n\r\n        out_dir = self.out_dir\r\n        if out_dir == None:\r\n            out_dir = './out_dir/crop_images'\r\n            if not os.path.exists(out_dir):\r\n                os.makedirs(out_dir)\r\n        else:\r\n            out_dir = os.path.join(out_dir, 'crop_images')\r\n            if not os.path.exists(out_dir):\r\n                os.makedirs(out_dir)\r\n\r\n        for img in img_list:\r\n            img_path = os.path.join(img_dir,img)\r\n            label_name = img[:-4] + '.txt'\r\n\r\n            label_path = os.path.join(label_dir,label_name)\r\n            self.crop_img_yolo(img_path=img_path,label_path=label_path,save_path=out_dir, img_id=img[:-4])\r\n\r\n            print(str(img) + ' done !')\r\n\r\n\r\n\r\nclass label_change():\r\n    '''\r\n    # voc2yolo: class_file = ['bud']\r\n    # yolo2voc: class_file = {'0':'bud'}\r\n    '''\r\n    def __init__(self, label_dir, img_dir, save_dir=None):\r\n        self.label_dir = label_dir\r\n        self.img_dir = img_dir\r\n        self.save_dir = save_dir\r\n        if self.save_dir == None:\r\n            out_dir = 'label_change'\r\n            if not os.path.exists(out_dir):\r\n                os.makedirs(out_dir)\r\n\r\n            self.save_dir = out_dir\r\n\r\n    def creatML2Yolo(self, json_file, img, class_file, save_path):\r\n        # cv2 images are (height, width, channels); the original unpacked these as w, h and then\r\n        # divided through the swapped names, so the maths was right but unreadable\r\n        h, w, d = img.shape\r\n\r\n        with open(json_file, 'r') as f:\r\n            # loading creatML json\r\n            json_label = json.load(f)\r\n            img_name = json_label[0]['image'][:-4]\r\n\r\n            # writing txt\r\n            label_name = os.path.join(save_path, str(img_name + '.txt'))\r\n            yolo_txt = open(label_name, 'a')\r\n\r\n            for num_label in range(len(json_label[0]['annotations'])):\r\n                label = json_label[0]['annotations'][num_label][\"label\"]\r\n                label_id = class_file[label]\r\n                # normalise centre x / width by image width, centre y / height by image height\r\n                x_yolo = round((json_label[0]['annotations'][num_label][\"coordinates\"][\"x\"]) / w, 6)\r\n                y_yolo = round((json_label[0]['annotations'][num_label][\"coordinates\"][\"y\"]) / h, 6)\r\n                w_yolo = round((json_label[0]['annotations'][num_label][\"coordinates\"][\"width\"]) / w, 6)\r\n                h_yolo = round((json_label[0]['annotations'][num_label][\"coordinates\"][\"height\"]) / h, 6)\r\n                writing = str(label_id) + ' ' + str(x_yolo) + ' ' + str(y_yolo) + ' ' + str(w_yolo) + ' ' + str(h_yolo)\r\n\r\n                yolo_txt.write(writing)\r\n                yolo_txt.write('\\n')\r\n\r\n            yolo_txt.close()\r\n\r\n\r\n    def final_creatML2Yolo(self,class_file):\r\n        img_path = self.img_dir\r\n        label_path = self.label_dir\r\n        save_path = self.save_dir\r\n        class_file = class_file\r\n\r\n        if not os.path.exists(save_path):\r\n            os.makedirs(save_path)\r\n\r\n        # note: label_path is expected to end with a path separator here, and the backslash\r\n        # split below assumes Windows-style paths coming back from glob\r\n        files = glob.glob(label_path + \"*.json\")\r\n        files = [i.split(\"\\\\\")[-1].split(\".json\")[0] for i in files]\r\n\r\n        for file in files:\r\n            print(file)\r\n            img = str(file) + '.JPG'\r\n            json_file = str(file) + '.json'\r\n\r\n            img = os.path.join(img_path, img)\r\n            img = cv2.imread(img)\r\n            json_file = os.path.join(label_path, json_file)\r\n\r\n            self.creatML2Yolo(json_file, img, class_file, save_path)\r\n        print('done !!!')\r\n\r\n\r\n    def yolo2voc(self,class_file):  # folder of txt labels, folder for the xml output, folder of images\r\n        \"\"\"Convert YOLO-format txt annotation files into VOC-format xml annotation files.\r\n        \"\"\"\r\n        dic = class_file\r\n        picPath = self.img_dir\r\n        txtPath = self.label_dir\r\n        xmlPath = os.path.join(self.save_dir, 'yolo2voc')\r\n        if not os.path.exists(xmlPath):\r\n            os.makedirs(xmlPath)\r\n\r\n        files = os.listdir(txtPath)\r\n        for i, name in enumerate(files):\r\n            print(str(name))\r\n            xmlBuilder = Document()\r\n            annotation = xmlBuilder.createElement(\"annotation\")  # create the root <annotation> element\r\n            xmlBuilder.appendChild(annotation)\r\n            txtFile = open(os.path.join(txtPath,name))\r\n            txtList = txtFile.readlines()\r\n
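            # assumption in this script: every label <name>.txt has a sibling image <name>.jpg in picPath\r\n            img = cv2.imread(picPath + '/' + name[0:-4] + 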
\".jpg\")\r\n Pheight, Pwidth, Pdepth = img.shape\r\n\r\n folder = xmlBuilder.createElement(\"folder\") # folder标签\r\n foldercontent = xmlBuilder.createTextNode(\"driving_annotation_dataset\")\r\n folder.appendChild(foldercontent)\r\n annotation.appendChild(folder) # folder标签结束\r\n\r\n filename = xmlBuilder.createElement(\"filename\") # filename标签\r\n filenamecontent = xmlBuilder.createTextNode(name[0:-4] + \".jpg\")\r\n filename.appendChild(filenamecontent)\r\n annotation.appendChild(filename) # filename标签结束\r\n\r\n size = xmlBuilder.createElement(\"size\") # size标签\r\n width = xmlBuilder.createElement(\"width\") # size子标签width\r\n widthcontent = xmlBuilder.createTextNode(str(Pwidth))\r\n width.appendChild(widthcontent)\r\n size.appendChild(width) # size子标签width结束\r\n\r\n height = xmlBuilder.createElement(\"height\") # size子标签height\r\n heightcontent = xmlBuilder.createTextNode(str(Pheight))\r\n height.appendChild(heightcontent)\r\n size.appendChild(height) # size子标签height结束\r\n\r\n depth = xmlBuilder.createElement(\"depth\") # size子标签depth\r\n depthcontent = xmlBuilder.createTextNode(str(Pdepth))\r\n depth.appendChild(depthcontent)\r\n size.appendChild(depth) # size子标签depth结束\r\n\r\n annotation.appendChild(size) # size标签结束\r\n\r\n for j in txtList:\r\n oneline = j.strip().split(\" \")\r\n object = xmlBuilder.createElement(\"object\") # object 标签\r\n picname = xmlBuilder.createElement(\"name\") # name标签\r\n namecontent = xmlBuilder.createTextNode(dic[oneline[0]])\r\n picname.appendChild(namecontent)\r\n object.appendChild(picname) # name标签结束\r\n\r\n pose = xmlBuilder.createElement(\"pose\") # pose标签\r\n posecontent = xmlBuilder.createTextNode(\"Unspecified\")\r\n pose.appendChild(posecontent)\r\n object.appendChild(pose) # pose标签结束\r\n\r\n truncated = xmlBuilder.createElement(\"truncated\") # truncated标签\r\n truncatedContent = xmlBuilder.createTextNode(\"0\")\r\n truncated.appendChild(truncatedContent)\r\n object.appendChild(truncated) # truncated标签结束\r\n\r\n difficult = xmlBuilder.createElement(\"difficult\") # difficult标签\r\n difficultcontent = xmlBuilder.createTextNode(\"0\")\r\n difficult.appendChild(difficultcontent)\r\n object.appendChild(difficult) # difficult标签结束\r\n\r\n bndbox = xmlBuilder.createElement(\"bndbox\") # bndbox标签\r\n xmin = xmlBuilder.createElement(\"xmin\") # xmin标签\r\n mathData = int(((float(oneline[1])) * Pwidth + 1) - (float(oneline[3])) * 0.5 * Pwidth)\r\n xminContent = xmlBuilder.createTextNode(str(mathData))\r\n xmin.appendChild(xminContent)\r\n bndbox.appendChild(xmin) # xmin标签结束\r\n\r\n ymin = xmlBuilder.createElement(\"ymin\") # ymin标签\r\n mathData = int(((float(oneline[2])) * Pheight + 1) - (float(oneline[4])) * 0.5 * Pheight)\r\n yminContent = xmlBuilder.createTextNode(str(mathData))\r\n ymin.appendChild(yminContent)\r\n bndbox.appendChild(ymin) # ymin标签结束\r\n\r\n xmax = xmlBuilder.createElement(\"xmax\") # xmax标签\r\n mathData = int(((float(oneline[1])) * Pwidth + 1) + (float(oneline[3])) * 0.5 * Pwidth)\r\n xmaxContent = xmlBuilder.createTextNode(str(mathData))\r\n xmax.appendChild(xmaxContent)\r\n bndbox.appendChild(xmax) # xmax标签结束\r\n\r\n ymax = xmlBuilder.createElement(\"ymax\") # ymax标签\r\n mathData = int(((float(oneline[2])) * Pheight + 1) + (float(oneline[4])) * 0.5 * Pheight)\r\n ymaxContent = xmlBuilder.createTextNode(str(mathData))\r\n ymax.appendChild(ymaxContent)\r\n bndbox.appendChild(ymax) # ymax标签结束\r\n\r\n object.appendChild(bndbox) # bndbox标签结束\r\n\r\n annotation.appendChild(object) # object标签结束\r\n\r\n f = open(xmlPath + '/' + 
name[0:-4] + \".xml\", 'w')\r\n xmlBuilder.writexml(f, indent='\\t', newl='\\n', addindent='\\t', encoding='utf-8')\r\n f.close()\r\n print('done !')\r\n\r\n\r\n def clear_hidden_files(self, path):\r\n dir_list = os.listdir(path)\r\n for i in dir_list:\r\n abspath = os.path.join(os.path.abspath(path), i)\r\n if os.path.isfile(abspath):\r\n if i.startswith(\"._\"):\r\n os.remove(abspath)\r\n else:\r\n self.clear_hidden_files(abspath)\r\n\r\n #数据转换\r\n def convert(self, size, box):\r\n dw = 1. / size[0]\r\n dh = 1. / size[1]\r\n x = (box[0] + box[1]) / 2.0\r\n y = (box[2] + box[3]) / 2.0\r\n w = box[1] - box[0]\r\n h = box[3] - box[2]\r\n x = x * dw\r\n w = w * dw\r\n y = y * dh\r\n h = h * dh\r\n return (x, y, w, h)\r\n\r\n #编写格式\r\n def convert_annotation(self, xml_label, yolo_label, classes):\r\n '''\r\n\r\n :param xml_label:\r\n :param yolo_label:\r\n :param classes: [\"class 1\", 'class 2'] for example classes = [\"boot\", 'heading']\r\n :return:\r\n '''\r\n in_file = open(xml_label)\r\n out_file = open(yolo_label, 'w')\r\n tree = ET.parse(in_file)\r\n root = tree.getroot()\r\n size = root.find('size')\r\n w = int(size.find('width').text)\r\n h = int(size.find('height').text)\r\n\r\n for obj in root.iter('object'):\r\n difficult = obj.find('difficult').text\r\n cls = obj.find('name').text\r\n if cls not in classes or int(difficult) == 1:\r\n continue\r\n cls_id = classes.index(cls)\r\n xmlbox = obj.find('bndbox')\r\n b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),\r\n float(xmlbox.find('ymax').text))\r\n bb = self.convert((w, h), b)\r\n out_file.write(str(cls_id) + \" \" + \" \".join([str(a) for a in bb]) + '\\n')\r\n in_file.close()\r\n out_file.close()\r\n\r\n\r\n def voc2yolo(self,class_file):\r\n '''\r\n :param classes: [\"class 1\", 'class 2'] for example classes = [\"boot\", 'heading']\r\n :return:\r\n '''\r\n label_dir = self.label_dir\r\n save_dir = os.path.join(self.save_dir,'voc2yolo')\r\n if not os.path.exists(save_dir):\r\n os.makedirs(save_dir)\r\n classes = class_file\r\n label_list = os.listdir(label_dir)\r\n for label in label_list:\r\n label_path = os.path.join(label_dir, label)\r\n label_name = str(label[:-4]) + '.txt'\r\n save_label = os.path.join(save_dir, label_name)\r\n self.convert_annotation(label_path, save_label, classes)","repo_name":"EtingX/LabelSmart","sub_path":"labelSmart.py","file_name":"labelSmart.py","file_ext":"py","file_size_in_byte":26839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"35896769051","text":"#Known bugs:\r\n#After first round of playing, some of the shapes will always start higher\r\n#ROTATING SHAPE WHEN AT TOP OF GAME BOARD\r\n#ROTATING SHAPES TOO QUICK MAKES THEM DO WALL BOUNCE\r\n#PRESSING DOWN AND LEFT/RIGHT AT SAME TIME DOESNT MOVE IT LEFT/RIGHT\r\n\r\nimport discord\r\nfrom discord.ext import commands\r\nimport random\r\nimport asyncio\r\n\r\nboard = []\r\nnum_of_rows = 18\r\nnum_of_cols = 10\r\nempty_square = ':black_large_square:'\r\nblue_square = ':blue_square:'\r\nbrown_square = ':brown_square:'\r\norange_square = ':orange_square:'\r\nyellow_square = ':yellow_square:'\r\ngreen_square = ':green_square:'\r\npurple_square = ':purple_square:'\r\nred_square = ':red_square:'\r\nembed_colour = 0x077ff7 #colour of line on embeds\r\npoints = 0\r\nlines = 0 #how many lines cleared\r\ndown_pressed = False #if down button has been pressed\r\nrotate_clockwise = False\r\nrotation_pos = 0\r\nh_movement = 0 #amount to move 
left or right\r\nis_new_shape = False\r\nstart_higher = False #for when near top of board\r\ngame_over = False\r\nindex = 0\r\n\r\n\r\nclass Tetronimo: #Tetris pieces\r\n def __init__(self, starting_pos, colour, rotation_points):\r\n self.starting_pos = starting_pos #list\r\n self.colour = colour\r\n self.rotation_points = rotation_points #list\r\n\r\nmain_wall_kicks = [ #for J, L, T, S, Z tetronimos\r\n [[0, 0], [0, -1], [-1, -1], [2, 0], [2, -1]],\r\n [[0, 0], [0, 1], [1, 1], [-2, 0], [-2, 1]],\r\n [[0, 0], [0, 1], [-1, 1], [2, 0], [2, 1]],\r\n [[0, 0], [0, -1], [1, -1], [-2, 0], [-2, -1]]\r\n ]\r\n\r\ni_wall_kicks = [ #for I tetronimo\r\n [[0, 0], [0, -2], [0, 1], [1, -2], [-2, 1]],\r\n [[0, 0], [0, -1], [0, 2], [-2, -1], [1, 2]],\r\n [[0, 0], [0, 2], [0, -1], [-1, 2], [2, -1]],\r\n [[0, 0], [0, 1], [0, -2], [2, 1], [-1, -2]]\r\n ]\r\n\r\nrot_adjustments = { #to move when rotations are slightly off\r\n #blue: not sure if needs any rn\r\n ':blue_square:': [[0, 1], [-1, -1], [0, 0], [-1, 0]], #[[0, 0], [0, 0], [0, 0], [0, 0]]\r\n #brown: left 1, right 1, right 1, left 1,\r\n ':brown_square:': [[0, 0], [0, 1], [0, 0], [0, -1]], #[[0, -1], [0, 1], [0, 1], [0, -1]]'\r\n #orange: left 1, nothing, right 1, nothing\r\n ':orange_square:': [[0, -1], [0, 0], [-1, 1], [0, 0]], #[[0, -1], [0, 0], [0, 1], [0, 0]]\r\n #none for yellow\r\n ':yellow_square:': [[0, 0], [0, 0], [0, 0], [0, 0]],\r\n #green: right 1, nothing, right 1, nothing\r\n ':green_square:': [[0, 0], [0, 0], [0, 0], [0, 0]], #[[0, 1], [0, 0], [0, 1], [0, 0]]\r\n #purple: nothing, right 1, left 1 (possibly up too), right 1\r\n ':purple_square:': [[0, 0], [1, 1], [0, -1], [0, 1]], #[[0, 0], [0, 1], [0, -1], [0, 1]]\r\n #red: left 1, up 1, right 1, up 1\r\n ':red_square:': [[1, -1], [-1, -1], [0, 2], [-1, -1]] #[[0, -1], [-1, 0], [0, 1], [-1, 0]]\r\n }\r\n\r\n#starting spots, right above the board ready to be lowered. 
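Col is 3/4 to start in middle\r\n# each wall-kick entry above is a [row_offset, col_offset] pair, tried in order until the rotated shape fits\r\n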
shape_I = Tetronimo([[0, 3], [0, 4], [0, 5], [0, 6]], blue_square, [1, 1, 1, 1])\r\nshape_J = Tetronimo([[0, 3], [0, 4], [0, 5], [-1, 3]], brown_square, [1, 1, 2, 2])\r\nshape_L = Tetronimo([[0, 3], [0, 4], [0, 5], [-1, 5]], orange_square, [1, 2, 2, 1])\r\nshape_O = Tetronimo([[0, 4], [0, 5], [-1, 4], [-1, 5]], yellow_square, [1, 1, 1, 1])\r\nshape_S = Tetronimo([[0, 3], [0, 4], [-1, 4], [-1, 5]], green_square, [2, 2, 2, 2])\r\nshape_T = Tetronimo([[0, 3], [0, 4], [0, 5], [-1, 4]], purple_square, [1, 1, 3, 0])\r\nshape_Z = Tetronimo([[0, 4], [0, 5], [-1, 3], [-1, 4]], red_square, [0, 1, 0, 2])\r\n\r\n\r\n#fill board with empty squares\r\ndef make_empty_board():\r\n    for row in range(num_of_rows):\r\n        board.append([])\r\n        for col in range(num_of_cols):\r\n            board[row].append(empty_square)\r\n\r\ndef fill_board(emoji):\r\n    for row in range(num_of_rows):\r\n        for col in range(num_of_cols):\r\n            if board[row][col] != emoji:\r\n                board[row][col] = emoji\r\n\r\n\r\ndef format_board_as_str():\r\n    board_as_str = ''\r\n    for row in range(num_of_rows):\r\n        for col in range(num_of_cols):\r\n            board_as_str += (board[row][col]) # + \" \" possibly\r\n            if col == num_of_cols - 1:\r\n                board_as_str += \"\\n \"\r\n    return board_as_str\r\n\r\ndef get_random_shape():\r\n    global index\r\n    # ordered_shapes = [shape_J, shape_T, shape_L, shape_O, shape_S, shape_Z, shape_S, shape_T, shape_J, shape_Z, shape_S, shape_I, shape_Z, shape_O, shape_T, shape_J, shape_L, shape_Z, shape_I]\r\n    # random_shape = ordered_shapes[index]\r\n    shapes = [shape_I, shape_J, shape_L, shape_O, shape_S, shape_T, shape_Z]\r\n    random_shape = shapes[random.randint(0, 6)] #0, 6\r\n    index += 1\r\n    #deep-copy the template coordinates before adjusting them: mutating the class-level lists\r\n    #in place made shapes permanently start one row higher after the first game\r\n    starting_pos = [s[:] for s in random_shape.starting_pos]\r\n    if start_higher == True:\r\n        for s in starting_pos: #for each square\r\n            s[0] = s[0] - 1 #make row 1 above\r\n    random_shape = [starting_pos, random_shape.colour, random_shape.rotation_points]\r\n    global is_new_shape\r\n    is_new_shape = True\r\n    return random_shape #returns array with starting pos and colour\r\n\r\ndef do_wall_kicks(shape, old_shape_pos, shape_colour, attempt_kick_num):\r\n    new_shape_pos = []\r\n\r\n    if shape_colour == blue_square:\r\n        kick_set = main_wall_kicks[rotation_pos]\r\n    else:\r\n        kick_set = i_wall_kicks[rotation_pos]\r\n\r\n    print('Kick set: ' + str(kick_set))\r\n    for kick in kick_set:\r\n        print('Kick: ' + str(kick))\r\n        for square in shape:\r\n            square_row = square[0]\r\n            square_col = square[1]\r\n            new_square_row = square_row + kick[0]\r\n            new_square_col = square_col + kick[1]\r\n            if (0 <= new_square_col < num_of_cols) and (0 <= new_square_row < num_of_rows): #if square checking is on board\r\n                square_checking = board[new_square_row][new_square_col] #get the square to check if empty\r\n                if (square_checking != empty_square) and ([new_square_row, new_square_col] not in old_shape_pos): #if square is not empty / won't be when other parts of shape have moved\r\n                    #shape doesn't fit\r\n                    new_shape_pos = [] #reset new_shape\r\n                    break\r\n                else: #shape does fit\r\n                    new_shape_pos.append([new_square_row, new_square_col]) #store pos\r\n                    print('New shape: ' + str(new_shape_pos))\r\n                    if len(new_shape_pos) == 4:\r\n                        print('Returned new shape after doing kicks')\r\n                        return new_shape_pos #return shape with kicks added\r\n            else:\r\n                #shape doesn't fit\r\n                new_shape_pos = [] #reset new_shape\r\n                break\r\n\r\n    print('Returned old, unrotated shape')\r\n    return 
old_shape_pos #return shape without rotation\r\n\r\n\r\ndef rotate_shape(shape, direction, rotation_point_index, shape_colour):\r\n rotation_point = shape[rotation_point_index] #coords of rotation point\r\n new_shape = [] #to store coords of rotated shape\r\n\r\n #Rotate shape\r\n for square in shape:\r\n square_row = square[0]\r\n square_col = square[1]\r\n if direction == 'clockwise':\r\n new_square_row = (square_col - rotation_point[1]) + rotation_point[0] + rot_adjustments.get(shape_colour)[rotation_pos-1][0]\r\n print('Adjustment made: ' + str(rot_adjustments.get(shape_colour)[rotation_pos-1][0]))\r\n new_square_col = -(square_row - rotation_point[0]) + rotation_point[1] + rot_adjustments.get(shape_colour)[rotation_pos-1][1]\r\n print('Adjustment made: ' + str(rot_adjustments.get(shape_colour)[rotation_pos-1][1]))\r\n elif direction == 'anticlockwise': #currently not a thing\r\n new_square_row = -(square_col - rotation_point[1]) + rotation_point[0]\r\n new_square_col = (square_row - rotation_point[0]) + rotation_point[1]\r\n new_shape.append([new_square_row, new_square_col]) #store pos of rotated square\r\n if (0 <= square_col < num_of_cols) and (0 <= square_row < num_of_rows): #if on board\r\n board[square_row][square_col] = empty_square #make empty old square pos\r\n\r\n new_shape = do_wall_kicks(new_shape, shape, shape_colour, 0) #offset shape\r\n\r\n new_shape = sorted(new_shape, key=lambda l:l[0], reverse=True) #sort so that bottom squares are first in list\r\n print('Rotated shape: ' + str(new_shape))\r\n\r\n #Place rotated shape (in case can't move down)\r\n if new_shape != shape: #if not same as old unrotated shape (in case places at start pos)\r\n for square in new_shape:\r\n square_row = square[0]\r\n square_col = square[1]\r\n board[square_row][square_col] = shape_colour\r\n\r\n return new_shape\r\n\r\ndef clear_lines():\r\n global board\r\n global points\r\n global lines\r\n lines_to_clear = 0\r\n for row in range(num_of_rows):\r\n row_full = True #assume line is full\r\n for col in range(num_of_cols):\r\n if board[row][col] == empty_square:\r\n row_full = False\r\n break #don't clear this row\r\n if row_full: #if line to clear\r\n lines_to_clear += 1\r\n #bring all lines above down\r\n board2 = board[:] #clone board\r\n for r in range(row, 0, -1): #for every row above row\r\n if r == 0: #if top row\r\n for c in range(num_of_cols):\r\n board2[r][c] = empty_square #make each spot empty\r\n else:\r\n for c in range(num_of_cols):\r\n board2[r][c] = board[r - 1][c] #make each spot the one above\r\n board = board2[:]\r\n if lines_to_clear == 1:\r\n points += 100\r\n lines += 1\r\n elif lines_to_clear == 2:\r\n points += 300\r\n lines += 2\r\n elif lines_to_clear == 3:\r\n points += 500\r\n lines += 3\r\n elif lines_to_clear == 4:\r\n points += 800\r\n lines += 4\r\n\r\n\r\ndef get_next_pos(cur_shape_pos):\r\n global h_movement\r\n global start_higher\r\n global game_over\r\n\r\n #Check if new pos for whole shape is available\r\n movement_amnt = 1\r\n\r\n if down_pressed == False:\r\n amnt_to_check = 1 #check space one below\r\n else:\r\n amnt_to_check = num_of_rows #check all rows until furthest available space\r\n\r\n for i in range(amnt_to_check):\r\n square_num_in_shape = -1\r\n for square in cur_shape_pos:\r\n next_space_free = True\r\n square_num_in_shape += 1\r\n square_row = square[0]\r\n square_col = square[1]\r\n if (0 <= square_col < num_of_cols): #if current column spot will fit\r\n if not (0 <= square_col + h_movement < num_of_cols): #if spot with column position 
changed won't fit\r\n h_movement = 0 #just change row position\r\n if (0 <= square_row + movement_amnt < num_of_rows): #if new square row pos is on board\r\n square_checking = board[square_row + movement_amnt][square_col + h_movement] #get the square to check if empty\r\n if (square_checking != empty_square) and ([square_row + movement_amnt, square_col + h_movement] not in cur_shape_pos): #if square is not empty / won't be when other parts of shape have moved\r\n #check if space free if not moving horizontally (in case going into wall) but still going down\r\n h_movement = 0\r\n square_checking = board[square_row + movement_amnt][square_col + h_movement]\r\n if (square_checking != empty_square) and ([square_row + movement_amnt, square_col + h_movement] not in cur_shape_pos):\r\n if movement_amnt == 1:\r\n next_space_free = False #can't put shape there\r\n print('Detected a space that isnt free')\r\n print('Square checking: ' + str(square_row + movement_amnt) + ', ' + str(square_col + h_movement))\r\n if is_new_shape: #if can't place new shape\r\n if start_higher == True:\r\n game_over = True\r\n else:\r\n start_higher = True\r\n elif movement_amnt > 1: #if sending down\r\n movement_amnt -= 1 #accomodate for extra 1 added to check if its free\r\n return [movement_amnt, next_space_free] #stop checking\r\n elif down_pressed == True:\r\n if square_num_in_shape == 3: #only on last square in shape\r\n movement_amnt += 1 #increase amount to move shape by\r\n elif square_row + movement_amnt >= num_of_rows: #new square row isn't on board\r\n if movement_amnt == 1:\r\n next_space_free = False #can't put shape there\r\n print('Detected a space that isnt free')\r\n elif movement_amnt > 1: #if sending down\r\n movement_amnt -= 1 #accomodate for extra 1 added to check if its free\r\n return [movement_amnt, next_space_free] #stop checking\r\n elif down_pressed == True:\r\n if square_num_in_shape == 3: #only on last square in shape\r\n movement_amnt += 1 #increase amount to move shape by\r\n\r\n return [movement_amnt, next_space_free]\r\n\r\n\r\nasync def run_game(msg, cur_shape):\r\n global is_new_shape\r\n global h_movement\r\n global rotate_clockwise\r\n global rotation_pos\r\n\r\n cur_shape_pos = cur_shape[0]\r\n cur_shape_colour = cur_shape[1]\r\n\r\n if rotate_clockwise == True and cur_shape_colour != yellow_square:\r\n cur_shape_pos = rotate_shape(cur_shape_pos, 'clockwise', cur_shape[2][rotation_pos], cur_shape_colour) #rotate shape\r\n cur_shape = [cur_shape_pos, cur_shape_colour, cur_shape[2]] #update shape\r\n\r\n next_pos = get_next_pos(cur_shape_pos)[:]\r\n movement_amnt = next_pos[0]\r\n next_space_free = next_pos[1]\r\n\r\n #move/place shape if pos is available\r\n square_num_in_shape = -1\r\n if next_space_free:\r\n for square in cur_shape_pos:\r\n square_num_in_shape += 1\r\n square_row = square[0]\r\n square_col = square[1]\r\n if (0 <= square_row + movement_amnt < num_of_rows): #if new square row pos is on board\r\n square_changing = board[square_row + movement_amnt][square_col + h_movement] #get square to change\r\n board[square_row + movement_amnt][square_col + h_movement] = cur_shape_colour #changes square colour to colour of shape\r\n if is_new_shape == True:\r\n is_new_shape = False #has been placed, so not new anymore\r\n if square_row > -1: #stops from wrapping around list and changing colour of bottom rows.\r\n board[square_row][square_col] = empty_square #make old square empty again\r\n cur_shape_pos[square_num_in_shape] = [square_row + movement_amnt, square_col + h_movement] 
#store new pos of shape square\r\n else: #if new square row pos is not on board\r\n cur_shape_pos[square_num_in_shape] = [square_row + movement_amnt, square_col + h_movement] #store new pos of shape square\r\n else:\r\n global down_pressed\r\n down_pressed = False #reset it\r\n clear_lines() #check for full lines and clear them\r\n cur_shape = get_random_shape() #change shape\r\n rotation_pos = 0 #reset rotation\r\n print('Changed shape.')\r\n\r\n if not game_over:\r\n #Update board\r\n embed = discord.Embed(description=format_board_as_str(), color=embed_colour)\r\n h_movement = 0 #reset horizontal movement\r\n rotate_clockwise = False #reset clockwise rotation\r\n await msg.edit(embed=embed)\r\n if not is_new_shape:\r\n await asyncio.sleep(1) #to keep under api rate limit\r\n await run_game(msg, cur_shape)\r\n else:\r\n print('GAME OVER')\r\n desc = 'Score: {} \\n Lines: {} \\n \\n Press ▶ to play again.'.format(points, lines)\r\n embed = discord.Embed(title='GAME OVER', description=desc, color=embed_colour)\r\n await msg.edit(embed=embed)\r\n await msg.remove_reaction(\"⬅\", client.user) #Left\r\n await msg.remove_reaction(\"⬇\", client.user) #Down\r\n await msg.remove_reaction(\"➡\", client.user) #Right\r\n await msg.remove_reaction(\"🔃\", client.user) #Rotate\r\n await msg.add_reaction(\"▶\") #Play\r\n\r\n\r\nasync def reset_game():\r\n global down_pressed\r\n global rotate_clockwise\r\n global rotation_pos\r\n global h_movement\r\n global is_new_shape\r\n global start_higher\r\n global game_over\r\n global points\r\n global lines\r\n fill_board(empty_square)\r\n down_pressed = False\r\n rotate_clockwise = False\r\n rotation_pos = 0\r\n h_movement = 0 #amount to move left or right\r\n is_new_shape = False\r\n start_higher = False\r\n game_over = False\r\n next_space_free = True\r\n points = 0\r\n lines = 0\r\n\r\nmake_empty_board()\r\n\r\n\r\n#-------------------------------------------------------------------------------\r\n\r\nclient = commands.Bot(command_prefix = 't!')\r\n\r\n@client.event\r\nasync def on_ready():\r\n print(\"tetris bot started poggies\")\r\n\r\n@client.command()\r\nasync def test(ctx):\r\n await ctx.send('test working poggies pogchamp')\r\n\r\n@client.command()\r\nasync def start(ctx): #Starts embed\r\n await reset_game()\r\n embed = discord.Embed(title='Tetris in Discord', description=format_board_as_str(), color=embed_colour)\r\n embed.add_field(name='How to Play:', value='Use ⬅ ⬇ ➡ to move left, down, and right respectively. \\n \\n Use 🔃 to rotate the shape clockwise. 
\\n \\n Press ▶ to Play.', inline=False)\r\n\r\n    msg = await ctx.send(embed=embed)\r\n\r\n    #Add button choices / reactions\r\n    await msg.add_reaction(\"▶\") #Play\r\n\r\n    #On new reaction:\r\n    #Update board and board_as_str\r\n    #await msg.edit(embed=embed)\r\n\r\n@client.event\r\nasync def on_reaction_add(reaction, user):\r\n    global h_movement\r\n    global rotation_pos\r\n    if user != client.user:\r\n        msg = reaction.message\r\n        if str(reaction.emoji) == \"▶\": #Play button pressed\r\n            print('User pressed play')\r\n            await reset_game()\r\n            await msg.remove_reaction(\"❌\", client.user) #Remove delete\r\n            embed = discord.Embed(description=format_board_as_str(), color=embed_colour)\r\n            await msg.remove_reaction(\"▶\", user)\r\n            await msg.remove_reaction(\"▶\", client.user)\r\n            await msg.edit(embed=embed)\r\n            await msg.add_reaction(\"⬅\") #Left\r\n            await msg.add_reaction(\"⬇\") #Down\r\n            await msg.add_reaction(\"➡\") #Right\r\n            await msg.add_reaction(\"🔃\") #Rotate\r\n            await msg.add_reaction(\"❌\") #Stop game\r\n            starting_shape = get_random_shape()\r\n            await run_game(msg, starting_shape)\r\n\r\n    if str(reaction.emoji) == \"⬅\": #Left button pressed\r\n        print('Left button pressed')\r\n        h_movement = -1 #move 1 left\r\n        await msg.remove_reaction(\"⬅\", user)\r\n    if str(reaction.emoji) == \"➡\": #Right button pressed\r\n        print('Right button pressed')\r\n        h_movement = 1 #move +1 right\r\n        await msg.remove_reaction(\"➡\", user)\r\n    if str(reaction.emoji) == \"⬇\": #Down button pressed\r\n        print('Down button pressed')\r\n        global down_pressed\r\n        down_pressed = True\r\n        await msg.remove_reaction(\"⬇\", user)\r\n    if str(reaction.emoji) == \"🔃\": #Rotate clockwise button pressed\r\n        print('Rotate clockwise button pressed')\r\n        global rotate_clockwise\r\n        rotate_clockwise = True\r\n        if rotation_pos < 3:\r\n            rotation_pos += 1\r\n        else:\r\n            rotation_pos = 0 #go back to original pos\r\n        await msg.remove_reaction(\"🔃\", user)\r\n    if str(reaction.emoji) == \"❌\": #Stop game button pressed\r\n        #In future maybe put score screen here or a message saying stopping.\r\n        await reset_game()\r\n        await msg.delete()\r\n    if str(reaction.emoji) == \"🔴\": #no 🔴 reaction is added above; clear the text if one ever appears\r\n        await msg.edit(content=\"\")\r\n\r\n\r\nclient.run('Your token here')\r\n","repo_name":"willcantcode/Tetris-Discord-Bot","sub_path":"tetris_bot.py","file_name":"tetris_bot.py","file_ext":"py","file_size_in_byte":20746,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"787989259","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfig, axes = plt.subplots(figsize=(4, 3), nrows=2, ncols=1, dpi=200)\nplt.tight_layout()\nx = np.random.randint(0, 15, 10)\ny = np.sqrt(x ** 2)\n\naxes[1].plot(x, y, color='green', label='x/x squared')\naxes[1].set_xlabel('X')\naxes[1].set_ylabel('Y')\naxes[1].set_title('plot-2')\n\naxes[0].plot(y / 2, x / 10, color='red')\naxes[0].set_xlabel('X')\naxes[0].set_ylabel('Y')\naxes[0].set_title('plot-1')\n","repo_name":"PuspaKamalOli/matplotlib","sub_path":"subplots.py","file_name":"subplots.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"73697443145","text":"import re\nimport numpy as np\nimport pandas as pd\nimport gc\n\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n\n# One-hot encoding for categorical columns with get_dummies\ndef one_hot_encoder(df, nan_as_category = True):\n
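    # keep the original column list so the newly created dummy columns can be identified below\n    original_columns = 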
list(df.columns)\n    categorical_columns = [col for col in df.columns if df[col].dtype == 'object']\n    df = pd.get_dummies(df, columns= categorical_columns, dummy_na= nan_as_category)\n    new_columns = [c for c in df.columns if c not in original_columns]\n    return df, new_columns\n\n\n# Preprocess application_train.csv and application_test.csv\ndef application_train_test(path,df_file,num_rows = None):\n    # Read data and merge\n    df = pd.read_csv(path+ df_file, nrows= num_rows)\n    # Drop the CODE_GENDER column entirely (the raw column also contains 4 'XNA' applications)\n    df = df.drop(columns='CODE_GENDER')\n\n    # Categorical features with Binary encode (0 or 1; two categories)\n    for bin_feature in ['FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:\n        df[bin_feature], uniques = pd.factorize(df[bin_feature])\n    # Categorical features with One-Hot encode\n    df, cat_cols = one_hot_encoder(df,nan_as_category = True)\n\n    # DAYS_EMPLOYED uses 365243 as a placeholder value -> treat it as nan\n    df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace= True)\n    # Some simple new features (percentages)\n    df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']\n    df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']\n    df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS']\n    df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']\n    df['PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']\n    return df\n\n######################################################\n\n# Preprocess bureau.csv and bureau_balance.csv\ndef bureau_and_balance(path,num_rows = None):\n    bureau = pd.read_csv(path+'bureau.csv', nrows = num_rows)\n    bb = pd.read_csv(path+'bureau_balance.csv', nrows = num_rows)\n    bb, bb_cat = one_hot_encoder(bb, nan_as_category = True)\n    bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category = True)\n\n    # Bureau balance: Perform aggregations and merge with bureau.csv\n    bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size']}\n    for col in bb_cat:\n        bb_aggregations[col] = ['mean']\n    bb_agg = bb.groupby('SK_ID_BUREAU').agg(bb_aggregations)\n    bb_agg.columns = pd.Index([e[0] + \"_\" + e[1].upper() for e in bb_agg.columns.tolist()])\n    bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')\n    bureau.drop(['SK_ID_BUREAU'], axis=1, inplace= True)\n    del bb, bb_agg\n    gc.collect()\n\n    # Bureau and bureau_balance numeric features\n    num_aggregations = {\n        'DAYS_CREDIT': ['min', 'max', 'mean', 'var'],\n        'DAYS_CREDIT_ENDDATE': ['min', 'max', 'mean'],\n        'DAYS_CREDIT_UPDATE': ['mean'],\n        'CREDIT_DAY_OVERDUE': ['max', 'mean'],\n        'AMT_CREDIT_MAX_OVERDUE': ['mean'],\n        'AMT_CREDIT_SUM': ['max', 'mean', 'sum'],\n        'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'],\n        'AMT_CREDIT_SUM_OVERDUE': ['mean'],\n        'AMT_CREDIT_SUM_LIMIT': ['mean', 'sum'],\n        'AMT_ANNUITY': ['max', 'mean'],\n        'CNT_CREDIT_PROLONG': ['sum'],\n        'MONTHS_BALANCE_MIN': ['min'],\n        'MONTHS_BALANCE_MAX': ['max'],\n        'MONTHS_BALANCE_SIZE': ['mean', 'sum']\n    }\n    # Bureau and bureau_balance categorical features\n    cat_aggregations = {}\n    for cat in bureau_cat: cat_aggregations[cat] = ['mean']\n    for cat in bb_cat: cat_aggregations[cat + \"_MEAN\"] = ['mean']\n\n    bureau_agg = bureau.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})\n    bureau_agg.columns = pd.Index(['BURO_' + e[0] + \"_\" + e[1].upper() for e in bureau_agg.columns.tolist()])\n    # Bureau: Active credits - using only numerical aggregations\n    active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]\n    active_agg = active.groupby('SK_ID_CURR').agg(num_aggregations)\n
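    # flatten the aggregated MultiIndex into single names such as ACTIVE_DAYS_CREDIT_MEAN\n    active_agg.columns = pd.Index(['ACTIVE_' + 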
e[0] + \"_\" + e[1].upper() for e in active_agg.columns.tolist()])\n bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')\n del active, active_agg\n gc.collect()\n # Bureau: Closed credits - using only numerical aggregations\n closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]\n closed_agg = closed.groupby('SK_ID_CURR').agg(num_aggregations)\n closed_agg.columns = pd.Index(['CLOSED_' + e[0] + \"_\" + e[1].upper() for e in closed_agg.columns.tolist()])\n bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')\n del closed, closed_agg, bureau\n gc.collect()\n return bureau_agg\n\n# Preprocess previous_applications.csv\ndef previous_applications(path,num_rows = None):\n prev = pd.read_csv(path+'previous_application.csv', nrows = num_rows)\n prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)\n # Days 365.243 values -> nan\n prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)\n prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)\n prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)\n prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)\n prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)\n # Add feature: value ask / value received percentage\n prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']\n # Previous applications numeric features\n num_aggregations = {\n 'AMT_ANNUITY': ['min', 'max', 'mean'],\n 'AMT_APPLICATION': ['min', 'max', 'mean'],\n 'AMT_CREDIT': ['min', 'max', 'mean'],\n 'APP_CREDIT_PERC': ['min', 'max', 'mean', 'var'],\n 'AMT_DOWN_PAYMENT': ['min', 'max', 'mean'],\n 'AMT_GOODS_PRICE': ['min', 'max', 'mean'],\n 'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],\n 'RATE_DOWN_PAYMENT': ['min', 'max', 'mean'],\n 'DAYS_DECISION': ['min', 'max', 'mean'],\n 'CNT_PAYMENT': ['mean', 'sum'],\n }\n # Previous applications categorical features\n cat_aggregations = {}\n for cat in cat_cols:\n cat_aggregations[cat] = ['mean']\n \n prev_agg = prev.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})\n prev_agg.columns = pd.Index(['PREV_' + e[0] + \"_\" + e[1].upper() for e in prev_agg.columns.tolist()])\n # Previous Applications: Approved Applications - only numerical features\n approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]\n approved_agg = approved.groupby('SK_ID_CURR').agg(num_aggregations)\n approved_agg.columns = pd.Index(['APPROVED_' + e[0] + \"_\" + e[1].upper() for e in approved_agg.columns.tolist()])\n prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR')\n # Previous Applications: Refused Applications - only numerical features\n refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]\n refused_agg = refused.groupby('SK_ID_CURR').agg(num_aggregations)\n refused_agg.columns = pd.Index(['REFUSED_' + e[0] + \"_\" + e[1].upper() for e in refused_agg.columns.tolist()])\n prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR')\n del refused, refused_agg, approved, approved_agg, prev\n gc.collect()\n return prev_agg\n\n# Preprocess POS_CASH_balance.csv\ndef pos_cash(path,num_rows = None):\n pos = pd.read_csv(path+'POS_CASH_balance.csv', nrows = num_rows)\n pos, cat_cols = one_hot_encoder(pos, nan_as_category= True)\n # Features\n aggregations = {\n 'MONTHS_BALANCE': ['max', 'mean', 'size'],\n 'SK_DPD': ['max', 'mean'],\n 'SK_DPD_DEF': ['max', 'mean']\n }\n for cat in cat_cols:\n aggregations[cat] = ['mean']\n \n pos_agg = pos.groupby('SK_ID_CURR').agg(aggregations)\n pos_agg.columns = pd.Index(['POS_' + e[0] + 
\"_\" + e[1].upper() for e in pos_agg.columns.tolist()])\n # Count pos cash accounts\n pos_agg['POS_COUNT'] = pos.groupby('SK_ID_CURR').size()\n del pos\n gc.collect()\n return pos_agg\n \n# Preprocess installments_payments.csv\ndef installments_payments(path, num_rows = None):\n ins = pd.read_csv(path+'installments_payments.csv', nrows = num_rows)\n ins, cat_cols = one_hot_encoder(ins, nan_as_category= True)\n # Percentage and difference paid in each installment (amount paid and installment value)\n ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']\n ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']\n # Days past due and days before due (no negative values)\n ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']\n ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']\n ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)\n ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)\n # Features: Perform aggregations\n aggregations = {\n 'NUM_INSTALMENT_VERSION': ['nunique'],\n 'DPD': ['max', 'mean', 'sum'],\n 'DBD': ['max', 'mean', 'sum'],\n 'PAYMENT_PERC': ['max', 'mean', 'sum', 'var'],\n 'PAYMENT_DIFF': ['max', 'mean', 'sum', 'var'],\n 'AMT_INSTALMENT': ['max', 'mean', 'sum'],\n 'AMT_PAYMENT': ['min', 'max', 'mean', 'sum'],\n 'DAYS_ENTRY_PAYMENT': ['max', 'mean', 'sum']\n }\n for cat in cat_cols:\n aggregations[cat] = ['mean']\n ins_agg = ins.groupby('SK_ID_CURR').agg(aggregations)\n ins_agg.columns = pd.Index(['INSTAL_' + e[0] + \"_\" + e[1].upper() for e in ins_agg.columns.tolist()])\n # Count installments accounts\n ins_agg['INSTAL_COUNT'] = ins.groupby('SK_ID_CURR').size()\n del ins\n gc.collect()\n return ins_agg\n\n# Preprocess credit_card_balance.csv\ndef credit_card_balance(path, num_rows = None):\n cc = pd.read_csv(path+'credit_card_balance.csv', nrows = num_rows)\n cc, cat_cols = one_hot_encoder(cc, nan_as_category= True)\n # General aggregations\n cc.drop(['SK_ID_PREV'], axis= 1, inplace = True)\n cc_agg = cc.groupby('SK_ID_CURR').agg(['min', 'max', 'mean', 'sum', 'var'])\n cc_agg.columns = pd.Index(['CC_' + e[0] + \"_\" + e[1].upper() for e in cc_agg.columns.tolist()])\n # Count credit card lines\n cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR').size()\n del cc\n gc.collect()\n return cc_agg\n\n\ndef end_preprocess_df(path, df):\n df.set_index('SK_ID_CURR', inplace=True)\n \n df.replace(np.inf, 0, inplace=True)\n \n threshold_value = 80\n threshold = ((1-(threshold_value/100))*df.shape[0])\n \n df.dropna(axis=1, thresh = threshold, inplace = True)\n \n df.rename(columns = lambda x:re.sub('[^A-Za-z0-9_]+', '', x), inplace = True)\n \n lst_features = pd.read_csv(path+'list_feature_lgb.csv') #list_features for L1based feature\n lst = list(lst_features[\"Col_to_keep\"].values)\n df_filter = df[[c for c in df.columns if c in lst]]\n\n return df_filter\n \n\n\n\ndef data_prep(path = \"B:/OpenClassRooms/Data_Scientist/projet_7/data/\",\n df_file = 'application_train.csv',debug = False):\n num_rows = 10000 if debug else None\n path = path\n df_file = df_file\n print(\"Path of the data folder : \"+path)\n print(\"Dataframe : \"+df_file)\n print(\"_\"*10)\n print(\" \")\n \n print(\"1) Loading of df\")\n df = application_train_test(path, df_file,num_rows)\n print(\"Raw df shape:\", df.shape)\n print(\"_\"*10)\n print(\" \")\n \n print(\"2) Processing of bureau\")\n bureau = bureau_and_balance(path,num_rows)\n print(\"Bureau df shape:\", bureau.shape)\n df = df.join(bureau, how='left', on='SK_ID_CURR')\n print(\"df shape after 
merging:\", df.shape)\n del bureau\n gc.collect()\n print(\"_\"*10)\n print(\" \")\n \n print(\"3) Process previous_applications\")\n prev = previous_applications(path,num_rows)\n print(\"Previous applications df shape:\", prev.shape)\n df = df.join(prev, how='left', on='SK_ID_CURR')\n print(\"df shape after merging:\", df.shape)\n del prev\n gc.collect()\n print(\"_\"*10)\n print(\" \")\n \n print(\"4) Process POS-CASH balance\")\n pos = pos_cash(path,num_rows)\n print(\"Pos-cash balance df shape:\", pos.shape)\n df = df.join(pos, how='left', on='SK_ID_CURR')\n print(\"df shape after merging:\", df.shape)\n del pos\n gc.collect()\n print(\"_\"*10)\n print(\" \")\n \n print(\"5) Process installments payments\")\n ins = installments_payments(path,num_rows)\n print(\"Installments payments df shape:\", ins.shape)\n df = df.join(ins, how='left', on='SK_ID_CURR')\n print(\"df shape after merging:\", df.shape)\n del ins\n gc.collect()\n print(\"_\"*10)\n print(\" \")\n \n print(\"6) Process credit card balance\")\n cc = credit_card_balance(path,num_rows)\n print(\"Credit card balance df shape:\", cc.shape)\n df = df.join(cc, how='left', on='SK_ID_CURR')\n print(\"df shape after merging:\", df.shape)\n del cc\n gc.collect()\n print(\"_\"*10)\n print(\" \")\n \n print(\"7) Process filter column\")\n df_filter = end_preprocess_df(path,df)\n print(\"df final shape:\", df_filter.shape)\n gc.collect()\n\n print(\"_\"*10)\n print(\" \")\n print(\"Dataframe preparation completed\")\n print(\"_\"*10)\n print(\" \")\n\n return df_filter\n \n\npath = input(\"1) select the path of the data > \")\ndata = input(\"2) Name of the csv file to prepare > \")\noutput = input(\"3) Name of the output file (with extension) > \")\n\ndf = data_prep(path = path,\n df_file = data)\n\nprint(\"Saving the transformed dataframe ...\")\nprint(\"_\"*10)\nprint(\" \")\ndf.to_csv(output, index_label='SK_ID_CURR',sep=\";\")\n\nprint(\"File saved\")\nprint(\"_\"*10)\nprint(\" \")","repo_name":"Cdubois1992/OCr_DS_P7","sub_path":"Data_preparation_fct.py","file_name":"Data_preparation_fct.py","file_ext":"py","file_size_in_byte":13443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21735756231","text":"import datetime\nimport os\nimport re\n\nfrom parameter import Parameter\n\nclass Experiment(Parameter):\n param_path = \"experiment\"\n param_name = \"Experiment\"\n\n def __init__(self):\n self.global_params = [\n \"exp_name\",\n\n \"model\",\n ]\n\n self.params = [\n \"trainer\",\n\n \"evaluator\",\n ]\n\n self.shared_params = []\n\n for global_param in self.global_params:\n setattr(self, global_param, None)\n\n for param in self.params:\n setattr(self, param, None)\n\n def param_build(self):\n if re.match(\"_[0-9]{4}_[0-9]{2}_[0-9]{2}_[0-9]{2}_[0-9]{2}_[0-9]{2}\", self.exp_name[-20:]):\n self.exp_name = self.exp_name[:-20]\n currentDT = datetime.datetime.now()\n self.exp_name = self.exp_name + \"_\" + currentDT.strftime(\"%Y_%m_%d_%H_%M_%S\")\n\n self.set_global_params(overwrite=True)\n\n for p in self.global_params:\n v = getattr(self, p)\n if isinstance(v, Parameter):\n v.param_build()\n\n for p in self.params:\n v = getattr(self, p)\n if isinstance(v, Parameter):\n v.param_build()\n\n self._build()\n\n def _build(self):\n try:\n os.mkdir(self.exp_name)\n except:\n pass\n\n def save(self):\n 
super().save(self.exp_name)\n","repo_name":"alvinzz/neural_CA","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34426038365","text":"from Autocomplete.model import NgramCounter, NgramModel\n\nngram_len = 3 # the length of ngrams to form from the text\n\n# Count the frequency of each ngram in the given file\nng_counter = NgramCounter(file_path='data/tokenized_emma',\n                          ngram_len=ngram_len)\ncounts = ng_counter.count()\n\n# Instantiate the ngram model with the pretrained counter\nngram_model = NgramModel(ng_counter)\ntext = ngram_model.generate_text(target_words='It is time', number_of_sents=3)\nprint(text)\n","repo_name":"BonySmoke/NgramModel","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21164437980","text":"'''5. Write a program with a function that takes one parameter. The function must return \"Positive\"\r\nif the number is greater than zero, \"Negative\" if the number is less than zero, and \"Zero\" if the number is equal to zero.'''\r\n\r\ndef verificar_numeros(numero):\r\n    if numero < 0:\r\n        return ('Negative')\r\n    elif numero == 0:\r\n        return('Zero')\r\n    else:\r\n        return('Positive')\r\n\r\nnumero = int(input(\"Enter an integer: \"))\r\nresultado = verificar_numeros(numero)\r\nprint(resultado)\r\n\r\n","repo_name":"GleisonAmorim/Estudos-Dev-Python","sub_path":"Ultima School -Desenvolvedo r Python completo/Módulo 1 Introdução aos fundamentos do Python/3ª semana/M1S3 (5).py","file_name":"M1S3 (5).py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"9255789064","text":"import pycell.library\n\nfrom pycell.chars_in_file import chars_in_file\nfrom pycell.env import Env\nfrom pycell.lexer import lex\nfrom pycell.parser import parse\n\n\ndef compile_operation(expr, env, indent):\n    return \"%s %s %s\" % (\n        compile_expr(expr[2], env, indent),\n        expr[1],\n        compile_expr(expr[3], env, indent)\n    )\n\n\ndef native_equals(args, env, indent):\n    if len(args) != 2:\n        raise Exception(\n            \"Wrong number of arguments to equals - expected 2, but got %d\"\n            % len(args)\n        )\n    else:\n
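        # equality compiles to a JS ternary: Cell truth values in the generated code are the numbers 1 and 0\n        return \"(%s===%s ? 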
1 : 0)\" % tuple(compile_expr(e, env, indent) for e in args)\n\n\ndef native_if(args, env, indent):\n if len(args) != 3:\n raise Exception(\n \"Wrong number of arguments to equals - expected 3, but got %d\"\n % len(args)\n )\n else:\n ind = \" \" * indent\n return (\n \"\"\"(function() {\n%s if( %s !== 0 ) {\n%s%s } else {\n%s%s }\n%s})()\"\"\" % (\n ind,\n compile_expr(args[0], env, indent),\n compile_list(args[1][2], env, indent + 8, True),\n ind,\n compile_list(args[2][2], env, indent + 8, True),\n ind,\n ind\n )\n )\n\n\ndef native_print(args, env, indent):\n if len(args) != 1:\n raise Exception(\n \"Wrong number of arguments to equals - expected 1, but got %d\"\n % len(args)\n )\n else:\n return (\n \"console.log(%s)\" % (\n \", \".join(compile_expr(e, env, indent) for e in args)\n )\n )\n\n\ndef native_set(args, env, indent):\n if len(args) != 2:\n raise Exception(\n \"Wrong number of arguments to set - expected 2, but got %d\"\n % len(args)\n )\n else:\n # TODO: check args[1] compiles to a string, which refers\n # to the name of an existing symbol\n var_name = compile_expr(args[0], env, indent)\n if var_name[0] == \"'\":\n return (\n \"(%s = %s)\" % ( var_name[1:-1], compile_expr(args[1], env, indent) )\n )\n else:\n # TODO: use eval to set the variable\n return \"TODO\"\n\n\ndef compile_call(expr, env, indent):\n fn_name = compile_expr(expr[1], env, indent)\n if fn_name == \"equals\":\n return native_equals(expr[2], env, indent)\n elif fn_name == \"if\":\n return native_if(expr[2], env, indent)\n elif fn_name == \"print\":\n return native_print(expr[2], env, indent)\n elif fn_name == \"set\":\n return native_set(expr[2], env, indent)\n else:\n return (\n \"%s(%s)\" % (\n fn_name,\n \", \".join(compile_expr(e, env, indent) for e in expr[2])\n )\n )\n\n\ndef compile_assignment(expr, env, indent):\n # TODO: check expr[1] compiles to a symbol\n # TODO: add symbol value to environment and check it later\n return \"var %s = %s\" % (\n compile_expr(expr[1], env, indent), compile_expr(expr[2], env, indent))\n\n\ndef compile_function_def(expr, env, indent):\n # TODO: check args are symbols\n ret = \"(function(%s) {\\n\" % ( \", \".join(s[1] for s in expr[1]) )\n ret += compile_list(expr[2], env, indent + 4, True)\n ret += \"%s})\" % ( \" \" * indent )\n return ret\n\n\njs_keywords = (\"for\", )\n\ndef mangle_symbol(sym):\n if sym in js_keywords:\n return sym + \"__\"\n elif sym == \"None\":\n return \"null\"\n else:\n return sym\n\ndef compile_expr(expr, env, indent):\n typ = expr[0]\n if typ == \"number\":\n return expr[1]\n elif typ == \"string\":\n return \"'%s'\" % expr[1].replace(\"'\",\"\\\\'\")\n elif typ == \"symbol\":\n return mangle_symbol(expr[1])\n elif typ == \"function\":\n return compile_function_def(expr, env, indent)\n elif typ == \"assignment\":\n return compile_assignment(expr, env, indent)\n elif typ == \"call\":\n return compile_call(expr, env, indent)\n elif typ == \"operation\":\n return compile_operation(expr, env, indent)\n else:\n raise Exception(\"Compiling unknown type '%s'.\" % str(expr))\n\n\ndef compile_list(exprs, env, indent=0, return_last = False):\n ret = \"\"\n lst_exprs = list(exprs)\n for i, expr in enumerate(lst_exprs):\n ret += \" \" * indent\n if return_last and i == len(lst_exprs) - 1:\n ret += \"return \"\n ret += compile_expr(expr, env, indent)\n ret += \";\\n\"\n return ret\n\n\ndef compile_(output, filename):\n env = Env()\n with open(output, \"w\") as outfile:\n outfile.write(\n compile_list(parse(lex(pycell.library.as_text(env))), env))\n with 
open(filename, encoding=\"ascii\") as infile:\n outfile.write(\n compile_list(parse(lex(chars_in_file(infile))), env))\n","repo_name":"andybalaam/cell","sub_path":"pycell/compile_.py","file_name":"compile_.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"81"} +{"seq_id":"13378867908","text":"import argparse, sys, os\nimport numpy as np\nimport matplotlib.pyplot as plt \n\nrepo_paths = ['/Users/Lorena/ML_IPAM/IPAM2021_ML/', '/Users/simonealbanesi/repos/IPAM2021_ML/']\nfor rp in repo_paths:\n if os.path.isdir(rp):\n repo_path = rp\n break\nsys.path.insert(0, repo_path+'algo/classy_NN/')\nsys.path.insert(0, repo_path+'utils/')\nfrom utils import extract_data, chirpMass, findSecondMassFromMc\n\n# this is hard-coded, but at this point I don't think we will change this number\nNFEATURES = 4 \n\ndef escapeLatex(text):\n if text: import matplotlib\n if text and matplotlib.rcParams['text.usetex']:\n return text.replace('_', '{\\\\textunderscore}')\n else:\n return text\n\n###################################################\n# Plots \n###################################################\ndef plot_recovered_vs_predicted(data):\n \"\"\" Usual plot with injection on x-axis and \n recovered/predicted on y. data is the struct\n produced in the main\n \"\"\"\n dot_size = 1\n edge_color_factor = 1\n fig, axs = plt.subplots(2,2,figsize = (9,9))\n color_rec = np.array([0.7,0.7,0.7]);\n color_pred = np.array([1,0.8,0]);\n for i in range(NFEATURES):\n ax = axs[int(i/2), i%2]\n ax.scatter(data.inj[:,i], data.rec[:,i], label='recovered', s=dot_size, \n color=color_rec, edgecolors=color_rec/edge_color_factor)\n ax.scatter(data.inj[:,i], data.pred[:,i], label='predicted', s=dot_size, \n color=color_pred, edgecolors=color_pred/edge_color_factor)\n ax.plot(data.inj[:,i],data.inj[:,i], color='k')\n ylabel = escapeLatex(data.var_names_tex[i]) + ' - recovered/predicted'\n ax.set_ylabel(ylabel, fontsize=15)\n xlabel = escapeLatex(data.var_names_tex[i]) + ' - injected'\n ax.set_xlabel(xlabel, fontsize=15)\n #plt.legend()\n plt.subplots_adjust(wspace=0.4)\n if data.savepng:\n figname = data.plots_prefix+'recvspred.png'\n fullname = data.plots_dir+'/'+figname\n plt.savefig(fullname,dpi=200,bbox_inches='tight')\n if data.verbose:\n print(figname, 'saved in', data.plots_dir)\n plt.show() \n return \n\ndef plot_parspace(data):\n \"\"\" Plot injections\n \"\"\"\n dot_size = 1\n fig, axs = plt.subplots(1,2,figsize=(10,5))\n \n if data.parspace_colorful:\n m1 = data.inj[:,0]\n if data.regr_vars=='m1m2chi1chi2':\n m2 = data.inj[:,1]\n elif data.regr_vars=='m1Mcchi1chi2':\n Mc = data.inj[:,1]\n m2 = findSecondMassFromMc(Mc,m1)\n else:\n raise RuntimeError('Invalid regression variables')\n chi1 = data.inj[:,2]\n chi2 = data.inj[:,3]\n mask = {}\n colors = {}\n mask['bbh'] = np.where(( (m1>=5) & (m2>=5) ))\n mask['bhns'] = np.where(( (m1>=5) & (m2<5) ))\n mask['bns'] = np.where(( (m1<5) & (m2<5) ))\n colors['bbh'] = [1,0,0]\n colors['bhns'] = [0,0,1]\n colors['bns'] = [0,1,0]\n m1_dict = {}\n m2_dict = {}\n chi1_dict = {}\n chi2_dict = {}\n y_dict = {}\n keys = mask.keys()\n for k in keys:\n m1_dict[k] = m1[mask[k]]\n m2_dict[k] = m2[mask[k]]\n chi1_dict[k] = chi1[mask[k]]\n chi2_dict[k] = chi2[mask[k]]\n if data.regr_vars=='m1m2chi1chi2':\n y_dict[k] = m2_dict[k]\n elif data.regr_vars=='m1Mcchi1chi2':\n y_dict[k] = chirpMass(m1_dict[k], m2_dict[k])\n \n axs[0].scatter(m1_dict[k], y_dict[k], s=dot_size, color=colors[k])\n 
axs[1].scatter(chi1_dict[k], chi2_dict[k], s=dot_size, color=colors[k])\n\n else:\n color = [0.3,0.3,1]\n axs[0].scatter(data.inj[:,0], data.inj[:,1], s=dot_size, color=color)\n axs[1].scatter(data.inj[:,2], data.inj[:,3], s=dot_size, color=color)\n \n xlab1 = escapeLatex(data.var_names_tex[0])\n ylab1 = escapeLatex(data.var_names_tex[1])\n xlab2 = escapeLatex(data.var_names_tex[2])\n ylab2 = escapeLatex(data.var_names_tex[3])\n axs[0].set_xlabel(xlab1, fontsize=15)\n axs[0].set_ylabel(ylab1, fontsize=15)\n axs[1].set_xlabel(xlab2, fontsize=15)\n axs[1].set_ylabel(ylab2, fontsize=15)\n plt.subplots_adjust(wspace=0.4)\n if data.savepng:\n figname = data.regr_vars+'_parspace.png'\n fullname = data.plots_dir+'/'+figname\n plt.savefig(fullname,dpi=200,bbox_inches='tight')\n if data.verbose:\n print(figname, 'saved in', data.plots_dir)\n plt.show()\n return\n\ndef plot_histograms(data):\n fig, axs = plt.subplots(2,2,figsize=(9,9))\n color_rec = np.array([0.7,0.7,0.7]);\n color_pred = np.array([1,0.8,0]);\n for i in range(NFEATURES):\n ax = axs[int(i/2), i%2]\n var_name = data.var_names[i]\n var_name_tex = escapeLatex(data.var_names_tex[i])\n if var_name=='chi1' or var_name=='chi2':\n dy_rec = data.stats['diffs_rec'][:,i]\n dy_pred = data.stats['diffs_pred'][:,i]\n tmp = var_name_tex.replace('$', '') \n y_lab = escapeLatex(r'$\\Delta{} y^{\\rm pred/rec}$').replace('y',tmp)\n else:\n dy_rec = data.stats['errors_rec'][:,i]\n dy_pred = data.stats['errors_pred'][:,i]\n tmp = var_name_tex.replace('$', '') \n y_lab = escapeLatex(r'$\\delta{} y^{\\rm pred/rec}$').replace('y',tmp)\n \n fmin = data.histo_fmins[i]\n fmax = data.histo_fmaxs[i]\n nbins = data.histo_nbins[i]\n fstep = (fmax-fmin)/nbins\n ax.hist(dy_rec, bins=np.arange(fmin, fmax, fstep), color=color_rec, histtype='bar', label= var_name_tex+' - rec')\n ax.hist(dy_pred, bins=np.arange(fmin, fmax, fstep),color=color_pred, histtype=u'step', \n label=var_name_tex+' - pred', linewidth=2.) 
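 # the unfilled step outline keeps the prediction histogram readable over the filled recovered bars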
\n ax.set_xlabel(y_lab, fontsize=15)\n ax.legend()\n if data.histo_logs[i]==1:\n ax.set_yscale('log') \n if data.verbose:\n tmp = np.where(dy_rec>fmax)\n print('For {:4s} there are {:4d} recoveries bigger than fmax={:7.3f}'.format(data.var_names[i], np.shape(tmp)[1], fmax))\n tmp = np.where(dy_pred>fmax)\n print('For {:4s} there are {:4d} predictions bigger than fmax={:7.3f}'.format(data.var_names[i], np.shape(tmp)[1], fmax))\n print(' ')\n \n if data.savepng:\n figname = data.plots_prefix+'histo.png'\n fullname = data.plots_dir+'/'+figname\n plt.savefig(fullname,dpi=200,bbox_inches='tight')\n if data.verbose:\n print(figname, 'saved in', data.plots_dir)\n plt.show()\n return\n\n###################################################\n# Tables \n###################################################\ndef num2tex(x,precision=3):\n #out = '${:.'+str(precision)+'e}'\n #out = out.format(x)\n #out = out.replace('e+00','')\n #if 'e-0' in out:\n # out = out.replace('e-0', '\\\\times 10^{-')+'}'\n #if 'e+0' in out:\n # out = out.replace('e+0', '\\\\times 10^{')+'}'\n out = '${:.'+str(precision)+'f}'\n out = out.format(x)\n return out+'$'\n\ndef print_errortab(data):\n # nine columns: variable name plus mean/std of diffs and errors, for recovered and predicted\n header = '\n{:14s} {:15s} {:15s} {:15s} {:15s} {:15s} {:15s} {:15s} {:15s}'\n\n dashes='-'*120\n print('\n', dashes, sep='', end='')\n print(header.format('name', 'mean_diff_rec', 'std_diff_rec', 'mean_err_rec', 'std_err_rec', \n 'mean_diff_pred', 'std_diff_pred', 'mean_err_pred', 'std_err_pred'))\n print(dashes)\n \n err_rec = data.stats['errors_rec']\n err_pred = data.stats['errors_pred']\n diffs_rec = data.stats['diffs_rec']\n diffs_pred = data.stats['diffs_pred']\n for i in range(NFEATURES):\n var_name = data.var_names[i]\n mean_diff_rec = np.mean(np.abs(diffs_rec[:,i]))\n mean_diff_pred = np.mean(np.abs(diffs_pred[:,i]))\n std_diff_rec = np.std(diffs_rec[:,i])\n std_diff_pred = np.std(diffs_pred[:,i])\n if var_name=='chi1' or var_name=='chi2':\n mean_err_rec = np.nan # then substitute with '/' while printing\n mean_err_pred = np.nan # then substitute with '/' while printing\n std_err_rec = np.nan\n std_err_pred = np.nan\n else:\n mean_err_rec = np.mean(np.abs(err_rec[:,i]))\n mean_err_pred = np.mean(np.abs(err_pred[:,i]))\n std_err_rec = np.std(err_rec[:,i])\n std_err_pred = np.std(err_pred[:,i])\n \n if data.tab_format=='txt':\n line_format = '{:14s} {:15.3e} {:15.3e} {:15.3e} {:15.3e} {:15.3e} {:15.3e} {:15.3e} {:15.3e}'\n myline = line_format.format(var_name, mean_diff_rec, std_diff_rec, mean_err_rec, std_err_rec, \n mean_diff_pred, std_diff_pred, mean_err_pred, std_err_pred) \n elif data.tab_format=='tex':\n tex_name = data.var_names_tex[i]\n line_format = escapeLatex('{:14s} & {:s} & {:s} & {:s} & {:s} & {:s} & {:s} & {:s} & {:s} \\\\\\\\')\n myline = line_format.format(tex_name, num2tex(mean_diff_rec), num2tex(std_diff_rec), num2tex(mean_err_rec), num2tex(std_err_rec), \n num2tex(mean_diff_pred), num2tex(std_diff_pred),num2tex(mean_err_pred), num2tex(std_err_pred)) \n else:\n raise RuntimeError(\"'{:s}' is not a valid tab-format\".format(data.tab_format))\n print(myline.replace('$nan$', ' / '))\n \n # add missing variable (i.e. 
Mc or m2)\n if data.regr_vars=='m1m2chi1chi2':\n Mc_inj = chirpMass(data.inj[:,0], data.inj[:,1]) # (m1, m2)\n Mc_rec = chirpMass(data.rec[:,0], data.rec[:,1])\n Mc_pred = chirpMass(data.pred[:,0],data.pred[:,1])\n diffs_rec = Mc_inj-Mc_rec\n diffs_pred = Mc_inj-Mc_pred\n err_rec = (Mc_inj-Mc_rec)/Mc_inj\n err_pred = (Mc_inj-Mc_pred)/Mc_inj\n var_name = 'Mc'\n tex_name = '${\\cal{M}}_c$'\n elif data.regr_vars=='m1Mcchi1chi2':\n m2_inj = findSecondMassFromMc(data.inj[:,1], data.inj[:,0]) # (Mc, m1)\n m2_rec = findSecondMassFromMc(data.rec[:,1], data.rec[:,0]) \n m2_pred = findSecondMassFromMc(data.pred[:,1],data.pred[:,0])\n diffs_rec = m2_inj-m2_rec\n diffs_pred = m2_inj-m2_pred\n err_rec = (m2_inj-m2_rec)/m2_inj\n err_pred = (m2_inj-m2_pred)/m2_inj\n var_name = 'm2'\n tex_name = '$m_2$'\n \n mean_err_rec = np.mean(np.abs(err_rec))\n mean_err_pred = np.mean(np.abs(err_pred))\n mean_diff_rec = np.mean(np.abs(diffs_rec))\n mean_diff_pred = np.mean(np.abs(diffs_pred))\n std_err_rec = np.std(err_rec)\n std_err_pred = np.std(err_pred)\n std_diff_rec = np.std(diffs_rec)\n std_diff_pred = np.std(diffs_pred)\n\n if data.tab_format=='txt':\n line_format = '{:14s} {:15.3e} {:15.3e} {:15.3e} {:15.3e} {:15.3e} {:15.3e} {:15.3e} {:15.3e}'\n myline = line_format.format(var_name, mean_diff_rec, std_diff_rec, mean_err_rec, std_err_rec, \n mean_diff_pred, std_diff_pred, mean_err_pred, std_err_pred) \n print(dashes,myline,dashes,sep='\\n')\n elif data.tab_format=='tex':\n line_format = escapeLatex('{:14s} & {:s} & {:s} & {:s} & {:s} & {:s} & {:s} & {:s} & {:s} \\\\\\\\')\n myline = line_format.format(tex_name, num2tex(mean_diff_rec), num2tex(std_diff_rec), num2tex(mean_err_rec), num2tex(std_err_rec), \n num2tex(mean_diff_pred), num2tex(std_diff_pred),num2tex(mean_err_pred), num2tex(std_err_pred)) \n print('\\hline',myline,dashes,sep='\\n')\n\n\n print(' \\n+++ Warning +++: for spin-variables the std is computed on the difference ' +\n 'distribution [e.g. abs(inj-rec)],\\nwhile for mass-variables on the error distributions'+\n ' [e.g. abs(inj-rec)/inj]\\n')\n \n return\n\n###################################################\n# Main \n###################################################\nif __name__=='__main__':\n parser = argparse.ArgumentParser(prog='paper_plots', description='plots to use in the regression paper')\n parser.add_argument('--NN', dest='use_NN_data', action='store_true',\n help=\"use NN-data (path and filename hardcoded).\")\n parser.add_argument('--GPR', dest='use_GPR_data', action='store_true',\n help=\"use GPR-data (path and filename hardcoded).\")\n parser.add_argument('-p', '--plots', dest='plots2do', nargs='+',default=[],\n help='identifiers of the plots to do, e.g. >> -p rec_vs_pred')\n parser.add_argument('--vars', type=str, dest='regr_vars', default='m1m2chi1chi2', \n help=\"variables used in the regression. 
Can be 'm1m2chi1chi2' or 'm1Mcchi1chi2'\")\n parser.add_argument('--dataset_path', type=str, dest='dataset_path', default=repo_path+'datasets/GstLAL/', \n help=\"path to the O2 injections (test_NS.csv)\")\n parser.add_argument('-s', '--save', dest='savepng', action='store_true', \n help=\"save plots in PNG format\")\n parser.add_argument('--plots_dir', type=str, dest='plots_dir', default=os.getcwd(),\n help=\"directory where to save plots (default is current dir)\")\n parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', \n help=\"Print stuff\")\n \n parser.add_argument('--histo_fmin', dest='histo_fmins', default=[-3,-3,-2,-2], nargs=4, type=int, \n help=\"fmin used in histograms, one int for each feature, e.g. --histo_fmin -1 -2 -3 -4 \")\n parser.add_argument('--histo_fmax', dest='histo_fmaxs', default=[2,2,2,2], nargs=4, type=int, \n help=\"fmax used in histograms, one int for each feature, e.g. --histo_fmax 1 2 3 4 \")\n parser.add_argument('--histo_nbins', dest='histo_nbins', default=[50,50,50,50], nargs=4, type=int, \n help=\"nbins used in histograms, one int for each feature, e.g. --histo_nbins 50 30 40 10 \")\n parser.add_argument('--histo_logs', dest='histo_logs', default=[0,0,0,0], nargs=4, type=int, \n help=\"if i-element is 1, use logscale in i-subplot, e.g. --histo_logs 1 1 0 0 \")\n \n parser.add_argument('--errortab', dest='errortab', action='store_true', \n help=\"print a table with mean errors/differences\")\n parser.add_argument('--tab_format', dest='tab_format', type=str, default='txt', \n help=\"format of printed tables, 'txt' (default) or 'tex'\")\n \n parser.add_argument('--parspace_colorful', dest='parspace_colorful', action='store_true', \n help='Use different colors in parspace plot for BBH, BNS, BHNS')\n\n args = parser.parse_args()\n verbose = args.verbose\n \n # load injected and recovered: m1, m2, Mc, chi1, chi2\n rec_all_features = extract_data(args.dataset_path+'/complete_xtest.csv', skip_header=False, verbose=verbose)\n inj_all_features = extract_data(args.dataset_path+'/complete_ytest.csv', skip_header=False, verbose=verbose)\n if args.regr_vars=='m1Mcchi1chi2':\n idx_feature_to_remove = 1\n var_names = ['m1', 'Mc', 'chi1', 'chi2']\n var_names_tex = ['$m_1$', '${\\cal{M}}_c$', '$\\chi_1$', '$\\chi_2$']\n \n elif args.regr_vars=='m1m2chi1chi2':\n idx_feature_to_remove = 2\n var_names = ['m1', 'm2', 'chi1', 'chi2']\n var_names_tex = ['$m_1$', '$m_2$', '$\\chi_1$', '$\\chi_2$']\n\n rec = np.delete(rec_all_features, idx_feature_to_remove, axis=1) # remove m2 or Mc\n inj = np.delete(inj_all_features, idx_feature_to_remove, axis=1) # remove m2 or Mc\n\n # load prediction \n plots_prefix = args.regr_vars\n if args.use_NN_data:\n fname = repo_path+'algo/classy_NN/sklassy_prediction/complete_prediction_'+args.regr_vars+'.csv' \n plots_prefix += '_NN_'\n elif args.use_GPR_data:\n fname = repo_path+'algo/GPR/something.csv'\n plots_prefix += '_GPR_'\n else:\n raise RuntimeError('Invalid input. 
Use --NN or --GPR')\n pred = extract_data(fname, verbose=verbose)\n\n dashes = '-'*50\n if verbose:\n print(dashes)\n print('Shape of injected matrix:', np.shape(inj))\n print('Shape of recovered matrix:', np.shape(rec))\n print('Shape of predicted matrix:', np.shape(pred))\n print(dashes)\n\n data = lambda:0\n data.regr_vars = args.regr_vars\n data.inj = inj\n data.rec = rec\n data.pred = pred\n data.var_names = var_names\n data.var_names_tex = var_names_tex\n #data.var_idx = var_idx\n data.savepng = args.savepng\n data.plots_dir = args.plots_dir\n data.plots_prefix = plots_prefix\n data.verbose = verbose\n data.histo_fmins = args.histo_fmins\n data.histo_fmaxs = args.histo_fmaxs\n data.histo_nbins = args.histo_nbins\n data.histo_logs = args.histo_logs\n data.errortab = args.errortab\n data.tab_format = args.tab_format\n data.parspace_colorful = args.parspace_colorful\n\n data.stats = {}\n for i in range(NFEATURES):\n data.stats['diffs_rec'] = inj-rec\n data.stats['diffs_pred'] = inj-pred\n with np.errstate(divide='ignore'):\n data.stats['errors_rec'] = (inj-rec )/inj \n data.stats['errors_pred'] = (inj-pred)/inj \n \n if args.errortab:\n print_errortab(data)\n\n for plot_id in args.plots2do:\n if plot_id=='rec_vs_pred':\n plot_recovered_vs_predicted(data)\n elif plot_id=='parspace':\n plot_parspace(data)\n elif plot_id=='histo':\n plot_histograms(data)\n else:\n print('Unknown plot: '+plot_id)\n\n","repo_name":"PhoenixBirdCreations/IPAM2021_ML","sub_path":"papers/regression/scripts/paper_plots.py","file_name":"paper_plots.py","file_ext":"py","file_size_in_byte":18205,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"71189630664","text":"import torch\nfrom torch.optim.optimizer import Optimizer\n\n\"\"\"Code copied from https://github.com/borbysh/coolmomentum/blob/master/optimizers/coolmom_pytorch.py\"\"\"\n\n\nclass Coolmomentum(Optimizer):\n \"\"\"\n lr (float): learning rate\n momentum (float, optional): initial momentum constant (0 for SGD)\n weight_decay (float, optional): weight decay (L2 penalty)\n beta: cooling rate, close to 1, if beta=1 then no cooling\n \"\"\"\n\n def __init__(self, params, lr=0.01, momentum=0,\n weight_decay=0.0, beta=1.0, dropout=0.0):\n\n defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay, beta=beta, dropout=dropout)\n super(Coolmomentum, self).__init__(params, defaults)\n self.T = 0.0\n self.number = 0\n self.iteration = 0\n\n def __setstate__(self, state):\n super(Coolmomentum, self).__setstate__(state)\n\n @torch.no_grad()\n def step(self):\n\n for group in self.param_groups:\n\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n beta = group['beta']\n beta_power = beta ** self.iteration\n\n rho_0 = momentum\n rho = 1 - (1 - rho_0) / beta_power\n rho = max(rho, 0)\n lrn = group['lr'] * (1 + rho) / 2 # lrn instead of lr\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Sparse gradients are not supported')\n state = self.state[p]\n # mask\n # m = torch.ones_like(p.data) * group['dropout']\n # mask = torch.bernoulli(m)\n # State initialization\n if len(state) == 0:\n state['step'] = torch.zeros_like(p.data)\n\n step = state['step']\n\n if weight_decay != 0:\n grad.add_(p.data, alpha=weight_decay)\n\n # Update the step\n step.mul_(rho).add_(grad, alpha=-lrn)\n\n self.T += torch.sum(step * step) # squared step\n self.number += torch.sum(step * 0 + 1) # of trainable params\n 
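# in-place parameter update: p <- p + step (the -lr factor is already folded into step above)\n 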
p.data.add_(step)\n\n if self.iteration % 391 == 0:\n # self.T = self.T / self.number # compute temperature\n self.T = 0.0\n self.number = 0\n self.iteration += 1\n\n return None\n","repo_name":"ifeherva/optimizer-benchmark","sub_path":"optimizers/coolmomentum.py","file_name":"coolmomentum.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"81"} +{"seq_id":"27916025938","text":"# CS194-26 (CS294-26): Project 2\n\nimport numpy as np\nimport skimage as sk\nimport skimage.io as skio\nimport scipy\nimport matplotlib.pyplot as plt\nfrom scipy import signal\n\n\ndef gaussian_kernel(size=4, sigma=1.5):\n # algorithm taken from https://subsurfwiki.org/wiki/Gaussian_filter\n size = size // 2\n x, y = np.mgrid[-size:size+1, -size:size+1]\n normal = 1 / (2.0 * np.pi * sigma**2)\n g = np.exp(-((x**2 + y**2) / (2.0*sigma**2))) * normal\n return g\n\n# kernel definitions\n# gauss_kern = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]], np.float32) / 16.\ngauss_kern = gaussian_kernel(6)\n\nunit_impulse = signal.unit_impulse((7, 7), 'mid')\nD_x = np.array([[-1, 1], [0, 0]], np.float32)\nD_y = np.array([[1, 0], [-1, 0]], np.float32)\n\ndef sharpen(img, alpha=2.5):\n # read in the image\n im = skio.imread(img)\n\n # convert to double (might want to do this later on to save memory) \n im = sk.img_as_float(im)\n \n # build an output image with same dimensions as input\n im_out = np.ndarray((len(im), len(im[0]), len(im[0][0])))\n\n # calculate sharpening kernel based on alpha\n sharp_kernel = (1 + alpha) * unit_impulse - alpha * gauss_kern\n\n # apply sharpening kernel to each channel and then add sharpened channel to output image\n for i in range(3):\n current_channel = im[:,:,i]\n sharp = signal.convolve2d(current_channel, sharp_kernel, mode='same', boundary='symm')\n im_out[:,:,i] = normalize(sharp)\n\n # fname = 'output/exsharp_' + img[7:len(img) - 4] + '.jpg'\n # skio.imsave(fname, im_out)\n skio.imshow(normalize(im + im_out))\n skio.show()\n return im_out\n\ndef blur(img, sigma=1.5):\n # read in the image\n im = skio.imread(img)\n\n # convert to double (might want to do this later on to save memory) \n im = sk.img_as_float(im)\n\n # build an output image with same dimensions as input\n im_out = np.ndarray((len(im), len(im[0]), len(im[0][0])))\n\n # build a gaussian kernal from input paramaters\n gauss_kern = gaussian_kernel(6, sigma)\n\n for i in range(3):\n current_channel = im[:,:,i]\n im_out[:,:,i] = signal.convolve2d(current_channel, gauss_kern, mode='same', boundary='symm')\n \n # fname = 'output/exblur_' + img[5:len(img) - 4] + '.jpg'\n # skio.imsave(fname, im_out)\n\n return normalize(im_out)\n\ndef normalize(img):\n return (img - np.amin(img)) / (np.amax(img) - np.amin(img))\n\n\ndef get_edges(img):\n # read in the image\n im = skio.imread(img)\n\n # convert to double (might want to do this later on to save memory) \n im = sk.img_as_float(im)\n\n # just grab the R channel from the image\n im_out = im[:,:,0]\n\n\n # gaussian blurring\n im_out = signal.convolve2d(im_out, gauss_kern, mode='same', boundary='symm')\n # im_out = signal.convolve2d(im_out, D_x, mode=\"same\", boundary=\"symm\")\n # im_out = signal.convolve2d(im_out, D_y, mode=\"same\", boundary=\"symm\")\n\n # Dx and Dy Kernel application\n im_out_x = signal.convolve2d(im_out, D_x, mode=\"same\", boundary=\"symm\")\n im_out_y = signal.convolve2d(im_out, D_y, mode=\"same\", boundary=\"symm\")\n im_out = np.sqrt(im_out_x * im_out_x + im_out_y * im_out_y)\n 
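# normalize the gradient magnitude to [0, 1] so the fixed 0.3 threshold below is meaningful\n 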
im_out = im_out / np.max(np.absolute(im_out))\n\n # combine convolution kernels\n # convx = signal.convolve2d(gauss_kern, D_x)\n # convtotal = signal.convolve2d(convx, D_y)\n # im_out = signal.convolve2d(im_out, convtotal)\n # im_out = im_out / np.max(np.absolute(im_out))\n # im_out = (im_out + 1) / 2.\n\n # make black and white at threshold\n for i in range(len(im_out)):\n for j in range(len(im_out[0])):\n if im_out[i][j] > 0.3:\n im_out[i][j] = 1\n else:\n im_out[i][j] = 0\n\n\n # # save the image\n # fname = 'docs/images/testx.jpg'\n # skio.imsave(fname, im_out_x)\n # fname = 'docs/images/testy.jpg'\n # skio.imsave(fname, im_out_y)\n\n # # display the image\n skio.imshow(normalize(im_out))\n skio.show()\n\n\n# name of the input file\nimname = 'cameraman.png'\n# imname = 'facade.jpg'\n# imname = 'taj.jpg'\n# imname = 'mandelbrot.jpg'\n# imname = 'nutmeg.jpg'\n\n# function to run (choose one)\nget_edges('data/' + imname)\n# sharpen('data/' + imname)\n","repo_name":"glenn2763/CS-194-26-Proj2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"28518160918","text":"from flask import Flask, render_template, request, flash, redirect\nimport sys\nimport requests\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__, template_folder=\"templates\")\napp.secret_key = b'_5#y2Q8z\\n\\xec]/'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///weather.db'\ndb = SQLAlchemy(app)\n\nAPI_KEY = \"your_api_token\"\nBASE_URL = 'http://api.openweathermap.org/data/2.5/weather'\n\n\nclass City(db.Model):\n id = db.Column(db.Integer, primary_key=True, unique=True)\n name = db.Column(db.String(80), unique=True, nullable=False)\n\n def __repr__(self):\n return self.name\n\n\n@app.route(\"/\")\ndef index():\n db.create_all()\n cities = City.query.all()\n weather_info = get_weather_info(cities)\n for city in cities:\n for city_weather in weather_info:\n if city.name.upper() == city_weather['city']:\n city_weather['id'] = city.id\n return render_template('index.html', weather=weather_info)\n\n\n@app.route('/add', methods=['POST'])\ndef add_city():\n if request.method == 'POST':\n city_name = request.form['city_name']\n response = requests.post(BASE_URL, params={\"q\": city_name, \"appid\": API_KEY})\n response_json = response.json()\n if response_json['cod'] != '404':\n city_exist = City.query.filter_by(name=city_name).first()\n if city_exist is None:\n print(\"Adding city: \", city_name)\n city = City(name=city_name)\n db.session.add(city)\n db.session.commit()\n return redirect('/')\n else:\n flash(\"The city has already been added to the list!\")\n else:\n flash(\"The city doesn't exist!\")\n return redirect('/')\n\n\n@app.route('/delete/', methods=['GET', 'POST'])\ndef delete(city_id):\n if request.method == 'POST':\n city = City.query.filter_by(id=city_id).first()\n db.session.delete(city)\n db.session.commit()\n return redirect('/')\n\n\ndef get_weather_info(cities):\n weather_cards = []\n city_names = [x.name for x in cities]\n for city in city_names:\n response = requests.post(BASE_URL, params={\"q\": city, \"appid\": API_KEY, 'units':'metric'})\n response_json = response.json()\n weather_info = {\"temp\": int(response_json['main']['temp']), \"city\": response_json['name'].upper(),\n \"state\": response_json['weather'][0]['main']}\n weather_cards.append(weather_info)\n return weather_cards\n\n\n# don't change the following way to run flask:\nif __name__ == 
'__main__':\n if len(sys.argv) > 1:\n arg_host, arg_port = sys.argv[1].split(':')\n app.run(host=arg_host, port=arg_port)\n else:\n app.run()\n","repo_name":"maticortesr/weather-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"30631174752","text":"import numpy as np\n\ndef create_heatmap_array(df):\n #to create a waffle chart we need to do a hack-y workaround on the heatmap function\n #create a 10 x 10 array where each value in the array will represent a different brand, and the number of values for each brand will be equivalent to the market share %\n brand_squares = [int(perc * 100) for perc in df[\"MarketShareReformat\"]]\n brand_array = [np.ones(squares) * (i+1) for i, squares in enumerate(brand_squares)]\n\n data = np.concatenate(brand_array)\n data = data.reshape(10, 10)\n data = data.transpose() # Transpose the array\n\n return data\n","repo_name":"tomgprice411/MM_2023_Week7_Dashboard_v2","sub_path":"apps/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"24448556671","text":"import torch\nimport torch.nn as nn\nfrom Datasets.ShoeEdgeDataset import ShoeEdgeDataset\nfrom Model.Generator import Generator\nimport torchvision.transforms as transforms\nfrom torch.utils.data.dataloader import DataLoader\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nfrom Datasets.BWToColorData import BWToColorDS\n\n\ndef plot_sample(im_tensor, gt_tensor, sktech_tensor):\n import torchvision.transforms.functional as F\n de_norm = transforms.Normalize([-1, -1, -1], [2, 2, 2])\n \n \n im = F.to_pil_image(de_norm(im_tensor.squeeze(0)))\n gt_im = F.to_pil_image(de_norm(gt_tensor.squeeze(0)))\n sktech_im = F.to_pil_image(de_norm(sktech_tensor.squeeze(0)))\n \n \n plt.subplot(1,3,1)\n plt.imshow(sktech_im)\n plt.axis('off')\n plt.subplot(1,3,2)\n plt.imshow(gt_im)\n plt.axis('off')\n plt.subplot(1,3,3)\n plt.imshow(im)\n plt.axis('off')\n\n\n plt.show()\n \n \n \n \ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\ndef _init_gen(state):\n gen = Generator(config=2, in_channels=3, out_im_ch=3)\n \n gen.load_state_dict(torch.load(state))\n gen.eval()\n \n return gen.to(device)\n\n\ngen = _init_gen(r'checkpoints/gen_g_color_to_gs_latest.pt')\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Resize((256, 256)),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n\ntrans_bw = transforms.Compose([transforms.ToTensor(),\n transforms.Resize((256, 256)),\n transforms.Normalize((0.5), (0.5))])\n\ndsColorToGray = BWToColorDS(r'BWToColorData/bw', r'BWToColorData/color', trans_color=transform, trans_bw=trans_bw, max_items=200)\n\n\nloader = DataLoader(dsColorToGray, batch_size=1)\n\n\nimage_dim = 256 * 256 * 3\n\navg_pixel_acc = 0\nfor batchIDX, (domainA, domainB) in tqdm(enumerate(loader)):\n domainA, domainB = domainA.to(device).squeeze(0), domainB.to(device)\n \n im_tensor = torch.stack([domainA, domainA, domainA], dim=1)\n gen_image = gen(domainA)\n #plot_sample(gen_image, domainB, domainA)\n gen_image = (255 * gen_image).int()\n \n \n domainB = (255 * domainB).int()\n # calculate pixel accuracy\n correct_pixel = (torch.abs(gen_image- domainB) < 10).count_nonzero()\n \n avg_pixel_acc += (correct_pixel / image_dim)\n\n\navg_pixel_acc /= len(dsColorToGray)\n\n\nprint(f\"per pixel accuracy: 
{avg_pixel_acc:.4f}\")","repo_name":"Harutjun/DeepLearning","sub_path":"eval_performance.py","file_name":"eval_performance.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"31979494293","text":"import h5py\nimport bbi\nimport negspy.coordinates as nc\nimport numpy as np\nimport math\nimport argparse\nimport json\nfrom tqdm import tqdm\nimport resource\n\nfrom utils import metadata_json_to_row_info, get_manifest_to_outfile_parser\n\ndef bigwigs_to_multivec(\n input_bigwig_files,\n input_metadata_files,\n output_file,\n starting_resolution\n):\n\n f = h5py.File(output_file, 'w')\n\n num_samples = len(input_bigwig_files)\n\n # Zip the input to create (bw, metadata) tuples\n zipped_input = zip(input_bigwig_files, input_metadata_files)\n\n # Create level zero groups\n info_group = f.create_group(\"info\")\n resolutions_group = f.create_group(\"resolutions\")\n chroms_group = f.create_group(\"chroms\")\n\n # Set info attributes\n info_group.attrs['tile-size'] = 256\n\n # Prepare to fill in chroms dataset\n chromosomes = nc.get_chromorder('hg38')\n chromosomes = chromosomes[:25] # TODO: should more than chr1-chrM be used?\n num_chromosomes = len(chromosomes)\n chroms_length_arr = np.array([ nc.get_chrominfo('hg38').chrom_lengths[x] for x in chromosomes ], dtype=\"i8\")\n chroms_name_arr = np.array(chromosomes, dtype=\"S23\")\n\n chromosomes_set = set(chromosomes)\n chrom_name_to_length = dict(zip(chromosomes, chroms_length_arr))\n\n # Fill in chroms dataset entries \"length\" and \"name\"\n chroms_group.create_dataset(\"length\", data=chroms_length_arr)\n chroms_group.create_dataset(\"name\", data=chroms_name_arr)\n\n \n # Prepare to fill in resolutions dataset\n resolutions = [ starting_resolution*(2**x) for x in range(16)]\n \n # Create each resolution group.\n for resolution in resolutions:\n resolution_group = resolutions_group.create_group(str(resolution))\n # TODO: remove the unnecessary \"values\" layer\n resolution_values_group = resolution_group.create_group(\"values\")\n \n # Create each chromosome dataset.\n for chr_name, chr_len in zip(chromosomes, chroms_length_arr):\n chr_shape = (math.ceil(chr_len / resolution), num_samples)\n resolution_values_group.create_dataset(chr_name, chr_shape, dtype=\"f4\", fillvalue=np.nan, compression='gzip')\n \n # Fill in data for each bigwig file.\n for bw_index, bw_file in tqdm(list(enumerate(input_bigwig_files)), desc='bigwigs'):\n if bbi.is_bigwig(bw_file):\n chromsizes = bbi.chromsizes(bw_file)\n matching_chromosomes = set(chromsizes.keys()).intersection(chromosomes_set)\n\n # Fill in data for each resolution of a bigwig file.\n for resolution in resolutions:\n # Fill in data for each chromosome of a resolution of a bigwig file.\n for chr_name in matching_chromosomes:\n chr_len = chrom_name_to_length[chr_name]\n chr_shape = (math.ceil(chr_len / resolution), num_samples)\n arr = bbi.fetch(bw_file, chr_name, 0, chr_len, chr_shape[0], summary=\"sum\")\n resolutions_group[str(resolution)][\"values\"][chr_name][:,bw_index] = arr\n else:\n print(f\"{bw_file} not is_bigwig\")\n\n f.flush()\n\n f.close()\n\n max_mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n print(max_mem)\n \n # Append metadata to the top resolution row_infos attribute.\n row_infos = []\n for metadata_index, metadata_file in enumerate(input_metadata_files):\n with open(metadata_file) as mf:\n try:\n metadata_json = json.load(mf)\n except Exception as e:\n print(f\"Error 
loading metadata file: {metadata_file}\")\n print(e)\n metadata_json = None\n row_info = metadata_json_to_row_info(metadata_json)\n row_infos.append(row_info)\n \n row_infos_encoded = str(json.dumps(row_infos))\n \n f = h5py.File(output_file, 'r+')\n\n info_group = f[\"info\"]\n info_group[\"row_infos\"] = row_infos_encoded\n \n f.close()\n\n\nif __name__ == \"__main__\":\n parser = get_manifest_to_outfile_parser()\n args = parser.parse_args()\n\n with open(args.input) as f:\n manifest_json = json.load(f)\n input_bigwig_files = manifest_json['input_bigwig_files']\n input_metadata_files = manifest_json['input_metadata_files']\n\n bigwigs_to_multivec(\n input_bigwig_files,\n input_metadata_files,\n args.output,\n args.starting_resolution\n )","repo_name":"hms-dbmi/cistrome-explorer","sub_path":"pipelines/cistrome-to-multivec/src/manifest_to_mv5.py","file_name":"manifest_to_mv5.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"81"} +{"seq_id":"5742996076","text":"\n\n#import machine, display, time,\nimport machine, _thread, math\n#import m5stack\nimport utime, machine #Cycle count required imports\nfrom micropython import const\nimport hardware_config\nimport gc\n\ntft, btn_a, btn_b, btn_c = hardware_config.M5stack()\n\nclass Relay(machine.Signal):\n \"\"\"\n Notes:\n Adds extra feature to the machine.Signal class\n\n Adds:\n toggle: Inverts the current state of the pin.\n \"\"\"\n def __init__(self, gpio_pin_number: int, inverted=False):\n super().__init__(gpio_pin_number, inverted)\n\n def toggle(self):\n self.value(not self.value())\n\nclass Button:\n def __init__(self, gpio_pin_number: int):\n self.gpio_pin = machine.Pin(gpio_pin_number, machine.Pin.IN, machine.Pin.PULL_UP)\n\ndef prettyTime(milliseconds: int, msPrecision: int=1, verbose: bool=False) -> str:\n \"\"\"\n Args:\n milliseconds: The value in milliseconds to convert\n msPrecision: The number of digits to show for the milliseconds portion of the output.\n Default = 1\n verbose: If verbose is True, it will output days, hours, minutes, seconds, milliseconds.\n If verbose is False, it will display only the minimum values needed.\n Default = False\n\n Returns: A string with the converted time in human readable form with the precision specified.\n \"\"\"\n seconds, milliseconds = divmod(milliseconds, 1000)\n minutes, seconds = divmod(seconds, 60)\n hours, minutes = divmod(minutes, 60)\n days, hours = divmod(hours, 24)\n weeks, days = divmod(days, 7)\n years, weeks = divmod(weeks, 52)\n\n time=str(\"%1dy %1dw %1dd\" % (years, weeks, days))\n if verbose == False:\n if years == 0:\n time=str(\"%1dw %1dd %1dh\" % (weeks, days, hours))\n if weeks == 0:\n time=str(\"%1dd %1dh %02dm\" % (days, hours, minutes))\n if days == 0:\n time=str(\"%1dh %02dm %02ds\" % (hours, minutes, seconds))\n if hours == 0:\n time=str(\"%02dm %02ds\" % (minutes, seconds))\n if minutes == 0:\n time=str(\"%04.2fs\" % (seconds+(milliseconds/1000)))\n if seconds == 0:\n time=str(\"%sms\" % (truncate(milliseconds, precision=msPrecision))) # %s because truncate() returns a string\n else:\n time=str(\"%1dy %1dw %1dd %1dh %02dm %02d.%3ds\" % (years, weeks, days, hours, minutes, seconds, milliseconds))\n return time\n\ndef truncate(original_number: float, precision: int=1) -> str:\n \"\"\"\n Args:\n original_number: The float that you want truncated (not rounded)\n precision: Int defining how many decimal places to truncate at.\n\n Returns: The string of the original float, but truncated to the 
specified number of decimal places.\n Default = 1\n\n Notes: This has to return a string due to the accuracy in the MCU causing numbers to have too many digits.\n \"\"\"\n precision=int(precision)\n if precision>0:\n temp = str(float(original_number)).split('.') # float to force a decimal point, string to split.\n temp[1] = temp[1]+('0'*precision) # make sure we have enough digits for the next step.\n truncated_number = temp[0]+'.'+temp[1][:precision]\n else:\n truncated_number = str(int(original_number))\n return truncated_number\n\ndef cycle(on_time_ms: int, off_time_ms: int, relay: Relay) -> None:\n \"\"\"\n Args:\n on_time_ms (int): The desired time in milliseconds for the ON period of the cycle.\n off_time_ms (int): The desired time in milliseconds for the OFF period of the cycle.\n relay (Relay): The instance of the relay to be acted on.\n Returns:\n None\n \"\"\"\n relay.on()\n utime.sleep_ms(on_time_ms)\n relay.off()\n utime.sleep_ms(off_time_ms)\n gc.collect()\n\nclass UI:\n \"\"\"\n Args:\n title_size (int): The number of lines for the Title section. Default = 1\n\n nav_size (int): The number of lines for the Navigation section. Set to 0 for test UIs. Default = 0\n\n parameter_size (int): The number of lines for the Parameter section. Set to 0 for menu UIs. Default = 4\n\n status_size (int): The number of lines for the Status section. Set to 0 for menu UIs. Default = 3\n\n notification_size (int): The number of lines for the Notification section. Default = 1\n\n Note:\n This is the display for a specific test / menu. All calls to change the\n information on the screen need to go through the GUI2LCD instance.\n \"\"\"\n global tft\n\n def __init__(self):\n tft.clear()\n self.screenwidth, self.screenheight = tft.screensize()\n self.header = self.DisplaySection(0,0,40, self.screenwidth)\n self.parameters = self.DisplaySection(0,41,120, self.screenwidth, fill_color=tft.GREEN)\n# self.status = self.DisplaySection(0,121,60, self.screenwidth)\n\n class DisplaySection:\n def __init__(self,\n x:int,\n y:int,\n frame_height:int,\n frame_width:int,\n frame_color:int = tft.WHITE,\n fill_color:int = tft.BLUE,\n text_color:int = tft.WHITE,\n font = tft.FONT_DejaVu18,\n ):\n\n self.x = x\n self.y = y\n self.frame_width = frame_width\n self.frame_height = frame_height\n self.frame_color = frame_color\n self.fill_color = fill_color\n self.text_color = text_color\n self.font = font\n tft.font(self.font)\n self.line_height, self.text_y = self.line_height_margin_calc(10)\n self.num_of_lines = int(self.frame_height / self.line_height)\n self.lines = {}\n\n tft.set_bg(self.fill_color)\n tft.set_fg(self.text_color)\n\n self.create_lines(self.num_of_lines)\n self.initialize_section()\n\n def initialize_section(self):\n tft.font(self.font)\n tft.rect(self.x, self.y, self.frame_width, self.frame_height, self.frame_color, self.fill_color)\n self.create_lines(self.num_of_lines)\n self.update_all_lines()\n\n\n def line_height_margin_calc(self, margin:int = 10) -> int:\n \"\"\"\n Args:\n margin_pct (int): the percentage of font size for vertical margins\n Returns:\n The total height of the line in pixels\n The number of pixels used above the font used for margins, to set the vertical offset for text_y\n \"\"\"\n margin_pct = margin/100\n font_height = int(tft.fontSize()[1])\n margin_px = int(font_height)\n line_height_px = int(font_height * (1 + margin_pct))\n print(\"line_height_margin_calc: line_height_px = \" + str(line_height_px) + ' margin: ' + str(margin/2))\n return line_height_px, 
int(margin_px/2)\n\n def create_lines(self, num_of_lines:int):\n \"\"\"\n Args:\n num_of_lines (int): initializes the dictionary of the text for each line number\n Returns:\n Nothing\n \"\"\"\n line_num = 1\n while line_num <= num_of_lines:\n self.lines[line_num] = str(line_num)\n line_num += 1\n print(self.lines.items())\n\n def update_line(self, line_number:int):\n \"\"\"\n Args:\n line_number (int): The line number to update on the desplay\n Returns:\n Nothing\n \"\"\"\n line_y = ((line_number - 1) * self.line_height)\n text_y = line_y + self.text_y\n tft.rect(self.x, line_y, self.frame_width, self.line_height, self.fill_color, self.fill_color)\n tft.text(self.x, text_y, self.lines.get(line_number, \"ERROR\"),\n self.text_color, transparent=True)\n\n def update_all_lines(self):\n \"\"\"\n Returns:\n Nothing\n Notes:\n Quick way to update all lines in the section\n \"\"\"\n line_num = 1\n while line_num <= self.num_of_lines:\n self.update_line(line_num)\n line_num += 1\n\n\n\n\n # def header(self):\n # FRAME_COLOR = tft.BLUE\n # FILL_COLOR = tft.BLUE\n # TEXT_COLOR = 0xffffff\n # HEIGHT = 40\n # LINE1_Y = 0\n # LINE2_Y = 20\n #\n # tft.rect(0, 0, 320, HEIGHT, FRAME_COLOR, FILL_COLOR)\n # tft.textClear(tft.CENTER,LINE1_Y, self.h1_text)\n # tft.text(tft.CENTER,LINE1_Y, self.h1_text, TEXT_COLOR, transparent=True)\n # tft.textClear(tft.CENTER,LINE2_Y, self.h2_text)\n # tft.text(tft.CENTER,LINE2_Y, self.h2_text, TEXT_COLOR, transparent=True)\n #\n # def test_param(self):\n # FRAME_COLOR = tft.BLUE\n # FILL_COLOR = tft.BLUE\n # TEXT_COLOR = 0xffffff\n # HEIGHT = 60\n # text_line_1 = ''\n # text_line_2 = ''\n #\n # tft.rect(0, 0, 320, 20, tft.RED, tft.RED)\n # #printLCD(TEST_NAME_1, Y=LCDTitle1, Background=BLACK)\n # #printLCD(TEST_NAME_2, Y=LCDTitle2, Background=BLACK)\n #\n # def test_status(self):\n # FRAME_COLOR = tft.BLUE\n # FILL_COLOR = tft.BLUE\n # TEXT_COLOR = 0xffffff\n # HEIGHT = 60\n # #text_line_1 = ''\n # #text_line_2 = ''\n #\n # tft.rect(0, 0, 320, 20, tft.RED, tft.RED)\n # printLCD(TEST_NAME_1, Y=LCDTitle1, Background=BLUE)\n # printLCD(TEST_NAME_2, Y=LCDTitle2, Background=BLUE)\n\n def footer(self):\n tft.rect( 25, 210, 80, 30, tft.RED, tft.BLUE)\n tft.text( 50, 215, \"UP\", tft.WHITE, transparent=True)\n\n tft.rect(120, 210, 80, 30, tft.RED, tft.BLUE)\n tft.text(120, 215, \"DOWN\", tft.WHITE, transparent=True)\n\n tft.rect(215, 210, 80, 30, tft.RED, tft.BLUE)\n tft.text(235, 215, \"SEL\", tft.WHITE, transparent=True)\n\n def printLCD(text, X=0, Y=0, bg_color=0x000000, text_color=0xffffff, transparent=True):\n text = str(text)\n tft.textClear(X, Y, text)\n tft.text(X, Y, text, text_color, transparent=True)\n","repo_name":"Tamagotono/CycleTester","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":10307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36596222935","text":"class Recipe:\n def __init__(self, score, next):\n self.score = score\n self.next = next\n\n\ndef main():\n needed_recipes = 0\n for line in open(\"input.txt\"):\n needed_recipes = int(line)\n\n start_recipe = Recipe(3, None)\n last_recipe = Recipe(7, start_recipe)\n start_recipe.next = last_recipe\n recipe_1elf = start_recipe\n recipe_2elf = last_recipe\n size = 2\n steps = 20216200\n for _ in range(steps):\n new_score = recipe_1elf.score + recipe_2elf.score\n if new_score >= 10:\n new_last = Recipe(new_score - 10, start_recipe)\n before_last = Recipe(1, new_last)\n last_recipe.next = before_last\n last_recipe = 
new_last\n size += 2\n else:\n new_last = Recipe(new_score, start_recipe)\n last_recipe.next = new_last\n last_recipe = new_last\n size += 1\n for _ in range(recipe_1elf.score + 1):\n recipe_1elf = recipe_1elf.next\n for _ in range(recipe_2elf.score + 1):\n recipe_2elf = recipe_2elf.next\n\n current_recipe = start_recipe\n array_input = [int(i) for i in str(needed_recipes)]\n validation = 0\n current_recipe_id = 1\n start_test = current_recipe\n while True:\n if current_recipe.score == array_input[validation]:\n if validation == 0:\n start_test = current_recipe\n start_test_recipe_id = current_recipe_id\n validation += 1\n if validation == len(array_input):\n print(current_recipe_id - len(array_input))\n exit(1)\n current_recipe = current_recipe.next\n current_recipe_id += 1\n else:\n if validation > 0:\n validation = 0\n current_recipe = start_test.next\n current_recipe_id = start_test_recipe_id + 1\n else:\n current_recipe = current_recipe.next\n current_recipe_id += 1\n if current_recipe == start_recipe:\n exit(2)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"Thleruth/advent-of-code-2018","sub_path":"sample/d14/c2.py","file_name":"c2.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"25573391108","text":"from tkinter import ttk\nfrom typing import Callable\n\nfrom pygments import lexers\n\nfrom src.Components.tktext import EnhancedText, EnhancedTextFrame, TextOpts\nfrom src.Components.winframe import WinFrame\nfrom src.tktypes import Tk_Widget\nfrom src.Utils.images import get_image\nfrom src.window import get_window\n\n\nclass CodeInputDialog(WinFrame):\n def __init__(\n self, master: Tk_Widget, title: str, onsave: Callable\n ) -> None:\n if master is None:\n master = get_window()\n super().__init__(master, title, closable=False, icon=get_image(\"question\"))\n\n self.save = self.add_destroy_action(onsave)\n self.textframe = EnhancedTextFrame(self)\n self.textframe.pack(fill=\"both\", expand=1)\n\n self.text: EnhancedText = self.textframe.text\n self.text.lexer = lexers.get_lexer_by_name(\"Python\")\n TextOpts(self, bindkey=True).set_text(self.text)\n self.text.focus_set()\n\n self.create_button_box()\n\n def create_button_box(self) -> None:\n button_frame = ttk.Frame(self)\n okbtn = ttk.Button(button_frame, text=\"OK\", command=self.save)\n okbtn.pack(side=\"left\")\n cancelbtn = ttk.Button(button_frame, text=\"Cancel\", command=self.destroy)\n cancelbtn.pack(side=\"left\")\n button_frame.pack(fill=\"x\")\n\n def insert(self, pos: str, text: str) -> None:\n self.text.insert(pos, text)\n\n def get(self, pos1: str, pos2: str) -> str:\n return self.text.get(pos1, pos2)\n\n def add_destroy_action(self, function: Callable) -> callable:\n def new():\n function()\n self.destroy()\n\n return new\n","repo_name":"NWSOFT-ORG/NWEdit","sub_path":"src/Components/codeinputdialog.py","file_name":"codeinputdialog.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"14039975672","text":"# Maximum product subset of an array\n# Given an array a, we have to find maximum product possible\n# with the subset of elements present in the array.\n# The maximum product can be single element also.\n\ndef maximumProductSubset(arr, n):\n if n == 1:\n return arr[0]\n \n prod = 1\n\n count_neg = 0\n count_zero = 0\n max_neg = float('-inf')\n\n\n for i in range(n):\n # count zeros\n if arr[i] == 0:\n 
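# zeros would wipe out the running product, so count and skip them\n 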
count_zero += 1\n continue\n\n if arr[i] < 0:\n count_neg += 1\n max_neg = max(max_neg, arr[i])\n \n prod *= arr[i]\n\n # if all nos are 0s return 0\n if count_zero == n:\n return 0\n \n # if there are odd no of neg nos\n # return the product without the biggest neg no\n if count_neg % 2 == 1:\n # edge case: a single negative with all other elements zero -> best product is 0\n if count_neg == 1 and count_zero == n - 1:\n return 0\n return int(prod/max_neg)\n \n return prod\n\nif __name__ == "__main__":\n a = [ -1, -1, -2, 4, 3 ]\n print(maximumProductSubset(a, len(a)))\n","repo_name":"mukeshmk/pygrams","sub_path":"examples/maximumProductSubset.py","file_name":"maximumProductSubset.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5327738173","text":"# Import the necessary libraries\nimport requests # Library for making HTTP requests\nfrom bs4 import BeautifulSoup # Library for parsing HTML\nimport pandas as pd # Library for data manipulation and analysis\n\n# Step 1: Function for extracting\ndef extract(page):\n # Define headers for the HTTP request\n headers = {'User-Agent': 'My user agent'}\n\n # Make an HTTP request to fetch the web page\n url = f'https://www.bayut.com/for-sale/property/uae/page-{page}'\n\n r = requests.get(url, headers=headers) # Send a GET request to the URL (headers must be passed by keyword)\n soup = BeautifulSoup(r.content, 'html.parser') # Create a BeautifulSoup object to parse the HTML content\n return soup\n\n# Step 2: Function for transforming to dataframe.\ndef transform(soup):\n \n divs = soup.find_all('div', class_='d6e81fd0') # Find all div elements with the specified class name\n \n # Extract relevant attributes from the HTML\n for item in divs:\n title = item.find('h2').text.strip() # Extract the text of the h2 element\n location = item.find('div', class_='_7afabd84').text.strip() # Extract the text of the div element with the specified class\n typ = item.find('div', class_='_9a4e3964').text.strip() # Extract the text of the div element with the specified class\n bed_bath_area = []\n for items2 in item.find_all('span', class_='b6a29bc0'):\n try:\n bed_bath_area.append(items2.text) # Extract the text of the span element\n except:\n bed_bath_area.append('') # Append an empty string if the extraction fails\n price = item.find('span', class_='f343d9ce').text.strip() # Extract the text of the span element with the specified class\n\n # Create a dictionary with the extracted attributes\n apt = {\n 'title': title,\n 'location': location,\n 'type': typ,\n 'bed': bed_bath_area[0],\n 'bath': bed_bath_area[1],\n 'area': bed_bath_area[2],\n 'price': price\n }\n\n # Append the dictionary to the aptlist\n aptlist.append(apt)\n # No return statement is needed as the `aptlist` is a global list variable\n\n# Step 3: Create an empty list to store the dictionaries\naptlist = []\n\n# Step 4: Iterate through the pages to scrape multiple pages\nfor i in range(2500):\n print(f'Getting page {i}')\n try:\n c = extract(i)\n transform(c)\n except:\n continue\n\n# Step 5: Create a DataFrame from the aptlist\ndf = pd.DataFrame(aptlist)\n\n# Step 6: Data cleaning and preprocessing\n\n# Clean the 'area' attribute by removing commas and converting it to a numerical value\ndf['area'] = df['area'].str.replace(',', '').str.extract(r'(\\d+)').astype(float) \n\ndf['Neighbourhood'] = ''\ndf['District'] = ''\ndf['State'] = ''\n\n# Extract location information from the 'location' column\nfor index, row in df.iterrows():\n strings = row['location'].split(',') # Split the location string by comma\n if len(strings) >= 3:\n df.at[index, 'Neighbourhood'] = 
strings[-3].strip() # Assign the last but two string to the 'Neighbourhood' column\n df.at[index, 'District'] = strings[-2].strip() # Assign the last but one string to the 'District' column\n df.at[index, 'State'] = strings[-1].strip() # Assign the last string to the 'State' column\n\n\ndf.to_csv('csv.csv') # Save the DataFrame to a CSV file\n","repo_name":"midhunrajds/Real-Estate-Insights","sub_path":"webscrape.py","file_name":"webscrape.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"19687684561","text":"from sklearn.cluster import DBSCAN\nfrom sklearn.cluster import OPTICS, cluster_optics_dbscan\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.manifold import TSNE\nimport matplotlib.cm as cm\nfrom data import filter_df, read_df, generate_returns\n\ndef apply_OPTICS(X, df_returns, min_samples, max_eps=2, xi=0.05, cluster_method='xi'):\n \"\"\"\n\n :param X:\n :param df_returns:\n :param min_samples:\n :param max_eps:\n :param xi:\n :param eps:\n :return:\n \"\"\"\n clf = OPTICS(min_samples=min_samples, max_eps=max_eps, xi=xi, metric='euclidean', cluster_method=cluster_method)\n print(clf)\n\n clf.fit(X)\n labels = clf.labels_\n n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n print(\"Clusters discovered: %d\" % n_clusters_)\n\n clustered_series_all = pd.Series(index=df_returns.columns, data=labels.flatten())\n clustered_series = clustered_series_all[clustered_series_all != -1]\n\n counts = clustered_series.value_counts()\n print(\"Pairs to evaluate: %d\" % (counts * (counts - 1) / 2).sum())\n\n return clustered_series_all, clustered_series, counts, clf\n\ndef apply_DBSCAN(eps, min_samples, X, df_returns):\n \"\"\"\n This function applies a DBSCAN clustering algo\n\n :param eps: min distance for a sample to be within the cluster\n :param min_samples: min_samples to consider a cluster\n :param X: data\n\n :return: clustered_series_all: series with all tickers and labels\n :return: clustered_series: series with tickers belonging to a cluster\n :return: counts: counts of each cluster\n :return: clf object\n \"\"\"\n clf = DBSCAN(eps=eps, min_samples=min_samples, metric='euclidean')\n #print(clf)\n clf.fit(X)\n\n \n labels = clf.labels_\n \n n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n print(\"Clusters discovered: %d\" % n_clusters_)\n\n clustered_series_all = pd.Series(index=df_returns.columns, data=labels.flatten())\n clustered_series = clustered_series_all[clustered_series_all != -1]\n\n counts = clustered_series.value_counts()\n print(\"Pairs to evaluate: %d\" % (counts * (counts - 1) / 2).sum())\n\n return clustered_series_all, clustered_series, counts, clf\n\n\ndef plot_TSNE(X, clf, clustered_series_all):\n \"\"\"\n This function makes use of t-sne to visualize clusters in 2d.\n \"\"\"\n \n X_tsne = TSNE(learning_rate=1000, perplexity=25, random_state=1337).fit_transform(X)\n \n # visualization\n fig = plt.figure(1, facecolor='white', figsize=(8,8), frameon=True, edgecolor='black')\n plt.clf()\n \n # axis in the middle\n ax = fig.add_subplot(1, 1, 1, alpha=0.9)\n # Move left y-axis and bottim x-axis to centre, passing through (0,0)\n ax.spines['left'].set_position('center')\n ax.spines['left'].set_alpha(0.3)\n ax.spines['bottom'].set_position('center')\n ax.spines['bottom'].set_alpha(0.3)\n # Eliminate upper and right axes\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n # Show ticks 
in the left and lower axes only\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.tick_params(which='major', labelsize=18)\n #plt.axis('off')\n\n # etfs in cluster\n labels = clf.labels_\n x = X_tsne[(labels!=-1), 0]\n y = X_tsne[(labels!=-1), 1]\n tickers = list(clustered_series_all[clustered_series_all != -1].index)\n plt.scatter(\n x,\n y,\n s=50,\n alpha=0.75,\n c=labels[labels!=-1],\n cmap=cm.Paired\n )\n for i, ticker in enumerate(tickers):\n plt.annotate(ticker, (x[i], y[i]), size=5)\n\n # remaining etfs, not clustered\n x = X_tsne[(clustered_series_all==-1).values, 0]\n y = X_tsne[(clustered_series_all==-1).values, 1]\n tickers = list(clustered_series_all[clustered_series_all == -1].index)\n\n # WARNING: elimintate outliers\n #outliers = ['DTO','SCO']\n \"\"\"outliers = ['DZZ', 'XME']\n to_remove_x = [x[clustered_series_all[clustered_series_all==-1].index.get_loc(outliers[0])],\n x[clustered_series_all[clustered_series_all==-1].index.get_loc(outliers[1])]]\n to_remove_y = [y[clustered_series_all[clustered_series_all==-1].index.get_loc(outliers[0])],\n y[clustered_series_all[clustered_series_all==-1].index.get_loc(outliers[1])]]\n x = np.array([i for i in x if i not in to_remove_x])\n y= np.array([i for i in y if i not in to_remove_y])\"\"\"\n\n #plt.scatter(x,y,s=50,alpha=0.20,c='black')\n #for i, ticker in enumerate(tickers):\n # plt.annotate(ticker, (x[i], y[i]), size=5)#, arrowprops={'arrowstyle':'simple'})\n \n plt.title('OPTICS clusters visualized with t-SNE', size=16);\n plt.xlabel('t-SNE Dim. 1', position=(0.92,0), size=20)\n plt.ylabel('t-SNE Dim. 2', position=(0,0.92), size=20)\n ax.set_xticks(range(-50, 51, 300))\n ax.set_yticks(range(-50, 51, 300))\n plt.show()\n #plt.savefig('DBSCAN_2014_2018_eps0_15.png', bbox_inches='tight', pad_inches=0.01)\n #plt.savefig('../data/OPTICS_2013_2017.png', bbox_inches='tight', pad_inches=0.1)\n \n\n\ndef plot_cluster(cluster_elements, stock_info):\n number_of_elements = len(cluster_elements)\n for s in cluster_elements:\n close_values = filter_df(\"2016-01-01\", \"2021-01-01\", read_df(f\"../data/nse/NSE_1920/{s}.csv\", column_names=['Date','Close','Name']))['Close'].values\n plt.plot(np.log(close_values))\n\n\n plt.show()\n","repo_name":"ChetanTayal138/Trading-Toolkit","sub_path":"backtester/utils/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":5583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"29115394718","text":"import math\n\nimport torch\n\nfrom ..rasterizer.project import project_points\nfrom ...utils.pytorch3d import get_ndc_positive_bounds, warp_images, create_ndc_intrinsic_matrix\n\n__all__ = ['calculate_center_ndc_and_pix_size', 'extract_regions']\n\n\n@torch.no_grad()\ndef calculate_center_ndc_and_pix_size(\n ndc_points: torch.Tensor, # (b, n, 3)\n mask: torch.Tensor, # (b, n)\n h: int,\n w: int,\n margin: int = 30\n) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n ndc_points = ndc_points[:, :, :2] # we do not need depth here\n scale_x, scale_y = get_ndc_positive_bounds((h, w))\n\n left_border = masked_max_2d(ndc_points[:, :, 0], mask, -scale_x, scale_x) # (b,)\n right_border = masked_min_2d(ndc_points[:, :, 0], mask, -scale_x, scale_x) # (b,)\n bottom_border = masked_min_2d(ndc_points[:, :, 1], mask, -scale_y, scale_y) # (b,)\n top_border = masked_max_2d(ndc_points[:, :, 1], mask, -scale_y, scale_y) # (b,)\n\n h_ndc = top_border - bottom_border # (b,)\n w_ndc = left_border - right_border # 
(b,)\n\n h_pix = (h_ndc / (2.0 * scale_y)) * h # (b,)\n w_pix = (w_ndc / (2.0 * scale_x)) * w # (b,)\n\n h_pix = (h_pix + margin + 0.5).long().clamp(1, h).type(w_pix.dtype) # (b,)\n w_pix = (w_pix + margin + 0.5).long().clamp(1, w).type(w_pix.dtype) # (b,)\n\n center_ndc = torch.stack([(left_border + right_border) / 2, (top_border + bottom_border) / 2], dim=1) # (b, 2)\n\n return center_ndc, h_pix, w_pix\n\n\n@torch.no_grad()\ndef extract_regions(points, images, R_row, T, fcl_ndc, prp_ndc, original_interest_mask,\n max_size=None, avoid_scaling_down=True):\n \"\"\"\n Note: if there is no interesting point inside an image, the original view is returned\n Args:\n points: torch.Tensor\n of shape [batch_size, points_n, 3]\n images: torch.Tensor\n of shape [batch_size, channels_n, H, W]\n R_row: torch.Tensor\n of shape [batch_size, 3, 3]\n T: torch.Tensor\n of shape [batch_size, 3]\n fcl_ndc: torch.Tensor\n of shape [batch_size, 2]\n prp_ndc: torch.Tensor\n of shape [batch_size, 2]\n interest_mask: torch.Tensor\n of shape [batch_size, points]\n max_size: Optional[Union[int, Tuple[int, int]]\n avoid_scaling_down: bool\n Returns:\n (image_crops [b, *target_size], new_fcl_ndc [b, 2], new_prp_ndc [b, 2])\n \"\"\"\n assert max_size is None or isinstance(max_size, int) or (\n (isinstance(max_size, tuple) or isinstance(max_size, list)) and len(\n max_size) == 2), f\"{max_size}, {type(max_size)}\"\n b, _, h, w = images.shape\n\n # compute center_ndc, h_pix, w_pix\n ndc_points, mask = project_points(points, R_row, T, fcl_ndc, prp_ndc, (h, w)) # (b, n, 3), (b, n)\n mask = torch.logical_and(mask, original_interest_mask) # (b, n)\n center_ndc, h_pix, w_pix = calculate_center_ndc_and_pix_size(ndc_points, mask, h, w) # (b, 2), (b, 2), (b, 2)\n # del ndc_points, mask\n center_ndc = torch.cat([center_ndc, center_ndc.new_ones(1, 1).expand(b, 1)], dim=1) # (b, 3)\n if avoid_scaling_down and max_size is not None:\n clamp_size = (max_size, max_size) if isinstance(max_size, int) else max_size # Tuple[int, int]\n h_pix, w_pix = h_pix.clamp(max=clamp_size[0]), w_pix.clamp(max=clamp_size[1]) # (b, 2), (b, 2)\n\n # compute K_src_inv\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n K_src_inv = torch.inverse(create_ndc_intrinsic_matrix(fcl_ndc, prp_ndc)) # (b, 3, 3)\n\n # compute center_world\n center_world = torch.bmm(K_src_inv, center_ndc[:, :, None])[:, :, 0] # (b, 3)\n\n # compute max_world\n scale_x, scale_y = get_ndc_positive_bounds((h, w)) # float, float\n left_border = center_ndc[:, 0] + (w_pix / w) * scale_x # (b,)\n top_border = center_ndc[:, 1] + (h_pix / h) * scale_y # (b,)\n max_ndc = torch.stack([left_border, top_border], dim=1) # (b, 2)\n max_ndc = torch.cat([max_ndc, max_ndc.new_ones(b, 1)], dim=1) # (b, 3)\n max_world = torch.bmm(K_src_inv, max_ndc[:, :, None])[:, :, 0] # (b, 3)\n # del scale_x, scale_y, left_border, top_border, max_ndc\n\n # compute h_pix_max, w_pix_max\n if isinstance(max_size, tuple) or isinstance(max_size, list):\n assert max_size[0] >= 2 and max_size[1] >= 2\n h_pix_max, w_pix_max = max_size # int, int\n else:\n h_pix_max, w_pix_max = int(h_pix.max().item()), int(w_pix.max().item()) # int, int\n if isinstance(max_size, int):\n assert max_size >= 2\n a = w_pix_max / h_pix_max # float\n h_pix_max, w_pix_max = (max_size, math.ceil(max_size * a)) if a < 1 else (math.ceil(max_size / a), max_size)\n\n # compute new_max_ndc\n s = torch.stack([w_pix_max / w_pix, h_pix_max / h_pix], dim=1).min(dim=1).values # (b,)\n s = s.clamp(0.0, 1.0) # (b,) avoid upsampling the region of 
interest to match crop size\n h_pix, w_pix = s * h_pix, s * w_pix # (b,), (b,)\n new_scale_x, new_scale_y = get_ndc_positive_bounds((h_pix_max, w_pix_max)) # float, float\n new_left_border = (w_pix / w_pix_max) * new_scale_x # (b,)\n new_top_border = (h_pix / h_pix_max) * new_scale_y # (b,)\n new_max_ndc = torch.stack([new_left_border, new_top_border], dim=1) # (b, 2)\n # del s, h_pix, w_pix, new_left_border, new_top_border, new_scale_x, new_scale_y\n\n # compute new_fcl_ndc and new_prp_ndc\n # solve:\n # new_fcl_ndc * center_world + new_prp_ndc = 0\n # new_fcl_ndc * max_world + new_prp_ndc = new_max_ndc\n new_fcl_ndc = new_max_ndc / (max_world[:, :2] - center_world[:, :2] + 1e-8) # (b, 2)\n new_prp_ndc = - new_fcl_ndc * center_world[:, :2] # (b, 2)\n\n # crops: (b, 3, h_pix_max, w_pix_max)\n crops = warp_images(images, fcl_ndc, prp_ndc, (h, w), new_fcl_ndc, new_prp_ndc, (h_pix_max, w_pix_max))\n\n return crops, new_fcl_ndc, new_prp_ndc, center_ndc\n\n\ndef masked_min_2d(tensor: torch.Tensor, mask: torch.Tensor, lower: float, upper: float) -> torch.Tensor:\n return torch.where(\n torch.any(mask, dim=1),\n torch.min(tensor.where(mask, tensor.new_tensor(upper).view(1, 1)), dim=1).values,\n tensor.new_tensor(lower),\n )\n\n\ndef masked_max_2d(tensor: torch.Tensor, mask: torch.Tensor, lower: float, upper: float) -> torch.Tensor:\n return torch.where(\n torch.any(mask, dim=1),\n torch.max(tensor.where(mask, tensor.new_tensor(lower).view(1, 1)), dim=1).values,\n tensor.new_tensor(upper),\n )\n","repo_name":"rakhimovv/npbgpp","sub_path":"npbgplusplus/modeling/feature_extraction/cropping.py","file_name":"cropping.py","file_ext":"py","file_size_in_byte":6496,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"81"} +{"seq_id":"12843224178","text":"import os\nimport sys\nimport time\nimport random\nimport pygame\nimport pygame.freetype\nfrom pygame.locals import *\n \n \n# Class for the orange dude\nclass Player(object):\n \n def __init__(self):\n self.rect = pygame.Rect(32, 32, 16, 16)\n \n def move(self, dx, dy):\n \n # Move each axis separately. 
Note that this checks for collisions both times.\n        if dx != 0:\n            self.move_single_axis(dx, 0)\n        if dy != 0:\n            self.move_single_axis(0, dy)\n    \n    def move_single_axis(self, dx, dy):\n        \n        # Move the rect\n        self.rect.x += dx\n        self.rect.y += dy\n \n        # If you collide with a wall, move out based on velocity\n        for wall in walls:\n            if self.rect.colliderect(wall.rect):\n                if dx > 0: # Moving right; Hit the left side of the wall\n                    self.rect.right = wall.rect.left\n                if dx < 0: # Moving left; Hit the right side of the wall\n                    self.rect.left = wall.rect.right\n                if dy > 0: # Moving down; Hit the top side of the wall\n                    self.rect.bottom = wall.rect.top\n                if dy < 0: # Moving up; Hit the bottom side of the wall\n                    self.rect.top = wall.rect.bottom\n        \n        # If you collide with a box\n        for box in boxes:\n            if self.rect.colliderect(box.rect):\n                game_state.exer1()\n                boxes.remove(box)\n \n \n# Nice class to hold a wall rect\nclass Wall(object):\n    \n    def __init__(self, pos):\n        walls.append(self)\n        self.rect = pygame.Rect(pos[0], pos[1], 16, 16)\n \n \n\nclass Box(object):\n    \n    def __init__(self, pos):\n        boxes.append(self)\n        self.rect = pygame.Rect(pos[0], pos[1], 16, 16)\n\nclass GameState():\n    \n    def __init__(self):\n        self.state = \"intro\"\n        \n    def intro(self):\n        # 'running' is the module-level loop flag; without this declaration the\n        # assignments below would only create a local variable and quitting\n        # from the intro screen would never take effect\n        global running\n        for e in pygame.event.get():\n            if e.type == pygame.QUIT:\n                running = False\n            if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\n                running = False\n            if e.type == pygame.MOUSEBUTTONDOWN:\n                self.state = 'main_game'\n        \n        \n        # Just added this to make it slightly fun ;)\n        '''\n        if player.rect.colliderect(end_rect):\n            pygame.quit()\n            sys.exit()\n        \n        for box in boxes:\n            if player.rect.colliderect(box):\n                exer1()\n        '''\n        \n        # Drawing\n        screen.fill((0, 0, 0))\n        screen.blit(intro_text, (0, 0))\n        # gfxdraw.filled_circle(screen, 255, 200, 5, (0,128,0))\n        pygame.display.flip()\n        \n    def main_game(self):\n        global running  # same module-level flag as in intro()\n        for e in pygame.event.get():\n            if e.type == pygame.QUIT:\n                running = False\n            if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\n                running = False\n        \n        # Move the player if an arrow key is pressed\n        key = pygame.key.get_pressed()\n        if key[pygame.K_LEFT]:\n            player.move(-2, 0)\n        if key[pygame.K_RIGHT]:\n            player.move(2, 0)\n        if key[pygame.K_UP]:\n            player.move(0, -2)\n        if key[pygame.K_DOWN]:\n            player.move(0, 2)\n        \n        \n        # Just added this to make it slightly fun ;)\n        \n        if player.rect.colliderect(end_rect):\n            pygame.quit()\n            sys.exit()\n        \n        \n        # Drawing\n        screen.fill((0, 0, 0))\n        for wall in walls:\n            pygame.draw.rect(screen, (255, 255, 255), wall.rect)\n        for box in boxes:\n            pygame.draw.rect(screen, (0, 128, 0), box.rect)\n        pygame.draw.rect(screen, (255, 0, 0), end_rect)\n        pygame.draw.rect(screen, (255, 200, 0), player.rect)\n        # gfxdraw.filled_circle(screen, 255, 200, 5, (0,128,0))\n        pygame.display.flip()\n        \n    def exer1(self):\n        font = pygame.font.Font(None, 32)\n        input_box = pygame.Rect(490, 240, 240, 32)\n        input_box_color = (255, 255, 255)\n        input_background_color = (0, 0, 0)\n        LIGHT_BLUE = (0, 128, 255)\n        GRAY = (128, 128, 128)\n        user_answer = ''\n        # Define the variables for the user's answer and input box status\n        input_box_active = False\n        exer1_running = True\n        while exer1_running:\n            for event in pygame.event.get():\n                if event.type == QUIT:\n                    pygame.quit()\n                    sys.exit()\n                if event.type == pygame.MOUSEBUTTONDOWN:\n                    # If the user clicks on the input box, activate it\n                    if input_box.collidepoint(event.pos):\n                        input_box_active = not input_box_active\n                    else:\n                        input_box_active = False\n                    # Change the color of the input box depending on its status\n                    input_box_color = 
LIGHT_BLUE if input_box_active else GRAY\n if event.type == pygame.KEYDOWN:\n # If the input box is active, allow the user to enter text\n if input_box_active:\n if event.key == pygame.K_RETURN:\n # When the user presses Enter, print their answer and reset the input box\n print(user_answer)\n if user_answer.lower() == answer:\n game_state.right_answer()\n countdown_time = 3\n for i in range(countdown_time, 0, -1):\n time.sleep(1)\n elif user_answer.lower() != answer:\n game_state.wrong_answer()\n countdown_time = 3\n for i in range(countdown_time, 0, -1):\n time.sleep(1)\n del questions[0:2]\n exer1_running = False\n elif event.key == pygame.K_BACKSPACE:\n # Allow the user to delete characters with the Backspace key\n user_answer = user_answer[:-1]\n pygame.draw.rect(screen, input_background_color, input_box)\n pygame.draw.rect(screen, input_box_color, input_box, 2)\n user_answer_surface = font.render(user_answer, True, (255, 255, 255))\n screen.blit(user_answer_surface, (input_box.x+5, input_box.y+5))\n pygame.display.update()\n else:\n # Add the character to the user's answer\n user_answer += event.unicode\n \n question_text = questions[0]\n answer = questions[1]\n \n # answer = my_list[0][1]\n # Draw the screen\n question_surface = font.render(question_text, True, (255, 0, 0))\n screen.blit(question_surface, (460, 70))\n # (screen_width//4+10, 70)\n pygame.draw.rect(screen, input_background_color, input_box)\n pygame.draw.rect(screen, input_box_color, input_box, 2)\n user_answer_surface = font.render(user_answer, True, (255, 255, 255))\n screen.blit(user_answer_surface, (input_box.x+5, input_box.y+5))\n pygame.display.flip()\n \n if not exer1_running:\n game_state.main_game()\n \n def state_manager(self):\n if self.state == \"intro\":\n self.intro()\n elif self.state == \"main_game\":\n self.main_game()\n elif self.state == \"exer\":\n self.exer1()\n \n def right_answer(self):\n font = pygame.font.Font(None, 32)\n answer_surface = font.render('Õige!', True, (255, 255, 255))\n screen.blit(answer_surface, (580, 150))\n pygame.display.flip()\n \n def wrong_answer(self):\n font = pygame.font.Font(None, 32)\n answ = 'Vale. Õige vastus on ' + questions[1]\n answer_surface = font.render(answ, True, (255, 255, 255))\n screen.blit(answer_surface, (500, 150))\n pygame.display.flip()\n \n \n# Initialise pygame\nos.environ[\"SDL_VIDEO_CENTERED\"] = \"1\"\npygame.init()\n\n#Set up the introduction window\nintro_screen = pygame.display.set_mode((320, 240))\nintro_text = pygame.image.load(\"intro.png\")\n# Set up the display\nscreen_width = 1180\nscreen_height = 490\nscreen = pygame.display.set_mode((screen_width, screen_height))\npygame.display.set_caption(\"Labyrinth Game\")\ngame_state = GameState()\nLIGHT_BLUE = (0, 128, 255)\nGRAY = (128, 128, 128)\n\nclock = pygame.time.Clock()\nwalls = [] # List to hold the walls\nboxes = [] # List to hold the boxes\nplayer = Player() # Create the player\n \n# Holds the level layout in a list of strings.\nlevel = [\n \"WWWWWWWWWWWWWWWWWWWW\",\n \"W Y Y W\",\n \"W WW WWWWWW WWW W\",\n \"W W W Y W\",\n \"W W YW WWWWW W\",\n \"W WWWWWWWWWYW W W\",\n \"W W W Y W Y W\",\n \"WW W Y W W W WW\",\n \"WWYWW WWWWW W W\",\n \"W W Y WWW W\",\n \"W WW W WWWWYWW W\",\n \"W W WY W Y W\",\n \"W WW WW W WWW W\",\n \"W W E W W\",\n \"WWWWWWWWWWWWWWWWWWWW\",\n]\n\n\n\nquestions = ['Mitu teemakooli on Vocos?', '7', 'Mida võiks tähendada märk \"#\"?', 'kommentaar',\n 'Mitut eriala on ligikaudu Vocos võimalik omandada?', '80', 'Mitu korrust on kopli õppehoones? 
(nr)',\n '4', 'Väljasta koodina: Hello world! (kasuta print())',\n 'print(\"hello world!\")', 'Mis on VOCO täisnimi??', 'tartu rakenduslik kolledž', 'Näita koodina x ja y summa väljastamine',\n 'print(x + y)', 'Mis kool asub voco kõrval??', 'variku kool', 'Mis oli Voco eelmine nimi?',\n 'tartu kutsehariduskeskus', 'Mida tähistab int?', 'täisarvu', 'Mitu korpust on Vocos?', '3',\n 'Mida tähistab float', 'ujukomaarvu', 'Mitu aastat õpitakse tarkvaraarenduse eriala?', '4']\n\n# Parse the level string above. W = wall, E = exit, Y = exercise\nx = y = 0\nfor row in level:\n for col in row:\n if col == \"W\":\n Wall((x, y))\n elif col == \"E\":\n end_rect = pygame.Rect(x, y, 16, 16)\n elif col == \"Y\":\n box_rect = Box((x, y))\n x += 16\n y += 16\n x = 0\n\nprint(boxes)\n\nrunning = True\nwhile running:\n \n clock.tick(60)\n game_state.state_manager()\n clock.tick(360)\n \npygame.quit()","repo_name":"ne-mona/Voco-game","sub_path":"finale_game.py","file_name":"finale_game.py","file_ext":"py","file_size_in_byte":10563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13157480906","text":"def detect_coinstalled_clients():\n \"\"\"\n Co-installation detection. The lookup client has nearly identical code in its version of\n databricks/feature_store/__init__.py.\n \"\"\"\n\n # Detect if core client installed\n try:\n from databricks._feature_store_pkg_metadata import _core_client_pkg_metadata\n\n core_client_installed = True\n except ImportError:\n core_client_installed = False\n except Exception as e:\n print(\n f\"Internal Warning: unexpected exception trying to import core client pkg_metadata: {e}\"\n )\n core_client_installed = False\n\n # Detect if lookup client installed\n try:\n from databricks._feature_store_pkg_metadata import _lookup_client_pkg_metadata\n\n lookup_client_installed = True\n except ImportError:\n lookup_client_installed = False\n except Exception as e:\n print(\n f\"Internal Warning: unexpected exception trying to import lookup client pkg_metadata: {e}\"\n )\n lookup_client_installed = False\n\n # If neither client is installed, log a warning because we should never get into this situation.\n if not core_client_installed and not lookup_client_installed:\n print(\"Internal Warning: no feature store clients detected\")\n\n # If both clients are installed, throw an exception because the earlier installed client will be in a broken\n # state due to having some of its files clobbered by the later installed client. Note the order\n # of which clients are mentioned is different between clients to indicate which __init__.py emitted this error.\n if core_client_installed and lookup_client_installed:\n raise Exception(\n \"The Databricks Feature Store client and Databricks Lookup client cannot be installed in the \"\n \"same python environment. 
Use pip to uninstall both clients, then pip install the client \"\n            \"you intend to use.\"\n        )\n\n\ndetect_coinstalled_clients()\n\nfrom databricks.feature_store.client import FeatureStoreClient\nfrom databricks.feature_store.decorators import feature_table\nfrom databricks.feature_store.entities.feature_lookup import FeatureLookup\nfrom databricks.feature_store.utils.logging_utils import (\n    _configure_feature_store_loggers,\n)\n\n_configure_feature_store_loggers(root_module_name=__name__)\n\n__all__ = [\"FeatureStoreClient\", \"feature_table\", \"FeatureLookup\"]\n","repo_name":"xuechendi/databricks_codes","sub_path":"codes_databricks/databricks/feature_store/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"70406549385","text":"from string import ascii_uppercase\n\n\ndef excel_column_count(column):\n    alphabet_list = list(ascii_uppercase)\n    alphabet_lookup = {item:index+1 for index, item in enumerate(alphabet_list)}\n    column_number = []\n    column_list = list(reversed([x for x in column]))\n    \n    for index, char in enumerate(column_list):\n        number = (26 ** index) * alphabet_lookup[char]\n        column_number.append(number)\n    \n    return sum(column_number)\n\nprint(excel_column_count(\"AZ\"))\n","repo_name":"YazzyYaz/codinginterviews","sub_path":"practice_problems/strings_arrays/excel_column_count.py","file_name":"excel_column_count.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"69806995467","text":"from flask import request\r\nfrom flask_restful import Resource, abort, url_for\r\nfrom marshmallow import ValidationError\r\nfrom sqlalchemy.sql import func\r\nfrom api.tracks.parsers import CoordinatesSchema, CoordinatesQuerySchema,\\\r\n    SensorsQuerySchema\r\nfrom api.tracks.fields import coordinates_schema, sensors_schema,\\\r\n    tracks_info_schema, track_info_schema\r\nfrom api.tracks.models import Coordinate, Sensor, Track\r\nfrom api.utils import make_response, make_empty\r\nfrom extensions import db\r\nfrom sqlalchemy import exc\r\nfrom datetime import datetime\r\n\r\n\r\n# todo: Add Resource\r\nclass Tracks(Resource):\r\n    @staticmethod\r\n    def get():\r\n        \"\"\"Get the list of tracks\"\"\"\r\n\r\n        tracks = db.session.query(Track.uuid.label(\"uuid\"),\r\n                                  func.max(func.ifnull(Coordinate.datetime_at,\r\n                                                       None))\r\n                                  .label(\"datetime_max_coords\"),\r\n                                  func.min(func.ifnull(Coordinate.datetime_at,\r\n                                                       None))\r\n                                  .label(\"datetime_min_coords\"),\r\n                                  func.max(func.ifnull(Sensor.datetime_at,\r\n                                                       None))\r\n                                  .label(\"datetime_max_sens\"),\r\n                                  func.min(func.ifnull(Sensor.datetime_at,\r\n                                                       None))\r\n                                  .label(\"datetime_min_sens\"))\\\r\n            .outerjoin(Coordinate, Track.uuid == Coordinate.track_uuid)\\\r\n            .outerjoin(Sensor, Track.uuid == Sensor.track_uuid)\\\r\n            .group_by(Track.uuid)\\\r\n            .all()\r\n\r\n        return make_response(200, tracks=tracks_info_schema.dump(tracks))\r\n\r\n    @staticmethod\r\n    def post():\r\n        \"\"\"Create a new track\"\"\"\r\n\r\n        track = Track()\r\n        db.session.add(track)\r\n        db.session.commit()\r\n        location_url = url_for(\"api.track_info\", track_uuid=track.uuid)\r\n        resp = make_response(201, location=location_url)\r\n        resp.headers[\"Location\"] = location_url\r\n        return resp\r\n\r\n\r\nclass TrackInfo(Resource):\r\n    @staticmethod\r\n    def get(track_uuid):\r\n        \"\"\"Get information about a track\"\"\"\r\n\r\n        track_info = 
db.session.query(Track.uuid.label(\"uuid\"), func.max(\r\n            func.ifnull(Coordinate.datetime_at, None))\r\n            .label(\"datetime_max_coords\"), func.min(\r\n            func.ifnull(Coordinate.datetime_at, None))\r\n            .label(\"datetime_min_coords\"), func.max(\r\n            func.ifnull(Sensor.datetime_at, None))\r\n            .label(\"datetime_max_sens\"), func.min(\r\n            func.ifnull(Sensor.datetime_at, None))\r\n            .label(\"datetime_min_sens\"))\\\r\n            .outerjoin(Coordinate, Track.uuid == Coordinate.track_uuid) \\\r\n            .outerjoin(Sensor, Track.uuid == Sensor.track_uuid) \\\r\n            .filter(Track.uuid.like(str(track_uuid)))\\\r\n            .one_or_none()\r\n\r\n        if track_info is None or track_info.uuid is None:\r\n            abort(404, message=\"Track with uuid={} not found\"\r\n                  .format(track_uuid))\r\n\r\n        return make_response(200, **track_info_schema.dump(track_info))\r\n\r\n\r\nclass Coordinates(Resource):\r\n    @staticmethod\r\n    def post(track_uuid):\r\n        \"\"\"Add coordinates to a track\"\"\"\r\n\r\n        if db.session.query(Track).filter(Track.uuid.like(str(track_uuid)))\\\r\n                .one_or_none() is None:\r\n            abort(404, message=\"Track with uuid={} not found\"\r\n                  .format(track_uuid))\r\n\r\n        try:\r\n            args = CoordinatesSchema().load(request.json)\r\n        except ValidationError:\r\n            return make_response(400, message=\"Bad JSON format\")\r\n\r\n        for coord_data in args[\"coordinates\"]:\r\n            coordinate = Coordinate(track_uuid=str(track_uuid), **coord_data)\r\n            try:\r\n                db.session.add(coordinate)\r\n            except exc.SQLAlchemyError:\r\n                db.session.rollback()\r\n                return make_response(500, message=\"Database add error\")\r\n\r\n        try:\r\n            db.session.commit()\r\n        except exc.SQLAlchemyError:\r\n            db.session.rollback()\r\n            return make_response(500, message=\"Database commit error\")\r\n\r\n        return make_empty(201)\r\n\r\n    @staticmethod\r\n    def get(track_uuid):\r\n        \"\"\"Get the list of coordinates for a track\"\"\"\r\n\r\n        # todo: Remove this check or add it to the main query\r\n        if db.session.query(Track).filter(Track.uuid.like(str(track_uuid)))\\\r\n                .one_or_none() is None:\r\n            abort(404, message=\"Track with uuid={} not found\"\r\n                  .format(track_uuid))\r\n\r\n        query = CoordinatesQuerySchema().load(request.args)\r\n        coords = db.session\\\r\n            .query(Coordinate)\\\r\n            .filter(\r\n                Coordinate.track_uuid.like(str(track_uuid)),\r\n                Coordinate.datetime_at.between(query[\"begin_datetime\"],\r\n                                               query[\"end_datetime\"])\r\n            )\\\r\n            .order_by(Coordinate.datetime_at)\\\r\n            .all()\r\n\r\n        if coords is None:\r\n            abort(404, message=\"Track with uuid={} not found\"\r\n                  .format(track_uuid))\r\n\r\n        return make_response(200, coordinates=coordinates_schema.dump(coords))\r\n\r\n\r\nclass Sensors(Resource):\r\n    @staticmethod\r\n    def post(track_uuid):\r\n        \"\"\"Add sensor readings\"\"\"\r\n\r\n        if db.session.query(Track).filter(Track.uuid.like(str(track_uuid)))\\\r\n                .one_or_none() is None:\r\n            abort(404, message=\"Track with uuid={} not found\"\r\n                  .format(track_uuid))\r\n\r\n        # todo: Upgrade parsing->Update 400 error\r\n        args = request.json\r\n        if len(args) == 0:\r\n            return make_response(400, message=\"Bad JSON format\")\r\n        for sensors_data in args[\"sensors\"]:\r\n            sensor = Sensor(\r\n                track_uuid=str(track_uuid),\r\n                datetime_at=datetime.strptime(sensors_data[\"datetime\"],\r\n                                              \"%Y-%m-%d %H:%M:%S.%f\"),\r\n                accelerometerX=sensors_data[\"accelerometer\"][0],\r\n                accelerometerY=sensors_data[\"accelerometer\"][1],\r\n                accelerometerZ=sensors_data[\"accelerometer\"][2],\r\n                gyroscopeX=sensors_data[\"gyroscope\"][0],\r\n                gyroscopeY=sensors_data[\"gyroscope\"][1],\r\n                
gyroscopeZ=sensors_data[\"gyroscope\"][2],\r\n                magnetometerX=sensors_data[\"magnetometer\"][0],\r\n                magnetometerY=sensors_data[\"magnetometer\"][1],\r\n                magnetometerZ=sensors_data[\"magnetometer\"][2],\r\n            )\r\n            try:\r\n                db.session.add(sensor)\r\n            except exc.SQLAlchemyError:\r\n                db.session.rollback()\r\n                return make_response(500, message=\"Database add error\")\r\n\r\n        try:\r\n            db.session.commit()\r\n        except exc.SQLAlchemyError:\r\n            db.session.rollback()\r\n            return make_response(500, message=\"Database commit error\")\r\n\r\n        return make_empty(201)\r\n\r\n    @staticmethod\r\n    def get(track_uuid):\r\n        \"\"\"Get the list of sensor readings\"\"\"\r\n\r\n        if db.session.query(Track).filter(Track.uuid.like(str(track_uuid)))\\\r\n                .one_or_none() is None:\r\n            abort(404, message=\"Track with uuid={} not found\"\r\n                  .format(track_uuid))\r\n\r\n        query = SensorsQuerySchema().load(request.args)\r\n        sens = db.session.query(Sensor.datetime_at.label(\"dt\"),\r\n                                Sensor.accelerometerX.label(\"a_x\"),\r\n                                Sensor.accelerometerY.label(\"a_y\"),\r\n                                Sensor.accelerometerZ.label(\"a_z\"),\r\n                                Sensor.gyroscopeX.label(\"g_x\"),\r\n                                Sensor.gyroscopeY.label(\"g_y\"),\r\n                                Sensor.gyroscopeZ.label(\"g_z\"),\r\n                                Sensor.magnetometerX.label(\"m_x\"),\r\n                                Sensor.magnetometerY.label(\"m_y\"),\r\n                                Sensor.magnetometerZ.label(\"m_z\"),)\\\r\n            .filter(\r\n                Sensor.track_uuid.like(str(track_uuid)),\r\n                Sensor.datetime_at.between(query[\"begin_datetime\"],\r\n                                           query[\"end_datetime\"])) \\\r\n            .order_by(Sensor.datetime_at)\\\r\n            .all()\r\n\r\n        return make_response(200, sensors=sensors_schema.dump(sens))\r\n","repo_name":"tursunovJr/karting-api","sub_path":"api/tracks/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":8956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"2875601509","text":"import get_names\n\nname_file = 'Alle navne, der er godkendt som både drenge- og pigenavn per 2020-03-02.csv'\n\ndef take_a_number_return_names(num):\n    name_list = get_names.read_linewise(name_file)\n    names = list(next(name_list) for n in range(num))\n    return names\n\nif __name__ == '__main__':\n    print(take_a_number_return_names(5))","repo_name":"Rasm-P/Python","sub_path":"Week-10/06-1 Iterators and Generators/test_my_module.py","file_name":"test_my_module.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"no","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"17237164322","text":"def two_list_dictionary(keys, values):\n    \"\"\"Given keys and values, make dictionary of those.\n    \n    >>> two_list_dictionary(['x', 'y', 'z'], [9, 8, 7])\n    {'x': 9, 'y': 8, 'z': 7}\n    \n    If there are fewer values than keys, remaining keys should have value\n    of None:\n    \n    >>> two_list_dictionary(['a', 'b', 'c', 'd'], [1, 2, 3])\n    {'a': 1, 'b': 2, 'c': 3, 'd': None}\n    \n    If there are fewer keys, ignore remaining values:\n\n    >>> two_list_dictionary(['a', 'b', 'c'], [1, 2, 3, 4])\n    {'a': 1, 'b': 2, 'c': 3}\n    \"\"\"\n    new_dict = {}\n    difference = len(keys) - len(values)\n    if difference >= 0:\n        for num in range(difference):\n            values.append(None)\n        for key in keys:\n            new_dict[key] = values.pop(0)\n    elif difference < 0:\n        for key in keys:\n            new_dict[key] = values.pop(0)\n    return new_dict","repo_name":"Fredeggs/python-ds-practice","sub_path":"two_list_dictionary.py","file_name":"two_list_dictionary.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} 
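Editorial aside (not part of the original corpus records): the two_list_dictionary record above pads and truncates by hand; a hedged alternative sketch of the same behaviour uses itertools.zip_longest, which pads the shorter sequence with None, while slicing to len(keys) drops any surplus values. The function name below is hypothetical, and unlike the original it does not mutate the values list:

from itertools import zip_longest

def two_list_dictionary_sketch(keys, values):
    # zip_longest pads missing values with None; the slice keeps exactly one
    # pair per key and discards surplus values when values is longer.
    return dict(list(zip_longest(keys, values))[:len(keys)])

# two_list_dictionary_sketch(['a', 'b', 'c', 'd'], [1, 2, 3])
# -> {'a': 1, 'b': 2, 'c': 3, 'd': None}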
+{"seq_id":"22906498117","text":"import re, os\nimport codecs\nimport logging\nfrom polyglot_tokenizer import Tokenizer\n\nlogger = logging.getLogger(__name__)\n\ndef load_data(text_type, filename, lang, tokenize_text=False, split_sent=True):\n data_tuple = []\n with codecs.open(filename, 'r', encoding='utf-8') as fp:\n logger.info('Loading text_type: %s format' % (text_type))\n if text_type == \"ssf\":\n start_c = -1\n for line in fp:\n line = line.strip()\n ds = line.split()\n #print(\"Line\", line)\n #print(\"DS\", ds)\n if line == \"\":\n continue\n elif line[0:2] == \" 2:\n if ds[2]:\n #print \"--\",line,\"--\"\n word, tag = ds[1], ds[2]\n if start_c == -1:\n sent.append((word, tag, \"\"))\n if start_c == 1:\n sent.append((word, tag, \"B-%s\" % (chunk_tag)))\n start_c = 0\n if start_c == 0:\n sent.append((word, tag, \"I-%s\" % (chunk_tag)))\n elif text_type == \"conll\":\n sent = []\n for line in fp:\n line = line.strip()\n ds = line.split()\n if line != \"\":\n print(line)\n if len(ds) == 2:\n word, tag, chunk = ds[1], \"\",\"\"\n if len(ds) == 3:\n word, tag, chunk = ds[1], ds[2], \"\"\n if len(ds) == 4:\n word, tag, chunk = ds[1], ds[2], ds[3]\n sent.append([word, tag, chunk])\n else:\n data_tuple.append(sent)\n sent = []\n elif text_type == \"txt\":\n if split_sent == True:\n text = fp.read()\n tok = Tokenizer(lang=lang, split_sen=split_sent)\n tokenized_sents = tok.tokenize(text)\n sent = []\n for tokens in tokenized_sents:\n for token in tokens:\n sent.append([token, \"\", \"\"])\n data_tuple.append(sent)\n else:\n for line in fp:\n sent = []\n if tokenize_text:\n tok = Tokenizer(lang=lang, split_sen=False)\n tokenized_sents = tok.tokenize(line)\n for tokens in tokenized_sents:\n for token in tokens:\n sent.append([token, \"\", \"\"])\n data_tuple.append(sent)\n else:\n print(\"Check - text_type\", text_type)\n\n return data_tuple\n","repo_name":"avineshpvs/indic_tagger","sub_path":"tagger/src/data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"81"} +{"seq_id":"35137118427","text":"'''Deep Dreaming in Keras.\nAdapted from Keras example and original Google Caffe implementation\n\nRun the script with:\n```\npython deepdream_keras_caffeport.py path_to_your_base_image.jpg prefix_for_results number_of_iterations\n```\ne.g.:\n```\npython deepdream_keras_caffeport.py img/mypic.jpg results/dream 10\n```\n'''\n\n\n\nfrom __future__ import print_function\n\nfrom keras.preprocessing.image import load_img, save_img, img_to_array\nimport numpy as np\nimport scipy\nimport scipy.ndimage as nd\nimport argparse\nimport random\n\nfrom keras.applications import inception_v3\nfrom keras import backend as K\n\n\nparser = argparse.ArgumentParser(description='Deep Dreams with Keras.')\nparser.add_argument('base_image_path', metavar='base', type=str,\n help='Path to the image to transform.')\nparser.add_argument('result_prefix', metavar='res_prefix', type=str,\n help='Prefix for the saved results.')\nparser.add_argument('number_ite', metavar='num_it', type=int,\n help='Number of iterations for dream.')\n\nargs = parser.parse_args()\nbase_image_path = args.base_image_path\nresult_prefix = args.result_prefix\nnumber_ite = args.number_ite\n\n\ndef preprocess_image(image):\n # Util function to format pictures\n # into appropriate tensors.\n img = np.expand_dims(image, axis=0)\n img = inception_v3.preprocess_input(img)\n return img\n\n\ndef deprocess_image(x):\n # Util function to 
convert a tensor into a valid image.\n if K.image_data_format() == 'channels_first':\n x = x.reshape((3, x.shape[2], x.shape[3]))\n x = x.transpose((1, 2, 0))\n else:\n x = x.reshape((x.shape[1], x.shape[2], 3))\n x /= 2.\n x += 0.5\n x *= 255.\n x = np.clip(x, 0, 255).astype('uint8')\n return x\n\n\ndef eval_loss_and_grads(x):\n outs = fetch_loss_and_grads([x])\n loss_value = outs[0]\n grad_values = outs[1]\n return loss_value, grad_values\n\n\ndef resize_img(img, size):\n img = np.copy(img)\n if K.image_data_format() == 'channels_first':\n factors = (1, 1,\n float(size[0]) / img.shape[2],\n float(size[1]) / img.shape[3])\n else:\n factors = (1,\n float(size[0]) / img.shape[1],\n float(size[1]) / img.shape[2],\n 1)\n return scipy.ndimage.zoom(img, factors, order=1)\n\n\ndef gradient_ascent(x, iterations, step, max_loss=None, jitter=16):\n for i in range(iterations):\n ox, oy = np.random.randint(-jitter, jitter+1, 2)\n x = np.roll(np.roll(x, ox, -1), oy, -2) # apply jitter shift\n loss_value, grad_values = eval_loss_and_grads(x)\n if max_loss is not None and loss_value > max_loss:\n break\n print('..Loss value at', i, ':', loss_value)\n x += step * grad_values\n x = np.roll(np.roll(x, -ox, -1), -oy, -2) # unshift image\n return x\n\ndef deepdream(base_image_path, step, iterations, num_octave, octave_scale, max_loss, jitter):\n\n img = preprocess_image(base_image_path)\n if K.image_data_format() == 'channels_first':\n original_shape = img.shape[2:]\n else:\n original_shape = img.shape[1:3]\n successive_shapes = [original_shape]\n for i in range(1, num_octave):\n shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])\n successive_shapes.append(shape)\n successive_shapes = successive_shapes[::-1]\n original_img = np.copy(img)\n shrunk_original_img = resize_img(img, successive_shapes[0])\n\n for shape in successive_shapes:\n print('Processing image shape', shape)\n img = resize_img(img, shape)\n img = gradient_ascent(img,\n iterations=iterations,\n step=step,\n max_loss=max_loss)\n upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)\n same_size_original = resize_img(original_img, shape)\n lost_detail = same_size_original - upscaled_shrunk_original_img\n\n img += lost_detail\n shrunk_original_img = resize_img(original_img, shape)\n return deprocess_image(img)\n\n\n\n# end definitions\n\n# The learning phase flag is a bool tensor (0 = test, 1 = train)\nK.set_learning_phase(0)\n\n# Build the InceptionV3 network with our placeholder.\n# The model will be loaded with pre-trained ImageNet weights.\nmodel = inception_v3.InceptionV3(weights='imagenet',\n include_top=False)\ndream = model.input\nprint('Model loaded.')\n\n# Get the symbolic outputs of each \"key\" layer (we gave them unique names).\nlayer_dict = dict([(layer.name, layer) for layer in model.layers])\n\n\ntest_layers = []\nfor layer in model.layers:\n if 'mixed' in layer.name:\n test_layers.append(layer.name)\n\n\n# Playing with these hyperparameters will also allow you to achieve new effects\nstep = 0.01 # Gradient ascent step size\nnum_octave = 4 # Number of scales at which to run gradient ascent\noctave_scale = 1.4 # Size ratio between scales\niterations = 10 # Number of ascent steps per scale\nmax_loss = 10. 
# max amount of loss\njitter = 8 # random offset jitter\ncoefficient = 1.0 # activation amount\n\n\nfor layer in test_layers:\n img = load_img(base_image_path)\n print('Image loaded from file.')\n img = img_to_array(img)\n h, w = img.shape[:2]\n\n s = 0.05 # scale coefficient\n print('starting dream for layer ', layer, ' with ', number_ite, ' iteration(s)')\n\n # Define the loss.\n loss = K.variable(0.)\n # Add the L2 norm of the features of a layer to the loss.\n if layer not in layer_dict:\n raise ValueError('Layer ' + layer + ' not found in model.')\n coeff = coefficient\n x = layer_dict[layer].output\n # We avoid border artifacts by only involving non-border pixels in the loss.\n scaling = K.prod(K.cast(K.shape(x), 'float32'))\n if K.image_data_format() == 'channels_first':\n loss += coeff * K.sum(K.square(x[:, :, 2: -2, 2: -2])) / scaling\n else:\n loss += coeff * K.sum(K.square(x[:, 2: -2, 2: -2, :])) / scaling\n\n # Compute the gradients of the dream wrt the loss.\n grads = K.gradients(loss, dream)[0]\n # Normalize gradients.\n grads /= K.maximum(K.mean(K.abs(grads)), K.epsilon())\n\n # Set up function to retrieve the value\n # of the loss and gradients given an input image.\n outputs = [loss, grads]\n fetch_loss_and_grads = K.function([dream], outputs)\n\n\n for i in range(number_ite):\n print('starting iteration', i+1)\n out_img = deepdream(img, step, iterations, num_octave, octave_scale, max_loss, jitter)\n img = nd.affine_transform(out_img, [1-s,1-s,1], [h*s/2,w*s/2,0], order=1)\n save_img(result_prefix + '_' + layer + '_' + '{}ite.png'.format(i+1), np.copy(img))\n","repo_name":"koenigpeter/deepdream","sub_path":"deepdream_keras_caffeport.py","file_name":"deepdream_keras_caffeport.py","file_ext":"py","file_size_in_byte":6739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"24759031091","text":"import copy\nfrom difflib import SequenceMatcher\nimport xml.etree.ElementTree as ET\nfrom ruamel import yaml\nimport csv\nimport glob\nimport os\nimport time\nimport re\nfrom pathlib import Path\nfrom collections import Counter\nimport toml\nfrom cli._ux import output_message\nfrom rich.live import Live\nfrom rich.panel import Panel\nfrom rich.progress import Progress, SpinnerColumn, BarColumn, TextColumn\nfrom rich.table import Table\nfrom migration_tools._comparison_report import comparison_report, table_comparison_summary, table_issue_summary, \\\n table_issue_detail, table_issue_list, table_info_list, report_section_title, table_current_mappings, table_file_list, table_setting_list\nfrom migration_tools._error_handling import EInvalidDataTypes, EModelValidationError, EYAMLValidationError, \\\n EYAMLPreparationError, EDatabaseNotSupported, EInputFilesMissing\n\n# Requirements:\n# ==> python3 -m pip install ruamel.yaml\n# ==> python3 -m pip install toml\n# ==> python3 -m pip install \"typer[all]\"\n\n\n# ======================================================================================================================\n# CLASS MigrationUtils\n# ======================================================================================================================\n\nclass MigrationUtils:\n @staticmethod\n def fuzzy_strip(column):\n \"\"\"\n Returns the column value in lowercase with underscores replaced by spaces and special characters removed\n\n Args:\n column (str): The column value to be stripped\n\n Returns:\n _type_: The column value in lowercase, underscores, parentheses replaced by spaces and special characters 
removed\n \"\"\"\n return \" \".join((column.casefold().replace('(', ' ').replace(')', ' ').replace(\n '_', ' ').strip().encode(\"ascii\", errors=\"ignore\").decode()).split())\n\n# ======================================================================================================================\n# Notification Classes\n# ======================================================================================================================\n\n\nclass VAL_NOTIFICATION:\n def __init__(self, table=None, msg=None):\n self.severity = '0-INFO'\n self.table = \"\" if table is None else table\n self.msg = \"\" if msg is None else msg\n self.classification = \"Informational Message\"\n\n def set_table(self, table):\n self.table = table\n\n\nclass E_VAL_ERROR(VAL_NOTIFICATION):\n\n def __init__(self, table=None):\n super().__init__(table)\n self.severity = '1-SEVERE'\n self.msg = \"Validation Failed\"\n self.classification = \"Validation Error\"\n\n\nclass E_NO_SUITABLE_MATCH(E_VAL_ERROR):\n def __init__(self, table):\n super().__init__(table)\n self.msg = \"No suitable matching table could be found\"\n self.classification = \"Table Could Not Be Matched\"\n\n\nclass E_COLUMN_NOT_FOUND(E_VAL_ERROR):\n def __init__(self, table, col):\n super().__init__(table)\n self.msg = f\"Column {col} (or anything matching) has not been found.\"\n self.classification = \"Column Not Found\"\n\n\nclass E_FUZZY_MATCH_NOTIFICATION(E_VAL_ERROR):\n def __init__(self, table, msg):\n super().__init__(table)\n self.severity = '2-LOW'\n self.msg = msg\n self.classification = \"Table Could Only Be Matched Via Fuzzy Match\"\n\n\nclass E_STRICT_DATATYPE_NOTIFICATION(E_VAL_ERROR):\n def __init__(self, source_data_type, source_yaml_type, target_data_type, target_yaml_type, table=None):\n super().__init__(table)\n self.severity = '2-LOW'\n self._source_data_type = source_data_type\n self._target_data_type = target_data_type\n self.msg = f\"Type {self._source_data_type} ({source_yaml_type}) might not be compatible with {self._target_data_type} ({target_yaml_type}) both ways\"\n self.classification = \"Source and Target Data Type might not be compatible both ways\"\n\n\nclass E_DT_VAL_ERROR(E_VAL_ERROR):\n def __init__(self, source_data_type, target_data_type, table=None):\n super().__init__(table)\n self._source_data_type = source_data_type\n self._target_data_type = target_data_type\n self.classification = \"Data Type Validation Error\"\n\n\nclass E_VAL_INCOMPATIBLE_DATATYPES(E_DT_VAL_ERROR):\n def __init__(self, source_data_type, target_data_type, table=None):\n super().__init__(source_data_type, target_data_type, table)\n self.msg = f\"Type {self._source_data_type} is not compatible with {self._target_data_type}\"\n self.classification = \"Source and Target Data Type Incompatible\"\n\n\nclass E_VAL_LARGER_TARGET_DECIMAL(E_DT_VAL_ERROR):\n def __init__(self, source_data_type, target_data_type, critical_override, table=None):\n super().__init__(source_data_type, target_data_type, table)\n self.severity = \"1-SEVERE\" if critical_override else \"2-LOW\"\n self.msg = f\"Target datatype decimal {self._target_data_type} is larger than source {self._source_data_type}\"\n self.classification = \"Target Data Type Precision Larger Than Source\"\n\n\nclass E_VAL_SMALLER_TARGET_DECIMAL(E_DT_VAL_ERROR):\n def __init__(self, source_data_type, target_data_type, table=None):\n super().__init__(source_data_type, target_data_type, table)\n self.msg = f\"Target datatype decimal {self._target_data_type} is smaller than source 
{self._source_data_type}\"\n self.classification = \"Target Data Type Precision Smaller Than Source\"\n\n\nclass E_VAL_LARGER_TARGET_LENGTH(E_DT_VAL_ERROR):\n def __init__(self, source_data_type, target_data_type, critical_override, table=None):\n super().__init__(source_data_type, target_data_type, table)\n self.severity = \"1-SEVERE\" if critical_override else \"2-LOW\"\n self.msg = f\"Target datatype length {self._target_data_type} is larger than source {self._source_data_type}\"\n self.classification = \"Target Data Type Length Larger Than Source\"\n\n\nclass E_VAL_SMALLER_TARGET_LENGTH(E_DT_VAL_ERROR):\n def __init__(self, source_data_type, target_data_type, table=None):\n super().__init__(source_data_type, target_data_type, table)\n self.msg = f\"Target datatype length {self._target_data_type} is smaller than source {self._source_data_type}\"\n self.classification = \"Target Data Type Length Smaller Than Source\"\n\n\n# ======================================================================================================================\n# CLASS datatype\n# ======================================================================================================================\nclass datatype:\n cdw: None\n basetype: None\n length: None\n decimal: None\n org_basetype: None\n org_length: None\n org_decimal: None\n lookup_type: None\n data_config: None\n\n def __init__(self, general_config, source_config, target_config, cdw, basetype, length=None, decimal=None):\n \"\"\"\n Initialises the data type class\n\n Args:\n general_config (dict): The contents of the general configuration file\n source_config (dict): The contents of the data configuration for the source model\n target_config (dict): The contents of the data configuration for the target model\n cdw (str): The name of the the cloud data platform\n basetype (str): Base data type, e.g. VARCHAR, INTEGER, NUMBER etc\n length (int, optional): The length of the data type. Defaults to None.\n decimal (int, optional): The number of values after the decimal point. 
Defaults to None.\n \"\"\"\n\n self._general_config = general_config\n self._source_config = source_config\n self._target_config = target_config\n self.cdw = cdw\n self.data_config = source_config if self.cdw.upper() == 'FALCON' else target_config\n\n # Convert to uppercase as it does not matter and is easier to work with\n self.basetype = basetype.upper()\n self.length = int(\n length) if length is not None and length != '' else None\n # Exception for Falcon\n if self.cdw == 'FALCON' and self.length == 0:\n self.length = 99999\n\n self.decimal = int(\n decimal) if decimal is not None and decimal != '' else None\n self.process_alias()\n self.lookup_type = self.format_type(True)\n\n def process_alias(self):\n \"\"\"\n Replaces a data type with another as specified in the DATATYPE_REPLACE section in the config file\n \"\"\"\n if 'DATATYPE_REPLACE' in self.data_config:\n for remap_dt in self.data_config['DATATYPE_REPLACE']:\n if self.basetype == remap_dt[\"data_type\"] and \\\n self.length == (remap_dt[\"length\"] if \"length\" in remap_dt else None) and \\\n self.decimal == (remap_dt[\"decimal\"] if \"decimal\" in remap_dt else None):\n self.org_basetype = self.basetype\n self.org_length = self.length\n self.org_decimal = self.decimal\n self.basetype = remap_dt[\"new_data_type\"].upper()\n self.length = remap_dt[\"new_length\"] if \"new_length\" in remap_dt else None\n self.decimal = remap_dt[\"new_decimal\"] if \"new_decimal\" in remap_dt else None\n\n def format_type(self, mask=False):\n \"\"\"\n Returns a formatted data type based on the basetype, length and precision, i.e.\n (,)\n\n Args:\n mask (bool, optional): Whether to mask the numbers with X or not. Defaults to False.\n\n Returns:\n _type_: _description_\n \"\"\"\n precision = ('(' + (str(self.length) if not mask else 'X') +\n ((\",\" + (str(self.decimal) if not mask else 'X'))\n if self.decimal is not None and self.decimal != 0 else '') +\n ')') if self.length is not None and self.length != 0 else ''\n return self.basetype + precision\n\n def is_compatible_with(self, other_data_type):\n \"\"\"\n Compares this data type to the passed other data type to see if they are compatible.\n Data types are compatible if they are in the same group in the data mappings\n\n Args:\n other_data_type (class datatype): the data type to compare this data type to\n\n Returns:\n bool: True if the data types are compatible, False otherwise\n \"\"\"\n other_config = self._source_config if other_data_type.cdw.upper(\n ) == 'FALCON' else self._target_config\n\n # Now that we fixed the checks on source data types, this should never fail\n if self.lookup_type not in self.data_config['DATATYPE_MAPPINGS']:\n raise Exception\n\n return self.data_config['DATATYPE_MAPPINGS'][self.lookup_type]['GROUP'] == other_config['DATATYPE_MAPPINGS'][\n other_data_type.lookup_type]['GROUP']\n\n def compare(self, other_data_type):\n \"\"\"\n Do a full comparison of this data type to the other data type in terms of data type,\n length and precision to see if they are suitable for mapping\n\n Args:\n other_data_type (class datatype): the data type to compare this data type to\n\n Returns:\n bool: True if the data types match and can be mapped, False otherwise\n \"\"\"\n validation_status_new = []\n\n # Check if compatible\n if self.is_compatible_with(other_data_type):\n # Check length\n if self.length != other_data_type.length:\n if self.cdw == 'FALCON' and self.basetype == 'VARCHAR' and self._general_config.get(\n 'MODEL_VALIDATION').get('IGNORE_FALCON_VARCHAR_PRECISION'):\n # 
We ignore to chose length for Falcon varchars\n output_message(\"Ignoring length for Falcon VARCHAR type\")\n else:\n if self.length is None or other_data_type.length is None:\n # Not all data types have a length, but we have proven they are\n # compatible, so we accept the length\n pass\n else:\n this_length = self.length if self.length is not None else 0\n if this_length < other_data_type.length:\n validation_status_new.append(\n E_VAL_LARGER_TARGET_LENGTH(\n self.format_type(),\n other_data_type.format_type(),\n not self._general_config.get('MODEL_VALIDATION').get(\n 'ACCEPT_LARGER_TARGET_LENGTH_DECIMAL')))\n\n else:\n validation_status_new.append(\n E_VAL_SMALLER_TARGET_LENGTH(\n self.format_type(),\n other_data_type.format_type()))\n\n if self.decimal != other_data_type.decimal:\n this_length = self.decimal if self.decimal is not None else 0\n if this_length < other_data_type.decimal:\n validation_status_new.append(\n E_VAL_LARGER_TARGET_DECIMAL(\n self.format_type(),\n other_data_type.format_type(),\n not self._general_config.get('MODEL_VALIDATION').get(\n 'ACCEPT_LARGER_TARGET_LENGTH_DECIMAL')))\n\n elif this_length > other_data_type.decimal:\n validation_status_new.append(\n E_VAL_SMALLER_TARGET_DECIMAL(\n self.format_type(),\n other_data_type.format_type()))\n\n other_config = self._source_config if other_data_type.cdw.upper(\n ) == 'FALCON' else self._target_config\n if self.data_config['DATATYPE_MAPPINGS'][self.lookup_type]['YAML_TYPE'] != other_config['DATATYPE_MAPPINGS'][\n other_data_type.lookup_type]['YAML_TYPE']:\n validation_status_new.append(\n E_STRICT_DATATYPE_NOTIFICATION(\n self.format_type(),\n self.data_config['DATATYPE_MAPPINGS'][self.lookup_type]['YAML_TYPE'],\n other_data_type.format_type(),\n other_config['DATATYPE_MAPPINGS'][\n other_data_type.lookup_type]['YAML_TYPE']))\n else:\n validation_status_new.append(\n E_VAL_INCOMPATIBLE_DATATYPES(\n self.format_type(),\n other_data_type.format_type()))\n\n return validation_status_new\n\n# ======================================================================================================================\n# CLASS dbschema_model\n# ======================================================================================================================\n\n\nclass dbschema_model:\n folder_name = None\n model = None\n database = None\n cdw = \"\"\n data_config = None\n\n def __init__(self, folder_name, general_config, source_config, target_config):\n \"\"\"\n Initialises the dbschema_model class. Loads all the input files (DBS or CSV) and\n validates the data types used. 
Cleans up the model after loading (removing empty and\n databases, schemas to ignore)\n\n Args:\n folder_name (str): The name of the folder to read the input files from\n general_config (dict): The contents of the general configuration file\n source_config (dict): The contents of the data configuration for the source model\n target_config (dict): The contents of the data configuration for the target model\n\n Raises:\n Exception: Exception is raised when invalid data types have been encountered in the model\n \"\"\"\n self._general_config = general_config\n self._source_config = source_config\n self._target_config = target_config\n self._source_files_processed = []\n self.folder_name = folder_name\n self.load_from_file()\n self.data_config = self._source_config if self.cdw.upper(\n ) == 'FALCON' else self._target_config\n self.valid_data_types = list(self.data_config['DATATYPE_MAPPINGS'])\n\n invalid_data_types = [\n {'db': db, 's': schema, 't': table, 'c': c,\n 'dt': self.model[db]['schemas'][schema][table][c].format_type(True)}\n for db in self.model\n for schema in self.model[db]['schemas']\n for table in self.model[db]['schemas'][schema]\n for c in self.model[db]['schemas'][schema][table]\n if self.model[db]['schemas'][schema][table][c].format_type(True)\n not in list(\n [m for m in self.data_config['DATATYPE_MAPPINGS']])]\n\n if len(invalid_data_types) > 0:\n raise EInvalidDataTypes(invalid_data_types, self.cdw)\n\n self.cleanup_model()\n\n def cleanup_model(self):\n \"\"\"\n Cleans up the loaded data model:\n 1. Removes databases which are marked to ignore\n 2. Removes databases with no content\n 3. Removes schemas which are marked to ignore\n 4. Removes schemas which are empty\n 5. Removes tables with no columns\n \"\"\"\n # Remove all databases, views to ignore and empty elements\n model_copy = copy.deepcopy(self.model)\n\n for db in model_copy:\n if db in self._general_config.get('MODEL_VALIDATION').get(\n 'EXCLUDE_DATABASES') or len(model_copy[db]['schemas']) == 0:\n output_message(f\"Excluding database: {db}\")\n self.model.pop(db)\n else:\n for schema in model_copy[db]['schemas']:\n if f\"{db}.{schema}\" in self._general_config.get('MODEL_VALIDATION').get(\n 'EXCLUDE_SCHEMAS') or len(model_copy[db]['schemas'][schema]) == 0:\n output_message(f\"Excluding schema: {db}.{schema}\")\n self.model[db]['schemas'].pop(schema)\n else:\n for table in model_copy[db]['schemas'][schema]:\n if len(model_copy[db]['schemas'][schema][table]) == 0:\n self.model[db]['schemas'][schema].pop(table)\n\n def load_from_file(self):\n \"\"\"\n Loads the input files from the source folder. 
Input files can be either\n in .dbs format (dbschema) or .csv files\n \"\"\"\n self.model = {}\n # List all the dbs and csv files in the source folder\n model_list = glob.glob(self.folder_name + '*.dbs') + \\\n glob.glob(self.folder_name + '*.csv')\n\n for db_file in model_list:\n self._source_files_processed.append(db_file)\n if Path(db_file).suffix == '.dbs':\n # For dbschema .dbs files, the database name is the file name, so we can have one db per file\n database_name = os.path.splitext(os.path.basename(db_file))[0]\n self.model[database_name] = {}\n tree = ET.parse(db_file)\n self.parse_dbs(database_name, tree.getroot())\n elif Path(db_file).suffix == '.csv':\n # For csv files, the database name is in the data, so we can have multiple dbs per file\n self.parse_csv(db_file)\n\n def get_stats(self):\n \"\"\"\n Counts the number of tables, schemas, tables and columns in the model\n\n Returns:\n dict: The individual counts in a dictionary\n \"\"\"\n return {\n \"db_cnt\": len([d for d in self.model]),\n \"sch_cnt\": len([s for d in self.model\n for s in self.model[d]['schemas']]),\n \"tbl_cnt\": len([s for d in self.model\n for s in self.model[d]['schemas']\n for t in self.model[d]['schemas'][s]]),\n \"col_cnt\": len([c for d in self.model\n for s in self.model[d]['schemas']\n for t in self.model[d]['schemas'][s]\n for c in self.model[d]['schemas'][s][t]])\n }\n\n def parse_csv(self, db_file):\n \"\"\"\n Loads the data model from a csv file\n\n Args:\n db_file (str): File name of the csv file to process\n \"\"\"\n\n output_message(\n f\"Start parsing model(s) from csv file {db_file}...\", \"debug\")\n if os.path.basename(os.path.dirname(db_file)) == 'falcon':\n source_platform = 'Falcon'\n else:\n source_platform = self._general_config.get(\n 'MIGRATION').get('TARGET_PLATFORM')\n\n with open(db_file) as csv_file:\n csv_reader = csv.reader(csv_file)\n line_count = 0\n for row in csv_reader:\n # 'DATABASE_NAME','SCHEMA_NAME','TABLE_NAME','COLUMN_NAME','DATA_TYPE','LENGTH','DECIMAL']\n if line_count >= 0 and row[0] != \"\":\n database_name = row[0]\n schema_name = row[1]\n table_name = row[2]\n column_name = row[3]\n data_type = row[4]\n length = row[5] if row[5] != '' else None\n decimal = row[6] if row[6] != '' else None\n if database_name not in self.model:\n self.model[database_name] = {}\n self.model[database_name]['database_name'] = database_name\n self.model[database_name]['source_database'] = source_platform\n self.cdw = self.model[database_name]['source_database'].upper(\n )\n self.model[database_name]['total_schemas'] = 0\n self.model[database_name]['total_tables'] = 0\n self.model[database_name]['total_columns'] = 0\n self.model[database_name]['schemas'] = {}\n if schema_name not in self.model[database_name]['schemas']:\n self.model[database_name]['schemas'][schema_name] = {}\n self.model[database_name]['total_schemas'] += 1\n if table_name not in self.model[database_name]['schemas'][schema_name]:\n self.model[database_name]['schemas'][schema_name][table_name] = {\n }\n self.model[database_name]['total_tables'] += 1\n if column_name not in self.model[database_name]['schemas'][schema_name][table_name]:\n self.model[database_name]['schemas'][schema_name][table_name][column_name] = {\n }\n self.model[database_name]['total_columns'] += 1\n\n self.model[database_name]['schemas'][schema_name][table_name][column_name] = datatype(\n self._general_config, self._source_config, self._target_config,\n self.cdw, data_type, length, decimal)\n\n line_count += 1\n\n stats = self.get_stats()\n\n 
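        # Editorial note (added commentary, not from the original source): at this\n        # point self.model is a nested mapping of\n        #     database -> 'schemas' -> schema -> table -> column -> datatype instance\n        # so a parsed row such as ('SALES', 'PUBLIC', 'ORDERS', 'ORDER_ID', 'NUMBER', 10, 0)\n        # would be reachable as self.model['SALES']['schemas']['PUBLIC']['ORDERS']['ORDER_ID'];\n        # these names are hypothetical placeholders.\n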
output_message(''.join(\n (f\"Parsed csv file {db_file} ({self.model[database_name]['source_database']}): \",\n f\"Databases: {stats['db_cnt']} \",\n f\"Schemas: {stats['sch_cnt']} \",\n f\"Tables: {stats['tbl_cnt']} \",\n f\"Columns: {stats['col_cnt']}\")),\n \"success\")\n\n def parse_dbs(self, database_name, db_data):\n \"\"\"\n Loads the model information from the DBS file. A DBS contains only one database and the file name\n is the database name\n\n Args:\n database_name (str): Name of the database we are loading schemas, tables etc for\n db_data (dict): The data from the dbs file\n \"\"\"\n\n self.model[database_name]['database_name'] = database_name\n self.model[database_name]['source_database'] = db_data.attrib['database'] if db_data.attrib['database'].upper(\n ) != 'THOUGHTSPOT' else 'FALCON'\n self.cdw = self.model[database_name]['source_database'].upper()\n self.model[database_name]['schemas'] = {}\n self.model[database_name]['total_tables'] = 0\n self.model[database_name]['total_columns'] = 0\n\n output_message(\n f\"Start parsing database {database_name} ({self.model[database_name]['source_database']}) from dbs...\")\n for schema in db_data:\n if schema.tag == 'schema':\n schema_name = schema.attrib['name'].encode(\n \"ascii\", errors=\"ignore\").decode()\n tables = {}\n for table in schema:\n if table.tag == 'table':\n table_name = table.attrib['name'].encode(\n \"ascii\", errors=\"ignore\").decode()\n\n columns = {}\n for column in table:\n if column.tag == \"column\":\n columns[column.attrib['name'].encode(\"ascii\", errors=\"ignore\").decode()] = datatype(\n self._general_config, self._source_config, self._target_config, self.cdw,\n column.attrib['type'].upper(),\n column.attrib[\"length\"] if \"length\" in column.attrib else None, column.attrib\n [\"decimal\"] if \"decimal\" in column.attrib else None)\n\n tables[table_name] = columns\n self.model[database_name]['total_columns'] += len(\n columns)\n self.model[database_name]['schemas'][schema_name] = tables\n self.model[database_name]['total_tables'] += len(tables)\n self.model[database_name]['total_schemas'] = len(\n self.model[database_name]['schemas'])\n\n output_message(''.join(\n (f\"Parsed database {database_name} ({self.model[database_name]['source_database']}): \",\n f\"Schemas: {self.model[database_name]['total_schemas']} \",\n f\"Tables: {self.model[database_name]['total_tables']} \",\n f\"Columns: {self.model[database_name]['total_columns']}\")))\n\n def write_to_csv(self, file_name='./output/test.csv'):\n \"\"\"\n Test function to write the contents of the model to a csv file\n\n Args:\n file_name (str, optional): Name of the file to write to. 
Defaults to './output/test.csv'.\n \"\"\"\n with open(file_name, \"w\") as stream:\n writer = csv.writer(stream)\n for db in self.model:\n for schema in self.model[db]['schemas']:\n for table in self.model[db]['schemas'][schema]:\n for column in self.model[db]['schemas'][schema][table]:\n data = [\n db,\n schema,\n table,\n column,\n self.model[db]['schemas'][schema][table][column].basetype,\n self.model[db]['schemas'][schema][table][column].length,\n self.model[db]['schemas'][schema][table][column].decimal]\n writer.writerow(data)\n\n def get_column_list_for_table(self, database_name, schema_name, table_name):\n \"\"\"\n Returns a list of all the columns in the table\n\n Args:\n database_name (str) : Name of the database\n schema_name (str) : Name of the schema\n table_name (str) : Name of the table\n\n Returns:\n list: A list of the column names in the table\n \"\"\"\n\n return [column.casefold()\n for database in self.model if database.casefold() == database_name.casefold()\n for schema in self.model[database]['schemas'] if schema.casefold() == schema_name.casefold()\n for table in self.model[database]['schemas'][schema] if table.casefold() == table_name.casefold()\n for column in self.model[database]['schemas'][schema][table]]\n\n def similar_names(self, name1, name2):\n \"\"\"\n Returns a similarity ratio of name1 and name2\n\n Args:\n name1 (_type_): The first name\n name2 (_type_): The name to check similarity with the first name\n\n Returns:\n float: The ratio of similarity\n \"\"\"\n return SequenceMatcher(None, name1, name2).ratio()\n\n def table_fuzzy_search(self, source_db, source_schema, source_table,\n source_column_list, mapping_details, mapping_category):\n \"\"\"\n _summary_\n\n Args:\n source_db (str): _description_\n source_schema (str): _description_\n source_table (str): _description_\n source_column_list (list): _description_\n mapping_details (dict): _description_\n mapping_category (str): _description_\n\n Returns:\n _type_: _description_\n \"\"\"\n result = False\n\n output_message(\n f'Executing fuzzy match for table {source_db}.{source_schema}.{source_table} with columns {\",\".join(source_column_list)}')\n\n output_message('Considering tables:')\n output_message(','.join(source_column_list))\n\n # This goes wrong when there is a (user uploaded) table which does not exist in source and no match can be found\n # for example, the column names are too different or there are more columns in source than on target\n output_message(','.join([f\"{tdatabase}.{tschema}.{ttable} ({mapping_details.table_available_for_matching(mapping_category, tdatabase, tschema, ttable)}) \" for tdatabase in self.model\n for tschema in self.model[tdatabase]['schemas']\n for ttable in self.model[tdatabase]['schemas'][tschema]\n # Fuzzy match the column name lists\n if [MigrationUtils.fuzzy_strip(c1) for c1 in source_column_list] ==\n [MigrationUtils.fuzzy_strip(c2)\n for c2 in self.get_column_list_for_table(tdatabase, tschema, ttable)] or\n all(\n elem\n in\n [MigrationUtils.fuzzy_strip(c2)\n for c2 in self.get_column_list_for_table(tdatabase, tschema, ttable)]\n for elem in [MigrationUtils.fuzzy_strip(c1) for c1 in source_column_list])\n ]))\n # --------------------------------------------------------------------------------------------------\n # Ranking explained:\n # 100.0 : Perfect match: same table name (case insensitive),\n # same columns (fuzzy matched) and same number of columns\n # 75.0 - 85.0 : Different table names, same columns (fuzzy matched) and same number of columns.\n # The 
closer the number is to 85, the more similar the two table names are\n # 50.0 : Same table name (case insensitive), source columns are present in target table,\n # but target table has additional columns\n # 25.0 - 35.0 : Different table names, source columns are present in target table,\n # but target table has additional columns. The closer the number is to 35, the more\n # similar the two table names are\n # 0.0 : No match found\n # --------------------------------------------------------------------------------------------------\n matching_tables_prep = [{\"database\": tdatabase, \"schema\": tschema, \"table\": ttable,\n # Check if a target table is available for mapping, i.e. it has not been mapped to\n # any other source table already\n \"available\": mapping_details.table_available_for_matching(\n mapping_category, tdatabase, tschema, ttable),\n # -------------------------------------------------------------------------------\n # Score = 100 (perfect) => identical table names (case insensitive) and the same\n # amount of identical column names (fuzzy matched)\n \"rank\": 100\n if ttable.casefold() == source_table.casefold() and len(source_column_list) ==\n len(self.get_column_list_for_table(\n tdatabase, tschema, ttable))\n\n # -------------------------------------------------------------------------------\n # Score = 75 - 85 =>\n # The table names do not match (case insensitive), but the tables have the same\n # amount of fuzzy matching columns.\n # Base score is 75, but this can be increased based on the similarity of the\n # table names\n else\n round(75 + (10 * self.similar_names(ttable.casefold(),\n source_table.casefold())),\n 1)\n if ttable.casefold() != source_table.casefold() and len(source_column_list) ==\n len(self.get_column_list_for_table(\n tdatabase, tschema, ttable))\n\n # -------------------------------------------------------------------------------\n # Score = 50 => identical column names (case insensitive). 
All columns of the\n # source table fuzzy match the target columns, but the target table has additional\n # columns.\n else 50\n if ttable.casefold() == source_table.casefold() and len(source_column_list) <\n len(self.get_column_list_for_table(\n tdatabase, tschema, ttable))\n\n\n # -------------------------------------------------------------------------------\n # Score = 25 - 35 =>\n # The table names do not match (case insensitive), all columns of the source\n # table fuzzy match the target columns, but the target table has additional\n # columns.\n # Base score is 25, but this can be increased based on the similarity of the\n # table names\n # There is an additional condition (configurable) that the source table must have\n # at least X columns\n else\n round(25 + (10 * self.similar_names(ttable.casefold(),\n source_table.casefold())),\n 1)\n if ttable.casefold() != source_table.casefold() and len(source_column_list) <\n len(self.get_column_list_for_table(\n tdatabase, tschema, ttable)) and len(source_column_list)\n >= self._general_config.get('MODEL_VALIDATION').get('MIN_COL_COUNT_FUZZY')\n\n # -------------------------------------------------------------------------------\n # Score = 0 => No suitable match found\n else 0}\n\n for tdatabase in self.model\n for tschema in self.model[tdatabase]['schemas']\n for ttable in self.model[tdatabase]['schemas'][tschema]\n # Fuzzy match the column name lists\n if [MigrationUtils.fuzzy_strip(c1) for c1 in source_column_list] ==\n [MigrationUtils.fuzzy_strip(c2)\n for c2 in self.get_column_list_for_table(tdatabase, tschema, ttable)] or\n all(\n elem\n in\n [MigrationUtils.fuzzy_strip(c2)\n for c2 in self.get_column_list_for_table(tdatabase, tschema, ttable)]\n for elem in [MigrationUtils.fuzzy_strip(c1) for c1 in source_column_list])\n ]\n\n # Remove matches with rank of 0\n matching_tables = [\n mt for mt in matching_tables_prep if float(mt['rank']) > 0]\n\n # --------------------------------------------------------------------------------------------\n # Build a list of similar tables, these are tables which are not matching candidates but are\n # highly similar in terms of column names\n # --------------------------------------------------------------------------------------------\n sim_tables = [{\"table\": f\"{tdatabase}.{tschema}.{ttable}\",\n \"sim_pct\":\n round(\n (\n len(\n list(\n # Columns appearing in both tables\n set([MigrationUtils.fuzzy_strip(c1) for c1 in source_column_list]) &\n set(\n [MigrationUtils.fuzzy_strip(c2)\n for c2 in self.get_column_list_for_table(tdatabase, tschema, ttable)]))) /\n len(source_column_list)) * 100, 1)}\n for tdatabase in self.model\n for tschema in self.model[tdatabase]['schemas']\n for ttable in self.model[tdatabase]['schemas'][tschema]\n if f\"{tdatabase}.{tschema}.{ttable}\" not in [f\"{mt['database']}.{mt['schema']}.{mt['table']}\" for mt in matching_tables]]\n\n # Rank this table list based on similarity percentage\n sim_table_ranking = sorted([t for t in sim_tables if t['sim_pct'] > self._general_config.get(\n 'MODEL_VALIDATION').get('MIN_COLUMN_SIMILARITY_PCT')], key=lambda d: d['sim_pct'], reverse=True)\n\n # ----------------------------------------------------------------------------------\n # Do we have any matching candidates (which are available, i.e. 
not already mapped)\n # ----------------------------------------------------------------------------------\n if len(matching_tables) > 0 and len([m['rank'] for m in matching_tables if m['available']]) > 0:\n # Get the maximum rank of the available tables\n max_rank = max([m['rank']\n for m in matching_tables if m['available']])\n\n # If the rank is higher than the set minimal rank for a match (config)\n if max_rank >= self._general_config.get('MODEL_VALIDATION').get('MINIMUM_SCORE_FOR_MATCH'):\n # Add details to the mapping comments\n mapping_details.add_notification(VAL_NOTIFICATION(\n f\"{source_db}.{source_schema}.{source_table}\",\n \"Matching Candidates:\"))\n\n # 'Pick' the candidate with the highest rank\n picked = False\n for m in matching_tables:\n picked = not (\n picked) and m['rank'] == max_rank and m['available']\n\n # Add a comment with the score explanation\n mapping_details.add_notification(\n VAL_NOTIFICATION(\n f\"{source_db}.{source_schema}.{source_table}\",\n f\"----{m['database']}.{m['schema']}.{m['table']} (Available:{m['available']}, \" +\n f\"Score:{m['rank']}) {' <==== BEST CANDIDATE' if picked else ''}\",))\n\n m['status_msg'] = \"\"\n if picked:\n if m['rank'] == 100:\n m['status_msg'] = ''.join((\n f\"Table {m['table']} matched via identical match: \",\n \"same table name, same columns\"))\n elif m['rank'] >= 75:\n m['status_msg'] = ''.join((\n f\"Table {m['table']} matched via fuzzy match: \",\n \"different table name, same columns\"))\n elif m['rank'] == 50:\n m['status_msg'] = ''.join((\n f\"Table {m['table']} matched via fuzzy match: same table name, \",\n \"target has more columns than source which will result in a partial match\"))\n elif m['rank'] >= 25:\n m['status_msg'] = ''.join((\n f\"Table {m['table']} matched via fuzzy match: different table name, \",\n \"target has more columns than source which might result in a partial match\"))\n\n mapping_details.add_notification(\n VAL_NOTIFICATION(\n f\"{source_db}.{source_schema}.{source_table}\",\n \"------\" + str(m['status_msg'])))\n\n result = m\n break\n
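\n # Worked example (illustrative only, not taken from a real run): for a\n # source table 'customers' and an available candidate 'customer' with an\n # identical column list, SequenceMatcher gives a name similarity of about\n # 0.94, so the candidate scores round(75 + (10 * 0.94), 1) = 84.4 and\n # lands in the 75.0 - 85.0 band described above.\n\n # Output similar tables. 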
These tables are currently not valid mappings, but they might help the investigation\n if len(sim_table_ranking) > 0:\n mapping_details.add_notification(\n VAL_NOTIFICATION(\n f\"{source_db}.{source_schema}.{source_table}\",\n \"Similar (non matching) tables:\"))\n for t in sim_table_ranking:\n mapping_details.add_notification(\n VAL_NOTIFICATION(\n f\"{source_db}.{source_schema}.{source_table}\",\n f\"----{t['table']} (Similarity Percent: {t['sim_pct']})\"))\n\n return result\n\n\n# ======================================================================================================================\n# CLASS business_model\n# ======================================================================================================================\n\nclass business_model:\n COL_LOGICAL_COLUMN_NAME = 0\n COL_COLUMN_GUID = 2\n COL_LOGICAL_TABLE_NAME = 3\n COL_TABLE_GUID = 4\n COL_PHYSICAL_COLUMN_NAME = 5\n COL_PHYSICAL_TABLE_NAME = 7\n file_name = None\n src_model_tables = None\n\n def __init__(self, folder_name):\n self.folder_name = folder_name\n self.load_from_file()\n\n def load_from_file(self):\n # Should exist as otherwise an error would have been raised earlier\n for file in os.listdir(self.folder_name):\n if file.endswith(\".xls\") or file.endswith(\".tsv\"):\n self.file_name = self.folder_name + file\n # Use the first matching business model file\n break\n\n self.src_model_tables = []\n if Path(self.file_name).is_file():\n with open(self.file_name) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n line_count = 0\n for row in csv_reader:\n if line_count > 0 and row[self.COL_PHYSICAL_TABLE_NAME] != \"\":\n self.src_model_tables.append({\n \"TableGUID\": row[self.COL_TABLE_GUID],\n \"LogicalTableName\": row[self.COL_LOGICAL_TABLE_NAME],\n \"PhysicalTableName\": row[self.COL_PHYSICAL_TABLE_NAME],\n \"ColumnGUID\": row[self.COL_COLUMN_GUID],\n \"LogicalColumnName\": row[self.COL_LOGICAL_COLUMN_NAME],\n \"PhysicalColumnName\": row[self.COL_PHYSICAL_COLUMN_NAME]\n })\n line_count += 1\n\n def locate_by_id(self, table_id):\n table_details = None\n if table_id in {t['TableGUID'] for t in self.src_model_tables}:\n table_details = [\n t for t in self.src_model_tables if t['TableGUID'] == table_id]\n output_message(\n f\"Located user uploaded table in source business model. 
Actual name is {table_details[0]['PhysicalTableName']}\")\n # As this is a 100% sure match overwrite table and columns to the mappings by\n # locating this matched name as the target but with this source db and schema\n return table_details\n\n\n# ======================================================================================================================\n# CLASS connections_yaml\n# ======================================================================================================================\n\nclass connections_yaml:\n source_model = None\n target_model = None\n business_model = None\n source_file_name = None\n source_schema_list = None\n remap_overrides = None\n mapping_details = None\n\n def __init__(self, general_config, source_config, target_config, source_model,\n target_model, mapping_details):\n self._general_config = general_config\n self._source_config = source_config\n self._target_config = target_config\n\n yaml_found = False\n for file in os.listdir(self._general_config.get('FILE_LOCATIONS').get('SRC_YAML_FOLDER')):\n if file.endswith(\".yaml\"):\n yaml_found = True\n break\n\n if not yaml_found:\n raise EInputFilesMissing('input yaml', self._general_config.get(\n 'FILE_LOCATIONS').get('SRC_YAML_FOLDER'))\n\n self.source_file_name = self._general_config.get(\n 'FILE_LOCATIONS').get('SRC_YAML_FOLDER') + file\n self.source_model = source_model\n self.target_model = target_model\n self.business_model = business_model(self._general_config.get(\n 'FILE_LOCATIONS').get('BUSINESS_MODEL_FOLDER'))\n self.tables_used_for_renaming = []\n self.mapping_details = mapping_details\n self.load_yaml_from_file()\n self.prepare_yaml()\n output_message(\"--YAML Preparation completed...\")\n\n def load_yaml_from_file(self):\n with open(self.source_file_name) as file:\n self.contents = yaml.load(file, Loader=yaml.RoundTripLoader)\n\n for table in self.contents['table']:\n table['external_table']['mapping_status'] = 'UNMAPPED'\n for col in table['column']:\n col['mapping_status'] = 'UNMAPPED'\n\n def identify_similar_tables(self):\n similar_tables = set()\n for table in self.contents['table']:\n col_list = [c['name'] for t in self.contents['table']\n if t['external_table']['table_name'] == table['external_table']['table_name']\n for c in t['column']]\n\n tables_with_similar_columns = [\n t['name'] for t in self.contents['table'] if [\n MigrationUtils.fuzzy_strip(\n c1['name']) for c1 in t['column']] == [\n MigrationUtils.fuzzy_strip(c2) for c2 in col_list]]\n if len(tables_with_similar_columns) > 1:\n similar_tables.add(tuple(tables_with_similar_columns))\n\n if len(similar_tables):\n msg = \"\".join(\n (\"We have found tables with similar column definitions in the YAML file. This might cause the \",\n \"fuzzy matching to match tables incorrectly. 
The tables with similar column names are:\\n\"))\n for t in similar_tables:\n msg += \",\".join(t) + \"\\n\"\n output_message(msg, \"warning\")\n\n def prepare_yaml(self):\n output_message(\"Preparing YAML remapping file for migration....\")\n errors = []\n self.identify_similar_tables()\n\n for table in self.contents['table']:\n output_message(''.join(\n (f\"Preparing YAML for table: {table['external_table']['db_name']}.\",\n f\"{table['external_table']['schema_name']}.\",\n f\"{table['external_table']['table_name']}\")))\n\n # -----------------------------------------------------------------------------------------------------\n # Step 1 - Extract Falcon table details: Extract the database, schema and table name from the formatted\n # table_name __\n # -----------------------------------------------------------------------------------------------------\n # For all combinations of _ present in the source model,\n # if the formatted name starts with them, extract them\n db_schemas = [{\"db\": db.lower(),\n \"schema\": schema.lower()}\n for db in self.source_model.model for schema in self.source_model.model[db]['schemas']\n if table['external_table']['table_name'].lower().startswith(\n db.lower() + \"_\" + schema.lower())]\n\n if len(db_schemas) == 1:\n table['external_table']['falcon_db'] = db_schemas[0]['db']\n table['external_table']['falcon_schema'] = db_schemas[0]['schema']\n table['external_table']['falcon_table'] = table['external_table']['table_name'][\n len(db_schemas[0]['db']) + len(db_schemas[0]['schema']) + 2:]\n table['external_table']['old_falcon_db'] = table['external_table']['db_name']\n table['external_table']['old_falcon_schema'] = table['external_table']['schema_name']\n table['external_table']['old_falcon_table'] = table['external_table']['table_name']\n table['external_table']['db_name'] = db_schemas[0]['db']\n table['external_table']['schema_name'] = db_schemas[0]['schema']\n table['external_table']['table_name'] = table['external_table']['table_name'][\n len(db_schemas[0]['db']) + len(db_schemas[0]['schema']) + 2:]\n output_message(''.join(\n (f\"Mapped to table: {table['external_table']['db_name']}.\",\n f\"{table['external_table']['schema_name']}.{table['external_table']['table_name']}\")))\n else:\n # Cannot extract database and schema name from YAML falcon table_name\n errors.append(''.join(\n (\"Cannot extract database and schema name from YAML falcon table_name \",\n table['external_table']['table_name'].lower())))\n\n # ---------------------------------------------\n # Step 2 - Fix names for user uploaded tables\n # ---------------------------------------------\n if table['external_table']['falcon_db'].lower() == 'falconuserdatadatabase' and \\\n table['external_table']['falcon_schema'].lower() == 'falconuserdataschema' and \\\n table['external_table']['falcon_table'].lower()[:9] == 'userdata_':\n output_message(\n \"Identified as user uploaded table. 
Trying to locate in business model.\")\n model_table = self.business_model.locate_by_id(table['id'])\n # As this is a 100% sure match overwrite table and columns to the mappings by locating this matched\n # name as the target but with this source db and schema\n if model_table is not None:\n table['external_table']['falcon_table'] = model_table[0]['PhysicalTableName']\n for col in model_table:\n mcol = [c for c in table['column']\n if c['id'] == col['ColumnGUID']]\n mcol[0]['external_column'] = col['PhysicalColumnName']\n else:\n # Cannot locate user uploaded table in business model\n errors.append(''.join(\n (f\"Cannot locate user uploaded table {table['external_table']['falcon_table'].lower()} \",\n f\"with id {table['id']} in business model\")))\n\n if len(errors) > 0:\n raise EYAMLPreparationError(errors)\n\n def compare_yaml_to_source(self):\n # for table in self.contents['table']:\n yaml_tables = [\n f\"{st['external_table']['db_name'].lower()}.{st['external_table']['schema_name'].lower()}.\" +\n f\"{st['external_table']['falcon_table'].lower()}\" for st in self.contents['table']]\n # f\"{st['external_table']['table_name'].lower()}\" for st in self.contents['table']]\n src_tables = [\n f\"{d.lower()}.{s.lower()}.{t.lower()}\"\n for d in self.source_model.model\n for s in self.source_model.model[d]['schemas'] for t in self.source_model.model[d]['schemas'][s]]\n\n missing_model = (set(yaml_tables).difference(src_tables))\n\n if len(missing_model) > 0:\n msg = \"Identified tables in the remapping YAML which do not exist in our source model.\\n\"\n msg += '\\n'.join(missing_model)\n output_message(msg, \"warning\")\n # A missing table in the source model should be picked up and fixed by the\n # main migration, given that it exists in the target model\n\n def migrate(self):\n st = time.time()\n output_message(\"Starting YAML migration process...\")\n errors = []\n self.tables_used_for_renaming = []\n self.table_columns = {}\n\n # ============\n job_progress = Progress(\n \"{task.description}\",\n SpinnerColumn(),\n BarColumn(),\n TextColumn(\"[progress.percentage]{task.percentage:>3.0f}%\"),\n )\n\n t_cnt = len([t for t in self.contents['table']])\n c_cnt = len([c for t in self.contents['table'] for c in t['column']])\n\n job_tables = job_progress.add_task(\n \"Processing table mappings\", total=t_cnt)\n job_columns = job_progress.add_task(\n \"Processing column mappings\", total=c_cnt)\n\n total = sum(task.total for task in job_progress.tasks)\n overall_progress = Progress()\n overall_task = overall_progress.add_task(\"All Jobs\", total=int(total))\n\n progress_table = Table.grid()\n progress_table.add_row(\n Panel.fit(\n overall_progress,\n title=\"[b]Overall YAML Remapping Progress\",\n subtitle=\"All yaml remapping tasks to be executed\",\n border_style=\"green\",\n padding=(\n 2,\n 2)),\n Panel.fit(\n job_progress,\n title=\"[b]YAML remapping Tasks\",\n subtitle=\"Individual YAML remapping tasks being executed\",\n border_style=\"red\",\n padding=(\n 1,\n 2)),\n )\n\n with Live(progress_table, refresh_per_second=10):\n for table in self.contents['table']:\n output_message(\n f\"\\nMigrating table {table['external_table']['falcon_table']}\")\n # ------------------------------------------------------------------------------------------------\n # Most of the mapping should have been done in the DDL compare already, so let's first check if we\n # have a mapping for this table and the columns\n # ------------------------------------------------------------------------------------------------\n # 
Get the table mappings\n yaml_table_mappings = self.mapping_details.get(\n 'YAML', 'TABLE', table['external_table']['falcon_db'],\n table['external_table']['falcon_schema'],\n table['external_table']['falcon_table'])\n table_mappings = self.mapping_details.get(\n 'DDL', 'TABLE', table['external_table']['falcon_db'],\n table['external_table']['falcon_schema'],\n table['external_table']['falcon_table'])\n col_mappings = self.mapping_details.get(\n 'DDL', 'COLUMN', table['external_table']['falcon_db'],\n table['external_table']['falcon_schema'],\n table['external_table']['falcon_table'])\n yaml_columns = [c['external_column'].casefold() for t in self.contents['table']\n if t['external_table']['falcon_db'].casefold() ==\n table['external_table']['falcon_db'].casefold() and\n t['external_table']['falcon_schema'].casefold() ==\n table['external_table']['falcon_schema'].casefold() and\n t['external_table']['falcon_table'].casefold() ==\n table['external_table']['falcon_table'].casefold() for c in t['column']]\n if len(yaml_table_mappings) == 1 and yaml_table_mappings[0].status == 'OVERRIDE':\n table['external_table'][\"db_name\"] = yaml_table_mappings[0].tar_database\n table['external_table'][\"schema_name\"] = yaml_table_mappings[0].tar_schema\n table['external_table'][\"table_name\"] = yaml_table_mappings[0].tar_table\n table['external_table']['mapping_status'] = 'MAPPED'\n for col in table['column']:\n col['mapping_status'] = 'MAPPED'\n col_mappings = self.mapping_details.get(\n 'YAML', 'COLUMN', table['external_table']['falcon_db'],\n table['external_table']['falcon_schema'],\n table['external_table']['falcon_table'],\n col['external_column'],\n True)\n if len(col_mappings) == 1:\n col['external_column'] = col_mappings[0].tar_column\n col['data_type'] = col_mappings[0].tar_datatype.upper()\n else:\n errors.append(''.join(\n (\"Cannot find override column for YAML overridden table \",\n f\"{table['external_table']['db_name']}.{table['external_table']['schema_name']}.\",\n f\"{table['external_table']['table_name']} in override file\")))\n if not overall_progress.finished:\n job_progress.advance(job_columns)\n completed = sum(\n task.completed for task in job_progress.tasks)\n overall_progress.update(\n overall_task, completed=completed)\n\n elif len(table_mappings) == 1:\n # Table mapped\n table['external_table'][\"db_name\"] = table_mappings[0].tar_database\n table['external_table'][\"schema_name\"] = table_mappings[0].tar_schema\n table['external_table'][\"table_name\"] = table_mappings[0].tar_table\n table['external_table']['mapping_status'] = 'MAPPED'\n\n output_message(\n ''.join(\n (\"Table successfully mapped to \",\n f\"{table['external_table']['db_name']}.{table['external_table']['schema_name']}.\",\n f\"{table['external_table']['table_name']}\")))\n if sorted([MigrationUtils.fuzzy_strip(m.src_column.casefold()) for m in col_mappings]) == sorted([\n MigrationUtils.fuzzy_strip(y) for y in yaml_columns]):\n output_message(\"All columns successfully mapped\")\n\n for col in table['column']:\n col['mapping_status'] = 'MAPPED'\n col_mappings = self.mapping_details.get(\n 'DDL', 'COLUMN', table['external_table']['falcon_db'],\n table['external_table']['falcon_schema'],\n table['external_table']['falcon_table'],\n col['external_column'],\n True)\n\n if len(col_mappings) == 1:\n col['external_column'] = col_mappings[0].tar_column\n
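# The matched column keeps the target column name; the YAML data type is\n # then re-derived by normalising the target type through the shared\n # datatype helper and the platform DATATYPE_MAPPINGS lookup below.\n dt = datatype(\n self._general_config,\n self._source_config,\n self._target_config,\n self._general_config.get(\n 'MIGRATION').get('TARGET_PLATFORM'),\n 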
col_mappings[0].tar_datatype.upper(),\n col_mappings[0].tar_datatype_length,\n col_mappings[0].tar_datatype_decimal)\n col['data_type'] = self._target_config['DATATYPE_MAPPINGS'][\n dt.format_type(True)]['YAML_TYPE']\n else:\n errors.append(''.join(\n (\"Cannot find override column for DDL overridden table \",\n f\"{table['external_table']['db_name']}.{table['external_table']['schema_name']}.\",\n f\"{table['external_table']['table_name']} in override file\")))\n if not overall_progress.finished:\n job_progress.advance(job_columns)\n completed = sum(\n task.completed for task in job_progress.tasks)\n overall_progress.update(\n overall_task, completed=completed)\n\n else:\n # So we have no mapping for the table; this most likely means the table did not exist in the\n # source model (as the DDL compare is done source model<->target model). If we get here in the\n # process, it means that it does appear in the remapping YAML, so we will need to try to map the\n # YAML directly to target\n\n tar_tables_with_matching_columns = self.target_model.table_fuzzy_search(\n table['external_table']\n ['falcon_db'],\n table['external_table']\n ['falcon_schema'],\n table['external_table']\n ['falcon_table'],\n [sc['external_column']\n for sc in table['column']],\n self.mapping_details, 'DDL')\n if tar_tables_with_matching_columns:\n if tar_tables_with_matching_columns['rank'] == 100:\n table['external_table'][\"db_name\"] = tar_tables_with_matching_columns['database']\n table['external_table'][\"schema_name\"] = tar_tables_with_matching_columns['schema']\n table['external_table'][\"table_name\"] = tar_tables_with_matching_columns['table']\n table['external_table']['mapping_status'] = 'MAPPED'\n else:\n # No 100% mapping, outputting for confirmation\n self.mapping_details.merge_record(\n mapping_record(\n mapping_category=\"YAML\", mapping_type=\"TABLE\",\n src_database=table['external_table']['falcon_db'],\n src_schema=table['external_table']['falcon_schema'],\n src_table=table['external_table']['falcon_table'],\n tar_database=tar_tables_with_matching_columns['database'],\n tar_schema=tar_tables_with_matching_columns['schema'],\n tar_table=tar_tables_with_matching_columns['table']))\n\n for col in table['column']:\n\n # There must always be a match, as we compared on column lists\n c_mapping = [\n {\"dt\": self.target_model.model[d]['schemas'][s][t][c],\n \"column_name\": c} for d in self.target_model.model\n if d == tar_tables_with_matching_columns['database'].lower()\n for s in self.target_model.model[d]['schemas']\n if s.lower() == tar_tables_with_matching_columns['schema'].lower()\n for t in self.target_model.model[d]['schemas'][s]\n if t.lower() == tar_tables_with_matching_columns['table'].lower()\n for c in self.target_model.model[d]['schemas'][s][t]\n if MigrationUtils.fuzzy_strip(col['external_column']) == MigrationUtils.fuzzy_strip(c)][\n 0]\n\n if tar_tables_with_matching_columns['rank'] == 100:\n col['mapping_status'] = 'MAPPED'\n col['external_column'] = c_mapping['column_name']\n col['data_type'] = self._target_config['DATATYPE_MAPPINGS'][\n c_mapping['dt'].format_type(True)]['YAML_TYPE']\n else:\n # Also output the columns for confirmation\n self.mapping_details.merge_record(\n mapping_record(\n mapping_category=\"YAML\", mapping_type=\"COLUMN\",\n src_database=table['external_table']['falcon_db'],\n src_schema=table['external_table']['falcon_schema'],\n src_table=table['external_table']['falcon_table'],\n src_column=col['external_column'],\n src_datatype=col['data_type'],\n 
src_datatype_length=None, src_datatype_decimal=None,\n tar_database=tar_tables_with_matching_columns['database'],\n tar_schema=tar_tables_with_matching_columns['schema'],\n tar_table=tar_tables_with_matching_columns['table'],\n tar_column=c_mapping['column_name'],\n tar_datatype=c_mapping['dt'].basetype,\n tar_datatype_length=c_mapping['dt'].length,\n tar_datatype_decimal=c_mapping['dt'].decimal))\n if not overall_progress.finished:\n job_progress.advance(job_columns)\n completed = sum(\n task.completed for task in job_progress.tasks)\n overall_progress.update(\n overall_task, completed=completed)\n\n else:\n self.mapping_details.merge_record(\n # table_mapping_record=mapping_record(\n mapping_record(\n mapping_category=\"YAML\", mapping_type=\"TABLE\",\n src_database=table['external_table']['falcon_db'],\n src_schema=table['external_table']['falcon_schema'],\n src_table=table['external_table']['falcon_table'],\n tar_database=table['external_table']['falcon_db'],\n tar_schema=table['external_table']['falcon_schema'],\n tar_table=table['external_table']['falcon_table']))\n for col in table['column']:\n self.mapping_details.merge_record(\n mapping_record(\n mapping_category=\"YAML\", mapping_type=\"COLUMN\",\n src_database=table['external_table']['falcon_db'],\n src_schema=table['external_table']['falcon_schema'],\n src_table=table['external_table']['falcon_table'],\n src_column=col['external_column'],\n src_datatype=col['data_type'],\n src_datatype_length=None, src_datatype_decimal=None,\n tar_database=table['external_table']['falcon_db'],\n tar_schema=table['external_table']['falcon_schema'],\n tar_table=table['external_table']['falcon_table'],\n tar_column=col['external_column'],\n tar_datatype=col['data_type'],\n tar_datatype_length=None, tar_datatype_decimal=None))\n if not overall_progress.finished:\n job_progress.advance(job_columns)\n completed = sum(\n task.completed for task in job_progress.tasks)\n overall_progress.update(\n overall_task, completed=completed)\n\n if not overall_progress.finished:\n job_progress.advance(job_tables)\n completed = sum(\n task.completed for task in job_progress.tasks)\n overall_progress.update(overall_task, completed=completed)\n\n self.mapping_details.export()\n # Validate the YAML for unmapped, duplicates\n if len(errors) > 0:\n output_message(\n \"\\nThe following errors occurred during migration:\" + ''.join([f\"\\n--{err}\" for err in errors]), \"error\")\n\n yaml_validated = self.validate_yaml_mappings()\n et = time.time()\n elapsed_time = round(et - st, 1)\n status_msg = f\"Migration completed in {elapsed_time} seconds.\"\n output_message(status_msg, \"success\" if yaml_validated else \"error\")\n\n if not yaml_validated:\n raise EYAMLValidationError(self._general_config)\n\n output_message(''.join((\"The YAML file was successfully migrated and can be found here \",\n f'{os.getcwd()}/{self._general_config[\"FILE_LOCATIONS\"][\"TAR_YAML_FILE_NAME\"][2:]}.')),\n \"success\")\n self.cleanup_and_write()\n\n def cleanup_and_write(self):\n # Cleans up YAML from the additional attributes\n if self._general_config.get('YAML_PROCESSING').get('YAML_CLEANUP'):\n for table in self.contents['table']:\n # Copy the configured list so 'db_name' is not appended to the\n # shared config entry on every iteration\n tbl_cleanup = list(self._general_config.get(\n 'YAML_PROCESSING').get('YAML_TABLE_CLEAN_UP'))\n # ONLY FOR ADW\n if self._general_config.get('MIGRATION').get('TARGET_PLATFORM') == \"ADW\":\n tbl_cleanup.append('db_name')\n for key in tbl_cleanup:\n if key in table['external_table']:\n table['external_table'].pop(key)\n\n for col in table['column']:\n for key in 
self._general_config.get('YAML_PROCESSING').get('YAML_COL_CLEAN_UP'):\n if key in col:\n col.pop(key)\n with open(self._general_config.get('FILE_LOCATIONS').get('TAR_YAML_FILE_NAME'), 'w') as file:\n yaml.dump(self.contents, file, Dumper=yaml.RoundTripDumper)\n\n def validate_yaml_mappings(self):\n output_message(\"Validating YAML mappings....\")\n success = True\n\n # Do all tables have a mapping?\n unmapped_tables = [\n {'database': t['external_table']['falcon_db'],\n 'schema': t['external_table']['falcon_schema'],\n 'table': t['external_table']['falcon_table']} for t in self.contents['table']\n if t['external_table']['mapping_status'] != \"MAPPED\"]\n\n if len(unmapped_tables) > 0:\n success = False\n output_message(\"We have unmapped tables:\")\n output_message(unmapped_tables)\n\n unmapped_columns = [\n {'database': t['external_table']['falcon_db'],\n 'schema': t['external_table']['falcon_schema'],\n 'table': t['external_table']['falcon_table'],\n 'column': c['external_column'],\n 'st': c['mapping_status']} for t in self.contents['table'] for c in t['column']\n if c['mapping_status'] != \"MAPPED\"]\n if len(unmapped_columns) > 0:\n success = False\n output_message(\"We have unmapped columns:\")\n output_message(unmapped_columns)\n\n dup_sources = [\n t\n for t in Counter(\n [(f\"{t['external_table']['falcon_db']}.{t['external_table']['falcon_schema']}.\",\n f\"{t['external_table']['falcon_table']}\") for t in self.contents['table']])\n if\n Counter(\n [(f\"{t['external_table']['falcon_db']}.{t['external_table']['falcon_schema']}.\",\n f\"{t['external_table']['falcon_table']}\") for t in self.contents['table']])[t] > 1]\n if len(dup_sources) > 0:\n success = False\n output_message(\n \"The following sources have been mapped more than once:\")\n output_message(dup_sources)\n\n dup_targets = [\n t\n for t in Counter(\n [(f\"{t['external_table']['db_name']}.{t['external_table']['schema_name']}.\",\n f\"{t['external_table']['table_name']}\") for t in self.contents['table']])\n if\n Counter(\n [(f\"{t['external_table']['db_name']}.{t['external_table']['schema_name']}.\",\n f\"{t['external_table']['table_name']}\") for t in self.contents['table']])[t] > 1]\n if len(dup_targets) > 0:\n success = False\n output_message(\n \"The following targets have been mapped more than once:\")\n output_message(dup_targets)\n\n if success:\n output_message(\"Validation successful\")\n\n return success\n\n# ======================================================================================================================\n# CLASS mapping_record\n# ======================================================================================================================\n\n\nclass mapping_record:\n mapping_category = None\n mapping_type = None\n src_database = None\n src_schema = None\n src_table = None\n src_column = None\n src_datatype = None\n src_datatype_length = None\n src_datatype_decimal = None\n tar_database = None\n tar_schema = None\n tar_table = None\n tar_column = None\n tar_datatype = None\n tar_datatype_length = None\n tar_datatype_decimal = None\n status = None\n rank = None\n\n def __init__(self, **mapping_details):\n if 'status' not in mapping_details or mapping_details['status'] is None:\n mapping_details['status'] = 'UNMAPPED'\n self._set_values(mapping_details)\n\n def __iter__(self):\n return iter(\n [self.mapping_category, self.mapping_type, self.src_database, self.src_schema, self.src_table, self.\n src_column, self.src_datatype, self.src_datatype_length, self.src_datatype_decimal, 
self.tar_database,\n self.tar_schema, self.tar_table, self.tar_column, self.tar_datatype, self.tar_datatype_length, self.\n tar_datatype_decimal, self.status, self.rank])\n\n def update(self, **mapping_details):\n self._set_values(mapping_details)\n\n def _set_values(self, mapping_details):\n for mapping_col, mapping_value in mapping_details.items():\n setattr(self, mapping_col, mapping_value)\n\n def mark_mapped(self):\n self.status = 'MAPPED'\n\n# ======================================================================================================================\n# CLASS mapping_details\n# ======================================================================================================================\n\n\nclass mapping_details:\n mappings = []\n comments = {}\n notifications = {}\n\n def __init__(self, general_config, source_config, target_config, source_cdw,\n target_cdw, source_files_processed, target_files_processed):\n self._general_config = general_config\n self._source_config = source_config\n self._target_config = target_config\n self._source_files_processed = source_files_processed\n self._target_files_processed = target_files_processed\n self.source_cdw = source_cdw\n self.target_cdw = target_cdw\n self.load_overrides()\n\n def add_notification(self, notification):\n if notification.table not in self.notifications:\n self.notifications[notification.table] = []\n self.notifications[notification.table].append(notification)\n\n def merge_record(self, mapping_record):\n updated = False\n # For each mapping check if we need to update or insert it, if the existing mapping is an override\n # the mapping will not be updated\n for m in self.mappings:\n if m.mapping_category.casefold() == mapping_record.mapping_category.casefold() and \\\n m.mapping_type.casefold() == mapping_record.mapping_type.casefold() and \\\n m.src_database.casefold() == mapping_record.src_database.casefold() and \\\n m.src_schema.casefold() == mapping_record.src_schema.casefold() and \\\n m.src_table.casefold() == mapping_record.src_table.casefold() and \\\n ((m.mapping_type.casefold() == 'column'\n and m.src_column.casefold() == mapping_record.src_column.casefold())\n or m.mapping_type.casefold() == 'table'):\n if m.status.casefold() != 'override':\n m.__dict__.update(mapping_record.__dict__)\n updated = True\n if not updated:\n self.mappings.append(mapping_record)\n\n def validate_override_file(self):\n invalid_data_types = [\n {'db': dt.tar_database, 's': dt.tar_schema, 't': dt.tar_table,\n 'c': dt.tar_column, 'dt': dt.tar_datatype}\n for dt in self.mappings\n if dt.tar_datatype\n not in list([re.sub(r'\\([^)]*\\)', '', m) for m in self._target_config['DATATYPE_MAPPINGS']]) and dt.tar_datatype\n != '']\n\n if len(invalid_data_types) > 0:\n raise EInvalidDataTypes(invalid_data_types, \"override file\")\n\n def load_overrides(self, filter='OVERRIDE'):\n if Path(self._general_config.get('FILE_LOCATIONS').get('MANUAL_OVERRIDES')).is_file():\n with open(self._general_config.get('FILE_LOCATIONS').get('MANUAL_OVERRIDES')) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count > 0 and row[16] == filter:\n self.merge_record(mapping_record(mapping_category=row[0],\n mapping_type=row[1],\n src_database=row[2],\n src_schema=row[3],\n src_table=row[4],\n src_column=row[5],\n src_datatype=row[6],\n src_datatype_length=row[7],\n src_datatype_decimal=row[8],\n tar_database=row[9],\n tar_schema=row[10],\n tar_table=row[11],\n tar_column=row[12],\n 
tar_datatype=row[13],\n tar_datatype_length=row[14],\n tar_datatype_decimal=row[15],\n status=row[16],\n rank=row[17]))\n line_count += 1\n self.validate_override_file()\n\n def export(self, status_filter=[\"UNMAPPED\", \"OVERRIDE\"]):\n export_set = [m for m in self.mappings if m.status in status_filter]\n if len(export_set) > 0:\n with open(self._general_config.get('FILE_LOCATIONS').get('MANUAL_OVERRIDES'), \"w\") as stream:\n writer = csv.writer(stream)\n # Not to happy with the list comp here, must be a better way to get props in order without methods\n writer.writerow([x for ind, x in enumerate(\n list(mapping_record.__dict__.keys())) if 19 > ind > 0])\n writer.writerows(export_set)\n\n def get(self, mapping_category, mapping_type, db_name, schema_name,\n table_name, column_name=None, fuzzy_col_match=False):\n return [m for m in self.mappings\n if m.status != 'UNMAPPED'\n and m.mapping_category == mapping_category\n and m.mapping_type == mapping_type\n and m.src_database.casefold() == db_name.casefold()\n and m.src_schema.casefold() == schema_name.casefold()\n and m.src_table.casefold() == table_name.casefold()\n and (column_name is None or (not (fuzzy_col_match)\n and m.src_column.casefold() == column_name.casefold())\n or (fuzzy_col_match\n and MigrationUtils.fuzzy_strip(m.src_column).casefold() ==\n MigrationUtils.fuzzy_strip(column_name).casefold()))\n ]\n\n def table_available_for_matching(self, mapping_category, db_name, schema_name, table_name):\n return len([m for m in self.mappings\n if m.mapping_type == 'TABLE'\n and m.mapping_category == mapping_category\n and m.tar_database == db_name\n and m.tar_schema == schema_name\n and m.tar_table == table_name]) == 0\n\n def generate_report(self, ddl_mapping_time, src_stats, tar_stats):\n report = comparison_report(self._general_config)\n\n report.add_report_element(report_section_title(\n 'Model Comparison - Validation Summary', True))\n\n # Comparison Summary Table\n report.add_report_element(table_comparison_summary(\n title=\"DDL Comparison Summary\",\n caption=f\"All elements validated in {str(round(ddl_mapping_time, 2))} seconds\",\n headers=[\"Elements\", \"Source\", \"Target\", \"Total\"],\n data=[\n [\"Number of databases\", src_stats['db_cnt'], tar_stats['db_cnt'], src_stats['db_cnt'] +\n tar_stats['db_cnt']],\n [\"Number of schemas\", src_stats['sch_cnt'], tar_stats['sch_cnt'], src_stats['sch_cnt'] +\n tar_stats['sch_cnt']],\n [\"Number of tables\", src_stats['tbl_cnt'], tar_stats['tbl_cnt'], src_stats['tbl_cnt'] +\n tar_stats['tbl_cnt']],\n ]\n ))\n\n # --------------------------------------------------------------------------------------------------\n # Ranking explained:\n # 100.0 : Perfect match: same table name (case insensitive),\n # same columns (fuzzy matched) and same number of columns\n # 75.0 - 85.0 : Different table names, same columns (fuzzy matched) and same number of columns.\n # The closer the number is to 85, the more similar the two table names are\n # 50.0 : Same table name (case insensitive), source columns are present in target table,\n # but target table has additional columns\n # 25.0 - 35.0 : Different table names, source columns are present in target table,\n # but target table has additional columns. 
The closer the number is to 35, the more\n # similar the two table names are\n # 0.0 : No match found\n # --------------------------------------------------------------------------------------------------\n\n report_clarification = f\"\"\"\nThe fuzzy matching process used in the mapping process produced the table mappings displayed above.\nIncorrect mappings can be overridden in the mapping overrides file, which can be found here:\n{os.getcwd()}/{self._general_config[\"FILE_LOCATIONS\"][\"MANUAL_OVERRIDES\"][2:]}\n\nScoring explained:\n\n Score │ Explanation\n════════════╪══════════════════════════════════════════════════════════════════════════════════════════\n 100.0 │ Perfect match:\n │ - Same table name (case insensitive)\n │ - Same columns (fuzzy matched) and same number of columns\n75.0 - 85.0 │ - Different table names\n │ - Same columns (fuzzy matched) and same number of columns\n │ - The closer the number is to 85, the more similar the two table names are\n 50.0 │ - Same table name (case insensitive)\n │ - Source columns are present in target table, but target table has additional columns\n25.0 - 35.0 │ - Different table names\n │ - Source columns are present in target table, but target table has additional columns\n │ - The closer the number is to 35, the more similar the two table names are\n 0.0 │ No suitable match found\n\"\"\"\n report.add_report_element(\n table_current_mappings(\n title=\"Projected mappings\",\n clarification=report_clarification,\n headers=[\"Source FQN\", \"Target FQN\", \"Score\"],\n data=[[f\"{m.src_database}.{m.src_schema}.{m.src_table}\"\n if m.src_database is\n not None and m.src_schema is\n not None and m.src_table is not None else \"---No suitable match found---\",\n f\"{m.tar_database}.{m.tar_schema}.{m.tar_table}\"\n if m.tar_database is\n not None and m.tar_schema is\n not None and m.tar_table is not None else \"---No suitable match found---\",\n float(\"{:.2f}\".format(float(m.rank) if m.rank is not None and m.rank != '' else 0))]\n for m in self.mappings if m.mapping_type == 'TABLE']))\n\n totals = dict(\n sorted(\n {**{'2-LOW': 0, '1-SEVERE': 0},\n **dict(Counter([c.severity for t in self.notifications for c in self.notifications[t]]))}.items()))\n\n # Issue Summary\n issue_summary_data = []\n notification_data = dict(sorted(Counter(\n [f\"{c.severity} - {c.classification}\" for t in self.notifications for c in self.notifications[t]\n if c.severity != '0-INFO']).items()))\n total_count = 0\n for n in notification_data:\n issue_summary_data.append([n, notification_data[n]])\n total_count += notification_data[n]\n\n report.add_report_element(table_issue_summary(\n title=\"Summary of issues detected\",\n headers=['Issue', 'Count'],\n data=issue_summary_data,\n footers=['TOTAL', total_count],\n msg_if_empty=\"\\n\" +\n \"**************************************************************************************************\\n\" +\n \"* SUCCESS: Source and target model have been successfully validated and no issues have been found.\\n\"\n \"**************************************************************************************************\\n\"\n ))\n\n # Table Issue Summary\n issue_summary_data = []\n for table in self.notifications:\n cnts = dict(sorted({**{'2-LOW': 0, '1-SEVERE': 0}, **\n dict(Counter([c.severity for c in self.notifications[table]]))}.items()))\n\n if cnts['2-LOW'] + cnts['1-SEVERE'] > 0:\n issue_summary_data.append(\n [table, cnts['1-SEVERE'], cnts['2-LOW']])\n\n report.add_report_element(table_issue_summary(\n title=\"Summary of 
table issues detected\",\n headers=['Qualified Table Name', 'Severe Issues', 'Minor Issues'],\n data=issue_summary_data,\n footers=['TOTAL', totals['1-SEVERE'], totals['2-LOW']],\n msg_if_empty=\"\\n\" +\n \"**************************************************************************************************\\n\" +\n \"* SUCCESS: Source and target model have been successfully validated and no issues have been found.\\n\"\n \"**************************************************************************************************\\n\"\n ))\n\n report.add_report_element(report_section_title(\n 'Model Comparison - Detailed Results per table', True))\n\n for table in self.notifications:\n cnts = dict(sorted({**{'2-LOW': 0, '1-SEVERE': 0}, **\n dict(Counter([c.severity for c in self.notifications[table]]))}.items()))\n\n # Table issue count\n report.add_report_element(report_section_title(\n f'Detailed validation information for {table}'))\n\n report.add_report_element(table_issue_detail(\n headers=[\"Qualified Table Name\",\n \"Severe Issues\", \"Minor Issues\"],\n data=[[table, cnts['1-SEVERE'], cnts['2-LOW']]]))\n\n # Table Issue List\n issue_list_data = sorted(\n [c for c in self.notifications[table] if c.severity != '0-INFO'],\n key=lambda d: d.severity)\n issue_data = []\n for cmt in issue_list_data:\n issue_data.append([cmt.severity, cmt.msg])\n\n report.add_report_element(table_issue_list(\n headers=[\"Issue Severity\", \"Issue Description\"],\n data=issue_data,\n msg_if_empty=\"No issues have been reported on this table\"\n ))\n\n report_clarification = \"\"\"\nAbove we listed any additional information relevant to the mapping process:\nMatching candidates : The list of tables identified as possible matching candidates with their mapping score.\n The best candidate will be marked.\nSimilar tables : These are similar tables to the source table, which did not score high enough to be a\n matching candidate. 
This gives insight into possible other matches if no proper match has\n been found.\nSimilarity Percent : How similar are the two tables in terms of columns.\n 100% means all the columns of the source table appear in the target table\n (but target might have more)\n\"\"\"\n # Table Info List\n info_list_data = []\n for cmt in [c for c in self.notifications[table] if c.severity == '0-INFO']:\n info_list_data.append([cmt.msg])\n report.add_report_element(table_info_list(\n headers=[\"Additional Information\"],\n clarification=report_clarification,\n data=info_list_data,\n msg_if_empty=\"No additional information available for this table\"\n ))\n\n # Used files table\n report.add_report_element(table_file_list(\n title=\"Files processed\",\n headers=[\"File Type\", \"File Location\"],\n data=[\n [f\"Source Model Files ({len(self._source_files_processed)})\", \"\\n\".join(\n self._source_files_processed)],\n [f\"Target Model Files ({len(self._target_files_processed)})\", \"\\n\".join(\n self._target_files_processed)],\n [\"Source Business Model\", self._general_config.get(\n 'FILE_LOCATIONS').get('BUSINESS_MODEL_FOLDER')]\n ]\n ))\n\n # Output settings to report\n fmt_settings = [[cat, setting, self._general_config[cat][setting]] for cat in self._general_config if cat not in [\n 'DELTA_MIGRATION', 'TEMPLATES'] for setting in self._general_config[cat]]\n report.add_report_element(table_setting_list(\n title=\"Migration Tools Configuration\",\n headers=[\"Category\", \"Parameter\", \"Setting\"],\n data=fmt_settings,\n force_justification=['left', 'left', 'left']\n ))\n\n report.to_file()\n\n def has_issues(self):\n return len([c for t in self.notifications for c in self.notifications[t]\n if c.severity in ['2-LOW', '1-SEVERE']])\n\n\n# ======================================================================================================================\n# CLASS connection_migrator\n# ======================================================================================================================\n\nclass connection_migrator:\n source_model = None\n target_model = None\n mapping_details = None\n\n def __init__(self, general_config): # , project_name):\n self._general_config = general_config\n\n # Validate platforms\n self.check_platform_supported(\n general_config[\"MIGRATION\"][\"SOURCE_PLATFORM\"])\n self.check_platform_supported(\n general_config[\"MIGRATION\"][\"TARGET_PLATFORM\"])\n\n self._source_config = toml.load(\n f\"config/{self._general_config['MIGRATION']['SOURCE_PLATFORM'].casefold()}.toml\")\n self._target_config = toml.load(\n f\"config/{self._general_config['MIGRATION']['TARGET_PLATFORM'].casefold()}.toml\")\n\n # Load the source dbschema model\n self.source_model = dbschema_model(self._general_config.get('FILE_LOCATIONS').get(\n 'SOURCE_MODEL_FOLDER'), self._general_config, self._source_config, self._target_config)\n # Load the target dbschema model\n self.target_model = dbschema_model(self._general_config.get('FILE_LOCATIONS').get(\n 'TARGET_MODEL_FOLDER'), self._general_config, self._source_config, self._target_config)\n\n # Debugging\n # self.source_model.write_to_csv('./projects/cmc/output/falcon.csv')\n # self.target_model.write_to_csv('./projects/cmc/output/redshift.csv')\n\n # Initialise the mapping details\n self.mapping_details = mapping_details(\n self._general_config,\n self._source_config,\n self._target_config,\n self.source_model.cdw,\n self.target_model.cdw,\n self.source_model._source_files_processed,\n self.target_model._source_files_processed)\n
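\n # Illustrative sketch (an assumption, not the shipped config layout): the\n # platform TOML files loaded above are expected to carry DATATYPE_MAPPINGS\n # entries roughly like\n #\n # [DATATYPE_MAPPINGS.\"VARCHAR(n)\"]\n # TARGET_TYPE = \"VARCHAR\"\n # YAML_TYPE = \"VARCHAR\"\n #\n # which map_target_to_source_dt() and the YAML migration look up.\n\n def 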
check_platform_supported(self, cdp_name):\n if not Path(f\"config/{cdp_name.casefold()}.toml\").is_file():\n raise EDatabaseNotSupported(cdp_name)\n\n def identify_mismatch_candidates(self):\n # Identify source tables for which there are multiple candidates on the\n # target model and there is no table with the same name\n similar_tables = {}\n for db in self.source_model.model:\n for schema in self.source_model.model[db]['schemas']:\n for table in self.source_model.model[db]['schemas'][schema]:\n col_list = [c for d in self.source_model.model if d.lower() == db.lower()\n for s in self.source_model.model[d]['schemas'] if s.lower() == schema.lower()\n for t in self.source_model.model[d]['schemas'][s] if t.lower() == table.lower()\n for c in self.source_model.model[d]['schemas'][s][t]]\n\n tables_with_similar_columns = [\n f\"{tdatabase}.{tschema}.{ttable}\"\n for tdatabase in self.target_model.model\n for tschema in self.target_model.model[tdatabase]['schemas']\n for ttable in self.target_model.model[tdatabase]['schemas'][tschema]\n if [MigrationUtils.fuzzy_strip(c1) for c1 in col_list] ==\n [MigrationUtils.fuzzy_strip(c2)\n for c2 in self.target_model.get_column_list_for_table(tdatabase, tschema, ttable)]]\n if len(tables_with_similar_columns) > 1:\n similar_tables[f\"{db}.{schema}.{table}\"] = tables_with_similar_columns\n\n # Filter all entries where there is no target table with the same name as the source table\n similar_tables_different_names = [\n {\"source\": s, \"target\": similar_tables[s]} for s in similar_tables\n if s.split(\".\")[2] not in [t.split(\".\")[2] for t in similar_tables[s]]]\n\n if len(similar_tables_different_names) > 0:\n msg = \"We have found tables with similar column definitions in the source and target model and no \" + \\\n \"matching names. This might cause the fuzzy matching to match tables incorrectly. 
The tables \" + \\\n \"with similar column names are:\\n\"\n for t in similar_tables_different_names:\n msg += f\"\\nSource table : {t['source']} \\nTarget tables: {','.join(t['target'])}\"\n\n output_message(msg, \"warning\")\n\n def map_target_to_source_dt(self, source_dt):\n matched_dts = [re.sub(r'\\([^)]*\\)', '', dt) for dt in self._target_config['DATATYPE_MAPPINGS']\n if self._target_config['DATATYPE_MAPPINGS'][dt]['TARGET_TYPE'] == source_dt.upper()]\n if len(matched_dts) == 0:\n # TODO: Can this still happen now we have a data type check at the beginning?\n output_message(\n (f\"Could not map source data type {source_dt} to any target data type on CDW \",\n self._general_config.get('MIGRATION').get('TARGET_PLATFORM')),\n \"critical\")\n else:\n return matched_dts[0]\n\n def compare_models(self):\n # Requirements:\n # - SOURCE_MODEL_FOLDER not empty\n # - TARGET_MODEL_FOLDER not empty\n # - BUSINESS_MODEL_FOLDER exists\n\n if len(os.listdir(self._general_config.get('FILE_LOCATIONS').get('SOURCE_MODEL_FOLDER'))) == 0:\n output_message(\n (\"No source model(s) specified in \",\n self._general_config.get('FILE_LOCATIONS').get('SOURCE_MODEL_FOLDER')), \"error\")\n\n if len(os.listdir(self._general_config.get('FILE_LOCATIONS').get('TARGET_MODEL_FOLDER'))) == 0:\n output_message(\n (\"No target model(s) specified in \",\n self._general_config.get('FILE_LOCATIONS').get('TARGET_MODEL_FOLDER')), \"error\")\n\n model_found = False\n for file in os.listdir(self._general_config.get('FILE_LOCATIONS').get('BUSINESS_MODEL_FOLDER')):\n if file.endswith(\".xls\") or file.endswith(\".tsv\"):\n model_found = True\n break\n\n if not model_found:\n output_message(\n f\"No business model specified in {self._general_config.get('FILE_LOCATIONS').get('BUSINESS_MODEL_FOLDER')}\",\n \"error\")\n else:\n output_message(\"Start comparing data models....\")\n\n job_progress = Progress(\n \"{task.description}\",\n SpinnerColumn(),\n BarColumn(),\n TextColumn(\"[progress.percentage]{task.percentage:>3.0f}%\"),\n )\n\n stats = self.source_model.get_stats()\n\n job_databases = job_progress.add_task(\n \"Validating Databases\", total=stats['db_cnt'])\n job_schemas = job_progress.add_task(\n \"Validating Schemas\", total=stats['sch_cnt'])\n job_tables = job_progress.add_task(\n \"Validating Tables\", total=stats['tbl_cnt'])\n job_columns = job_progress.add_task(\n \"Validating Columns\", total=stats['col_cnt'])\n\n total = sum(task.total for task in job_progress.tasks)\n overall_progress = Progress()\n overall_task = overall_progress.add_task(\n \"All Jobs\", total=int(total))\n\n progress_table = Table.grid()\n progress_table.add_row(\n Panel.fit(\n overall_progress,\n title=\"[b]Overall Validation Progress\",\n subtitle=\"All validation tasks to be executed\",\n border_style=\"green\",\n padding=(\n 2,\n 2)),\n Panel.fit(\n job_progress,\n title=\"[b]Validation Tasks\",\n subtitle=\"Individual validation tasks being executed\",\n border_style=\"red\",\n padding=(\n 1,\n 2)),\n )\n\n st = time.time()\n self.identify_mismatch_candidates()\n\n with Live(progress_table, refresh_per_second=10):\n\n for db in self.source_model.model:\n for s in self.source_model.model[db]['schemas']:\n for t in self.source_model.model[db]['schemas'][s]:\n output_message(\n f\"Processing source table {db}.{s}.{t}...\")\n # Initialise mapping record\n mapping_records = self.mapping_details.get(\n 'DDL', 'TABLE', db, s, t)\n if len(mapping_records) > 0 and mapping_records[0].status == 'OVERRIDE':\n output_message(\n 'Found an override entry 
for this table')\n table_mapping_record = mapping_records[0]\n db_match = table_mapping_record.tar_database\n s_match = table_mapping_record.tar_schema\n t_match = table_mapping_record.tar_table\n else:\n output_message(\n 'No mapping details available yet')\n table_mapping_record = mapping_record(\n mapping_category=\"DDL\",\n mapping_type=\"TABLE\",\n src_database=db,\n src_schema=s,\n src_table=t)\n\n if table_mapping_record.status != \"OVERRIDE\":\n output_message(\"Trying to fuzzy match table\")\n # Identify tables with the same columns, which has not already been used\n # in another mapping\n db_match = None\n s_match = None\n t_match = None\n matching_tables = self.target_model.table_fuzzy_search(\n db, s, t, self.source_model.get_column_list_for_table(\n db, s, t), self.mapping_details, 'DDL')\n\n if matching_tables:\n output_message(\n f\"Found a match with score: {matching_tables['rank']} => {matching_tables['database']}.{matching_tables['schema']}.{matching_tables['table']}\")\n table_mapping_record.rank = matching_tables['rank']\n db_match = matching_tables['database']\n s_match = matching_tables['schema']\n t_match = matching_tables['table']\n if matching_tables['rank'] == 100:\n table_mapping_record.mark_mapped()\n else:\n self.mapping_details.add_notification(\n E_FUZZY_MATCH_NOTIFICATION(\n f\"{db}.{s}.{t}\", f\"{matching_tables['status_msg']}, please review\"))\n else:\n output_message(\n 'No suitable matching table could be found.')\n self.mapping_details.add_notification(\n E_NO_SUITABLE_MATCH(f\"{db}.{s}.{t}\"))\n\n if (db_match is not None and s_match is not None\n and t_match is not None):\n table_mapping_record.update(\n tar_database=db_match,\n tar_schema=s_match,\n tar_table=t_match)\n\n # Column mapping\n for c in self.source_model.model[db]['schemas'][s][t]:\n col_mapping_records = self.mapping_details.get(\n 'DDL', 'COLUMN', db, s, t, c)\n if len(col_mapping_records) > 0 and col_mapping_records[0].status == 'OVERRIDE':\n column_mapping_record = col_mapping_records[0]\n c_match = column_mapping_record.tar_column\n else:\n column_mapping_record = mapping_record(\n mapping_category=\"DDL\",\n mapping_type=\"COLUMN\",\n src_database=db,\n src_schema=s,\n src_table=t,\n src_column=c,\n src_datatype=self.source_model.model[db]['schemas'][s][t][c].basetype,\n src_datatype_length=(\n self.source_model.model[db]['schemas'][s][t][c].length),\n src_datatype_decimal=(\n self.source_model.model[db]['schemas'][s][t][c].decimal),\n tar_database=db_match,\n tar_schema=s_match,\n tar_table=t_match)\n\n if column_mapping_record.status != 'OVERRIDE':\n c_match = None\n if MigrationUtils.fuzzy_strip(c.casefold()) in [MigrationUtils.fuzzy_strip(\n c.casefold())\n for c in self.target_model.model[db_match]['schemas'][s_match][t_match]]:\n column_mapping_record.update(\n tar_column=c)\n c_match = [\n c1\n for c1 in self.target_model.model[db_match]['schemas'][s_match]\n [t_match]\n if MigrationUtils.fuzzy_strip(c1.casefold()) == MigrationUtils.fuzzy_strip(\n c.casefold())][0]\n else:\n self.mapping_details.add_notification(\n E_COLUMN_NOT_FOUND(f\"{db}.{s}.{t}\", c))\n\n if c_match is not None:\n output_message(\n f\"Checking matched columns for compatibility: {db}.{s}.{t}.{c}:{self.source_model.model[db]['schemas'][s][t][c].format_type(False)}==>{db_match}.{s_match}.{t_match}.{c_match}:{self.target_model.model[db_match]['schemas'][s_match][t_match][c_match].format_type(False)}\")\n\n dt_val = self.source_model.model[db]['schemas'][s][t][c].compare(\n 
self.target_model.model[db_match]['schemas'][s_match][t_match][c_match])\n\n for dts in dt_val:\n dts.set_table(f\"{db}.{s}.{t}\")\n dts.msg = f\"Column {c}<-->{c_match}: {dts.msg}\"\n self.mapping_details.add_notification(\n dts)\n\n column_mapping_record.update(\n tar_datatype=self.target_model.model[db_match][\n 'schemas'][s_match][t_match][c_match].basetype,\n tar_datatype_length=self.target_model.model[db_match][\n 'schemas'][s_match][t_match][c_match].length,\n tar_datatype_decimal=self.target_model.model[db_match][\n 'schemas'][s_match][t_match][c_match].decimal,\n status=table_mapping_record.status)\n\n self.mapping_details.merge_record(\n column_mapping_record)\n if not overall_progress.finished:\n job_progress.advance(job_columns)\n completed = sum(\n task.completed for task in job_progress.tasks)\n overall_progress.update(\n overall_task, completed=completed)\n\n else:\n # Table does not exist; we are going to export it for confirmation. For ease we are\n # making the suggestion that the target db, schema, table are the same as source\n # (can be edited anyway)\n if self._general_config.get('MODEL_VALIDATION').get('COPY_SOURCE_DEF_WHEN_NOT_FOUND'):\n table_mapping_record.update(\n tar_database=db, tar_schema=s, tar_table=t)\n\n # The table does not exist so we need to generate unmapped entries for\n # all its columns too; we assume here that when the table exists it\n # will have identical naming\n for c in self.source_model.model[db]['schemas'][s][t]:\n col_mapping_records = self.mapping_details.get(\n 'DDL', 'COLUMN', db, s, t, c)\n if len(col_mapping_records) > 0 and col_mapping_records[0].status == 'OVERRIDE':\n column_mapping_record = col_mapping_records[0]\n else:\n column_mapping_record = mapping_record(\n mapping_category=\"DDL\",\n mapping_type=\"COLUMN\",\n src_database=db,\n src_schema=s,\n src_table=t,\n src_column=c,\n src_datatype=self.source_model.model[db]['schemas'][s][t][c].basetype,\n src_datatype_length=self.source_model.model[\n db]['schemas'][s][t][c].length,\n src_datatype_decimal=self.source_model.model[db]['schemas'][s][t][c].decimal)\n\n if self._general_config.get('MODEL_VALIDATION').get(\n 'COPY_SOURCE_DEF_WHEN_NOT_FOUND'):\n column_mapping_record.update(\n tar_database=db,\n tar_schema=s,\n tar_table=t,\n tar_column=c,\n tar_datatype=self.map_target_to_source_dt(\n self.source_model.model[db]['schemas'][s][t][c].basetype),\n tar_datatype_length=self.source_model.model[\n db]['schemas'][s][t][c].length,\n tar_datatype_decimal=self.source_model.model[db]['schemas'][s][t][c].decimal)\n if column_mapping_record.status != 'OVERRIDE':\n self.mapping_details.merge_record(\n column_mapping_record)\n\n if not overall_progress.finished:\n job_progress.advance(job_columns)\n completed = sum(\n task.completed for task in job_progress.tasks)\n overall_progress.update(\n overall_task, completed=completed)\n\n self.mapping_details.merge_record(\n table_mapping_record)\n\n if not overall_progress.finished:\n job_progress.advance(job_tables)\n completed = sum(\n task.completed for task in job_progress.tasks)\n overall_progress.update(\n overall_task, completed=completed)\n\n if not overall_progress.finished:\n job_progress.advance(job_schemas)\n completed = sum(\n task.completed for task in job_progress.tasks)\n overall_progress.update(\n overall_task, completed=completed)\n\n if not overall_progress.finished:\n job_progress.advance(job_databases)\n completed = sum(\n task.completed for task in job_progress.tasks)\n overall_progress.update(\n overall_task, 
completed=completed)\n\n        et = time.time()\n        elapsed_time = round(et - st, 1)\n\n        if self.mapping_details.has_issues() > 0:\n            issue_text = '\\nThe DDL Comparison raised ' + \\\n                f'{self.mapping_details.has_issues()} issues. The mapping overrides file can be found here: ' + \\\n                os.getcwd() + \"/\" + \\\n                self._general_config[\"FILE_LOCATIONS\"][\"MANUAL_OVERRIDES\"][2:] + '.'\n            msg_style = \"error\"\n        else:\n            issue_text = \"No major issues were detected.\"\n            msg_style = \"success\"\n\n        status_msg = f\"Data models validated in {elapsed_time} seconds.\\n\" + \\\n            \"The validation report can be found here: \" + \\\n            os.getcwd() + \"/\" + self._general_config[\"FILE_LOCATIONS\"][\"DDL_COMPARISON_REPORT\"][2:] + \\\n            f\"\\n{issue_text}\"\n        output_message(status_msg, msg_style)\n\n        self.mapping_details.generate_report(\n            elapsed_time, self.source_model.get_stats(),\n            self.target_model.get_stats())\n        self.mapping_details.export()\n\n    def migrate_yaml(self):\n        if not self.mapping_details.has_issues() > 0:\n            connections = connections_yaml(\n                self._general_config,\n                self._source_config,\n                self._target_config,\n                self.source_model,\n                self.target_model,\n                self.mapping_details)\n\n            connections.compare_yaml_to_source()\n            connections.migrate()\n            self.mapping_details.export()\n\n        else:\n            raise EModelValidationError(\n                self.mapping_details.has_issues(), self._general_config)\n\n# TODO: Cover all failure scenarios in myfirstproject\n# TODO: Extra groovy parameters --> template\n","repo_name":"thoughtspot/ps_migration_tools","sub_path":"migration_tools/_connection_migrator.py","file_name":"_connection_migrator.py","file_ext":"py","file_size_in_byte":120106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"1803464350","text":"\n\nimport numpy as np \nimport importlib\nimport signal\nimport json \nimport pprint\nimport pickle \nimport os \nimport glob \nfrom tqdm import tqdm \nfrom queue import Empty \n\ndef dbgp(name,value):\n\tif type(value) is dict:\n\t\tprint('{}'.format(name))\n\t\tfor key_i,value_i in value.items():\n\t\t\tprint('{}:{}'.format(str(key_i),value_i))\n\telse:\n\t\tprint('{}:{}'.format(name,value))\n\ndef load_module(fn):\n\tmodule_dir, module_name = fn.split(\"/\")\n\tmodule_name, _ = module_name.split(\".\")\n\tmodule = importlib.import_module(\"{}.{}\".format(module_dir, module_name))\n\treturn module\t\n\ndef write_sim_result(sim_result_dict,fn):\n\twith open(fn+'.pickle', 'xb') as h:\n\t\tpickle.dump(sim_result_dict, h)\n\ndef load_sim_result(fn):\n\twith open(fn, 'rb') as h:\n\t\tsim_result = pickle.load(h)\n\treturn sim_result\n\ndef write_dataset(dataset,fn):\n\t# with open(fn, 'xb') as h:\n\t# \tpickle.dump(dataset, h)\n\tnp.save(fn,dataset)\n\ndef get_dataset_fn(oracle_name,l,robot=0):\n\t# return \"../current/data/{}_l{}_i{}.pickle\".format(oracle,l,robot)\n\treturn \"../current/data/{}_l{}_i{}.npy\".format(oracle_name,l,robot)\n\n# def get_oracle_fn(oracle_name,l,robot=0):\n\t# return \"../current/models/model_{}_l{}_i{}.pt\".format(oracle_name,l,robot)\n\ndef get_oracle_fn(l,num_robots):\n\tvalue_oracle_path = \"../current/models/model_value_l{}.pt\".format(l)\n\tpolicy_oracle_paths = []\n\tfor i in range(num_robots):\n\t\tpolicy_oracle_paths.append(\"../current/models/model_policy_l{}_i{}.pt\".format(l,i))\n\treturn value_oracle_path, policy_oracle_paths\n\n\n\ndef format_dir(clean_dirnames=[]):\n\tdirnames = [\"plots\",\"data\",\"models\"]\n\tfor dirname in dirnames:\n\t\tpath = 
os.path.join(os.getcwd(),\"../current/{}\".format(dirname))\n\t\tos.makedirs(path,exist_ok=True)\n\tfor dirname in clean_dirnames:\n\t\tpath = os.path.join(os.getcwd(),\"../current/{}\".format(dirname))\n\t\tfor file in glob.glob(path + \"/*\"):\n\t\t\tos.remove(file)\n\ndef sample_vector(lims,damp=0.0):\n\t# from cube\n\tdim = lims.shape[0]\n\tx = np.zeros((dim,1))\n\tfor i in range(dim):\n\t\tx[i] = lims[i,0] + np.random.uniform(damp,1-damp)*(lims[i,1] - lims[i,0])\n\treturn x\n\ndef contains(vector,lims):\n\treturn (vector[:,0] >= lims[:,0]).all() and (vector[:,0] <= lims[:,1]).all()\n\ndef get_temp_fn(dirname,i):\n\treturn \"{}/temp_{}.npy\".format(dirname,i)\n\n\ndef init_tqdm(rank,total):\n\tpbar = None \n\tif rank == 0:\n\t\tpbar = tqdm(total=total)\n\treturn pbar\n\ndef update_tqdm(rank,total_per_worker,queue,pbar):\n\tif rank == 0:\n\t\tcount = total_per_worker\n\t\ttry:\n\t\t\twhile True:\n\t\t\t\tcount += queue.get_nowait()\n\t\texcept Empty:\n\t\t\tpass\n\t\tpbar.update(count)\n\telse:\n\t\tqueue.put_nowait(total_per_worker)\n","repo_name":"bpriviere/decision_making","sub_path":"code/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"81"} +{"seq_id":"14639576671","text":"l1, l2 = [], []\nfor _ in range(3):\n X, Y = map(int, input().split())\n l1.append(X)\n l2.append(Y)\n\nfor i, j in zip(l1, l2):\n if l1.count(i) == 1: x = i\n if l2.count(j) == 1: y = j\n\nprint(x, y)\n\n\n\n\n\"\"\"\n5 5\n5 7\n7 5\n\"\"\"","repo_name":"hugehoo/problem-solving","sub_path":"2021/10DEC 네번째점.py","file_name":"10DEC 네번째점.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"36045270583","text":"def leiaInt(msg):\r\n while True:\r\n try:\r\n inteiro = int(input(msg))\r\n except KeyboardInterrupt:\r\n print(f'\\033[31mUsuário preferiu não digitar esse número.')\r\n return 0\r\n except (ValueError, TypeError):\r\n print('\\033[31mERRO: por favor digite um número inteiro válido!\\033[m')\r\n continue\r\n else:\r\n return inteiro\r\n\r\n\r\ndef linha(tam=42):\r\n print('-' * tam)\r\n\r\n\r\ndef cabeçalho(txt):\r\n linha(40)\r\n print(f'{txt:^40}')\r\n linha(40)\r\n\r\n\r\n# noinspection PyBroadException\r\ndef menu(lista):\r\n cabeçalho('MENU PRINCIPAL')\r\n cont = 1\r\n for item in lista:\r\n print(f'\\033[36m{cont}\\033[m -\\033[30m {item}\\033[m')\r\n cont += 1\r\n linha(40)\r\n opc = leiaInt('\\033[32mSua opção: \\033[m')\r\n return opc\r\n\r\n","repo_name":"GuiPolezi/Cursoemvideo-Python","sub_path":"Exercícios/ex115/lib/interface/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"39479588115","text":"from usuario import *\nfrom medico import *\n#from Apppaciente import nombre,i,cons\n\n\nusuarios_registrados = {\n \"Jose\": \"12345\",\n \"Rodriguez\": \"12345\",\n \"Miguel\": \"12345\",\n \"Ana\": \"12345\",\n \n }\ninicio_sesion = usuario(usuarios_registrados)\n\nmi_agenda = medico()\n\ndef mostrar_menu():\n print(\"----- Menú -----\")\n print(\"1.iniciar sesion\")\n print(\"2. Agendar día\")\n print(\"3. Mostrar días agendados\")\n print(\"4.Consultar nombre del paciente\")\n print(\"5.Consultar motivo de la cita\")\n print(\"6.Consultario de la cita\")\n print(\"8. 
Salir\")\n \ncontinuar=True \n\nwhile continuar:\n print(\"\")\n mostrar_menu()\n opcion = input(\"Ingrese el número de la opción deseada: \")\n print(\"\")\n\n if opcion == \"1\":\n nombre=[]\n usuario_ingresado = input(\"Ingresa tu nombre de usuario: \")\n contrasena_ingresada = input(\"Ingresa tu contraseña: \")\n nombre.append(usuario_ingresado)","repo_name":"Jessicapaola23/mejoramiento_trivi-o","sub_path":"CodificacionDiagrama/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12891980424","text":"from IPython import embed\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm, UserChangeForm, PasswordChangeForm # UserCreationForm : 회원가입 폼, AuthenticationForm : 인증과 관련된 form\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth import logout as auth_logout\nfrom django.contrib.auth import update_session_auth_hash\nfrom .forms import CustomUserChangeForm, CustomUserCreationForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import get_user_model # User 클래스 사용하려면 이거 불러와야해!!\n\n# Create your views here.\ndef signup(request):\n if request.user.is_authenticated:\n return redirect('articles:index')\n if request.method == 'POST':\n form = CustomUserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n auth_login(request, user) # 회원가입에 성공했으면 로그인 바로 시켜주는 코드\n return redirect('articles:index')\n else:\n form = CustomUserCreationForm()\n context = {\n 'form' : form\n }\n return render(request, 'accounts/form.html', context)\n\ndef login(request):\n if request.user.is_authenticated:\n return redirect('articles:index')\n if request.method == 'POST':\n form = AuthenticationForm(request, request.POST)\n if form.is_valid():\n # 로그인\n user = form.get_user() # 현재 valid한 User를 찾아서 준다.\n auth_login(request, user) # request에 login관련 정보가 다 들어있다. 
/ auth_login() : the function that actually performs the login\n            # return redirect('articles:index')\n            return redirect(request.GET.get('next') or 'articles:index') # next : to pick up the previous request\n    else:\n        form = AuthenticationForm()\n    context = {\n        'form' : form\n    }\n    return render(request, 'accounts/login.html', context)\n\ndef logout(request):\n    auth_logout(request)\n    return redirect('articles:index')\n\n@login_required\ndef update(request):\n    if request.method == 'POST':\n        form = CustomUserChangeForm(request.POST, instance=request.user)\n        if form.is_valid():\n            form.save()\n            return redirect('articles:index')\n    else:\n        form = CustomUserChangeForm(instance=request.user)\n    context = {\n        'form': form,\n    }\n    return render(request, 'accounts/form.html', context)\n\n@login_required\ndef password_change(request):\n    if request.method == 'POST':\n        form = PasswordChangeForm(request.user, request.POST)\n        if form.is_valid():\n            form.save()\n            update_session_auth_hash(request, form.user) # keeps the session valid after the password update\n            return redirect('articles:index')\n    else:\n        form = PasswordChangeForm(request.user) # user must always be the first argument\n    context = {\n        'form': form\n    }\n    return render(request, 'accounts/form.html', context)\n\ndef profile(request, account_pk):\n    User = get_user_model()\n    user_profile = get_object_or_404(User, pk=account_pk)\n    context = {\n        'user_profile': user_profile\n    }\n    return render(request, 'accounts/profile.html', context)\n\ndef follow(request, account_pk):\n    User = get_user_model()\n    user_profile = get_object_or_404(User, pk=account_pk)\n    if user_profile != request.user:\n        if request.user in user_profile.followers.all():\n            user_profile.followers.remove(request.user)\n        else:\n            user_profile.followers.add(request.user)\n    return redirect('accounts:profile', account_pk)","repo_name":"JiminLee411/django-crud","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"12136459036","text":"from collections import deque\n\nn = int(input())\n\ncommands = deque(input() for _ in range(n))\nstack = deque()\nwhile commands:\n    cmd = commands.popleft().split()\n    op = cmd[0]\n    if op == \"PUSH\":\n        val = cmd[1]\n        stack.append(val)\n    elif op == \"POP\":\n        stack.pop()\n\nwhile stack:\n    print(stack.pop())\n","repo_name":"sanqit/text-based-browser","sub_path":"Problems/Stack manipulation/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"73461834826","text":"from datastore.models import APIUser, Receipt, Item, AbsIngredient\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.template.defaultfilters import slugify\nfrom django.utils.timezone import utc\n\n\nfrom decimal import Decimal\nimport random\nfrom datetime import date, timedelta, datetime\n\n\ncomposite_foods = {\n    'Ham Sandwich' : [0.400, 2.80, {'ham':0.1, 'butter':0.01, 'white bread': 80}],\n    'Subway sandwich with turkey': [0.600, 4.90, {'turkey':0.15, 'white bread': 80, 'cheese slices':0.2 , }],\n    'Burrito': [0.450, 5.60, {'corn':0.1, 'beef':0.3, 'red kidney beans':0.2}],\n    }\n\nstandard_pack = {\n    'butter': [0.25,0.4],\n    'almonds': [0.2],\n    'almond milk':[1,0.5],\n    'cheese slices':[0.15],\n    'chocolate': [0.1, 0.05],\n    'corn': [0.4],\n    'dry pasta': [0.75],\n    'low fat organic milk': [1, 0.8],\n    'non-free range eggs': [0.4],\n    'rice milk': [0.8],\n    'spaghetti sarbonara pre-made meal': [0.3],\n    
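# each entry appears to list the standard package sizes in kg for an ingredient;\n    # handle() below picks one of them at random via random.choice()\n    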
'eggs': [0.2]\n}\n\nstore_names = ['Migros','Coop','Denner','Lidl','Aldi', 'Alnatura']\n\nclass Command(BaseCommand):\n help = 'Generates shopping data'\n\n\n def add_arguments(self, parser):\n\n parser.add_argument(\n '--count',\n default=10,\n type=int,\n help='Number of users to generate',\n )\n\n def handle(self, *args, **options):\n count = options['count']\n composite_food_probability = 0.4\n past_days = 365\n start_time = date.today() - timedelta(days=past_days)\n all_names = AbsIngredient.objects.all().values_list('display_name', flat=True)[:]\n max_items = 5\n max_50g = 30\n\n created = 0\n users = list(APIUser.objects.filter(email='test@test.com'))\n while created < count:\n rcpt = Receipt()\n rcpt.user = random.choice(users)\n rcpt.shop = random.choice(store_names)\n rcpt.currency = 'CHF'\n rcpt.timestamp = datetime.utcnow() - timedelta(days=random.randint(0,past_days), hours=random.randint(6,18))\n rcpt.save()\n\n # Generate standard, simple foods that are all known as base ingredients\n items = random.randint(1,max_items)\n for index in range(items):\n item = Item()\n item.receipt = rcpt\n item.display_name = random.choice(all_names)\n if item.display_name.lower() in standard_pack:\n item.kg = random.choice(standard_pack[item.display_name.lower()])\n else:\n item.kg = random.randint(1,max_50g) * 0.05\n item.price = random.randint(10,30) * item.kg\n item.save()\n\n self.stdout.write('Created: %s' % rcpt)\n created += 1\n\n","repo_name":"foodprint-tracker/foodprint-tracker-backend","sub_path":"src/datastore/management/commands/generate_shopping.py","file_name":"generate_shopping.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3252637290","text":"#!/usr/bin/env python3\n\n\"\"\"A module for generating a list of condensed hostnames.\n\nA module for generating a list of condensed hostnames, where condensing is\nmoving part of the full hostname into the top-level directory (TLD).\n\nAn example is if the hostname was \"egg\": this could also be represented as\n\"e.gg\", which would form part of a larger URI.\n\"\"\"\n\nimport argparse\n\n\nclass HostnameCondenserError(Exception):\n \"\"\"The generic base exception used by the hostname_condenser module.\"\"\"\n\n\nclass TLDProcessingError(HostnameCondenserError):\n \"\"\"Raise when the list of TLDs cannot be processed.\"\"\"\n\n\ndef get_top_level_domains(file_path):\n \"\"\"Get the top level domains from a specified file.\n\n Args:\n file_path (str): The path to a file containing one top level domain per\n line and optional comments (lines starting with a \"#\").\n\n Returns:\n A list of top level domains.\n\n Raises:\n TLDProcessingError: When top level domains cannot be read from\n file_path.\n \"\"\"\n try:\n with open(file_path, \"r\") as top_level_domains_file_handle:\n top_level_domains = [line.rstrip().lower() for line in\n top_level_domains_file_handle.readlines()\n if not line.startswith(\"#\")]\n return top_level_domains\n except Exception as exc:\n exception_message = \"Could not read TLDs from file: \" + file_path\n raise TLDProcessingError(exception_message) from exc\n\n\ndef get_default_top_level_domains():\n \"\"\"Get the default top level domains from an inbuilt file.\n\n Returns:\n A list of top level domains.\n\n Raises:\n TLDProcessingError: When top level domains cannot be read from the\n inbuilt file.\n \"\"\"\n top_level_domains_file_path = r\"data\\tlds-alpha-by-domain.txt\"\n\n return 
get_top_level_domains(top_level_domains_file_path)\n\n\ndef condense_hostname(hostname, top_level_domains=None):\n \"\"\"Generate valid condensed hostname/TLD pairs from a hostname.\n\n Args:\n hostname(str): The hostname to condense.\n top_level_domains(iterable): The top-level domains to use when\n condensing the hostname or None to use the inbuilt list.\n\n Returns:\n A list of valid hostname/TLD combinations.\n \"\"\"\n if top_level_domains is None:\n top_level_domains = get_default_top_level_domains()\n\n matching_top_level_domains = {top_level_domain for\n top_level_domain in\n top_level_domains if\n hostname[1:]\n .lower()\n .endswith(top_level_domain)}\n\n condensed_hostnames = {hostname.replace(tld, \"\") + \".\" + tld\n for tld in matching_top_level_domains}\n return sorted(list(condensed_hostnames))\n\nif __name__ == \"__main__\":\n PARSER = argparse.ArgumentParser(description=\"Generate a list of \"\n \"condensed hostnames.\")\n\n PARSER.add_argument(\"hostname\",\n help=\"A hostname to condense.\")\n PARSER.add_argument(\"-tlds\",\n \"--tld_file_path\",\n default=None,\n help=\"The path to a file containing one top level \"\n \"domain per line and optional comments (lines \"\n \"starting with a \\\"#\\\").\")\n\n ARGS = PARSER.parse_args()\n\n if ARGS.tld_file_path is not None:\n TOP_LEVEL_DOMAINS = get_top_level_domains(ARGS.tld_file_path)\n\n CONDENSED_HOSTNAMES = condense_hostname(ARGS.hostname,\n TOP_LEVEL_DOMAINS)\n else:\n CONDENSED_HOSTNAMES = condense_hostname(ARGS.hostname)\n\n if CONDENSED_HOSTNAMES:\n for condensed_hostname in CONDENSED_HOSTNAMES:\n print(condensed_hostname)\n else:\n print(\"No matches found for hostname \\\"{0}\\\"\".format(ARGS.hostname))\n","repo_name":"MattJamesChampion/hostname-condenser","sub_path":"hostname_condenser.py","file_name":"hostname_condenser.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"41692912044","text":"# https://www.acmicpc.net/problem/16562\nimport sys\n\ninput = sys.stdin.readline\n\n\ndef find(x, parent):\n if x == parent[x]:\n return x\n\n parent[x] = find(parent[x], parent)\n return parent[x]\n\n\ndef union(x, y, parent):\n x, y = find(x, parent), find(y, parent)\n\n if parent[x] <= parent[y]:\n parent[y] = x\n else:\n parent[x] = y\n\n\nif __name__ == \"__main__\":\n N, M, k = map(int, input().split())\n money = [0] + list(map(int, input().split()))\n tree = list(range(N + 1))\n ans = [0 for _ in range(N + 1)]\n\n for _ in range(M):\n v, w = map(int, input().split())\n union(v, w, tree)\n\n for i in range(len(tree)):\n root = tree[i]\n if not ans[root]:\n ans[root] = money[i]\n continue\n if ans[root] > money[i]:\n ans[root] = money[i]\n continue\n\n if sum(ans) <= k:\n print(sum(ans))\n else:\n print(\"Oh no\")","repo_name":"AlmSmartDoctor/study-2023-03-algorithm-problem-solving","sub_path":"day12/assignment/이재혁/16562.py","file_name":"16562.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"11358957499","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nclass PlotScopeEvaluateLocal:\n\n def __init__(self):\n\n self.singles = pd.read_csv(\"single.csv\", index_col=\"residue number\")\n self.sums = pd.read_csv(\"sumy.csv\", index_col=\"chain\")\n\n def plot_profile(self):\n # not normalized, each value has separate row\n all_singles = self.singles.drop([\"mod_avg\", 
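# the helper columns are dropped here, presumably so they stay out of the melt below\n                                        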
\"subunit\"], axis=1)\n all_singles['template'], all_singles['type'], all_singles['subunit'] = all_singles['full_name'].str.split('#', 2).str\n all_singles = all_singles.drop([\"full_name\"], axis=1)\n all_singles = all_singles.melt(id_vars=[\"residue\", \"subunit\", \"position\", \"template_ene\", 'template', 'type'])\n all_singles.rename(columns={'value': \"absolute DOPE\", 'variable': 'model'}, inplace=True)\n\n g = sns.FacetGrid(all_singles, col='template', row='type', col_order=['6i53', '6huk', '6hup', '6huo', '6huj', '6hug'],\n row_order=['WT', 'GLY', 'LYS'], aspect=1, height=1.7125)\n g.map(sns.lineplot, \"position\", \"absolute DOPE\", \"subunit\", ci='sd')\n g.map(sns.lineplot, \"position\", \"template_ene\", \"subunit\", palette=sns.color_palette(\"pastel\", 2))\n g.set_titles(\"{col_name}\")\n\n #g.set(xlabel=\"residue\", ylabel=\"absolute DOPE\")\n g.set_axis_labels(\"residue\", \"absolute DOPE\")\n plt.tight_layout()\n\n #g.fig.get_children()[-1].set_bbox_to_anchor((1.008, 1.15, 0, 0))\n\n #g.despine(trim=True)\n g.savefig(\"profile_DOPE.png\", dpi=300)\n\n def plot_sum(self):\n\n # at this stage, each column is for one model, 4 rows for two chains 1 and 2 and DOPE absolute and relative if one receptor type\n n_sums = self.sums.iloc[:, :].div(self.sums.template_ene, axis=0)\n all_sums = pd.concat([self.sums, n_sums], axis=0)\n all_sums = all_sums.drop([\"template_ene\", \"mod_avg\"], axis=1)\n\n # as index had mixed category data, now those are as additional columns\n all_sums['template'], all_sums['type'], all_sums['subunit'] = all_sums.index.str.split('#', 2).str\n all_sums.reset_index(drop=True)\n # tidy up categories for relative values\n rows = int(all_sums.shape[0] / 2)\n all_sums['DOPE'] = ['absolute'] * rows + ['relative'] * rows\n\n # melting to have each measurement in a row, it gives 400 rows, 4 energies for 100 models if one receptor type\n all_sums = all_sums.melt(id_vars=['subunit', 'template', 'DOPE', 'type'])\n all_sums.rename(columns={'variable': 'model'}, inplace=True)\n all_sums[\"model\"] = all_sums[\"model\"].str.slice_replace(0, 6, '')\n\n # pivoting to have all categories as index and two columns of DOPE abs and rel score\n # this give most readable format\n all_sums = all_sums.pivot_table(index=['template', 'type', 'subunit', 'model'], columns='DOPE', values='value')\n print(all_sums)\n\n # reseting to put categories from index into columns, just for easier plotting with seaborn\n all_sums.reset_index(inplace=True)\n sorted = all_sums.sort_values(['type', 'subunit', 'relative', ], ascending=[True, True, False])\n sorted.to_csv('parsed_sums.csv')\n\n g1 = sns.catplot(x=\"type\", y=\"absolute\", hue=\"subunit\", col=\"template\",\n kind=\"violin\", inner=\"quart\", split=True, data=all_sums, aspect=1, height=2.0,\n col_order=['6i53', '6huk', '6hup', '6huo', '6huj', '6hug'], order=['WT', 'GLY', 'LYS'],\n legend=False, col_wrap=4, sharex=False)\n g1.set_axis_labels(\"\", \"absolute DOPE\")\n g1.set_titles(\"{col_name}\")\n g1.set(ylim=(-0.61, -0.49))\n g1.despine(trim=True)\n plt.tight_layout()\n\n g1.savefig(\"violin_absoluteDOPE.png\", dpi=150)\n\n g2 = sns.catplot(x=\"type\", y=\"relative\", hue=\"subunit\", col=\"template\",\n kind=\"violin\", inner=\"quart\", split=True, data=all_sums, aspect=1, height=2.0,\n col_order=['6i53', '6huk', '6hup', '6huo', '6huj', '6hug'], order=['WT', 'GLY', 'LYS'],\n legend=False, col_wrap=4, sharex=False)\n g2.set_axis_labels(\"\", \"relative DOPE\")\n g2.set_titles(\"{col_name}\")\n g2.set(ylim=(0.8, 1.0))\n 
g2.despine(trim=True)\n plt.tight_layout()\n\n g2.savefig(\"violin_relativeDOPE.png\", dpi=150)\n'''\n g3 = sns.catplot(x=\"model\", y=\"relative\", hue=\"subunit\", row='type', sharex='row', aspect=3,\n kind=\"bar\", data=all_sums)\n g3.set(ylim=(0.85, 1))\n g3.set_xticklabels(rotation=90)\n g3.set_xticklabels(fontsize=10)\n g3.fig.get_children()[-1].set_bbox_to_anchor((0.9, 1.05, 0, 0))\n\n g3.savefig(\"bar_relativeDOPE.png\", dpi=300)\n'''\n\nif __name__ == '__main__':\n\n sns.set_style()\n sns.set_context(\"paper\")\n\n plot = PlotScopeEvaluateLocal()\n\n plot.plot_sum()\n plot.plot_profile()\n\n plt.tight_layout()\n plt.show()\n","repo_name":"michal2am/bioscripts","sub_path":"homology_modeling/model_evaluation/sns_DOPElocal/plot_local_DOPE.py","file_name":"plot_local_DOPE.py","file_ext":"py","file_size_in_byte":5004,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"81"} +{"seq_id":"41240402935","text":"import numpy as np \nimport matplotlib.pyplot as plt \nnp.random.seed(1)\n#Prepare datasets\ndata = np.genfromtxt('/home/khanh/Desktop/Deep Learning/data.csv',delimiter=\",\")\n\n#Splitting data\nX_train = data[0:90,0]\ny_train = data[0:90,1]\n\nX_test = data[90:,0]\ny_test = data[90:,1]\n\n#Reshape Training set\nX_train = X_train.reshape((90,1))\ny_train = y_train.reshape((90,1))\n\n#X_extend\none = np.ones((X_train.shape[0],1))\nX_extend = np.concatenate((one,X_train),axis=1)\n\ndef loss_function(A):\n N = X_extend.shape[0]\n return (0.5/N) * np.linalg.norm(np.dot(X_extend,A) - y_train)**2\n\ndef derivative(A): \n N = X_extend.shape[0]\n return (1/N) * np.dot(X_extend.T,np.dot(X_extend,A) - y_train)\n\ndef numeric_derivative(A):\n eps = 1e-3\n g = np.zeros_like(A)\n for i in range(len(A)):\n A_1 = A.copy()\n A_2 = A.copy()\n A_1[i] += eps\n A_2[i] -= eps\n g[i] = (loss_function(A_1) - loss_function(A_2)) / (2 * eps)\n return g \n\ndef checking_derivative(A):\n value1 = derivative(A)\n value2 = numeric_derivative(A)\n if np.linalg.norm(value1 - value2) < 1e-5:\n return True\n else :\n return False\n\nprint(\"Checking Derivative ===>\",checking_derivative(np.random.rand(2,1).reshape(2,1))) \n\ndef Gradient_Descent(A0,learning_rate):\n A = [A0]\n for i in range(100):\n new_A = A[-1] - learning_rate * derivative(A[-1])\n if np.linalg.norm(derivative(new_A)) / len(new_A) < 1e-3:\n break\n A.append(new_A)\n return A[-1]\n\nA_init = np.array([[5],[1]])\nA = Gradient_Descent(A_init,0.01)\n\nW = np.dot(np.linalg.pinv(np.dot(X_extend.T,X_extend)),np.dot(X_extend.T,y_train))\nW_0 = W[0][0]\nW_1 = W[1][0]\nplt.scatter(data[:,0],data[:,1],c='b',s=50)\nx = np.linspace(0,100,3,endpoint = True)\ny = W_0 + W_1 * x \nplt.plot(x,y,c='y',linewidth = 5)\nplt.show()\n#print(A[0][0])\n\n#print(W)\n","repo_name":"phanvinhkhanh1997/MachineLearning","sub_path":"Linear_Regression/Linear_Regression_with_BGD.py","file_name":"Linear_Regression_with_BGD.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3767121217","text":"\"\"\"Views for error pages.\"\"\"\n\nfrom django.views.generic import TemplateView\n\n\nclass BadRequest400(TemplateView):\n \"\"\"A 400 page.\"\"\"\n\n template_name = \"frontend/400.html\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Show the 400 page.\"\"\"\n return self.render_to_response(\n self.get_context_data(**kwargs), status=400\n )\n\n\nclass PermissionDenied403(TemplateView):\n \"\"\"A 403 page.\"\"\"\n\n template_name = 
\"frontend/403.html\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Show the 403 page.\"\"\"\n return self.render_to_response(\n self.get_context_data(**kwargs), status=403\n )\n\n\nclass PageNotFound404(TemplateView):\n \"\"\"A 404 page.\"\"\"\n\n template_name = \"frontend/404.html\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Show the 404 page.\"\"\"\n return self.render_to_response(\n self.get_context_data(**kwargs), status=404\n )\n\n\nclass ServerError500(TemplateView):\n \"\"\"A 500 page.\"\"\"\n\n template_name = \"frontend/500.html\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Show the 500 page.\"\"\"\n return self.render_to_response(\n self.get_context_data(**kwargs), status=500\n )\n","repo_name":"saltant-org/saltant","sub_path":"frontend/views/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"} +{"seq_id":"40372218804","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\nADDRESS_FIELDS = (\n 'street', 'street2', 'zip', 'city', 'city_id', 'subdistrict_id', 'district_id', 'state_id', 'country_id'\n)\n\nclass Employee(models.Model):\n _name = 'mce_hr.employee'\n _description = 'Employee'\n _order = 'name'\n\n name = fields.Char(string=\"Employee Name\", required=True)\n register_id = fields.Char(string=\"Employee ID\", required=True)\n birth_date = fields.Date(string=\"Birth date\", required=True)\n joined_date = fields.Date(string=\"Joined date\", required=True)\n\n country_id = fields.Many2one('res.country', string='Country', ondelete='restrict',\n required=True)\n state_id = fields.Many2one(\"res.country.state\", string='State', ondelete='restrict',\n domain=\"[('country_id', '=?', country_id)]\", required=True)\n city_id = fields.Many2one(\"res.country.city\", string='City', ondelete='restrict',\n required=True)\n district_id = fields.Many2one(\"res.country.district\", string='District',\n ondelete='restrict', required=False)\n subdistrict_id = fields.Many2one(\"res.country.subdistrict\", string='Sub District',\n ondelete='restrict', required=False)\n\n street = fields.Char(string=\"Address Line 1\", required=True)\n street2 = fields.Char(string=\"Address Line 2\", required=False)\n zip = fields.Char(change_default=True, required=False)\n city = fields.Char(\"City Name\")\n contact_address = fields.Char(compute='_compute_contact_address', string='Complete Address')\n\n leave_ids = fields.One2many('mce_hr.leave', 'employee_id', string='Leave History',\n readonly=True, copy=True, auto_join=True, domain=[('state', '=', 'done')])\n leave_count = fields.Integer(readonly=True, store=True, compute='_compute_leave_count')\n leave_balance = fields.Integer(readonly=True, store=True, compute='_compute_leave_balance')\n\n @api.depends('leave_ids')\n def _compute_leave_count(self):\n for employee in self:\n employee.leave_count = sum(employee.leave_ids.mapped('duration'))\n\n @api.depends('leave_ids', 'joined_date')\n def _compute_leave_balance(self):\n for employee in self:\n employee.leave_balance = self.env['mce_hr.leave'].get_remaining_leave(employee)\n\n def action_leave_balance(self):\n return True\n\n @api.depends(lambda self: self._display_address_depends())\n def _compute_contact_address(self):\n for employee in self:\n employee.contact_address = employee._display_address()\n\n def _display_address_depends(self):\n # field dependencies of method _display_address()\n return self._formatting_address_fields() + [\n 
'country_id', 'state_id',\n ]\n\n @api.model\n def _formatting_address_fields(self):\n \"\"\"Returns the list of address fields usable to format addresses.\"\"\"\n return self._address_fields()\n\n @api.onchange('country_id')\n def onchange_employee_country(self):\n domain = {'state_id': [], 'city_id': [], 'district_id': [], 'subdistrict_id': []}\n if self.country_id:\n list_domain = [('country_id', '=', self.country_id.id)]\n domain = {'state_id': list_domain, 'city_id': list_domain, 'district_id': list_domain,\n 'subdistrict_id': list_domain}\n if (self.state_id and self.state_id.country_id) and (self.state_id.country_id.id != self.country_id.id):\n self.state_id = False\n if (self.city_id and self.city_id.country_id) and (self.city_id.country_id.id != self.country_id.id):\n self.city_id = False\n if (self.district_id and self.district_id.country_id) and (\n self.district_id.country_id.id != self.country_id.id):\n self.district_id = False\n if (self.subdistrict_id and self.subdistrict_id.country_id) and (\n self.subdistrict_id.country_id.id != self.country_id.id):\n self.subdistrict_id = False\n return {'domain': domain}\n\n @api.onchange('state_id')\n def onchange_employee_state(self):\n domain = {'city_id': [], 'district_id': [], 'subdistrict_id': []}\n if self.state_id:\n list_domain = [('state_id', '=', self.state_id.id)]\n domain = {'city_id': list_domain, 'district_id': list_domain, 'subdistrict_id': list_domain}\n if (self.subdistrict_id and self.subdistrict_id.state_id) and (\n self.subdistrict_id.state_id.id != self.state_id.id):\n self.subdistrict_id = False\n if (self.district_id and self.district_id.state_id) and (self.district_id.state_id.id != self.state_id.id):\n self.district_id = False\n if (self.city_id and self.city_id.state_id) and (self.city_id.state_id.id != self.state_id.id):\n self.city_id = False\n self.country_id = self.state_id.country_id\n return {'domain': domain}\n\n @api.onchange('city_id')\n def onchange_employee_city(self):\n domain = {'district_id': [], 'subdistrict_id': []}\n if self.city_id:\n list_domain = [('city_id', '=', self.city_id.id)]\n domain = {'district_id': list_domain, 'subdistrict_id': list_domain}\n if (self.district_id and self.district_id.city_id) and (self.district_id.city_id.id != self.city_id.id):\n self.district_id = False\n self.city = self.city_id.name\n self.state_id = self.city_id.state_id\n self.country_id = self.city_id.country_id\n\n return {'domain': domain}\n\n @api.onchange('district_id')\n def onchange_employee_district(self):\n domain = {'subdistrict_id': []}\n if self.district_id:\n list_domain = [('district_id', '=', self.district_id.id)]\n domain = {'subdistrict_id': list_domain}\n if (self.subdistrict_id and self.subdistrict_id.district_id) and (\n self.subdistrict_id.district_id != self.district_id):\n self.subdistrict_id = False\n self.city_id = self.district_id.city_id\n self.state_id = self.district_id.state_id\n self.country_id = self.district_id.country_id\n return {'domain': domain}\n\n @api.onchange('subdistrict_id')\n def onchange_employee_subdistrict(self):\n if self.subdistrict_id:\n self.district_id = self.subdistrict_id.district_id\n self.city_id = self.subdistrict_id.city_id\n self.state_id = self.subdistrict_id.state_id\n self.country_id = self.subdistrict_id.country_id\n\n def _display_address(self):\n address_format_txt = '%(street)s\\n%(street2)s\\n%(subdistrict_name)s, %(district_name)s\\n%(city_name)s - %(' \\\n 'state_name)s\\n%(country_name)s\\n%(zip)s'\n address_format = address_format_txt or 
self.country_id.address_format\n args = {\n 'country_code': self.country_id.code or '',\n 'country_name': self.country_id.name or '',\n 'state_code': self.state_id.code or '',\n 'state_name': self.state_id.name or '',\n 'city_name': self.city_id and self.city_id.name or self.city or '',\n 'district_name': self.district_id.name or '',\n 'subdistrict_name': self.subdistrict_id.name or ''\n }\n for field in self._address_fields():\n args[field] = getattr(self, field) or ''\n if not self.subdistrict_id or not self.district_id:\n address_format = address_format.replace(',', '')\n if not (self.city or self.city_id) or not self.state_id:\n address_format = address_format.replace('-', '')\n return address_format % args\n\n @api.model\n def _address_fields(self):\n \"\"\" Returns the list of address fields that are synced from the parent\n when the `use_parent_address` flag is set. \"\"\"\n return list(ADDRESS_FIELDS)\n","repo_name":"fananimi/mce_hrs","sub_path":"models/mce_employee.py","file_name":"mce_employee.py","file_ext":"py","file_size_in_byte":7951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"8009947841","text":"from scrabble_analytics.models import Words, WordsSet\nfrom scrabble_analytics.utils import get_score\n\nf_db = r'.\\scrabble_analytics\\ref_data\\database_liste_de_mots.txt'\n\n\nWords.objects.all().delete()\nWordsSet.objects.all().delete()\n\ninsert_word_list = []\ni = 0\nstep = 0\ntotal = 406714\n\nwith open(f_db,'r') as f:\n for line in f:\n i += 1\n data = line[:-1]\n set_data = ''.join(sorted(set([l.upper() for l in data])))\n q_ws = WordsSet.objects.filter(Wordset_name=set_data)\n if q_ws.exists():\n insert_word_list.append(Words(Word_name=data,\n Score=get_score(data),\n Word_set= q_ws.first(),\n Word_name_len=len(data)\n ))\n else:\n q_wordsSet = WordsSet(Wordset_name=set_data)\n q_wordsSet.save()\n insert_word_list.append(Words(Word_name=data,\n Score=get_score(data),\n Word_set= q_wordsSet,\n Word_name_len=len(data)\n ))\n if i >= step:\n print(str(i/total*100) + ' %')\n step += total / 20\n\nWords.objects.bulk_create(insert_word_list)\n\n\n#! To execute it:\n#! python manage.py shell\n#! 
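(presumably from the Django project root, so that the relative path below resolves)\n#! 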
exec(open(r'.\\scrabble_analytics\\ref_data\\ref_data_words_v3.py').read())\n","repo_name":"paulfaucheux/Scrabble","sub_path":"scrabble_analytics/ref_data/ref_data_words_v3.py","file_name":"ref_data_words_v3.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21271082948","text":"from random import randint\n\nif __name__ == '__main__':\n #no of cells in middle row\n for k in range(1, 30):\n mid = k\n #no of test cases\n for _ in range(10):\n print (mid)\n for i in range(1, mid):\n for j in range(i):\n print (str(randint(-50,50)) + ' ', end='')\n print ()\n for i in range(mid):\n for j in range(mid-i):\n print (str(randint(-50,50)) + ' ', end='')\n print ()\n print (0)\n \n","repo_name":"ni9elf/CompetitiveProgramming","sub_path":"UVa/11002/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"3337105885","text":"# -*- coding: utf-8 -*-\nimport eyed3\nimport csv\n#Remove logs\neyed3.log.setLevel(\"ERROR\")\n\nsongDict = {}\nwith open('songMetaData.csv','r') as csvfile:\n\treader = csv.reader(csvfile, delimiter = \",\")\n\tfor row in reader:\n\t\tname = row[1].strip()\n\t\tsongDict[name] = [row[0],name,row[2]]\n\n\ndef isMono(filename):\n\taudiofile = eyed3.load(filename)\n\treturn audiofile.info.mode == 'Mono'\n\ndef getGenre(filename):\n\ttry:\n\t\treturn songDict[filename][2]\n\texcept:\n\t\treturn \"Unknown\"\n\n\n\n\t","repo_name":"RogueTechTeam/MusicLearning","sub_path":"audioFilesTools.py","file_name":"audioFilesTools.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"72737806666","text":"from random import randint\nfrom time import sleep\nfrom operator import itemgetter\njogadas = {}\nfor c in range(1,5):\n a = randint(1, 6)\n print(f'O PLAYER {c} tirou {a} no dado')\n jogadas[f'PLAYER {c}'] = a\n sleep(0.5)\nranking = []\nranking = sorted(jogadas.items(), key=itemgetter(1), reverse=True)\nprint('=-'*30)\nfor c in range(0,4):\n sleep(0.5)\n print(f' O {ranking[c][0]} ficou em {c + 1}° com {ranking[c][1]}')","repo_name":"costagguilherme/python-desafios","sub_path":"exercicios/dicionarios/desafio91.py","file_name":"desafio91.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5353190863","text":"from sdk.api.message import Message\nfrom sdk.exceptions import CoolsmsException\n\ndef sendMsg(deg): \n api_key = \"NCSYHZ5RDHYSX6MT\"\n api_secret = \"C1QTL9MBI05LYDKFNZO1STHSGCPLUCWT\"\n params = dict()\n params['type'] = 'sms'\n params['to'] = '01086461870' #받는번호\n params['from'] = '01086461870' #보내는번호\n params['text'] = '배전반 A에서 고온이 감지되었습니다! 배전반 A를 확인해주세요! 
배전반A 온도: %.1f도'%(deg) #문자 내용\n cool = Message(api_key,api_secret)\n response =cool.send(params)\n return response \n","repo_name":"kyunha-kim/Thermal_Project","sub_path":"src/coolsms_API.py","file_name":"coolsms_API.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"34375097917","text":"import math\nimport random\nimport pyrosim\nfrom robot import ROBOT\n\nclass INDIVIDUAL:\n\tdef __init__(self):\n\t\tself.genome = random.random()*2-1\n\t\tself.fitness = 0\n\t\t\n\n\tdef Evaluate(self,pb):\n\t\tself.sim = pyrosim.Simulator(window_size = (1500,1500), play_paused=False, eval_time=500, play_blind=pb)\n\t\tself.robot=ROBOT(self.sim, self.genome)\n\t\tself.sim.start()\n\t\tself.sim.wait_to_finish()\n\t\tself.y = self.sim.get_sensor_data(sensor_id = self.robot.P4, svi=1)\n\t\tself.fitness = self.y[-1]\n\t\n\tdef Mutate(self):\n\t\tself.genome = random.gauss(self.genome, math.fabs(self.genome))\n\t\n","repo_name":"Rahavee/pyrosims","sub_path":"individual.py","file_name":"individual.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"10331261909","text":"\"\"\"\nType annotations for stepfunctions service client.\n\n[Open documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html)\n\nUsage::\n\n ```python\n import boto3\n from mypy_boto3_stepfunctions import SFNClient\n\n client: SFNClient = boto3.client(\"stepfunctions\")\n ```\n\"\"\"\nimport sys\nfrom typing import Any, Dict, List, Type, overload\n\nfrom botocore.client import BaseClient, ClientMeta\n\nfrom .literals import ExecutionStatusType, StateMachineTypeType\nfrom .paginator import (\n GetExecutionHistoryPaginator,\n ListActivitiesPaginator,\n ListExecutionsPaginator,\n ListMapRunsPaginator,\n ListStateMachinesPaginator,\n)\nfrom .type_defs import (\n CreateActivityOutputTypeDef,\n CreateStateMachineOutputTypeDef,\n DescribeActivityOutputTypeDef,\n DescribeExecutionOutputTypeDef,\n DescribeMapRunOutputTypeDef,\n DescribeStateMachineForExecutionOutputTypeDef,\n DescribeStateMachineOutputTypeDef,\n GetActivityTaskOutputTypeDef,\n GetExecutionHistoryOutputTypeDef,\n ListActivitiesOutputTypeDef,\n ListExecutionsOutputTypeDef,\n ListMapRunsOutputTypeDef,\n ListStateMachinesOutputTypeDef,\n ListTagsForResourceOutputTypeDef,\n LoggingConfigurationTypeDef,\n StartExecutionOutputTypeDef,\n StartSyncExecutionOutputTypeDef,\n StopExecutionOutputTypeDef,\n TagTypeDef,\n TracingConfigurationTypeDef,\n UpdateStateMachineOutputTypeDef,\n)\n\nif sys.version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import Literal\n\n__all__ = (\"SFNClient\",)\n\nclass BotocoreClientError(BaseException):\n MSG_TEMPLATE: str\n\n def __init__(self, error_response: Dict[str, Any], operation_name: str) -> None:\n self.response: Dict[str, Any]\n self.operation_name: str\n\nclass Exceptions:\n ActivityDoesNotExist: Type[BotocoreClientError]\n ActivityLimitExceeded: Type[BotocoreClientError]\n ActivityWorkerLimitExceeded: Type[BotocoreClientError]\n ClientError: Type[BotocoreClientError]\n ExecutionAlreadyExists: Type[BotocoreClientError]\n ExecutionDoesNotExist: Type[BotocoreClientError]\n ExecutionLimitExceeded: Type[BotocoreClientError]\n InvalidArn: Type[BotocoreClientError]\n InvalidDefinition: Type[BotocoreClientError]\n InvalidExecutionInput: Type[BotocoreClientError]\n InvalidLoggingConfiguration: 
Type[BotocoreClientError]\n InvalidName: Type[BotocoreClientError]\n InvalidOutput: Type[BotocoreClientError]\n InvalidToken: Type[BotocoreClientError]\n InvalidTracingConfiguration: Type[BotocoreClientError]\n MissingRequiredParameter: Type[BotocoreClientError]\n ResourceNotFound: Type[BotocoreClientError]\n StateMachineAlreadyExists: Type[BotocoreClientError]\n StateMachineDeleting: Type[BotocoreClientError]\n StateMachineDoesNotExist: Type[BotocoreClientError]\n StateMachineLimitExceeded: Type[BotocoreClientError]\n StateMachineTypeNotSupported: Type[BotocoreClientError]\n TaskDoesNotExist: Type[BotocoreClientError]\n TaskTimedOut: Type[BotocoreClientError]\n TooManyTags: Type[BotocoreClientError]\n ValidationException: Type[BotocoreClientError]\n\nclass SFNClient(BaseClient):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html)\n \"\"\"\n\n meta: ClientMeta\n\n @property\n def exceptions(self) -> Exceptions:\n \"\"\"\n SFNClient exceptions.\n \"\"\"\n def can_paginate(self, operation_name: str) -> bool:\n \"\"\"\n Check if an operation can be paginated.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.can_paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#can_paginate)\n \"\"\"\n def close(self) -> None:\n \"\"\"\n Closes underlying endpoint connections.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.close)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#close)\n \"\"\"\n def create_activity(\n self, *, name: str, tags: List[\"TagTypeDef\"] = None\n ) -> CreateActivityOutputTypeDef:\n \"\"\"\n Creates an activity.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.create_activity)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#create_activity)\n \"\"\"\n def create_state_machine(\n self,\n *,\n name: str,\n definition: str,\n roleArn: str,\n type: StateMachineTypeType = None,\n loggingConfiguration: \"LoggingConfigurationTypeDef\" = None,\n tags: List[\"TagTypeDef\"] = None,\n tracingConfiguration: \"TracingConfigurationTypeDef\" = None\n ) -> CreateStateMachineOutputTypeDef:\n \"\"\"\n Creates a state machine.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.create_state_machine)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#create_state_machine)\n \"\"\"\n def delete_activity(self, *, activityArn: str) -> Dict[str, Any]:\n \"\"\"\n Deletes an activity.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.delete_activity)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#delete_activity)\n \"\"\"\n def delete_state_machine(self, *, stateMachineArn: str) -> Dict[str, Any]:\n \"\"\"\n Deletes a state machine.\n\n [Show boto3 
documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.delete_state_machine)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#delete_state_machine)\n \"\"\"\n def describe_activity(self, *, activityArn: str) -> DescribeActivityOutputTypeDef:\n \"\"\"\n Describes an activity.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.describe_activity)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#describe_activity)\n \"\"\"\n def describe_execution(self, *, executionArn: str) -> DescribeExecutionOutputTypeDef:\n \"\"\"\n Provides all information about a state machine execution, such as the state\n machine associated with the execution, the execution input and output, and\n relevant execution metadata.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.describe_execution)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#describe_execution)\n \"\"\"\n def describe_map_run(self, *, mapRunArn: str) -> DescribeMapRunOutputTypeDef:\n \"\"\"\n Provides information about a Map Run's configuration, progress, and results.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.describe_map_run)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#describe_map_run)\n \"\"\"\n def describe_state_machine(self, *, stateMachineArn: str) -> DescribeStateMachineOutputTypeDef:\n \"\"\"\n Provides information about a state machine's definition, its IAM role Amazon\n Resource Name (ARN), and configuration.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.describe_state_machine)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#describe_state_machine)\n \"\"\"\n def describe_state_machine_for_execution(\n self, *, executionArn: str\n ) -> DescribeStateMachineForExecutionOutputTypeDef:\n \"\"\"\n Provides information about a state machine's definition, its execution role ARN,\n and configuration.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.describe_state_machine_for_execution)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#describe_state_machine_for_execution)\n \"\"\"\n def generate_presigned_url(\n self,\n ClientMethod: str,\n Params: Dict[str, Any] = None,\n ExpiresIn: int = 3600,\n HttpMethod: str = None,\n ) -> str:\n \"\"\"\n Generate a presigned url given a client, its method, and arguments.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.generate_presigned_url)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#generate_presigned_url)\n \"\"\"\n def get_activity_task(\n self, *, activityArn: str, workerName: str = None\n ) -> GetActivityTaskOutputTypeDef:\n \"\"\"\n Used by workers to 
retrieve a task (with the specified activity ARN) which has\n been scheduled for execution by a running state machine.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.get_activity_task)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#get_activity_task)\n \"\"\"\n def get_execution_history(\n self,\n *,\n executionArn: str,\n maxResults: int = None,\n reverseOrder: bool = None,\n nextToken: str = None,\n includeExecutionData: bool = None\n ) -> GetExecutionHistoryOutputTypeDef:\n \"\"\"\n Returns the history of the specified execution as a list of events.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.get_execution_history)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#get_execution_history)\n \"\"\"\n def list_activities(\n self, *, maxResults: int = None, nextToken: str = None\n ) -> ListActivitiesOutputTypeDef:\n \"\"\"\n Lists the existing activities.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.list_activities)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#list_activities)\n \"\"\"\n def list_executions(\n self,\n *,\n stateMachineArn: str = None,\n statusFilter: ExecutionStatusType = None,\n maxResults: int = None,\n nextToken: str = None,\n mapRunArn: str = None\n ) -> ListExecutionsOutputTypeDef:\n \"\"\"\n Lists all executions of a state machine or a Map Run.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.list_executions)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#list_executions)\n \"\"\"\n def list_map_runs(\n self, *, executionArn: str, maxResults: int = None, nextToken: str = None\n ) -> ListMapRunsOutputTypeDef:\n \"\"\"\n Lists all Map Runs that were started by a given state machine execution.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.list_map_runs)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#list_map_runs)\n \"\"\"\n def list_state_machines(\n self, *, maxResults: int = None, nextToken: str = None\n ) -> ListStateMachinesOutputTypeDef:\n \"\"\"\n Lists the existing state machines.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.list_state_machines)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#list_state_machines)\n \"\"\"\n def list_tags_for_resource(self, *, resourceArn: str) -> ListTagsForResourceOutputTypeDef:\n \"\"\"\n List tags for a given resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.list_tags_for_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#list_tags_for_resource)\n \"\"\"\n def send_task_failure(\n self, *, taskToken: str, error: str = None, cause: 
str = None\n ) -> Dict[str, Any]:\n \"\"\"\n Used by activity workers and task states using the `callback\n `__ pattern to report that the task identified\n by the `taskToken` failed.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.send_task_failure)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#send_task_failure)\n \"\"\"\n def send_task_heartbeat(self, *, taskToken: str) -> Dict[str, Any]:\n \"\"\"\n Used by activity workers and task states using the `callback\n `__ pattern to report to Step Functions that\n the task represented by the specified `taskToken` is still making progress.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.send_task_heartbeat)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#send_task_heartbeat)\n \"\"\"\n def send_task_success(self, *, taskToken: str, output: str) -> Dict[str, Any]:\n \"\"\"\n Used by activity workers and task states using the `callback\n `__ pattern to report that the task identified\n by the `taskToken` completed successfully.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.send_task_success)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#send_task_success)\n \"\"\"\n def start_execution(\n self, *, stateMachineArn: str, name: str = None, input: str = None, traceHeader: str = None\n ) -> StartExecutionOutputTypeDef:\n \"\"\"\n Starts a state machine execution.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.start_execution)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#start_execution)\n \"\"\"\n def start_sync_execution(\n self, *, stateMachineArn: str, name: str = None, input: str = None, traceHeader: str = None\n ) -> StartSyncExecutionOutputTypeDef:\n \"\"\"\n Starts a Synchronous Express state machine execution.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.start_sync_execution)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#start_sync_execution)\n \"\"\"\n def stop_execution(\n self, *, executionArn: str, error: str = None, cause: str = None\n ) -> StopExecutionOutputTypeDef:\n \"\"\"\n Stops an execution.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.stop_execution)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#stop_execution)\n \"\"\"\n def tag_resource(self, *, resourceArn: str, tags: List[\"TagTypeDef\"]) -> Dict[str, Any]:\n \"\"\"\n Add a tag to a Step Functions resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.tag_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#tag_resource)\n \"\"\"\n def untag_resource(self, *, resourceArn: str, tagKeys: 
List[str]) -> Dict[str, Any]:\n \"\"\"\n Remove a tag from a Step Functions resource See also: `AWS API Documentation\n `_\n **Request Syntax** response = client.untag_resource( resourceArn='string',\n tagKeys=[ 'string', ] ).\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.untag_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#untag_resource)\n \"\"\"\n def update_map_run(\n self,\n *,\n mapRunArn: str,\n maxConcurrency: int = None,\n toleratedFailurePercentage: float = None,\n toleratedFailureCount: int = None\n ) -> Dict[str, Any]:\n \"\"\"\n Updates an in-progress Map Run's configuration to include changes to the\n settings that control maximum concurrency and Map Run failure.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.update_map_run)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#update_map_run)\n \"\"\"\n def update_state_machine(\n self,\n *,\n stateMachineArn: str,\n definition: str = None,\n roleArn: str = None,\n loggingConfiguration: \"LoggingConfigurationTypeDef\" = None,\n tracingConfiguration: \"TracingConfigurationTypeDef\" = None\n ) -> UpdateStateMachineOutputTypeDef:\n \"\"\"\n Updates an existing state machine by modifying its `definition`, `roleArn`, or\n `loggingConfiguration`.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Client.update_state_machine)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/client.html#update_state_machine)\n \"\"\"\n @overload\n def get_paginator(\n self, operation_name: Literal[\"get_execution_history\"]\n ) -> GetExecutionHistoryPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Paginator.GetExecutionHistory)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/paginators.html#getexecutionhistorypaginator)\n \"\"\"\n @overload\n def get_paginator(self, operation_name: Literal[\"list_activities\"]) -> ListActivitiesPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Paginator.ListActivities)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/paginators.html#listactivitiespaginator)\n \"\"\"\n @overload\n def get_paginator(self, operation_name: Literal[\"list_executions\"]) -> ListExecutionsPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Paginator.ListExecutions)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/paginators.html#listexecutionspaginator)\n \"\"\"\n @overload\n def get_paginator(self, operation_name: Literal[\"list_map_runs\"]) -> ListMapRunsPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Paginator.ListMapRuns)\n [Show boto3-stubs 
documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/paginators.html#listmaprunspaginator)\n        \"\"\"\n    @overload\n    def get_paginator(\n        self, operation_name: Literal[\"list_state_machines\"]\n    ) -> ListStateMachinesPaginator:\n        \"\"\"\n        [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.121/reference/services/stepfunctions.html#SFN.Paginator.ListStateMachines)\n        [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_stepfunctions/paginators.html#liststatemachinespaginator)\n        \"\"\"\n","repo_name":"chrishollinworth/vscode-boto3-intellisense","sub_path":"typings/mypy_boto3_stepfunctions/client.pyi","file_name":"client.pyi","file_ext":"pyi","file_size_in_byte":22278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"1646949218","text":"import discord\nimport json\nfrom aiohttp import ClientSession\nfrom discord import app_commands\nfrom discord.ext import commands\nfrom udpy import AsyncUrbanClient\nfrom utils import send_embed,field\nfrom typing import Optional\n\nasync def get_udef(channel,term):\n    uClient=AsyncUrbanClient()\n    if term!=None:\n        defs=await uClient.get_definition(term)\n    else:\n        defs=await uClient.get_random_definition()\n    await uClient.session.close()\n    if len(defs)!=0:\n        fields=[field(\"Example:\",defs[0].example.replace('[','').replace(']',''),True)]\n        embed=await send_embed(channel,\"Search: \"+defs[0].word,\"**Definition:**\\n*\"+defs[0].definition.replace('[','').replace(']','')+\"*\",author=\"Urban Dictionary\",fields=fields,send=False,thumbnail_url='https://upload.wikimedia.org/wikipedia/commons/thumb/8/82/UD_logo-01.svg/220px-UD_logo-01.svg.png')\n        return embed\n    else:\n        return -1\n\nasync def get_def(channel,term):\n    url='https://api.dictionaryapi.dev/api/v2/entries/en_US/'\n    async with ClientSession() as session:\n        response=await session.get(url+term)\n        response=await response.json()\n    try:\n        if response['title']==\"No Definitions Found\":\n            return -1\n    except TypeError:\n        embeds=[]\n        for i in range(len(response)):\n            pronunciations=''\n            for pron in response[i]['phonetics']:\n                if pron['text']!=\"\":\n                    pronunciations+=\"`\"+pron['text']+\"` \"\n            fields=[]\n            try:\n                for item in response[i]['meanings']:\n                    for j in range(len(item['definitions'])):\n                        try:\n                            fields.append(field(\"Part of Speech: \"+item['partOfSpeech'].title() if item['partOfSpeech']!=None else \"Noun\",item['definitions'][j]['definition']+\"\\n\\nExample: *\"+item['definitions'][j]['example']+\"*\"))\n                        except KeyError:\n                            fields.append(field(\"Part of Speech: \"+item['partOfSpeech'].title() if item['partOfSpeech']!=None else \"Noun\",item['definitions'][j]['definition']))\n                embeds.append(await send_embed(\n                    channel,\n                    \"Defining: \"+response[i]['word'].title(),\n                    pronunciations,\n                    fields=fields,\n                    send=False\n                ))\n            except:\n                continue\n        return embeds\n    \n\nclass dicti(commands.Cog):\n    def __init__(self,client):\n        self.client=client\n    \n    @commands.command(help=\"Retrieves definition from Urban Dictionary.\")\n    async def urban(self,ctx,*,term=None):\n        msg=await ctx.send(f\"Grabbing definition for `{term}` from Urban Dictionary.\")\n        embed=await get_udef(ctx.channel,term)\n        if embed!=-1:\n            await ctx.send(embed=embed)\n        else:\n            await send_embed(ctx.channel,'',f\"Term `{term}` was not found.\",discord.Colour.red(),footer='clear')\n        await msg.delete()\n\n    @app_commands.command(name='urban',description=\"Retrieves definition from Urban Dictionary.\")\n    
@app_commands.describe(term='Term to search for, leave empty for random word.')\n    async def _urban(self,interaction: discord.Interaction,term:Optional[str]):\n        embed=await get_udef(interaction.channel,term)\n        if embed!=-1:\n            await interaction.response.send_message(embed=embed)\n        else:\n            response_embed=await send_embed(interaction.channel,'',f\"Term `{term}` was not found.\",discord.Colour.red(),footer='clear')\n            await interaction.response.send_message(embed=response_embed)\n\n    @commands.command(help=\"Retrieves Definition.\")\n    async def define(self,ctx,*,term):\n        msg=await ctx.send(f\"Grabbing definition for `{term}`\")\n        embeds=await get_def(ctx.channel,term)\n        if embeds!=-1:\n            for embed in embeds:\n                await ctx.send(embed=embed)\n        else:\n            await send_embed(ctx.channel,'',f\"Term `{term}` was not found.\",discord.Colour.red(),footer='clear')\n        await msg.delete()\n\n    @app_commands.command(name='define',description=\"Retrieves definition.\")\n    @app_commands.describe(term='Term to search for')\n    async def _define(self,interaction: discord.Interaction,term:str):\n        embeds=await get_def(interaction.channel,term)\n        if embeds!=-1:\n            await interaction.response.send_message(embeds=embeds)\n        else:\n            response_embed=await send_embed(interaction.channel,'',f\"Term `{term}` was not found.\",discord.Colour.red(),footer='clear')\n            await interaction.response.send_message(embed=response_embed)\n\nasync def setup(client):\n    await client.add_cog(dicti(client))","repo_name":"abhinavgeethan/Private-Bot","sub_path":"extensions/dicti_cog.py","file_name":"dicti_cog.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"41214950249","text":"import login as lg\nimport os\ng_ftp = None\ng_local_dir = \"/home/\"\n\n\ndef download(files):\n    global g_ftp\n    if g_ftp is None:\n        print(\"ftp closed\")\n        return\n    f_len = len(files)\n    buf_size = 1024\n    for i in range(0, f_len):\n        local_file = g_local_dir + files[i]\n        fp = open(local_file, \"wb\")\n        print(\"download %s\" % files[i])\n        data_len = 0\n\n        def cb(data):\n            nonlocal data_len\n            data_len += fp.write(data)\n            os.system(\"clear\")\n            print(\"download %s :%dB\" % (files[i], data_len))\n\n        g_ftp.retrbinary(\"RETR %s\" % files[i], cb, buf_size)\n        fp.close()\n\n\ndef upload(files):\n    global g_ftp\n    if g_ftp is None:\n        print(\"ftp closed\")\n        return\n    f_len=len(files)\n    buf_size = 1024\n    for i in range(0, f_len):\n        file_remote = files[i]\n        local_file = g_local_dir + files[i]\n        fp = open(local_file, 'rb')\n        print(\"upload %s\" % files[i])\n        g_ftp.storbinary('STOR %s' % file_remote, fp, buf_size)\n        fp.close()\n\nif __name__ == '__main__':\n    print(\"transfile\")\n    lg.login()\n    g_ftp = lg.g_ftp\n    upload([\"smb.conf\"])\n    # download([\"smb.conf\"])\n    lg.login_out()\n    print(\"finish\")\n","repo_name":"guanking/backup","sub_path":"python/ftp/transfile.py","file_name":"transfile.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"36176507440","text":"def on_button_pressed_a():\n    pins.digital_write_pin(DigitalPin.P8, 1)\ninput.on_button_pressed(Button.A, on_button_pressed_a)\n\ndef on_button_pressed_b():\n    pins.digital_write_pin(DigitalPin.P8, 0)\ninput.on_button_pressed(Button.B, on_button_pressed_b)\n\nstrip = neopixel.create(DigitalPin.P16, 4, NeoPixelMode.RGB)\n\ndef on_forever():\n    if pins.analog_read_pin(AnalogPin.P0) > 10:\n        pins.digital_write_pin(DigitalPin.P8, 0)\n        
basic.show_number(pins.analog_read_pin(AnalogPin.P0))\n        basic.pause(100)\n        strip.show_color(neopixel.colors(NeoPixelColors.GREEN))\n    else:\n        pins.digital_write_pin(DigitalPin.P8, 1)\n        basic.show_number(pins.analog_read_pin(AnalogPin.P0))\n        basic.pause(100)\n        strip.show_color(neopixel.colors(NeoPixelColors.RED))\nbasic.forever(on_forever)\n","repo_name":"Alice049/bluetoothsensor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"39605275595","text":"class Solution:\n    def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:\n        stack, idx, n = [], 0, len(pushed)\n        for target in popped:\n            while (len(stack) == 0 or stack[-1] != target) and idx < n:\n                stack.append(pushed[idx])\n                idx += 1\n            if stack[-1] != target:\n                return False\n            stack.pop()\n        return True\n","repo_name":"allenhyp/LeetCodePractice","sub_path":"946_Validate_Stack_Sequences.py","file_name":"946_Validate_Stack_Sequences.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"29333798424","text":"# Python ≥3.5 is required\nimport sys\nassert sys.version_info >= (3, 5)\n\n# Scikit-Learn ≥0.20 is required\nimport sklearn\nassert sklearn.__version__ >= \"0.20\"\n\n# Common imports\nimport numpy as np\nimport os\n\n# to make this notebook's output stable across runs\nnp.random.seed(42)\n\n# To plot pretty figures\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nmpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)\nimport numpy as np\nimport numpy.random as rnd\nnp.random.seed(42)\n\nm = 100\nX = 6 * np.random.rand(m, 1) - 3\ny = 0.7 * X**2 + X + 1 + np.random.randn(m, 1)\nplt.plot(X, y, \"b.\")\nplt.xlabel(\"$x_1$\", fontsize=18)\nplt.ylabel(\"$y$\", rotation=0, fontsize=18)\nplt.axis([-3, 3, 0, 10])\n\nplt.show()\n\nfrom sklearn.preprocessing import PolynomialFeatures\npoly_features = PolynomialFeatures(degree=2, include_bias=False)\nX_poly = poly_features.fit_transform(X)\n\nfrom sklearn.linear_model import LinearRegression\nlin_reg = LinearRegression()\nlin_reg.fit(X_poly, y)\n\nX_new=np.linspace(-3, 3, 100).reshape(100, 1)\nX_new_poly = poly_features.transform(X_new)\ny_new = lin_reg.predict(X_new_poly)\nplt.plot(X, y, \"b.\")\nplt.plot(X_new, y_new, \"r-\", linewidth=2, label=\"Predictions\")\nplt.xlabel(\"$x_1$\", fontsize=18)\nplt.ylabel(\"$y$\", rotation=0, fontsize=18)\nplt.legend(loc=\"upper left\", fontsize=14)\nplt.axis([-3, 3, 0, 10])\n\nplt.show()\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\n\nfor style, width, degree in ((\"g-\", 1, 300), (\"b--\", 2, 2), (\"r-+\", 2, 1)):\n    polybig_features = PolynomialFeatures(degree=degree, include_bias=False)\n    std_scaler = StandardScaler()\n    lin_reg = LinearRegression()\n    polynomial_regression = Pipeline([\n            (\"poly_features\", polybig_features),\n            (\"std_scaler\", std_scaler),\n            (\"lin_reg\", lin_reg),\n        ])\n    polynomial_regression.fit(X, y)\n    y_newbig = polynomial_regression.predict(X_new)\n    plt.plot(X_new, y_newbig, style, label=str(degree), linewidth=width)\n\nplt.plot(X, y, \"b.\", linewidth=3)\nplt.legend(loc=\"upper left\")\nplt.xlabel(\"$x_1$\", fontsize=18)\nplt.ylabel(\"$y$\", rotation=0, fontsize=18)\nplt.axis([-3, 3, 0, 10])\n\nplt.show()\n\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import 
train_test_split\n\ndef plot_learning_curves(model, X, y):\n    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=10)\n    train_errors, val_errors = [], []\n    for m in range(1, len(X_train)):\n        model.fit(X_train[:m], y_train[:m])\n        y_train_predict = model.predict(X_train[:m])\n        y_val_predict = model.predict(X_val)\n        train_errors.append(mean_squared_error(y_train[:m], y_train_predict))\n        val_errors.append(mean_squared_error(y_val, y_val_predict))\n\n    plt.plot(np.sqrt(train_errors), \"r-+\", linewidth=2, label=\"train\")\n    plt.plot(np.sqrt(val_errors), \"b-\", linewidth=3, label=\"val\")\n    plt.legend(loc=\"upper right\", fontsize=14)\n    plt.xlabel(\"Training set size\", fontsize=14)\n    plt.ylabel(\"RMSE\", fontsize=14)\n\nlin_reg = LinearRegression()\nplot_learning_curves(lin_reg, X, y)\nplt.axis([0, 80, 0, 3])\n\nplt.show()\n\nfrom sklearn.pipeline import Pipeline\n\npolynomial_regression = Pipeline([\n        (\"poly_features\", PolynomialFeatures(degree=10, include_bias=False)),\n        (\"lin_reg\", LinearRegression()),\n    ])\n\nplot_learning_curves(polynomial_regression, X, y)\nplt.axis([0, 80, 0, 3])\n\nplt.show()\n\n","repo_name":"Goxo1512/Lab_AI","sub_path":"Lab1/L3/LR_3_task_6.py","file_name":"LR_3_task_6.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"42314646371","text":"from views.custom_widgets_view.line_widget_view import LineWidgetView\nfrom helpers.utils import get_middle_point_of_line\nfrom models.edge_model import EdgeModel\nfrom typing import Callable\nfrom tkinter import Canvas\n\n\nclass LineWidgetPresenter(object):\n\n    def __init__(self, on_line_click: Callable, edge_model: EdgeModel, canvas: Canvas) -> None:\n        self.edge_model = edge_model\n        self.on_line_click = on_line_click\n        self.canvas = canvas\n        self.view: LineWidgetView = LineWidgetView(self.canvas, self.edge_model, self.on_line_click_presenter)\n        self.view.draw_line()\n        self.init_view_elements()\n\n    def init_view_elements(self) -> None:\n        center_line_coordinates = get_middle_point_of_line(self.edge_model.start_node, self.edge_model.end_node)\n        self.view.place_widget(center_line_coordinates)\n        self.view.bind_line_label_to_click()\n\n    def on_line_click_presenter(self) -> None:\n        self.on_line_click(self)\n","repo_name":"puskini33/GraphProject","sub_path":"presenters/custom_widgets_presenter/line_widget_presenter.py","file_name":"line_widget_presenter.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"81"}
+{"seq_id":"10987823661","text":"import matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport time\n\nimport os\nimport argparse\n\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-f', '--file_name', dest='file', help='file that the data will be extracted',\n                        default=None, type=str)\n    parser.add_argument('-p', '--path', dest='path', help='path to the file',\n                        default='./', type=str)\n    args = parser.parse_args()\n    return args\n\ndef animate(i):\n    pullData = open(path,\"r\").read()\n    data = pullData.split('\\n')\n    xar = []\n    yar = []\n    for line in data:\n        if len(line) > 1:\n            y = float(line)\n            yar.append(y)\n\n    ax1.clear()\n    ax1.plot(yar)\n\n\ndef main():\n    file_name = args.file\n    global path\n    # combine the directory and file name so the -p option is honoured\n    path = file_name if args.path == './' else os.path.join(args.path, file_name)\n\n    print(path)\n\n    fig = plt.figure()\n    global ax1\n    ax1 = 
fig.add_subplot(1,1,1)\n\n\n ani = animation.FuncAnimation(fig, animate, interval=1000)\n plt.show()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n main()","repo_name":"CaioFerreiraB/TrafficBayesianDDQN","sub_path":"PER_DDQN/plot_real_time.py","file_name":"plot_real_time.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"81"} +{"seq_id":"8397141241","text":"\"\"\"\nSimple workflow using a Parallel state with two concurrent workflows.\n\"\"\"\nfrom rhodes.states import Parallel, StateMachine, Task\n\n\ndef build() -> StateMachine:\n lookup_address = StateMachine()\n lookup_address.start_with(\n Task(\n \"LookupAddress\",\n Resource=\"arn:aws:lambda:us-east-1:123456789012:function:AddressFinder\",\n )\n ).end()\n\n lookup_phone = StateMachine()\n lookup_phone.start_with(\n Task(\n \"LookupPhone\",\n Resource=\"arn:aws:lambda:us-east-1:123456789012:function:PhoneFinder\",\n )\n ).end()\n\n parallel_run = Parallel(\"LookupCustomerInfo\")\n parallel_run.add_branch(lookup_address)\n parallel_run.add_branch(lookup_phone)\n\n workflow = StateMachine(Comment=\"Parallel Example.\")\n workflow.start_with(parallel_run).end()\n\n return workflow\n","repo_name":"mattsb42/rhodes","sub_path":"examples/src/simple_parallel.py","file_name":"simple_parallel.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"72110116105","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools import setup, find_packages\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = history_file.read()\n\nrequirements = ['zc.buildout', 'mako', ]\n\nsetup_requirements = ['pytest-runner', ]\n\ntest_requirements = ['pytest', ]\n\nsetup(\n author=\"Maksym Shalenyi\",\n author_email='maksym.shalenyi@gmail.com',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n \"Programming Language :: Python :: 2\",\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n description=\"Buildout recipe for making files out of Mako templates\",\n install_requires=requirements,\n license=\"MIT license\",\n long_description=readme + '\\n\\n' + history,\n include_package_data=True,\n keywords='buildout recipe mako template',\n name='buildout.recipe.mako_template',\n namespace_packages=['buildout', 'buildout.recipe'],\n packages=find_packages(exclude=['tests']),\n setup_requires=setup_requirements,\n test_suite='tests',\n tests_require=test_requirements,\n url='https://github.com/enkidulan/buildout.recipe.mako_template',\n version='0.1.3.dev0',\n zip_safe=False,\n entry_points={\"zc.buildout\": [\"default=buildout.recipe.mako_template:Recipe\"]},\n)\n","repo_name":"enkidulan/buildout.recipe.mako_template","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"21882595349","text":"import matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation, writers\nimport numpy as np\nimport torch\nfrom pymo.parsers 
import BVHParser\nfrom pymo.viz_tools import *\nfrom pymo.preprocessing import *\nfrom matplotlib import pyplot as plt\n\nfrom common.dataset_locomotion import load_dataset\nfrom common.quaternion import qrot_np, qmul_np, from_scaled_angle_axis_np, qeuler_np, from_xform_xy_np, to_euler_np\nfrom networks.decompressor_trainer import DecompressorTrainer\nfrom networks.projector_trainer import ProjectorTrainer\nfrom networks.stepper_trainer import StepperTrainer\nfrom networks.utils import extract_locomotion_from_y_feature_vector, COMPRESSOR_PATH, DECOMPRESSOR_PATH, STEPPER_PATH, \\\n PROJECTOR_PATH\nfrom common.locomotion_utils import Y_LEN, X_LEN\n\n\ndef render_Y(y, skeleton, fps, action):\n # Extract required components\n y_pos, y_txy, y_vel, y_ang, y_rvel, y_rang = extract_locomotion_from_y_feature_vector(y, 1)\n n_frames = y_pos.shape[1]\n\n # Convert to quat and remove batch\n y_rot = from_xform_xy_np(y_txy[0].cpu().numpy())\n y_pos = y_pos[0].cpu().numpy()\n y_rvel = y_rvel[0].cpu().numpy()\n y_rang = y_rang[0].cpu().numpy()\n\n # Integrate root displacement\n\n y_root_rot = [action['rotations'][0, 0]]\n y_root_pos = [action['positions_local'][0, 0]]\n for i in range(1, n_frames):\n y_root_pos.append(y_root_pos[-1] + qrot_np(y_root_rot[-1], y_rvel[i - 1]) / fps)\n y_root_rot.append(qmul_np(y_root_rot[-1], from_scaled_angle_axis_np(\n qrot_np(y_root_rot[-1], y_rang[i - 1]) / fps)))\n\n y_root_pos = np.concatenate([p[np.newaxis] for p in y_root_pos])\n y_pos = np.concatenate([y_root_pos[:, np.newaxis], y_pos], axis=1)\n\n render_animation(y_pos, skeleton, fps, output='interactive')\n\n\ndef generate_decompressor_animation():\n dataset = load_dataset()\n action = dataset['S1']['jog_1_d0']\n\n compressor = torch.load(COMPRESSOR_PATH)\n decompressor = torch.load(DECOMPRESSOR_PATH)\n with torch.no_grad():\n device = dataset.device()\n\n y = torch.tensor(action['Y_feature'][np.newaxis]).to(device)\n q = torch.tensor(action['Q_feature'][np.newaxis]).to(device)\n x = torch.tensor(action['input_feature'][np.newaxis]).to(device)\n\n # Pass through compressor\n z = compressor.compress(y, q)\n y = decompressor.decompress(x, z)\n render_Y(y, dataset.skeleton(), dataset.fps(), action)\n\n\ndef generate_motion_matching_animation(projector_n_frames=20, simulate_n_frames=360):\n dataset = load_dataset()\n action = dataset['S1']['jog_1_d0']\n device = dataset.device()\n\n decompressor = torch.load(DECOMPRESSOR_PATH)\n stepper = torch.load(STEPPER_PATH)\n projector = torch.load(PROJECTOR_PATH)\n\n y = torch.zeros((1, simulate_n_frames, Y_LEN), dtype=torch.float32, device=device)\n\n with torch.no_grad():\n n_projects = simulate_n_frames // projector_n_frames # number of projector callings\n x = torch.as_tensor(action['input_feature'][0:1][np.newaxis], dtype=torch.float32, device=device)\n for i in range(n_projects):\n x_z = projector.project(x)\n predicted_x_z = stepper.predict_x_z(x_z, window=projector_n_frames)\n y_out = decompressor.decompress(predicted_x_z)\n y[:, i*projector_n_frames:(i+1)*projector_n_frames] = y_out\n x = predicted_x_z[:, -2:-1, :X_LEN]\n\n render_Y(y, dataset.skeleton(), dataset.fps(), action)\n\n\ndef render_animation(data, skeleton, fps, output='interactive', bitrate=1000):\n \"\"\"\n Render or show an animation. The supported output modes are:\n -- 'interactive': display an interactive figure\n (also works on notebooks if associated with %matplotlib inline)\n -- 'html': render the animation as HTML5 video. 
Can be displayed in a notebook using HTML(...).\n -- 'filename.mp4': render and export the animation as an h264 video (requires ffmpeg).\n -- 'filename.gif': render and export the animation a gif file (requires imagemagick).\n \"\"\"\n x = 0\n y = 1\n z = 2\n radius = torch.max(skeleton.offsets()).item() * 5 # Heuristic that works well with many skeletons\n\n skeleton_parents = skeleton.parents()\n\n plt.ioff()\n fig = plt.figure(figsize=(4, 4))\n ax = fig.add_subplot(1, 1, 1, projection='3d')\n ax.view_init(elev=20., azim=30)\n\n ax.set_xlim3d([-radius/2, radius/2])\n ax.set_zlim3d([0, radius])\n ax.set_ylim3d([-radius/2, radius/2])\n ax.set_aspect('auto')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n ax.dist = 7.5\n\n lines = []\n initialized = False\n\n trajectory = data[:, 0, [0, 2]]\n avg_segment_length = np.mean(np.linalg.norm(np.diff(trajectory, axis=0), axis=1)) + 1e-3\n draw_offset = int(25/avg_segment_length)\n spline_line, = ax.plot(*trajectory.T)\n camera_pos = trajectory\n height_offset = np.min(data[:, :, 1]) # Min height\n data = data.copy()\n data[:, :, 1] -= height_offset\n\n def update(frame):\n nonlocal initialized\n ax.set_xlim3d([-radius/2 + camera_pos[frame, 0], radius/2 + camera_pos[frame, 0]])\n ax.set_ylim3d([-radius/2 + camera_pos[frame, 1], radius/2 + camera_pos[frame, 1]])\n\n positions_world = data[frame]\n for i in range(positions_world.shape[0]):\n if skeleton_parents[i] == -1:\n continue\n if not initialized:\n col = 'red' if i in skeleton.joints_right() else 'black' # As in audio cables :)\n lines.append(ax.plot([positions_world[i, x], positions_world[skeleton_parents[i], x]],\n [positions_world[i, y], positions_world[skeleton_parents[i], y]],\n [positions_world[i, z], positions_world[skeleton_parents[i], z]], zdir='y', c=col))\n else:\n lines[i-1][0].set_xdata(np.array([positions_world[i, x], positions_world[skeleton_parents[i], x]]))\n lines[i-1][0].set_ydata(np.array([positions_world[i, y], positions_world[skeleton_parents[i], y]]))\n lines[i-1][0].set_3d_properties([positions_world[i, z], positions_world[skeleton_parents[i], z]], zdir='y')\n l = max(frame-draw_offset, 0)\n r = min(frame+draw_offset, trajectory.shape[0])\n spline_line.set_xdata(trajectory[l:r, 0])\n spline_line.set_ydata(np.zeros_like(trajectory[l:r, 0]))\n spline_line.set_3d_properties(trajectory[l:r, 1], zdir='y')\n initialized = True\n if output == 'interactive' and frame == data.shape[0] - 1:\n plt.close('all')\n\n fig.tight_layout()\n anim = FuncAnimation(fig, update, frames=np.arange(0, data.shape[0]), interval=1000/fps, repeat=False)\n if output == 'interactive':\n plt.show()\n return anim\n elif output == 'html':\n return anim.to_html5_video()\n elif output.endswith('.mp4'):\n Writer = writers['ffmpeg']\n writer = Writer(fps=fps, metadata={}, bitrate=bitrate)\n anim.save(output, writer=writer)\n elif output.endswith('.gif'):\n anim.save(output, dpi=80, writer='imagemagick')\n else:\n raise ValueError('Unsupported output format (only html, .mp4, and .gif are supported)')\n plt.close()\n\n\ndef vis_skeleton(path, out_path_prefix, frames=(0, 10, 20, 30, 40, 50)):\n parser = BVHParser()\n parsed_data = parser.parse(path)\n # parsed_data.skeleton = {k: v for k, v in parsed_data.skeleton.items() if not k.endswith('_Nub')}\n # for skeleton in parsed_data.skeleton.values():\n # skeleton['children'] = [c for c in skeleton['children'] if not c.endswith('_Nub')]\n\n mp = MocapParameterizer('position')\n\n positions = mp.fit_transform([parsed_data])\n # 
joints_to_visualize = [j for j in parsed_data.skeleton.keys() if not j.startswith(\"joint_0\")]\n joints_to_visualize = parsed_data.skeleton.keys()\n\n for frame in frames:\n draw_stickfigure(positions[0], frame=frame, joints=joints_to_visualize)\n plt.savefig(out_path_prefix + str(frame))\n\n\nif __name__ == '__main__':\n # generate_decompressor_animation()\n # generate_stepper_animation()\n generate_motion_matching_animation()\n","repo_name":"3Dori/learned-motion-matching","sub_path":"visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":8184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"15827378361","text":"\"\"\"\nTrain on images split into directories. This assumes we've split\nour videos into frames and moved them to their respective folders.\n\nBased on:\nhttps://keras.io/preprocessing/image/\nand\nhttps://keras.io/applications/\n\"\"\"\nimport argparse\nimport numpy as np\nimport os\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.optimizers import SGD # Stochastic gradient descent: use 1 example for gradient descent in each iteration\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Model\nfrom keras.layers import Dense, GlobalAveragePooling2D\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping,CSVLogger\nfrom data import DataSet\nimport os.path\nimport settings\nimport function_list as ff\ncg = settings.Experiment() \n\ndata = DataSet()\nmain_folder = os.path.join(cg.oct_main_dir,'UCF101')\n\nos.makedirs(os.path.join(main_folder,'checkpoints','inception'),exist_ok=True)\nmodel_name = 'inception'\n# Helper: Save the model.\ncheckpointer = ModelCheckpoint(\n filepath=os.path.join(main_folder, 'checkpoints', model_name,model_name+'.hdf5'),\n monitor='val_loss',\n verbose=1,\n save_best_only=True)\n\n# Helper: save the log\ncsv_logger = CSVLogger(os.path.join(main_folder, 'logs', model_name + '-' + 'training-log' + '.csv'))\n\n# Helper: Stop when we stop learning.\n#early_stopper = EarlyStopping(patience=10)\n\n# Helper: TensorBoard\n#tensorboard = TensorBoard(log_dir=os.path.join('data', 'logs'))\n\ndef get_generators():\n ''' look at the tutorial about imagedatagenerator.flow_from_directory: https://medium.com/@vijayabhaskar96/tutorial-image-classification-with-keras-flow-from-directory-and-generators-95f75ebe5720'''\n train_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n horizontal_flip=True,\n rotation_range=10.,\n width_shift_range=0.2,\n height_shift_range=0.2)\n\n test_datagen = ImageDataGenerator(rescale=1./255)\n\n train_generator = train_datagen.flow_from_directory(\n os.path.join(main_folder, 'train_image'),\n target_size=(299, 299), # the size of my input images, every image will be resized to this size\n color_mode = 'rgb', # if black and white than set to \"greyscale\"\n batch_size=32,\n classes=data.classes,\n class_mode='categorical')\n\n validation_generator = test_datagen.flow_from_directory(\n os.path.join(main_folder,'test_image'),\n target_size=(299, 299),\n batch_size=32,\n color_mode = 'rgb',\n classes=data.classes,\n class_mode='categorical')\n\n return train_generator, validation_generator\n\ndef get_model(weights='imagenet'):\n # create the base pre-trained model\n base_model = InceptionV3(weights=weights, include_top=False)\n\n # add a global spatial average pooling layer\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n # let's add a fully-connected layer\n 
x = Dense(1024, activation='relu')(x)\n    # and a logistic layer\n    predictions = Dense(len(data.classes), activation='softmax')(x)\n\n    # this is the model we will train\n    model = Model(inputs=base_model.input, outputs=predictions)\n    return model\n\ndef freeze_all_but_top(model):\n    \"\"\"Used to train just the top layers of the model, which are layers we add (one fully-connected layer and one logistic layer)\"\"\"\n    # first: train only the top layers (which were randomly initialized)\n    # i.e. freeze all convolutional InceptionV3 layers\n    for layer in model.layers[:-2]:\n        layer.trainable = False\n\n    # compile the model (should be done *after* setting layers to non-trainable)\n    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])\n\n    return model\n\ndef freeze_all_but_mid_and_top(model):\n    \"\"\"After we fine-tune the dense layers, train deeper. \n    total layer number = 313\"\"\"\n    # we chose to train the top 2 inception blocks, i.e. we will freeze\n    # the first 172 layers and unfreeze the rest:\n    for layer in model.layers[:172]:\n        layer.trainable = False\n    for layer in model.layers[172:]:\n        layer.trainable = True\n\n    # we need to recompile the model for these modifications to take effect\n    # we use SGD with a low learning rate\n    model.compile(\n        optimizer=SGD(lr=0.0001, momentum=0.9),  # 0.9 is a default momentum used in SGD\n        loss='categorical_crossentropy',\n        metrics=['accuracy', 'top_k_categorical_accuracy'])\n\n    return model\n\ndef train_model(model, nb_epoch, generators, callbacks=[]):\n    train_generator, validation_generator = generators\n    hist = model.fit_generator(\n        train_generator,\n        steps_per_epoch=100,\n        validation_data=validation_generator,\n        validation_steps=10,\n        epochs=nb_epoch,\n        callbacks=callbacks)\n    \n    return model,hist\n\ndef main(weights_file):\n    model = get_model()\n    generators = get_generators()\n\n    if weights_file is None:\n        print(\"Loading network from ImageNet weights.\")\n        # Get and train the top layers.\n        model = freeze_all_but_top(model)\n        model,_ = train_model(model, 10, generators)\n    else:\n        print(\"Loading saved model: %s.\" % weights_file)\n        model.load_weights(weights_file)\n\n    # Get and train the mid layers.\n    model = freeze_all_but_mid_and_top(model)\n    model,hist = train_model(model,200,generators,[checkpointer,csv_logger])\n    \n    # # save history of training\n    # train_acc_list = np.asarray(hist.history['acc'])\n    # train_top_acc_list = np.asarray(hist.history['top_k_categorical_accuracy'])\n    # val_acc_list = np.asarray(hist.history['val_acc'])\n    # val_top_acc_list = np.asarray(hist.history['val_top_k_categorical_accuracy'])\n    # val_loss_list = np.asarray(hist.history['val_loss'])\n\n    # np.save(os.path.join(main_folder,'checkpoints','inception',model_name+'_train_acc'),train_acc_list)\n    # np.save(os.path.join(main_folder,'checkpoints','inception',model_name+'_train_top_5_acc'),train_top_acc_list)\n    # np.save(os.path.join(main_folder,'checkpoints','inception',model_name+'_val_acc'),val_acc_list)\n    # np.save(os.path.join(main_folder,'checkpoints','inception',model_name+'_val_top_5_acc'),val_top_acc_list)\n    # np.save(os.path.join(main_folder,'checkpoints','inception',model_name+'_val_loss'),val_loss_list)\n    \n\nif __name__ == '__main__':\n    weights_file = None\n    main(weights_file)\n\n    
\n","repo_name":"zhennongchen/Synthesize_infarction_movie","sub_path":"Python/train_cnn.py","file_name":"train_cnn.py","file_ext":"py","file_size_in_byte":6407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"4385560286","text":"from .regression import MultiRegressionTest\nimport numpy as np\n\n\nclass TestTokamakConstantCurrentMulti(MultiRegressionTest):\n name = \"tokamak constant current multi\"\n filename = \"multi_tokamak_cte_current\"\n equilibrium = \"constant_current_tokamak\"\n geometry = \"cylindrical\"\n number_of_runs = 24\n gridpoints = 51\n\n j0 = (2.0 * 0.2) / np.linspace(1.9, 2.1, number_of_runs)\n\n parameters = {\"k2\": -2.0, \"k3\": 0.2, \"j0\": j0, \"cte_rho0\": 1.0, \"cte_B03\": 1.0}\n\n multispectrum_settings = {\n \"xdata\": 2 * 0.2 / j0,\n \"xlim\": (1.88, 2.12),\n \"ylim\": (-1e-3, 1e6),\n \"symlog\": 1e-8,\n }\n","repo_name":"n-claes/legolas","sub_path":"tests/regression_tests/test_multi_tokamak_cte_current.py","file_name":"test_multi_tokamak_cte_current.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"81"} +{"seq_id":"31149578097","text":"import fresnel\nimport itertools\nimport numpy as np\nfrom skimage.measure import marching_cubes_lewiner\nfrom pymatgen.core.bonds import CovalentBond\n\n\ndef make_unit_cell(scene, lattice_vects, line_width=0.05, color=[0., 0., 0.]):\n vertices = np.zeros((8, 3), dtype=np.float)\n vertices[1:4, :] = lattice_vects\n vertices[4, :] = vertices[1, :] + vertices[2, :]\n vertices[5, :] = vertices[1, :] + vertices[3, :]\n vertices[6, :] = vertices[2, :] + vertices[3, :]\n vertices[7, :] = np.sum(vertices[1:4, :], axis=0)\n\n unit_cell = fresnel.geometry.Cylinder(scene, N=12)\n unit_cell.points[:] = [\n [vertices[0, :], vertices[1, :]],\n [vertices[0, :], vertices[2, :]],\n [vertices[0, :], vertices[3, :]],\n [vertices[1, :], vertices[4, :]],\n [vertices[2, :], vertices[4, :]],\n [vertices[1, :], vertices[5, :]],\n [vertices[3, :], vertices[5, :]],\n [vertices[2, :], vertices[6, :]],\n [vertices[3, :], vertices[6, :]],\n [vertices[4, :], vertices[7, :]],\n [vertices[5, :], vertices[7, :]],\n [vertices[6, :], vertices[7, :]],\n ]\n\n unit_cell.radius[:] = line_width * np.ones(12)\n\n unit_cell.material = fresnel.material.Material(\n color=fresnel.color.linear(color)\n )\n unit_cell.material.solid = 1.\n\n return unit_cell\n\n\ndef make_bonds(scene, structure, line_width=0.08, tol=0.2, color=[.1, .1, .1]):\n bonds = []\n for site0, site1 in itertools.combinations(structure, 2):\n if CovalentBond.is_bonded(site0, site1, default_bl=0.5):\n dist = site0.distance(site1)\n if np.isclose(np.linalg.norm(site0.coords - site1.coords),\n dist, rtol=5e-3):\n bonds.append([site0.coords.tolist(), site1.coords.tolist()])\n # elif draw_pbc:\n # for p in itertools.product([1, 0, -1], repeat=3):\n # vec = site1.coords + np.dot(p, structure.lattice.matrix)\n # if np.isclose(np.linalg.norm(site0.coords - vec),\n # dist, rtol=5e-3):\n # bonds.append([site0.coords.tolist(), vec.tolist()])\n # break\n # for p in itertools.product([1, 0, -1], repeat=3):\n # vec = site0.coords + np.dot(p, structure.lattice.matrix)\n # if np.isclose(np.linalg.norm(vec - site1.coords),\n # dist, rtol=5e-3):\n # bonds.append([vec.tolist(), site1.coords.tolist()])\n # break\n\n bonds_geom = fresnel.geometry.Cylinder(scene, N=len(bonds))\n bonds_geom.points[:] = bonds\n bonds_geom.radius[:] = line_width * np.ones(len(bonds))\n 
bonds_geom.material = fresnel.material.Material(\n color=fresnel.color.linear(color)\n )\n return bonds_geom\n\n\ndef make_isosurface(scene, structure, grid_data, percent_max):\n verts, faces, _, _ = marching_cubes_lewiner(\n grid_data,\n level=percent_max * np.max(grid_data)\n )\n\n verts = verts[faces].reshape((3*faces.shape[0], 3))\n verts /= grid_data.shape\n verts = np.dot(verts, structure.lattice.matrix)\n\n mesh = fresnel.geometry.Mesh(scene, vertices=verts, N=1)\n mesh.color[:] = fresnel.color.linear([1., 0., 0.])\n mesh.material.solid = 0.\n mesh.material.primitive_color_mix = 1.\n mesh.material.roughness = 0.5\n mesh.material.specular = 0.7\n mesh.material.spec_trans = 0.75\n mesh.material.metal = 0.\n\n if np.min(grid_data) < 0.:\n verts, faces, _, _ = marching_cubes_lewiner(\n grid_data,\n level=percent_max * np.min(grid_data)\n )\n\n verts = verts[faces].reshape((3*faces.shape[0], 3))\n verts /= grid_data.shape\n verts = np.dot(verts, structure.lattice.matrix)\n\n mesh_neg = fresnel.geometry.Mesh(scene, vertices=verts, N=1)\n mesh_neg.color[:] = fresnel.color.linear([0., 0., 1.])\n mesh_neg.material.solid = 0.\n mesh_neg.material.primitive_color_mix = 1.\n mesh_neg.material.roughness = 0.5\n mesh_neg.material.specular = 0.7\n mesh_neg.material.spec_trans = 0.75\n mesh_neg.material.metal = 0.\n\n return [mesh, mesh_neg]\n\n return [mesh]\n","repo_name":"mturiansky/abcv","sub_path":"abcv/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"81"} +{"seq_id":"38795972953","text":"import pandas as pd\nfrom transformers import pipeline\nimport numpy as np\nfrom PIL import Image\nimport streamlit as st \n\n@st.cache_resource\ndef load_pipeline():\n try:\n # Create the pipeline for image-to-text\n\n pipe = pipeline(\"image-to-text\", model=\"nlpconnect/vit-gpt2-image-captioning\")\n \n return pipe\n except Exception as e:\n st.error(f\"Failed to load the pipeline: {e}\")\n return None\n\nst.set_page_config(layout='wide', page_icon=\"🧑🏻‍🎨\")\n\nst.subheader(\"Artwork Description Generator\", divider='red')\n\npipe=False\nif st.button(\"Click to Load the Model!\"):\n if st.checkbox(\"Caution! 
it might break the app, you can always see the model working @huggingface\"):\n        pipe = load_pipeline()\n\n\n\n\nif pipe:\n    uploaded_file = st.file_uploader(\"Upload an Artwork\", type=[\"jpg\", \"jpeg\", \"png\"])\n\n    if uploaded_file is not None:\n\n\n        image = Image.open(uploaded_file)\n        img_arr = np.array(image)\n        img_arr = img_arr/255\n        image_place = st.empty()\n\n        image_place.image(image, use_column_width=True)\n        try:\n            captions = pipe(image)\n            image_place.image(image, use_column_width=True, caption=f\"{captions[0]['generated_text']}\")\n        except Exception as e:\n            st.error(f\"Error in generating caption: {e}\")\n","repo_name":"r0han99/neural-art-utility","sub_path":"pages/ImageToDescription.py","file_name":"ImageToDescription.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"31356934062","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 26 17:09:32 2014\n\n@author: francescocorea\n\"\"\"\n\n\"\"\"\nThis function computes the ngrams of a random sample of emails from the database and writes\none or two files with the results (bigrams.txt and/or trigrams.txt)\n\nUsage:\npython collocations --sample 0.5 --min_freq 1000 --max_ngrams 100 --word_len 3\n\"\"\"\nimport logging\n\nimport argparse\nimport math\nimport random\nimport MySQLdb as mdb\nimport specialwords as words\n\n#from ngrams import abb_dictionary\n\nlogging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)\nlogging.root.level = logging.INFO  # ipython sometimes messes up the logging setup; restore\n\nparser = argparse.ArgumentParser(description=\"Finding n-gram collocations in a sample of emails\")\nparser.add_argument(\"--sample\",help=\"Size of sample in percentage\", required=True,type=float)\nparser.add_argument(\"--min_freq\",help=\"Minimal frequency of occurrence to be considered\",required=True,type=int)\nparser.add_argument(\"--max_ngrams\",help=\"Maximal number of collocations to be found\",required=True,type=int)\nparser.add_argument(\"--word_len\",help=\"Minimal word length to be considered\",required=True,type=int)\n\n\ndef main():\n    args = parser.parse_args()\n    N = args.sample\n    freq = args.min_freq\n    n_col = args.max_ngrams\n    min_len = args.word_len\n\n    print (\"Sample Size: {0}*total\").format(N)\n    print (\"Minimum frequency: {0}\").format(freq)\n    print (\"Maximum number of collocations: {0}\").format(n_col)\n    print (\"Minimum word length: {0}\").format(min_len)\n\n    # Open the connection to the DB\n    connection = mdb.connect('localhost', 'kpmg1', 's2ds','enron')\n    cur=connection.cursor()\n\n\n    # In our case the IDs are ordered by entry. 
Otherwise you could do:\n    # cur.execute(\"SELECT COUNT(*) FROM emails;\")\n    # The last ID number gives us the number of rows of the table.\n    cur.execute(\"select id from emails order by id desc limit 1;\")\n    res = cur.fetchall()\n    size=[int(col) for row in res for col in row]\n\n\n    # We generate a random sample of the entries.\n    #random.seed(123)\n    sample=random.sample(range(size[0]),int(math.floor(size[0]*N)))\n\n    texts=[]\n\n    # We query the emails in the sample and store them in a list\n    for id in sample:\n        cur.execute(\" select text from emails where id = {0} \".format(id))\n        tmp=cur.fetchall()\n        texts.append(tmp[0][0])\n\n    # Join all the text into a string to be able to count the frequency of occurrence\n    raw=\" \".join(texts)\n\n    # Call a function written in specialwords\n    words.ngramsFinder(raw,freq, n_col,min_len)\n\n    # Close all cursors\n    connection.close()\n\n    return\n\n\nif __name__ == \"__main__\":\n    main()\n\n","repo_name":"CoreaFr/Emails_project","sub_path":"find_ngrams.py","file_name":"find_ngrams.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"}
+{"seq_id":"16497930804","text":"# Michael O'Regan\n# 25.07.2018\n# objective: to write a function to multiply 2 numbers without using * or /\n# from Q 11,13 = 143, 5, 123 = 615\n\ndef sumultiply(x, y):\n    total = 0 \n    # total will become the answer\n    for i in range(y):\n        # we want the addition of x to be done y times\n        total = total + x\n    return total \n    \nprint(sumultiply(10,10))\nprint(sumultiply(11,13))\nprint(sumultiply(5,123))","repo_name":"MichaelORegan/Spring-2018-Problems","sub_path":"p1sumultiply.py","file_name":"p1sumultiply.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
+{"seq_id":"13608539939","text":"class Constructor:\n    def __init__(self, window, parent):\n        self.window = window\n        self.parent = parent\n        self.fulltext = ''  # will be appended to, constructing the code object into one string\n        self.commandtext = ''\n        self.importtext = '\\nimport '.join(self.parent.dependencies)\n        self.basetext = '''\n\nclass MainWindow:\n    def __init__(self):\n        self.window = Tk()\n        self.window.title('Tkinter Editor')\n        self.scrW = 350\n        self.scrH = 400\n        self.window['bg'] = 'black'\n        self.window['width'] = self.scrW\n        self.window['height'] = self.scrH\n        self.window.geometry(f'{self.window[\"width\"]}x{self.window[\"height\"]}')\n        self.frames = []  # stores frames that hold widgets\n        self.widgets = []\n\n'''  # basetext acts as the starting point for the program to build on\n        self.runtext = '''\\n\\nif __name__ == '__main__':\n    root = MainWindow()\n    root.appendWidgets()\n    root.run()\n'''\n        self.runfunc = '''\\n    def run(self):\n        self.window.mainloop()\n'''\n\n    def appendWidgets(self):\n        widgetText = '    def appendWidgets(self):\\n'\n        for widget in self.parent.widgets:\n            _, frameArgs = formatArgs(widget[0])\n            widgetType, widgetArgs = formatArgs(widget[1])\n            widgetText += f'        self.frames.append(Frame(self.window, {frameArgs}))\\n'\n            widgetText += f'        self.frames[-1].place(x={widget[0].winfo_x()}, y={widget[0].winfo_y()})\\n'\n            widgetText += f'        self.frames[-1].pack_propagate(0)\\n'\n            widgetText += f'        self.widgets.append({widgetType}(self.frames[-1], {widgetArgs}))\\n'\n            widgetText += f'        self.widgets[-1].pack()\\n'\n        return widgetText\n\n    def build(self):  # construct the program string\n        self.fulltext += self.importtext\n        self.fulltext += self.basetext\n        
self.fulltext += self.appendWidgets()\n self.fulltext += self.runfunc\n self.fulltext += self.runtext\n print(self.fulltext)\n\n\ndef formatArgs(widget): # format non-default args into input # pretty smart lol\n classtype = widget.__class__\n normdict = dict(classtype())\n compdict = dict(widget)\n print('dict')\n print(normdict)\n print(compdict)\n keys = [i for i in normdict if normdict[i] != compdict[i] and i != 'background'] # last one > stopping dupe\n values = [compdict[i] for i in keys]\n args = ', '.join([f'{i}=\"{j}\"' if i != 'command' else f'{i}={j}' for (i, j) in zip(keys, values)])\n # arguments for widgets\n print(keys)\n print(values)\n print(args)\n return classtype.__name__, args\n","repo_name":"Cmd858/TkinterEditor","sub_path":"Constructor.py","file_name":"Constructor.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"13199487860","text":"import logging\nimport sys\nfrom datetime import datetime\n\nfrom medusa.storage import Storage, format_bytes_str\n\n\nTIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S'\n\n\ndef status(config, backup_name):\n with Storage(config=config.storage) as storage:\n\n try:\n cluster_backup = storage.get_cluster_backup(backup_name)\n except KeyError:\n logging.error('No such backup')\n sys.exit(1)\n\n if cluster_backup.is_complete():\n print('{.name}'.format(cluster_backup))\n else:\n print('{.name} [Incomplete!]'.format(cluster_backup))\n\n started = datetime.fromtimestamp(cluster_backup.started).strftime(TIMESTAMP_FORMAT)\n if cluster_backup.finished is None:\n print('- Started: {}, '\n 'Finished: never'.format(started))\n else:\n finished = datetime.fromtimestamp(cluster_backup.finished).strftime(TIMESTAMP_FORMAT)\n print('- Started: {}, '\n 'Finished: {}'.format(started, finished))\n\n complete_nodes = cluster_backup.complete_nodes()\n incomplete_nodes = cluster_backup.incomplete_nodes()\n missing_nodes = cluster_backup.missing_nodes()\n print('- {0} nodes completed, {1} nodes incomplete, {2} nodes missing'.format(\n len(complete_nodes), len(incomplete_nodes), len(missing_nodes)))\n\n if len(incomplete_nodes) > 0:\n print('- Incomplete nodes:')\n for node_backup in incomplete_nodes:\n print(' {}'.format(node_backup.fqdn))\n\n if len(missing_nodes) > 0:\n print('- Missing nodes:')\n for fqdn in missing_nodes:\n print(' {}'.format(fqdn))\n\n print('- {} files, {}'.format(\n cluster_backup.num_objects(),\n format_bytes_str(cluster_backup.size())\n ))\n","repo_name":"thelastpickle/cassandra-medusa","sub_path":"medusa/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":243,"dataset":"github-code","pt":"81"} +{"seq_id":"38821202638","text":"import sys\nimport argparse\nfrom collections import namedtuple\nfrom functools import partial\nimport heapq\n\n'''----------------------------------------------------------------------------\n\nSTATES\n\n----------------------------------------------------------------------------'''\n\nPoint = namedtuple('Point', ['x', 'y'])\n\nclass Node:\n def __init__(self, state, blank, opname, parent, g):\n self.state = state\n self.blank = blank\n self.opname = opname\n self.parent = parent\n self.g = g\n self.f = self.g + self.heuristic()\n\n def __eq__(self, other):\n return self.state == other.state\n\n def __hash__(self):\n return hash(self.state)\n\n def goal(self):\n GS = (\n 1, 2, 3, 4,\n 5, 6, 7, 8,\n 9, 10, 11, 12,\n 13, 14, 15, 0)\n 
return self.state == GS\n\n def heuristic(self):\n GOAL_COORDINATES = (\n Point(3,3), Point(0,0), Point(1,0), Point(2,0),\n Point(3,0), Point(0,1), Point(1,1), Point(2,1),\n Point(3,1), Point(0,2), Point(1,2), Point(2,2),\n Point(3,2), Point(0,3), Point(1,3), Point(2,3)) \n to_coords = lambda i: (i % 4, i // 4)\n h = 0\n for i in range(16):\n if (self.state[i] == 0): continue\n xc,yc = to_coords(i)\n xg,yg = GOAL_COORDINATES[self.state[i]]\n dx = abs(xg - xc)\n dy = abs(yg - yc)\n m = dx + dy\n if (m == 0): continue\n d = 0 if (abs(dx - dy) == 0) else abs(dx - dy) - 1\n md = 0 if (m - d == 0) else m - d - 1\n h += 1 + 5 * d + 3 * md\n return h\n\n\n'''----------------------------------------------------------------------------\n\nOPERATORS:\n\n----------------------------------------------------------------------------'''\n\ndef move(node, name, f):\n to_index = lambda point : point.x + 4 * point.y\n x, y = f(node.blank)\n if (x < 0 or x > 3 or y < 0 or y > 3):\n return None\n new_blank = Point(x, y)\n oldindex = to_index(node.blank)\n newindex = to_index(new_blank)\n state = list(node.state)\n state[oldindex], state[newindex] = state[newindex], state[oldindex]\n state = tuple(state)\n return Node(state, new_blank, name, node, node.g+1)\n\nOPERATORS = (\n partial(move, name = 'left', f = lambda p : (p.x-1, p.y)),\n partial(move, name = 'right',f = lambda p : (p.x+1, p.y)),\n partial(move, name = 'up', f = lambda p : (p.x, p.y-1)),\n partial(move, name = 'down', f = lambda p : (p.x, p.y+1)))\n\n\n'''----------------------------------------------------------------------------\n\npriorityq\n\n----------------------------------------------------------------------------'''\n\nclass priorityq:\n def __init__(self):\n self.pq = []\n self.items = {}\n self.counter = 0\n\n def enqueue(self, obj, priority):\n item = [priority, self.counter, obj]\n self.items[obj] = item\n heapq.heappush(self.pq, item)\n self.counter += 1\n\n def update_key(self, obj, priority):\n old_item = self.items.pop(obj)\n old_item[2] = None\n self.enqueue(obj, priority)\n\n def dequeue(self):\n while (self.pq):\n *_, item = heapq.heappop(self.pq)\n if (item):\n del self.items[item]\n return item\n raise KeyError('priorityq Underflow')\n\n def __contains__(self, key):\n return key in self.items\n\n def __len__(self):\n return len(self.items)\n\n def get(self, key):\n *_, item = self.items[key]\n return item\n\n def empty(self):\n return not self.pq\n\n\n'''----------------------------------------------------------------------------\n\nprint_state(node)\nsolution(node)\nget_initial_state()\n\n-----------------------------------------------------------------------------'''\n\ndef print_state(node):\n state = ['[]' if (x == 0) else x for x in node.state]\n for i in range(0,16,4):\n print('\\t{:2} {:2} {:2} {:2}'.format(*(state[i:i+4])))\n print()\n\ndef solution(node):\n solution = []\n while (node):\n solution.append(node)\n node = node.parent\n print('Solution({}):'.format(len(solution)-1))\n i = 0\n for node in reversed(solution):\n print('{}.\\t{}'.format(i, node.opname))\n print_state(node)\n i += 1\n\ndef get_initial_state():\n #prompt = 'Enter puzzle as a sequence of tile numbers (0 for blank): '\n #ins = tuple([int(x) for x in input(prompt).split() if x.isdigit()])\n #i = 0\n #while (i < len(ins) and ins[i] != 0):\n # i += 1\n #p = Point(i%4, i//4)\n #return Node(ins, p, 'start', None, 0)\n\n parser = argparse.ArgumentParser(description='15 puzzle solver: '\n 'enter tile numbers from top-left to bottom-right, with 0 for 
space')\n parser.add_argument('tiles', metavar='N', type=int, nargs=16,\n help='0 <= N <= 15')\n tiles = parser.parse_args().tiles\n encountered = [False for i in range(16)]\n for tile in tiles:\n if tile < 0 or tile > 15:\n parser.error('invalid tile')\n if encountered[tile]:\n parser.error('duplicate tiles')\n encountered[tile] = True\n i = 0\n while (i < len(tiles) and tiles[i] != 0):\n i += 1\n p = Point(i%4, i//4)\n return Node(tuple(tiles), p, 'start', None, 0)\n\n\n'''----------------------------------------------------------------------------\n\nA* search\n\n-----------------------------------------------------------------------------'''\n\nstart = get_initial_state()\nclosed = set()\nopenq = priorityq()\nopenq.enqueue(start, start.f)\nwhile (not openq.empty()):\n node = openq.dequeue()\n if (node.goal()):\n closed_nodes = len(closed)\n open_nodes = len(openq)\n total_nodes = closed_nodes + open_nodes\n print('Nodes generated: {} ({} open, {} closed)'.format(\n total_nodes, open_nodes, closed_nodes))\n solution(node)\n sys.exit(0)\n\n closed.add(node)\n for op in OPERATORS:\n child = op(node)\n if (not child): continue\n if (child not in closed and child not in openq):\n openq.enqueue(child, child.f)\n elif (child in openq and child.f < openq.get(child).f):\n openq.update_key(child, child.f)\n\nprint('No solution found')\n","repo_name":"br7552/fifteen_solver","sub_path":"fifteen.py","file_name":"fifteen.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"23311881118","text":"import pytest\nimport glob\n\npytestmark = pytest.mark.parametrize(\"ebnf_path\", glob.glob(\"tests/resources/valid/*\"))\n\ndef test_example_ast(ebnf_path):\n from parse_ebnf import AST\n from io import StringIO\n\n ast1 = AST()\n ast2 = AST()\n\n file = open(ebnf_path, 'r')\n\n ast1.parse(file.read);\n with StringIO('rule = term | another term;') as f:\n ast2.parse(f.read)\n\n print(repr(ast1.root.children[0]))\n\n file.close()\n\n","repo_name":"ChaosInventor/parse-ebnf","sub_path":"tests/test_ast_example.py","file_name":"test_ast_example.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"81"} +{"seq_id":"26399016577","text":"from functools import wraps\nfrom typing import List, Mapping, Union, Any\n\nfrom django.http import QueryDict, HttpResponseNotAllowed\nfrom django.utils.decorators import available_attrs\n\nfrom util.strutils import TemplateString\n\ndef make_querydict_from_request(func):\n @wraps(func, assigned=available_attrs(func))\n def inner(request, *args, **kwargs):\n if request.method not in ['GET', 'POST']:\n read_request_body_to(request, request.method)\n return func(request, *args, **kwargs)\n return inner\n\n\ndef require_http_methods_plus(method_types: List[str], required_args: Union[Mapping[str, List[str]], List[str]]=None,\n method_props: Mapping[str, List[str]]=None):\n \"\"\"\n Enhances the possible functionality of the standard require_http_methods function from django.\n\n If just method_types is provided then this decorator acts exactly the same way as the standard function.\n\n If required_args is provided as a list then whatever method type is provided, the view arguments are verified to\n ensure all arguments in required_args appear as a key in the view arguments.\n\n If required_args is provided as a dictionary, then if the method type appears in that dictionary as a key it must\n be mapped 
to a list of argument names. This list of argument names is then verified the same way as above.\n\n    If method_props is provided it must be a dictionary. If the method_type appears in that dictionary then it must\n    be mapped to a list of property names. This list of property names is then checked against the properties in the\n    QueryDict for our request method. If any property is missing, an error occurs.\n\n    :param method_types: List of Request Method names allowed to pass through\n    :param required_args: List of required arguments for any request method or Map from Request Method names to required\n                          arguments\n    :param method_props: Map from Request Method names to required method properties\n    \"\"\"\n\n    if required_args is None:\n        required_args = {}\n    if method_props is None:\n        method_props = {}\n\n    invalid_type = TemplateString(\"{method} is not one of the allowed request methods ({types})!\")\n    missing_props = TemplateString(\"Request of type {method} must have following properties: {props}\")\n    missing_args = TemplateString(\"Request missing arguments. Has {args}, missing {missing}\")\n\n    def decorator(func):\n        @wraps(func, assigned=available_attrs(func))\n        def inner(request, *args, **kwargs):\n\n            # Verify method is at least a valid method type\n            if request.method not in method_types:\n                print(\"METHOD NOT ALLOWED\", invalid_type(method=request.method, types=method_types))\n                return HttpResponseNotAllowed(method_types)\n\n            # Check that all required properties are in QueryDict, if any are required\n            method_dict = getattr(request, request.method)\n            if request.method in method_props and len([x for x in method_props[request.method] if x not in method_dict]) > 0:\n                print(\"METHOD NOT ALLOWED\", missing_props(method=request.method, props=method_props[request.method]))\n                return HttpResponseNotAllowed(\n                    method_types,\n                    reason=missing_props(method=request.method, props=method_props[request.method])\n                )\n\n            # Sanitize, either required_args is a list or map, normalize to a list\n            required_args_list = required_args\n            if isinstance(required_args, dict):\n                required_args_list = required_args[request.method] if request.method in required_args else []\n\n            # Check that all required arguments exist in the view arguments\n            missing_args_list = [x for x in required_args_list if x not in kwargs]\n            if len(missing_args_list) > 0:\n                present_args = [x for x in required_args_list if x not in missing_args_list]\n                print(\"METHOD NOT ALLOWED\", missing_args(args=present_args, missing=missing_args_list))\n                return HttpResponseNotAllowed(\n                    method_types,\n                    reason=missing_args(args=present_args, missing=missing_args_list)\n                )\n\n            return func(request, *args, **kwargs)\n\n        return inner\n\n    return decorator\n\n\ndef ajax_success(**kwargs) -> dict:\n    kwargs.update({'success': True})\n    return kwargs\n\n\ndef ajax_failure(**kwargs) -> dict:\n    kwargs.update({'success': False})\n    return kwargs\n\n\ndef is_safe_request(method: str) -> bool:\n    while hasattr(method, 'method'):\n        method = method.method\n    return method in ('GET', 'HEAD')\n\n\ndef read_request_body_to_post(request) -> None:\n    \"\"\"\n    Takes a request and stores the request body into a POST QueryDict. 
By default only the GET QueryDict exists.\n\n    :param request: Request object\n    \"\"\"\n    request.POST = QueryDict(request.body)\n\n\ndef read_request_body_to(request, method: str='POST') -> None:\n    \"\"\"\n    Takes a request method (or really any string) and a request object and stores the request body into a QueryDict\n    which is then stored in the request at a property named after the method provided.\n\n    read_request_body_to(req) -> req.POST now exists\n    read_request_body_to(req, \"HEAD\") -> req.HEAD now exists\n    read_request_body_to(req, \"delete\") -> req.DELETE now exists\n\n    :param request:\n    :param method:\n    :return:\n    \"\"\"\n    setattr(request, method.upper(), QueryDict(request.body))\n\n","repo_name":"glossawy/FRS","sub_path":"util/viewutils.py","file_name":"viewutils.py","file_ext":"py","file_size_in_byte":5464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"24659654425","text":"import pandas as pd\nimport pickle\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import Ridge\n\nfrom sklearn.metrics import mean_squared_error\n\ndf = pd.read_parquet('./dataset/green_tripdata_2022-10.parquet')\n\ndf['duration'] = df.lpep_dropoff_datetime - df.lpep_pickup_datetime\ndf.duration = df.duration.apply(lambda td: td.total_seconds() / 60)\n\ndf = df[(df.duration >= 1) & (df.duration <= 60)]\n\ncategorical = ['PULocationID', 'DOLocationID']\nnumerical = ['trip_distance']\n\ndf[categorical] = df[categorical].astype(str)\n\ntrain_dicts = df[categorical + numerical].to_dict(orient='records')\n\ndv = DictVectorizer()\nX_train = dv.fit_transform(train_dicts)\n\ntarget = 'duration'\ny_train = df[target].values\n\nlr = LinearRegression()\nlr.fit(X_train, y_train)\n\ny_pred = lr.predict(X_train)\n\nprint(mean_squared_error(y_train, y_pred, squared=False))\n\ndef read_dataframe(filename):\n    if filename.endswith('.csv'):\n        df = pd.read_csv(filename)\n\n        df.lpep_dropoff_datetime = pd.to_datetime(df.lpep_dropoff_datetime)\n        df.lpep_pickup_datetime = pd.to_datetime(df.lpep_pickup_datetime)\n    elif filename.endswith('.parquet'):\n        df = pd.read_parquet(filename)\n\n    df['duration'] = df.lpep_dropoff_datetime - df.lpep_pickup_datetime\n    df.duration = df.duration.apply(lambda td: td.total_seconds() / 60)\n\n    df = df[(df.duration >= 1) & (df.duration <= 60)]\n\n    categorical = ['PULocationID', 'DOLocationID']\n    df[categorical] = df[categorical].astype(str)\n    \n    return df\n\ndf_train = read_dataframe('./dataset/green_tripdata_2022-11.parquet')\ndf_val = read_dataframe('./dataset/green_tripdata_2022-12.parquet')\n\ndf_train['PU_DO'] = df_train['PULocationID'] + '_' + df_train['DOLocationID']\ndf_val['PU_DO'] = df_val['PULocationID'] + '_' + df_val['DOLocationID']\n\ncategorical = ['PU_DO'] #'PULocationID', 'DOLocationID']\nnumerical = ['trip_distance']\n\ndv = DictVectorizer()\n\ntrain_dicts = df_train[categorical + numerical].to_dict(orient='records')\nX_train = dv.fit_transform(train_dicts)\n\nval_dicts = df_val[categorical + numerical].to_dict(orient='records')\nX_val = dv.transform(val_dicts)\n\ntarget = 'duration'\ny_train = df_train[target].values\ny_val = df_val[target].values\n\nlr = LinearRegression()\nlr.fit(X_train, y_train)\n\ny_pred = lr.predict(X_val)\n\nprint(mean_squared_error(y_val, y_pred, squared=False))\n\nwith open('models/lin_reg.bin', 'wb') as f_out:\n    pickle.dump((dv, lr), f_out)\n\nlr = Lasso(0.01)\nlr.fit(X_train, 
y_train)\n\ny_pred = lr.predict(X_val)\n\nprint(mean_squared_error(y_val, y_pred, squared=False))","repo_name":"CrookedNoob/mlops-practice","sub_path":"1_baseline_setup/duration_prediction.py","file_name":"duration_prediction.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"} +{"seq_id":"5143938539","text":"from odapui import util\nfrom typing import List\nfrom ruamel.yaml import YAML\nimport logging\nimport os\nimport typing\nimport arrow\nimport copy\nimport pickle\n\n\nyaml = YAML()\nyaml.preserve_quotes = True\nyaml.width = 40096\nyaml.indent(mapping=4, sequence=6, offset=4)\n\nreader = None\nREAD_FROM_PICKLE = False\n\n\ndef read_metadata_yaml(table_name: str) -> dict:\n parent_path = util.metadata_path()\n if parent_path[-1] == \"/\":\n parent_path = parent_path[:-1]\n\n if READ_FROM_PICKLE:\n with open(f\"{parent_path}/{table_name}.pkl\", \"rb\") as f:\n feed = pickle.load(f)\n else:\n with open(f\"{parent_path}/yaml/{table_name}.yaml\") as f:\n feed = yaml.load(f)\n return feed\n\n\nclass Table:\n def __init__(\n self, table_name: str, keys: List[str],\n no_quote_fields=[], double_quote_fields=[]\n ):\n self.table_name = table_name\n self.keys = keys\n self.from_yaml = None\n self.entries = None\n self.no_quote_fields = no_quote_fields\n self.double_quote_fields = double_quote_fields\n self.refresh()\n\n def refresh(self):\n self.from_yaml, self.entries = self.read_from_yaml(\n self.table_name, self.keys\n )\n self.build_columns()\n\n def read_from_yaml(self, table_name: str, keys: List):\n logging.info(\"loading yaml... \" + table_name)\n data_records = []\n\n from_yaml = read_metadata_yaml(table_name)\n for r in from_yaml[\"data_object\"][\"data_records\"]:\n row = r[\"row\"]\n data_records.append(row)\n return from_yaml, data_records\n\n def build_columns(self):\n key_name = \"ZZ_\" + self.table_name.upper() + \"_ID\"\n # by default build composite key\n for r in self.entries:\n r[key_name] = {\n k: r[k]\n for k in self.keys\n }\n\n def add_entry(self, entry):\n ex, _ = self.filter_entries_multi(\n [\n [key, entry[key]]\n for key in self.keys\n ]\n )\n if ex:\n raise ValueError(f\"cannot add {entry}. same key exists\")\n self.entries.append(entry)\n self.build_columns()\n\n def add_entries(self, entries):\n for to_add in entries:\n ex, _ = self.filter_entries_multi(\n [\n [key, to_add[key]]\n for key in self.keys\n ]\n )\n if ex:\n print(ex)\n raise ValueError(f\"cannot add {to_add}. 
same key exists\")\n self.entries.extend(entries)\n self.build_columns()\n\n def delete_entries(self, conds: typing.List[typing.List]):\n met, not_met = self.filter_entries_multi(conds)\n self.entries = not_met\n return met\n\n def delete_entries_any(self, filter_by, filter_vals):\n met, not_met = self.filter_entries_any(filter_by, filter_vals)\n self.entries = not_met\n\n def filter_entries_any(self, filter_by, filter_vals, order_by=None):\n met, not_met = [], []\n for x in self.entries:\n if x[filter_by] in filter_vals:\n met.append(x)\n else:\n not_met.append(x)\n return copy.deepcopy(met), copy.deepcopy(not_met)\n\n def filter_entries(self, filter_by, filter_val, order_by=None):\n met, not_met = self.filter_entries_multi(\n [\n [filter_by, filter_val]\n ],\n order_by\n )\n return copy.deepcopy(met)\n\n def filter_entries_multi(\n self, conds: typing.List[typing.List], order_by=None\n ):\n met = []\n not_met = []\n for x in self.entries:\n keep = True\n for cond in conds:\n if x[cond[0]] != cond[1]:\n keep = False\n break\n if keep:\n met.append(x)\n else:\n not_met.append(x)\n if order_by:\n met.sort(key=lambda x: x[order_by])\n return copy.deepcopy(met), copy.deepcopy(not_met)\n\n \"\"\"\n name: \"FEED_ID\"\n vals: [[\"FEED_ATTR_ID\", \"FEED_NAME\"], [\"FEED_ATTR_ID\", \"SOURCE_SYSTEM\"]]\n result: x[\"FEED_ID\"] = {\n \"FEED_NAME\": x[\"FEED_ATTR_ID\"][\"FEED_NAME\"],\n \"SOURCE_SYSTEM\": x[\"FEED_ATTR_ID\"][\"SOURCE_SYSTEM\"],\n }\n \"\"\"\n def add_element(self, name, vals: typing.List[typing.List[str]]):\n print(f\"adding element {name}; {vals}\")\n for ix, e in enumerate(self.entries):\n val = {}\n for strs in vals:\n sub_key = strs[-1]\n sub_val = e[strs[0]]\n for str in strs[1:]:\n sub_val = sub_val[str]\n val[sub_key] = sub_val\n self.entries[ix][name] = copy.deepcopy(val)\n\n def dump(self):\n res = []\n for e in self.entries:\n temp_e = copy.deepcopy(e)\n # remove ZZ_ keys\n for k in e.keys():\n if \"ZZ_\" in k:\n del temp_e[k]\n temp_e = util.format_entry(\n temp_e, self.no_quote_fields, self.double_quote_fields\n )\n res.append(temp_e)\n\n filepath = os.path.join(\n util.metadata_path(),\n f\"temp_{self.table_name}.yaml\",\n )\n self.from_yaml['data_object']['data_records'] = [\n {\"row\": e}\n for e in res\n ]\n with open(filepath, 'w') as f:\n yaml.dump(self.from_yaml, f)\n\n\nclass Feeds(Table):\n def build_columns(self):\n Table.build_columns(self)\n self.add_element(\n \"ZZ_FEED_ID2\",\n [\n [\"FEED_NAME\"], [\"DB_NAME\"]\n ]\n )\n\n\nclass FeedAttributes(Table):\n def build_columns(self):\n Table.build_columns(self)\n self.add_element(\n \"ZZ_FEED_ATTRIBUTE_ID_FLAT\",\n [\n [\"FEED_ID\", \"SOURCE_SYSTEM\"],\n [\"FEED_ID\", \"FEED_NAME\"],\n [\"ATTRIBUTE_NAME\"]\n ]\n )\n\n\nclass DataObjectAttributes(Table):\n def build_columns(self):\n Table.build_columns(self)\n self.add_element(\n \"ZZ_DATA_OBJECT_ATTRIBUTE_ID_FLAT\",\n [\n [\"DATA_OBJECT_ID\", \"DATA_OBJECT_NAME\"],\n [\"DATA_OBJECT_ID\", \"TGT_DB_NAME\"],\n [\"ATTRIBUTE_NAME\"]\n ]\n )\n\n\nclass FeedAttrDataObjectAttrs(Table):\n def build_columns(self):\n Table.build_columns(self)\n self.add_element(\n \"ZZ_FEED_ID\",\n [\n [\"FEED_ATTRIBUTE_ID\", \"FEED_NAME\"],\n [\"FEED_ATTRIBUTE_ID\", \"SOURCE_SYSTEM\"],\n ]\n )\n self.add_element(\n \"ZZ_DATA_OBJECT_ID\",\n [\n [\"DATA_OBJECT_ATTRIBUTE_ID\", \"DATA_OBJECT_NAME\"],\n [\"DATA_OBJECT_ATTRIBUTE_ID\", \"TGT_DB_NAME\"],\n ]\n )\n\n\nclass Reader:\n __instance__ = None\n\n def __init__(self):\n if Reader.__instance__ is None:\n self.feeds = None\n 
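# editor's note (added): the attributes below are populated by refresh();\n            # Reader is a hand-rolled singleton kept in Reader.__instance__.\n            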
self.data_objects = None\n            self.feed_attributes = None\n            self.data_object_attributes = None\n            self.feed_attr_data_object_attr = None\n            self.feed_data_objects = None\n            self.refresh()\n            Reader.__instance__ = self\n        else:\n            print(\"You cannot create another Singleton instance\")\n\n    def __getattr__(self, name):\n        return getattr(Reader.__instance__, name)\n\n    def refresh(self, force=True):\n        print(f\"start refreshing all yamls...{arrow.now()}\")\n\n        self.feeds = Feeds(\"feed\", [\"SOURCE_SYSTEM\", \"FEED_NAME\"])\n        self.data_objects = Table(\n            \"data_object\", [\"DATA_OBJECT_NAME\", \"TGT_DB_NAME\"])\n        self.feed_attributes = FeedAttributes(\n            \"feed_attribute\", [\"FEED_ID\", \"ATTRIBUTE_NAME\"],\n            no_quote_fields=[\n                \"ATTRIBUTE_NO\", \"ATTRIBUTE_LENGTH\",\n                \"ATTRIBUTE_PRECISION\", \"NESTED_LEVEL\"\n            ])\n        self.data_object_attributes = DataObjectAttributes(\n            \"data_object_attribute\", [\"DATA_OBJECT_ID\", \"ATTRIBUTE_NAME\"],\n            [\"ATTRIBUTE_NO\"])\n        self.feed_attr_data_object_attr = FeedAttrDataObjectAttrs(\n            \"feed_attr_data_object_attr\",\n            [\"FEED_ATTRIBUTE_ID\", \"DATA_OBJECT_ATTRIBUTE_ID\"],\n            double_quote_fields=[\"TRANSFORM_FN\"])\n        self.feed_data_objects = Table(\n            \"feed_data_object\", [\"FEED_ID\", \"DATA_OBJECT_ID\"])\n        self.dags = Table(\"dag\", [\"DAG_NAME\"])\n        self.loads = Table(\"load\", [\"LOAD_NAME\"])\n        self.data_object_data_objects = Table(\n            \"data_object_data_object\", [\"LOAD_ID\"])\n        print(f\"finished refreshing all yamls...{arrow.now()}\")\n\n    @staticmethod\n    def get_instance():\n        if not Reader.__instance__:\n            print(\"initialising instance\")\n            Reader()\n        return Reader.__instance__\n","repo_name":"binbenban/rds_ui","sub_path":"src/odapui/yaml_reader.py","file_name":"yaml_reader.py","file_ext":"py","file_size_in_byte":8978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"74795287305","text":"import collections\nfrom typing import List\n\n\nclass Solution:\n    def sumPrefixScores(self, words: List[str]) -> List[int]:\n        prefix_dict = collections.Counter()\n        \n        for word in words:\n            for i in range(1, len(word)+1):\n                prefix_dict[word[:i]] += 1\n        \n        ans = []\n        for word in words:\n            _sum = 0\n            for i in range(1, len(word)+1):\n                _sum += prefix_dict[word[:i]]\n            ans.append(_sum)\n        return ans\n","repo_name":"novayo/LeetCode","sub_path":"2416_Sum_of_Prefix_Scores_of_Strings/try_1.py","file_name":"try_1.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"81"}
{"seq_id":"11212051216","text":"from datetime import datetime\nimport os\n\nfrom crowsetta import Transcriber\nimport numpy as np\n\nfrom . import audio, spect\nfrom .. import annotation\nfrom ..annotation import source_annot_map\nfrom ..logging import log_or_print\n\n\ndef from_files(data_dir,\n               annot_format=None,\n               labelset=None,\n               output_dir=None,\n               annot_file=None,\n               audio_format=None,\n               spect_format=None,\n               spect_params=None,\n               spect_output_dir=None,\n               logger=None):\n    \"\"\"prepare a dataset of vocalizations from a directory of audio or spectrogram files containing vocalizations,\n    and (optionally) annotation for those files. The dataset is returned as a pandas DataFrame.\n\n    Datasets are used to train neural networks, or for predicting annotations for the dataset itself using a\n    trained neural network.\n\n    Parameters\n    ----------\n    data_dir : str\n        path to directory with audio or spectrogram files from which to make dataset\n    annot_format : str\n        format of annotations. 
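For example, annot_format='notmat' is one such format name (an illustrative value; consult the crowsetta documentation for the actual supported set). 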
Any format that can be used with the\n        crowsetta library is valid. Default is None.\n    labelset : set, list\n        of str or int, set of labels for vocalizations. Default is None.\n        If not None, then files will be skipped where the 'labels' array in the\n        corresponding annotation contains labels that are not found in labelset\n    output_dir : str\n        path to location where data sets should be saved. Default is None,\n        in which case data sets are saved in data_dir.\n    audio_format : str\n        format of audio files. One of {'wav', 'cbin'}.\n    spect_format : str\n        format of array files containing spectrograms as 2-d matrices.\n        One of {'mat', 'npz'}.\n    annot_file : str\n        Path to a single annotation file. Default is None.\n        Used when a single file contains annotations for multiple audio files.\n    spect_params : dict, vak.config.spect.SpectParamsConfig.\n        Parameters for creating spectrograms.\n        Default is None (implying that spectrograms are already made).\n    spect_output_dir : str\n        path to location where spectrogram files should be saved. Default is None,\n        in which case it defaults to 'spectrograms_generated_{time stamp}'.\n\n    Other Parameters\n    ----------------\n    logger : logging.Logger\n        instance created by vak.logging.get_logger. Default is None.\n\n    Returns\n    -------\n    vak_df : pandas.DataFrame\n        the dataset prepared from the directory specified\n\n    Notes\n    -----\n    If dataset is created from audio files, then .spect.npz files will be\n    generated from the audio files and saved in output_dir.\n    \"\"\"\n    # ---- pre-conditions ----------------------------------------------------------------------------------------------\n    if labelset is not None:\n        if type(labelset) not in (set, list):\n            raise TypeError(\n                f\"type of labelset must be set or list, but type was: {type(labelset)}\"\n            )\n\n        if type(labelset) == list:\n            labelset_set = set(labelset)\n            if len(labelset) != len(labelset_set):\n                raise ValueError(\n                    'labelset contains repeated elements, should be a set (i.e. all members unique).\n'\n                    f'Labelset was: {labelset}'\n                )\n            else:\n                labelset = labelset_set\n\n    if output_dir:\n        if not os.path.isdir(output_dir):\n            raise NotADirectoryError(\n                f'output_dir not found: {output_dir}'\n            )\n    elif output_dir is None:\n        output_dir = data_dir\n\n    if audio_format is None and spect_format is None:\n        raise ValueError(\"Must specify either audio_format or spect_format\")\n\n    if audio_format and spect_format:\n        raise ValueError(\"Cannot specify both audio_format and spect_format, \"\n                         \"unclear whether to create spectrograms from audio files or \"\n                         \"use already-generated spectrograms from array files\")\n\n    if spect_output_dir:\n        if not os.path.isdir(spect_output_dir):\n            raise NotADirectoryError(\n                f'spect_output_dir not found: {spect_output_dir}'\n            )\n\n    if annot_format is not None:\n        if annot_file is None:\n            annot_files = annotation.files_from_dir(annot_dir=data_dir,\n                                                    annot_format=annot_format)\n            scribe = Transcriber(annot_format=annot_format)\n            annot_list = scribe.from_file(annot_file=annot_files)\n        else:\n            scribe = Transcriber(annot_format=annot_format)\n            annot_list = scribe.from_file(annot_file=annot_file)\n    else:  # if annot_format not specified\n        annot_list = None\n\n    # ------ if making dataset from audio files, need to make into array files first! 
----------------------------------\n    if audio_format:\n        log_or_print(f'making array files containing spectrograms from audio files in: {data_dir}',\n                     logger=logger, level='info')\n        audio_files = audio.files_from_dir(data_dir, audio_format)\n        if annot_list:\n            audio_annot_map = source_annot_map(audio_files, annot_list)\n            if labelset:  # then remove annotations with labels not in labelset\n                # do this here instead of inside function call so that items get removed\n                # from annot_list here and won't cause an error because they're still\n                # in this list when we call spect.from_files\n                for audio_file, annot in list(audio_annot_map.items()):\n                    # loop in a verbose way (i.e. not a comprehension)\n                    # so we can give user warning when we skip files\n                    annot_labelset = set(annot.seq.labels)\n                    # below, compare against labelset (normalized to a set above)\n                    if not annot_labelset.issubset(set(labelset)):\n                        # because there's some label in labels that's not in labelset\n                        audio_annot_map.pop(audio_file)\n                        log_or_print(\n                            f'found labels in {annot.annot_file} for {audio_file} not in labelset, '\n                            f'skipping audio file: {audio_file}',\n                            logger=logger, level='info')\n                audio_files = []\n                annot_list = []\n                for k, v in audio_annot_map.items():\n                    audio_files.append(k)\n                    annot_list.append(v)\n\n        timenow = datetime.now().strftime('%y%m%d_%H%M%S')\n        if spect_output_dir is None:\n            spect_output_dir = os.path.join(output_dir,\n                                            f'spectrograms_generated_{timenow}')\n            os.makedirs(spect_output_dir)\n        spect_files = audio.to_spect(audio_format=audio_format,\n                                     spect_params=spect_params,\n                                     output_dir=spect_output_dir,\n                                     audio_files=audio_files,\n                                     annot_list=annot_list,\n                                     labelset=labelset)\n        spect_format = 'npz'\n    else:  # if audio format is None\n        spect_files = None\n\n    from_files_kwargs = {\n        'spect_format': spect_format,\n        'labelset': labelset,\n        'annot_list': annot_list,\n        'annot_format': annot_format,\n    }\n\n    if spect_files:\n        from_files_kwargs['spect_files'] = spect_files\n        log_or_print(f'creating dataset from spectrogram files in: {output_dir}', logger=logger, level='info')\n    else:\n        from_files_kwargs['spect_dir'] = data_dir\n        log_or_print(f'creating dataset from spectrogram files in: {data_dir}', logger=logger, level='info')\n\n    vak_df = spect.to_dataframe(**from_files_kwargs, logger=logger)\n    return vak_df\n\n\ndef add_split_col(df, split):\n    \"\"\"add a 'split' column to a pandas DataFrame.\n    Useful for assigning an entire dataset to the same \"split\",\n    e.g. 
'train' or 'predict'.\n    All rows in the 'split' column will have the value specified.\n\n    Parameters\n    ----------\n    df : pandas.DataFrame\n        that represents a dataset of vocalizations\n    split : str\n        string that will be assigned to every row in the added \"split\" column.\n        One of {'train', 'val', 'test', 'predict'}.\n    \"\"\"\n    if split not in {'train', 'val', 'test', 'predict'}:\n        raise ValueError(\n            f\"value for split should be one of {{'train', 'val', 'test', 'predict'}}, but was {split}\"\n        )\n    split_col = np.asarray([split for _ in range(len(df))], dtype='object')\n    df['split'] = split_col\n    return df\n\n\ndef validate_and_get_timebin_dur(df, expected_timebin_dur=None):\n    \"\"\"validate timebin duration for a dataset represented by a pandas DataFrame\n\n    checks that there is a single, unique value for the time bin duration of all\n    spectrograms in the dataset, and if so, returns it\n\n    Parameters\n    ----------\n    df : pandas.Dataframe\n        created by dataframe.from_files or spect.to_dataframe\n    expected_timebin_dur : float\n\n    Returns\n    -------\n    timebin_dur : float\n        duration of time bins for all spectrograms in the dataset\n    \"\"\"\n    timebin_dur = df['timebin_dur'].unique()\n    if len(timebin_dur) > 1:\n        raise ValueError(\n            f'found more than one time bin duration in dataset: {timebin_dur}'\n        )\n    elif len(timebin_dur) == 1:\n        timebin_dur = timebin_dur.item()\n\n    if expected_timebin_dur:\n        if timebin_dur != expected_timebin_dur:\n            raise ValueError(\n                'timebin duration from dataset, {}, did not match expected timebin duration'.format(timebin_dur)\n            )\n\n    return timebin_dur\n\n\ndef split_dur(df, split):\n    \"\"\"get duration of a split in the dataset\"\"\"\n    return df[df['split'] == split]['duration'].sum()\n","repo_name":"Tubbz-alt/vak","sub_path":"src/vak/io/dataframe.py","file_name":"dataframe.py","file_ext":"py","file_size_in_byte":10276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"}
{"seq_id":"18620187645","text":"import pytest\n\nfrom os import sep\n\nfrom cavitometer_deconvolve.utils import read\n\n\ndef test_exception_if_wrong_file_format():\n    with pytest.raises(Exception):\n        _, _, _ = read.read_signal(f\"tests{sep}Measurements{sep}Two_Probes.dat\")\n\n\n@pytest.mark.parametrize(\n    \"ext\",\n    [\n        \"csv\",\n        \"xlsx\",\n    ],\n)\nclass TestRead:\n    FILENAME = f\"tests{sep}Measurements{sep}Two_Probes\"\n    UNITS = [\"(ms)\", \"(mV)\", \"(mV)\"]\n\n    def test_assertion_error_if_time_not_found(self, ext):\n        with pytest.raises(AssertionError):\n            _, _, _ = read.read_signal(f\"data{sep}hardware{sep}Probe_2.{ext}\")\n\n    def test_units(self, ext):\n        _, units, _ = read.read_signal(f\"{self.FILENAME}.{ext}\")\n        assert units == self.UNITS\n\n    @pytest.mark.parametrize(\n        \"test_input, expected\",\n        [\n            (0, [0.00000000e00, -4.48541100e01, -5.18862200e-01]),\n            (1, [2.00000000e-04, -5.38395800e01, -4.51715300e-01]),\n            (2, [4.00000000e-04, -5.19838800e01, -6.95885700e-01]),\n            (-1, [2.00059988e00, 2.98376300e01, -1.10487100e00]),\n        ],\n    )\n    def test_read_signal(self, test_input, expected, ext):\n        _, _, signal = read.read_signal(f\"{self.FILENAME}.{ext}\")\n        assert signal.tolist()[test_input] == expected\n","repo_name":"blebon/cavitometer-deconvolve","sub_path":"tests/utils/test_read.py","file_name":"test_read.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"21643440987","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, 
models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('klienti', '0003_auto_20170315_1405'),\n        ('grafiks', '0006_remove_planotajs_sakums'),\n        ('pieraksts', '0004_auto_20170316_1715'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Atteikumi',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('ateikuma_laiks', models.DateTimeField(default=django.utils.timezone.now)),\n                ('klients', models.ForeignKey(to='klienti.Klienti')),\n                ('nodarbiba', models.ForeignKey(to='grafiks.Grafiks')),\n            ],\n            options={\n                'db_table': 'atteikumi',\n                'verbose_name': 'Atteikumi',\n            },\n        ),\n        migrations.AlterModelOptions(\n            name='pieraksti',\n            options={'verbose_name': 'Pieraksti'},\n        ),\n    ]\n","repo_name":"svabis/vf","sub_path":"pieraksts/migrations/0005_auto_20170316_2013.py","file_name":"0005_auto_20170316_2013.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"4606654081","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('app_store', '0007_applicationlist_applicationlistentry'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='applicationlist',\n            name='image',\n            field=models.URLField(default=''),\n            preserve_default=False,\n        ),\n    ]\n","repo_name":"dynaware/appster","sub_path":"app_store/migrations/0008_applicationlist_image.py","file_name":"0008_applicationlist_image.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"41944045072","text":"def GetOmega(UEFC, opt_vars, AR, S):\r\n    \r\n    # YOU SHOULD NOT NEED TO CHANGE THIS FILE FOR THIS PROBLEM\r\n    \r\n    # Calculate the turn rate (in rad/s) from UEFC parameters and (opt_vars, \r\n    # AR, S)\r\n    \r\n    R = opt_vars[1]\r\n    V = UEFC.flight_velocity(opt_vars, AR, S)\r\n    \r\n    Omega = V / R  # Turn rate (rad/s)\r\n    \r\n    return Omega\r\n","repo_name":"wpeale/UEFC","sub_path":"GetOmega.py","file_name":"GetOmega.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"42016850008","text":"import sys\nimport numpy as np #for matrix computation\nfrom matplotlib import pyplot as plt\nfrom skimage import exposure\nfrom skimage.exposure import histogram\nfrom skimage import io #input output\nfrom skimage.filters import threshold_otsu\nimport matplotlib.cm as cm\nresult = sys.argv[1]\nimg=io.imread(\"images/\"+result)\n\nfrom skimage import color\nimg_Gray = color.rgb2gray(img)\nio.imsave('./images/new_img/'+result[:-4]+'_grey.jpg', img_Gray) \nfrom skimage.transform import resize\nres = resize(img_Gray, (100, 100))\nio.imsave('./images/new_img/'+result[:-4]+'_resized.jpg', res) \n\n\nr = np.histogram(res,256)[0]\ncumsum = np.cumsum(res)\nu=r\nv=[i for i in range(len(r))]\nx_cumsum=[i for i in range(len(cumsum))]\nplt.bar(v,r)\n#plt.show()\n#--------------------------\nfrom skimage import util\nimg_Gray_Inv = util.invert(img_Gray) # invert the gray levels\n#io.imshow(img_Gray_Inv)\n#plt.show()\nplt.imsave('./images/new_img/'+result[:-4]+'_Gray_Inv.jpg', img_Gray_Inv, cmap=cm.gray) \n#---------hsv\nimg_hsv = color.rgb2hsv(img) # convert to the HSV color space\n#plt.imshow(img_hsv, 
cmap='gray')\n#plt.show()\nplt.imsave('./images/new_img/'+result[:-4]+'_hsv.jpg',img_hsv , cmap=cm.gray) \n#---------channels-------------\ncanal_h = img_hsv[:, :, 0] # extract the hue (H) channel\ncanal_s = img_hsv[:, :, 1] # extract the saturation (S) channel\ncanal_v = img_hsv[:, :, 2] # extract the luminance (V) channel \nplt.imsave('./images/new_img/'+result[:-4]+'_canal_h.jpg',canal_h , cmap=cm.gray) \nplt.imsave('./images/new_img/'+result[:-4]+'_canal_s.jpg',canal_s , cmap=cm.gray) \nplt.imsave('./images/new_img/'+result[:-4]+'_canal_v.jpg',canal_v , cmap=cm.gray) \n\n#--------colors---------\nl=['red','green','blue']\n#for i,c in enumerate(l):\n   # plt.plot(exposure.histogram(img[:, :, i])[0],color=c)\n#plt.show()\n#-----colors rgb---------------\nred=exposure.histogram(img[:, :, 0])[0]\ngreen=exposure.histogram(img[:, :, 1])[0]\nblue=exposure.histogram(img[:, :, 2])[0]\nbande_rouge = img[:, :, 0] # extract the red band\nbande_vert = img[:, :, 1] # extract the green band\nbande_bleu = img[:, :, 2] # extract the blue band\nplt.imsave('./images/new_img/'+result[:-4]+'_rouge.jpg', bande_rouge, cmap=cm.Reds_r) \nplt.imsave('./images/new_img/'+result[:-4]+'_vert.jpg', bande_vert, cmap=cm.Greens_r) \nplt.imsave('./images/new_img/'+result[:-4]+'_bleu.jpg', bande_bleu, cmap=cm.Blues_r) \n\n#---------otsu threshold-------------\nth=threshold_otsu(img_Gray)\n#------------------\n#--- thresholding-------\nn,m=img_Gray.shape\nimage_binaire = np.zeros((n,m),dtype=np.uint8)\nfor i in range(n):\n    for j in range(m):\n        if img_Gray[i,j]>th:\n            image_binaire[i,j]=1\nv_seuil = np.histogram(image_binaire,256)[0]\nplt.imsave('./images/new_img/'+result[:-4]+'_binaire.jpg', image_binaire, cmap=cm.gray) \n\n#-----end thresholding----------\n#-------histogram stretching-------------\ng=255/(img.max()-img.min())\nd=img.min()*255/(img.max()-img.min())\ndef t(x,g,d):\n    return g*x+d\ndef Etirement(img):\n    n,m,k=img.shape\n    image_noveau = np.zeros((n,m),dtype=np.float64)\n    for i in range(k):\n        bande = img[:, :, i]\n        g=255/(bande.max()-bande.min())\n        d=(bande.min()*255)/(bande.max()-bande.min())\n        image_noveau+=t(bande,g,d)\n    return image_noveau\nimage_noveau=Etirement(img)\nH_etirée,bins=np.histogram(image_noveau,bins=256)\nplt.imsave('./images/new_img/'+result[:-4]+'_etiree.jpg', image_noveau, cmap=cm.gray) \n\n\n# end histogram stretching------------------\n#----------str---------------\ndef listToString(instr):\n    emptystr=\"\"\n    for ele in instr: \n        emptystr += str(ele)+','\n    return emptystr[:-1]\n#---------convert-----------\nu=listToString(r)\nv=listToString(v)\ncumsum=listToString(cumsum)\nx_cumsum=listToString(x_cumsum)\nred=listToString(red)\ngreen=listToString(green)\nblue=listToString(blue)\nv_seuil=listToString(v_seuil)\nH_etirée=listToString(H_etirée)\nth=str(th)\n#---------------\n#canny\nimport cv2\nimport numpy as np\nimg_canny = cv2.Canny(img,100,200)\n\n#sobel\nimg_gaussian = cv2.GaussianBlur(img_Gray,(3,3),0)\nimg_sobelx = cv2.Sobel(img_gaussian,cv2.CV_8U,1,0,ksize=5)\nimg_sobely = cv2.Sobel(img_gaussian,cv2.CV_8U,0,1,ksize=5)\nimg_sobel = img_sobelx + img_sobely\n\n#prewitt\nkernelx = np.array([[1,1,1],[0,0,0],[-1,-1,-1]])\nkernely = np.array([[-1,0,1],[-1,0,1],[-1,0,1]])\nimg_prewittx = cv2.filter2D(img_gaussian, -1, kernelx)\nimg_prewitty = cv2.filter2D(img_gaussian, -1, kernely)\n#cv2.imshow(\"Original Image\", img)\n#cv2.imshow(\"Canny\", img_canny)\nplt.imsave('./images/new_img/'+result[:-4]+'_canny.jpg', img_canny, cmap=cm.gray) 
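\n# editor's note (added): img_sobelx + img_sobely above (and the Prewitt sum\n# below) only approximates the gradient magnitude, and with cv2.CV_8U output\n# the addition can wrap around; cv2.addWeighted or a float depth avoids that.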
\n#cv2.imshow(\"Sobel X\", img_sobelx)\nplt.imsave('./images/new_img/'+result[:-4]+'_sobelx.jpg', img_sobelx, cmap=cm.gray) \n#cv2.imshow(\"Sobel Y\", img_sobely)\nplt.imsave('./images/new_img/'+result[:-4]+'_sobely.jpg', img_sobely, cmap=cm.gray) \n#cv2.imshow(\"Sobel\", img_sobel)\nplt.imsave('./images/new_img/'+result[:-4]+'_sobel.jpg', img_sobel, cmap=cm.gray) \n#cv2.imshow(\"Prewitt X\", img_prewittx)\nplt.imsave('./images/new_img/'+result[:-4]+'_prewittx.jpg', img_prewittx, cmap=cm.gray) \n#cv2.imshow(\"Prewitt Y\", img_prewitty)\nplt.imsave('./images/new_img/'+result[:-4]+'_prewitty.jpg', img_prewitty, cmap=cm.gray) \n#cv2.imshow(\"Prewitt\", img_prewittx + img_prewitty)\nplt.imsave('./images/new_img/'+result[:-4]+'_Prewitt.jpg', img_prewittx + img_prewitty, cmap=cm.gray) \n#cv2.waitKey(0)\n#cv2.destroyAllWindows()\n#---------------------------------\nred_min = 190\nred_max = 255\ngreen_min = 180\ngreen_max = 255\nblue_min = 160\nblue_max = 255\ni=img\ngray_low = np.array([blue_min,green_min,red_min])\ngray_hi = np.array([blue_max,green_max,red_max])\n\nmask = cv2.inRange(img,gray_low,gray_hi)\n\nimg[mask>0] = (212, 167, 140)\n\n#cv2.imshow('img',img)\n#cv2.waitKey(0)\n#cv2.destroyAllWindows()\n#----------SQL----------------\nimport mysql.connector\n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"root\",\n database=\"users\"\n)\nmycursor = mydb.cursor()\n\nsql = \"INSERT INTO uploadedimage VALUES ('\"+result+\"','\"+u+\"','\"+v+\"','\"+cumsum+\"','\"+x_cumsum+\"','\"+red+\"','\"+green+\"','\"+blue+\"','\"+th+\"','\"+v_seuil+\"','\"+H_etirée+\"')\"\nmycursor.execute(sql)\nmydb.commit()\n\n\nprint(mycursor.rowcount, \"record inserted.\") \n\n\n\n\n\n\n\n\n\n\n\n\n\nimage=i\n# convert to RGB\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n# reshape the image to a 2D array of pixels and 3 color values (RGB)\npixel_values = image.reshape((-1, 3))\n# convert to float\npixel_values = np.float32(pixel_values)\nprint(pixel_values.shape)\n# define stopping criteria\ncriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)\n# number of clusters (K)\nk = 3\n_, labels, (centers) = cv2.kmeans(pixel_values, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)\n# convert back to 8 bit values\ncenters = np.uint8(centers)\n\n# flatten the labels array\nlabels = labels.flatten()\n# convert all pixels to the color of the centroids\nsegmented_image = centers[labels.flatten()]\n# reshape back to the original image dimension\nsegmented_image = segmented_image.reshape(image.shape)\n# show the image\n#plt.imshow(segmented_image)\n# save the image\nplt.imsave('./images/new_img/'+result[:-4]+'_segmented.jpg', segmented_image) \n\n#plt.show()\n# disable only the cluster number 2 (turn the pixel into black)\nmasked_image = np.copy(image)\n# convert to the shape of a vector of pixel values\nmasked_image = masked_image.reshape((-1, 3))\n# color (i.e cluster) to disable\ncluster = 2\nmasked_image[labels == cluster] = [0, 0, 0]\n# convert back to original shape\nmasked_image = masked_image.reshape(image.shape)\n# show the image\n#plt.imshow(masked_image)\n# save the image\nplt.imsave('./images/new_img/'+result[:-4]+'_masked.jpg', masked_image) \n\n#plt.show()\n\nimg=i\nb,g,r = cv2.split(img)\nrgb_img = cv2.merge([r,g,b])\n\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n#plt.subplot(121),plt.imshow(rgb_img)\n#plt.title('Input Image'), plt.xticks([]), 
plt.yticks([])\n#plt.subplot(122),plt.imshow(thresh, 'gray')\n##plt.title(\"Otsu's binary threshold\"), plt.xticks([]), plt.yticks([])\n#plt.show()\nplt.imsave('./images/new_img/'+result[:-4]+'_rgb.jpg', rgb_img) \nplt.imsave('./images/new_img/'+result[:-4]+'_otus.jpg', thresh, cmap=cm.gray) \n\n","repo_name":"islembenmaalem/Online-Image-Preprocessing","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"2393655640","text":"import os\nimport cv2\nfrom base_camera import BaseCamera\nimport numpy as np\n\nvideo_source = 2\nstart_record_video = False\nrecord_video = False\nrecord_file_name = \"front\"\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter(\"./vids/\"+record_file_name+\".avi\", fourcc, 20.0, (640, 480))\n\nclass Camera(BaseCamera):\n    \n    current_frame = 0\n    \n    def __init__(self):\n        super(Camera, self).__init__()\n\n    def frames():\n        global video_source, record_video, record_file_name, start_record_video, fourcc, out\n        camera = cv2.VideoCapture(video_source)\n        if not camera.isOpened():\n            raise RuntimeError('Could not start camera.')\n\n        while True:\n            if start_record_video:\n                out = cv2.VideoWriter(\"./vids/\"+record_file_name+\".avi\", fourcc, 20.0, (640, 480))\n                start_record_video = False\n\n            # read current frame\n            _, img = camera.read()\n            Camera.current_frame = img\n            if record_video:\n                out.write(img)\n            elif not record_video:\n                out.release()\n\n            # encode as a jpeg image and return it\n            yield cv2.imencode('.jpg', img)[1].tobytes()\n\n","repo_name":"umbertochimenti/makersproject","sub_path":"SAM_rover/web_streaming/camera_opencv.py","file_name":"camera_opencv.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"81"}
{"seq_id":"33762388552","text":"from django.urls import path, include\nfrom curator.views import curator_view, StudentsCheckAdmin, LogoutView, StreamListView\n\napp_name = 'curator'\n\nurlpatterns = [\n    path('', curator_view, name='curator'),\n    path('courses/', include('courses.urls')),\n    path('profile/', include('profiles.urls', namespace='profile')),\n    path('schedule/', include('schedule.urls')),\n    path('curator_check/students/', StudentsCheckAdmin.as_view(), name='students_admin_check'),\n    path('streams/', StreamListView.as_view(), name='stream_list'),\n    path('logout/', LogoutView.as_view(), name='logout'),\n    # Other routes for users...\n]\n","repo_name":"workbench-kz/aviation","sub_path":"curator/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"81"}
{"seq_id":"5638696880","text":"import os\nimport time\nimport subprocess\nimport re\nimport cherrypy\nimport fcntl\nimport select\nimport logging\nimport signal\nfrom subprocess import TimeoutExpired\n\n\ndef debug(msg):\n    cherrypy.log.error(msg, context='transcoding', severity=logging.DEBUG)\n\n\ndef log(msg):\n    cherrypy.log(msg, context='transcoding')\n\n\nclass FFMPEGError(Exception):\n    pass\n\n\nclass FFMPEGTranscoderSubprocessTool(cherrypy.Tool):\n    \"\"\"\n    This tool makes sure the ffmpeg subprocess is ended\n    properly when a request is cancelled\n    \"\"\"\n    def __init__(self):\n        cherrypy.Tool.__init__(self, 'on_end_request',\n                               self.end, priority=20)\n\n    def end(self):\n        transcoding_transcoder = None\n\n        if (hasattr(cherrypy.request, 'transcoding_transcoder') and\n            
cherrypy.request.transcoding_transcoder is not None):\n\n transcoding_transcoder = cherrypy.request.transcoding_transcoder\n\n cherrypy.request.transcoding_transcoder = None\n\n transcoding_transcoder.stop()\n\n if (hasattr(cherrypy.request, 'transcoding_track') and\n cherrypy.request.transcoding_track is not None):\n\n transcoding_track = cherrypy.request.transcoding_track\n\n cherrypy.request.transcoding_track = None\n\n cherrypy.engine.publish('transcoding.end',\n track=transcoding_track,\n transcoder=transcoding_transcoder)\n\n debug('\"%s\" transcoding ended.' % transcoding_track)\n\n\nclass Transcoder:\n def transcode(self):\n raise NotImplementedError()\n\n @staticmethod\n def outputs():\n raise NotImplementedError()\n\n\nclass FFMPEGTranscoder(Transcoder):\n def __init__(self, track, skip_seconds):\n self.track = track\n self.skip_seconds = skip_seconds\n self.stderr = None\n self.success = False\n self.error = None\n self.stopped = False\n self.process = None\n\n if cherrypy.request.app is not None:\n ffmpeg_cmd = cherrypy.request.app.config.get('opmuse').get('transcoding.ffmpeg_cmd')\n else:\n ffmpeg_cmd = None\n\n if ffmpeg_cmd is not None:\n self.ffmpeg_cmd = ffmpeg_cmd\n else:\n self.ffmpeg_cmd = 'ffmpeg'\n\n def __enter__(self):\n cherrypy.engine.publish('transcoding.start', transcoder=self, track=self.track)\n\n # http server has priority of 25, so specify 20 so this can\n # run before and stop this one which would otherwise cause\n # a \"deadlock\" with the http servers stopping.\n #\n # that's why we do this here, because otherwise close wont be called on\n # this generator and a sort of deadlock would occur.\n #\n # there's still one remaining issue here though and that is that\n # while the server is shutting down it still accepts new requests so\n # when this one is killed the client (i.e. browser) will just start\n # another request which will require another call to \"stop\" (e.g. SIGTERM).\n # dont know what to do about that one :/\n cherrypy.engine.subscribe('stop', self.stop, priority=20)\n\n self.filename = self.track.paths[0].path\n self.pretty_filename = self.track.paths[0].pretty_path\n\n ext = os.path.splitext(os.path.basename(self.filename))[1].lower()[1:]\n\n artist = self.track.artist.name if self.track.artist is not None else ''\n album = self.track.album.name if self.track.album is not None else ''\n title = self.track.name\n track_number = self.track.number if self.track.number is not None else 0\n\n if self.skip_seconds is not None:\n skip_seconds_args = ['-ss', str(self.skip_seconds)]\n else:\n skip_seconds_args = []\n\n args = ([self.ffmpeg_cmd] +\n skip_seconds_args +\n self.ffmpeg_input_args +\n # reads input at native frame rate, e.g. very handy for streaming.\n ['-re'] +\n ['-i', self.filename] +\n # always produce stereo output\n ['-ac', '2'] +\n # strip any video streams\n ['-vn'] +\n self.ffmpeg_output_args + [\n '-metadata', 'artist=%s' % artist,\n '-metadata', 'album=%s' % album,\n '-metadata', 'title=%s' % title,\n '-metadata', 'tracknumber=%s' % track_number,\n '-'])\n\n for index, arg in enumerate(args):\n if not isinstance(arg, bytes):\n arg = arg.encode('utf8')\n\n args[index] = arg.replace(b'EXT', ext)\n\n cherrypy.request.transcoding_track = self.track\n cherrypy.request.transcoding_transcoder = self\n\n try:\n self.process = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, stdin=None)\n except Exception as e:\n self.error = 'Got \"%s\" when starting ffmpeg.' 
% str(e)\n return\n\n debug('transcoding with: %s' % b' '.join(args).decode('utf8', 'replace'))\n\n return self.transcode\n\n def __exit__(self, type, value, traceback):\n if self.process is not None:\n try:\n self.process.wait(10)\n except TimeoutExpired:\n self.stop()\n\n if self.process is not None and not self.stopped and self.process.returncode != 0:\n stderr_lines = self.stderr.decode('utf8', 'replace').split(\"\\n\")\n\n try:\n stderr_lines.remove(\"\")\n except ValueError:\n pass\n\n self.error = stderr_lines[-1]\n\n log('ffmpeg returned non-zero status \"%d\" and \"%s\".' % (self.process.returncode, self.error))\n elif self.error is not None:\n log('Got exception \"%s\".' % (self.error))\n else:\n self.success = True\n\n cherrypy.engine.publish('transcoding.done', track=self.track)\n\n debug('\"%s\" transcoding done.' % self.track)\n\n def stop(self):\n if self.stopped:\n return\n\n try:\n if self.process is not None:\n debug(\"Stopping ffmpeg %d\" % self.process.pid)\n\n self.process.send_signal(signal.SIGTERM)\n self.process.stdout.read()\n self.process.wait()\n except ProcessLookupError:\n pass\n finally:\n cherrypy.engine.unsubscribe('stop', self.stop)\n self.stopped = True\n\n @staticmethod\n def set_nonblocking(fileno):\n fcntl.fcntl(\n fileno, fcntl.F_SETFL, fcntl.fcntl(fileno, fcntl.F_GETFL) | os.O_NONBLOCK,\n )\n\n def read_process(self):\n FFMPEGTranscoder.set_nonblocking(self.process.stderr.fileno())\n\n poll = select.poll()\n\n poll.register(self.process.stdout, select.POLLIN | select.POLLHUP)\n poll.register(self.process.stderr, select.POLLIN | select.POLLHUP)\n\n pollc = 2\n\n events = poll.poll()\n\n initial_bitrate = self.initial_bitrate()\n\n bitrate = initial_bitrate\n seconds = 0\n\n while pollc > 0 and len(events) > 0:\n info = data = None\n\n for event in events:\n rfd, event = event\n\n if event & select.POLLIN:\n if rfd == self.process.stdout.fileno():\n data = self.process.stdout.read(bitrate)\n\n if rfd == self.process.stderr.fileno():\n readx = select.select([self.process.stderr.fileno()], [], [])[0]\n\n if readx:\n chunk = self.process.stderr.read()\n\n self.stderr = chunk\n\n if len(chunk) > 0:\n match = re.match(b'.*time=[ ]*(?P